blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c14501d3441074ccd1a6440e004dda16fa07704 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DiceDesign/examples/rss3d.Rd.R | 159b01f8c53755c23f930eb79d98a3728b5a502e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,400 | r | rss3d.Rd.R | library(DiceDesign)
### Name: rss3d
### Title: 3D graphical tool for defect detection of Space-Filling Designs.
### Aliases: rss3d
### Keywords: design
### ** Examples
# (Extracted package example: exercises DiceDesign::rss3d / rss2d on a
# 3-dimensional orthogonal array and, optionally, on Sobol sequences.)
# An orthogonal array in 3D
data(OA131)
# centering the design points of this 7-levels design
OA <- (OA131 + 0.5)/7
# 2D projections onto coordinate axis
pairs(OA, xlim=c(0,1), ylim=c(0,1))
# Now let us look at the 3D properties with the 3D RSS (requires the rgl package)
rss <- rss3d(OA, lower=c(0,0,0), upper=c(1,1,1))
# The worst direction detected is nearly proportional to (2,-1,2)
# (type "?OA131" for explanations about this linear orthogonal array)
print(rss$worst.dir)
# Now, scramble this design
# X <- (OA131 + matrix(runif(49*3, 49, 3)))/7
# NOTE(review): the commented line above likely meant matrix(runif(49*3), 49, 3)
# (runif's min/max arguments look transposed) -- confirm before reusing.
# or load the design obtained this way
data(OA131_scrambled)
OA2 <- OA131_scrambled
# no feature is detected by the 2D RSS:
rss <- rss2d(OA2, lower=c(0,0,0), upper=c(1,1,1))
# 4 clusters are detected by the 3D RSS:
rss <- rss3d(OA2, lower=c(0,0,0), upper=c(1,1,1))
# Defect detection of 8D Sobol sequences
# All triplets of dimensions are tried to detect the worst defect
# (according to the specified goodness-of-fit statistic).
# requires randtoolbox library to generate the Sobol sequence
## Not run:
##D library(randtoolbox)
##D d <- 8
##D n <- 10*d
##D rss <- rss3d(design=sobol(n=n, dim=d), lower=rep(0,d), upper=rep(1,d))
## End(Not run)
|
b4745d7befda99af7cbcfa13abab8d62e496a1ef | ce53531f6eaf4c087122289774872b425d772d06 | /man/mixomics_splsda_optimize.Rd | 64a2eadbc00e7bc784945cec8b6ae261be1f5986 | [
"MIT"
] | permissive | antonvsdata/amp | bf449a87c162b2093c88cf8b49333a92ee631076 | b2999f2741c260fd7752ab8818e1b6d17b522971 | refs/heads/master | 2020-05-01T14:44:04.961091 | 2020-01-07T11:41:12 | 2020-01-07T11:41:12 | 177,528,217 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,219 | rd | mixomics_splsda_optimize.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariate.R
\name{mixomics_splsda_optimize}
\alias{mixomics_splsda_optimize}
\title{sPLS-DA}
\usage{
mixomics_splsda_optimize(object, y, ncomp_max, dist, n_features = c(1:10,
seq(20, 300, 10)), ...)
}
\arguments{
\item{object}{a MetaboSet object}
\item{y}{character, column name of the grouping variable to predict}
\item{ncomp_max}{numeric, the maximum number of components to try}
\item{dist}{the distance metric to use, one of "max.dist", "mahalanobis.dist", "centroids.dist"}
\item{n_features}{the number of features to try for each component}
\item{...}{any parameters passed to \code{mixOmics::plsda}}
}
\value{
an object of class "splsda"
}
\description{
A wrapper for fitting an sPLS-DA model using splsda function of the mixOmics package.
Automatically evaluates performance with different number of components and
different number of features per component, then chooses the optimal number of components and
optimal number of features for each component.
}
\examples{
set.seed(38)
plsda_res <- mixomics_splsda_optimize(merged_sample, dist = "max.dist", y = "Group", ncomp_max = 2)
}
|
4a7dade954b8b38af447e099ef218634d7b2c2f6 | ea3435d66f8cbebb4a46981386f5afe3b67e4d00 | /R/guess-datetime-format.R | c5235b1412b08599d9d96c59e3b2636168068a20 | [] | no_license | ramnathv/intellidate | 0ae1eb4c4514513a9af91b6dc7eec56bdbe00641 | 497df967e67d7f691721b162fe169503abc21519 | refs/heads/master | 2020-06-02T05:13:47.819698 | 2011-12-15T15:04:42 | 2011-12-15T15:04:42 | 2,976,112 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 688 | r | guess-datetime-format.R | #' Guess strptime format of a date time string
#' Guess strptime format of a date time string
#'
#' @param string date time string whose format is to be guessed
#' @param default preferred ordering of the date components (e.g. 'mdy',
#'   'dmy') used to break ties when the date part is ambiguous; passed on to
#'   \code{guess_date_format}. Defaults to 'mdy'.
#' @return a string consisting of strptime tokens
#' @export
guess_datetime_format <- function(string, default = 'mdy'){
  # SPLIT: at the time string, yielding (date-before, time, date-after)
  strings <- split_datetime_string(string)
  # APPLY: guess format of each substring; [1] keeps the best-ranked guess
  date_str1 <- guess_date_format(strings[[1]], default)[1]
  time_str <- guess_time_format(strings[[2]])
  date_str2 <- guess_date_format(strings[[3]], default)[1]
  # COMBINE: drop the parts that were absent (NA) and reassemble the
  # remaining tokens in the original order
  date_tm <- Filter(Negate(is.na), c(date_str1, time_str, date_str2))
  paste(date_tm, collapse = "")
}
|
225d66774dacc2bd91ac3ad4ceca36d74322ba99 | 9b2d67e2ce7bc6a2bd267ea3f01bc9960b156ec8 | /EXERCICE.R | fb09140e40798b7f27f3668ad7c3af12eddf59ce | [] | no_license | Reda066/DataVisualisation | 52ab1e70b88b84b39c82fa0f5670d03d30bafb95 | ca8fb99d4fa3b03d2384994021d090fa8c602108 | refs/heads/main | 2023-01-07T08:50:09.927238 | 2020-11-10T09:32:03 | 2020-11-10T09:32:03 | 311,599,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,384 | r | EXERCICE.R | install.packages("readr")
library(readr)
install.packages("tibble")
library(tibble)
install.packages("dplyr")
library(dplyr)
install.packages("tidyr")
library(tidyr)
install.packages("stringr")
library(stringr)
install.packages("ggplot2")
library(ggplot2)
install.packages("scales")
library(scales)
# Load the second-round results; one row per department, candidates in columns.
round_2 <- read_csv('data/results_pres_elections_dept_2017_round_2.csv')
class(round_2)
round_2$region_name # use the $ symbol to access variables
#We can select variables based on their names
round_2 %>% select(region_name, 'LE PEN', MACRON)
#We can select variables based on their positions
round_2 %>% select(1:5)
#We can select variables by excluding names or positions
round_2 %>% select(-c(3:7), -region_code)
#Select every variable whose name contains "vote"
round_2 %>% select(contains("vote"))
#Filtering by one criterion
round_2 %>% filter(region_name == "Provence-Alpes-Côte d'Azur")
#Filtering by multiple criteria within a single logical expression
round_2 %>% filter(registered_voters > 100000 & present_voters > 100000)
#Ascending sort (French: "tri croissant")
round_2 %>% arrange(registered_voters)
#Creating a new variable that gives the voting rate per department
round_2 %>%
  mutate(voting_rate = present_voters/registered_voters) %>%
  select(c(1:4), voting_rate, everything())
#Creating a new variable that gives the rank of department according to the
#number of votes for Emmanuel Macron
round_2 %>%
  mutate(rank = min_rank(desc(MACRON))) %>%
  select(dept_name, MACRON, rank) %>%
  arrange(rank)
#Summarise
#Recovering the total number of votes over the country
round_2 %>%
  summarise(total_votes = sum(votes_cast))
#Total number of votes per region
round_2 %>% group_by(region_name) %>%
  summarise(total_votes = sum(votes_cast))
##############################################################################
# Region centroids used to enrich the election results with coordinates.
geo_data <- read_csv("data/coordinates_regions_2016.csv")
#The left_join() function joins tibbles x and y by returning all rows from x, and all columns from x and y
round_2 %>% left_join(geo_data, by=c("region_code"="insee_reg")) %>%
  select(region_code, region_name, latitude, longitude, everything())
#Using dplyr::bind_rows() function, we combine two tibbles to obtain a single tibble
#with results from both rounds of the presidential election.
round_1 <- read_csv('data/results_pres_elections_dept_2017_round_1.csv')
results <- round_1 %>% mutate(round = "Round 1") %>%
  bind_rows(round_2 %>% mutate(round = "Round 2"))
# Wide -> long: one row per department x candidate (result printed, not saved)
round_2 %>% gather(candidate, votes, c(`LE PEN`, MACRON)) %>%
  arrange(region_name, dept_name) %>%
  select(region_name, candidate, votes, everything())
# Example 1. Calculating the number of votes per candidate and department
#Using the input (wide) data format
round_2 %>% group_by(region_name) %>%
  summarise(votesLePen = sum(`LE PEN`),
            votesMacron = sum(MACRON),
            .groups='drop')
#Using the data format after applying tidyr::gather()
# NOTE(review): this assumes round_2 is in the gathered (long) format with
# candidate/votes columns; the gather() result above was never assigned.
round_2 %>% group_by(region_name, candidate) %>%
  summarise(votes = sum(votes),
            .groups='drop')
# Example 2. Identifying the winner candidate per department
# (also expects the long format -- see the note above)
round_2 %>%
  group_by(dept_name) %>%
  mutate(rank = min_rank(desc(votes))) %>%
  arrange(dept_name, rank) %>%
  mutate(winner = if_else(rank == 1, TRUE, FALSE)) %>%
  select(dept_name, candidate, votes, rank, winner)
# Long -> wide: back to one column per candidate
round_2 %>% spread(candidate, votes) %>%
  select(region_name, `LE PEN`, MACRON, everything())
#############Abstract Data Visualization###############
# Total votes per region and candidate, used as the plotting data set.
plot_df <- round_2 %>% group_by(region_code, region_name, candidate) %>%
  summarise(votes = sum(votes))
# FIX: `plot` was never initialised before layers were added, so the first
# `plot <- plot + geom_col(...)` tried to add a ggplot layer to the base-R
# plot() *function* and errored.  Start from an empty ggplot of plot_df
# (exactly as the single-expression version of this chart does) and then
# build the chart incrementally, layer by layer.
plot <- ggplot(plot_df)
plot <- plot +
  geom_col(aes(x = region_name, y = votes, fill = candidate),
           position = 'dodge')
# Show vote counts in millions on the y axis.
plot <- plot + scale_y_continuous(labels = number_format(scale = 1/1000000, suffix = 'M'))
# Preview with a ColorBrewer palette (printed only, not stored) ...
plot + scale_fill_brewer(palette = 'Set1')
# ... then fix the fill colours manually.
plot <- plot + scale_fill_manual(values = c('#003171', '#ffea00'))
plot <- plot + theme_bw() + theme(axis.text.x = element_text(angle = 60, hjust = 1))
plot <- plot + labs(title = "Presidential elections of 2017",
                    subtitle = "Votes per region and candidate",
                    caption = "Data source: https://www.data.gouv.fr/en/posts/les-donnees-des-elections/",
                    y = "Number of votes", x = "Region") +
  guides(fill = guide_legend(title = 'Candidate'))
# Summarized chunk code of the bar chart (same plot, built in one expression)
ggplot(plot_df) +
  geom_col(aes(x = region_name, y = votes, fill = candidate), # geometric object
           position = 'dodge') +
  scale_y_continuous(labels = number_format(scale = 1/1000000, # y axis format
                                            suffix = 'M')) +
  scale_fill_manual(values = c('#003171', '#ffea00')) + # fill colors
  theme_bw() + # theme
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        legend.position = 'bottom') +
  labs(title = "Presidential elections of 2017", # title and labels
       subtitle = "Votes per region and candidate",
       caption = "Data source: https://www.data.gouv.fr/
       en/posts/les-donnees-des-elections/",
       y = "Number of votes", x = "Region") +
  guides(fill = guide_legend(title = 'Candidate')) # legend
#Combining geometric objects: overlay per-region absent/blank/null counts
missing_votes <- round_2 %>%
  distinct(region_code, dept_code, .keep_all = TRUE) %>% # keep only one observation per department
  group_by(region_code, region_name) %>%
  summarise(blank_ballot = sum(blank_ballot), null_ballot = sum(null_ballot),
            absent_voters = sum(absent_voters)) %>%
  gather(category, value, c(3:5)) # long format: one row per region x category
ggplot(plot_df, aes(x = region_name)) + # common aesthetics
  geom_col(aes(y = votes, fill = candidate), position = 'dodge') +
  # geom_line object for a second variable
  geom_line(data = missing_votes, # new data
            aes(y = value,
                linetype = category,
                group = category)) + # aesthetics
  scale_y_continuous(labels = number_format(scale = 1/1000000,
                                            suffix = 'M')) +
  scale_fill_manual(values = c('#003171', '#ffea00')) +
  theme_bw() + theme(axis.text.x = element_text(angle = 60, hjust = 1),
                     legend.position = 'right') +
  labs(title = "Presidential elections of 2017",
       y = "Number of votes", x = "Region") +
  guides(fill = guide_legend(title = 'Candidate'),
         linetype = guide_legend(title = '')) + # title of linetype legend
  scale_linetype_discrete(labels = c("Absent Voters", "Blank Ballot",
                                     "Null Ballot")) # labels for each linetype
#Decomposition components: facets (one panel per election round)
# NOTE(review): expects `results` in long format with candidate/votes columns.
ggplot(results, aes(x = region_name)) +
  geom_col(aes(y = votes, fill = candidate),
           position = 'fill') + # to generate stacked bars
  scale_y_continuous(labels = percent_format()) + # y axis format as percent
  scale_fill_brewer(palette = 'Paired') +
  theme_bw() + theme(legend.position = 'bottom') +
  labs(title = "Results of presidential elections of 2017",
       y = "Proportion of votes", x = "Region") +
  guides(fill = guide_legend(title = 'Candidate'),
         linetype = guide_legend(title = '')) +
  scale_linetype_discrete(labels = c("Absent Voters", "Blank Ballot", "Null Ballot")) +
  # define cols as the number of different values for the variable "round"
  facet_grid(cols = vars(round)) +
  coord_flip() # flip coordinate system
#############GEOSPATIAL DATA###############
install.packages('sf')
library(sf)
# Region polygons (simple features) for France, 2019 boundaries
regions_sf <- st_read('data/shapefile/contours-geographiques-des-regions-2019.shp')
#Geospatial data manipulation: attach vote counts to each polygon
data_sf <- regions_sf %>%
  left_join(plot_df, by = c('insee_reg'='region_code'))
as_tibble(data_sf) # print sf objects in a nice format
#Static thematic maps with ggplot2 (one facet per candidate)
ggplot(data_sf) +
  geom_sf(aes(fill = votes)) +
  facet_grid(cols = vars(candidate)) +
  scale_fill_viridis_c(name = 'Number of Votes',
                       labels = number_format(scale = 1/1000000, suffix = 'M')) +
  guides(fill = guide_colourbar(title.position = 'top')) +
  theme_minimal() +
  theme(legend.position = "bottom", legend.key.width = unit(2, 'cm'))
install.packages('leaflet')
library(leaflet)
# Voting rate per region (distinct() keeps one row per department first,
# so departments are not double counted)
plot_df <- round_2 %>% distinct(region_code, dept_code, .keep_all = TRUE) %>%
  group_by(region_code, region_name) %>%
  summarise(present_voters = sum(present_voters), registered_voters = sum(registered_voters),
            voting_rate = present_voters/registered_voters, .groups = "drop")
plot_sf <- regions_sf %>% left_join(plot_df, by = c('insee_reg'='region_code'))
# Quintile bins for the choropleth colour scale
quants <- quantile(plot_sf$voting_rate, probs = seq(from = 0, to = 1, by = 0.2))
color_scale <- colorBin("YlOrRd", domain = plot_sf$voting_rate, bins = quants)
# Interactive choropleth of the voting rate
map_leaflet <- leaflet(data = plot_sf) %>%
  addProviderTiles(providers$OpenStreetMap) %>%
  addPolygons(fillColor = ~color_scale(voting_rate), fillOpacity = 0.7,
              color = "white", weight = .5, opacity = 1, dashArray = "3") %>%
  addLegend(pal = color_scale, values = ~voting_rate, opacity = 0.7,
            title = "Voting rate", position = "topright")
install.packages('htmlwidgets')
library(htmlwidgets)
saveWidget(map_leaflet, "leaflet_map.html")
82c1cfecc98adffc681bd7f099ed0a63bca3ed38 | a75fbd8055c551645a94d7500d60b7537effbd4f | /man/post_pred.Rd | 860daa1361922a36cae9c447256f144400e82a26 | [] | no_license | schmettow/bayr | ee8d99188d7910ea2487a462a09b83c4bcfc47d2 | 8190009a00490cd9429eb356635ae293d71e9145 | refs/heads/master | 2023-03-08T20:49:42.073479 | 2023-02-27T12:02:02 | 2023-02-27T12:02:02 | 53,658,287 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,394 | rd | post_pred.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markup_helpers.R, R/postpred_extraction.R
\name{print.tbl_post_pred}
\alias{print.tbl_post_pred}
\alias{print.tbl_predicted}
\alias{knit_print.tbl_post_pred}
\alias{knit_print.tbl_predicted}
\alias{post_pred}
\alias{mtx_post_pred}
\alias{mtx_post_pred.brmsfit}
\alias{mtx_post_pred.stanreg}
\title{posterior predictive extraction}
\usage{
\method{print}{tbl_post_pred}(x, ...)
\method{print}{tbl_predicted}(x, ...)
\method{knit_print}{tbl_post_pred}(x, ...)
\method{knit_print}{tbl_predicted}(x, ...)
post_pred(model, scale = "obs", model_name, thin = 1)
mtx_post_pred(model, ...)
\method{mtx_post_pred}{brmsfit}(model, model_name, newdata = NULL, thin = 1, ...)
\method{mtx_post_pred}{stanreg}(model, model_name, newdata = NULL, thin = 1, ...)
}
\arguments{
\item{model}{Bayesian model object}
\item{scale}{"response" or "lin_pred"}
\item{model_name}{provides a name for the model}
\item{newdata}{new data to predict from}
\item{thin}{thinning factor}
}
\value{
A tbl_post_pred object with MCMC draws.
Chains are stored in a
long table with the following columns:
chain iter Obs value type order
(fctr) (int) (int) (dbl) (chr) (int)
}
\description{
MCMC predicted values are extracted from a Bayesian (regression) object
and returned as a tbl_post_pred object
}
\author{
Martin Schmettow
}
|
0d0ea3ce14317e765128670db7542d41d9837da9 | c9b9a57b169d2bba38d774991cf86f7e82e94522 | /explore/Code/firms/Geocode_Florida.R | 8bdbfa91a2a999266ca1969446974d39d89f15a7 | [
"MIT"
] | permissive | Andrew-Kao/thesis | e110d448457a4240b429241db30feba58594eeae | c3fbd69a6775e40215c9ae9b5b27cb3532f57992 | refs/heads/master | 2023-03-15T23:11:36.994870 | 2023-03-13T17:35:44 | 2023-03-13T17:35:44 | 189,903,573 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,948 | r | Geocode_Florida.R | # This file is used to interface with the US Census Geocode for Florida Businesses
# Located here: https://geocoding.geo.census.gov/
# SETUP -------------------------------------------------------------------
library(dplyr)
library(data.table)
library(httr)
library(jsonlite)
library(purrr)
library(stringr)
# Work from the author's local data directory only (guarded by user name).
# NOTE(review): setwd() in scripts is fragile; prefer project-relative paths.
if (Sys.info()["user"] == "AndrewKao") {
  setwd('~/Documents/College/All/thesis/explore/Data/firms/florida')
}
options(stringsAsFactors = FALSE)

# Read the address list, add placeholder coordinate columns (0 = not yet
# geocoded), and percent-encode the free-text fields so they are safe
# inside a URL query string.
addresses <- read.csv("FLAddresses.csv") %>%
  mutate(lat = 0, long = 0) %>%
  mutate(PRINC_ADD_1 = sapply(PRINC_ADD_1, URLencode), PRINC_CITY = sapply(PRINC_CITY,URLencode))

url <- "https://geocoding.geo.census.gov/" # geographies for other info (tiger etc.)

### Step 1: Iterate through all the postcodes for a collection of line-ups
# http://developer.tmsapi.com/docs/data_v1_1/lineups/Lineups_by_postal_code

# Endpoint path for single-address geocoding (assigned with `<-`, not `=`).
call <- 'geocoder/locations/address?'
size <- nrow(addresses)
progress <- 1
# Geocode each address, one API request per row.
# FIX: the original condition was `progress < size`, which silently skipped
# the final address; `<=` processes every row.
while (progress <= size) {
  raw_output <- GET(url = url, path = paste0(call,'street=',addresses$PRINC_ADD_1[progress],'&city=',addresses$PRINC_CITY[progress],
                                             '&state=',addresses$PRINC_STATE[progress],'&zip=',addresses$PRINC_ZIP5[progress],
                                             '&benchmark=Public_AR_Current&format=json'))
  text_output <- rawToChar(raw_output$content)
  api_output <- fromJSON(text_output)
  # Record coordinates when the geocoder returned at least one match.
  # [1] keeps only the first candidate: the API can return several matches,
  # and assigning a multi-element vector to a single cell would fail.
  if (!is.null(api_output$result$addressMatches$coordinates$y)) {
    addresses$lat[progress] <- api_output$result$addressMatches$coordinates$y[1]
    addresses$long[progress] <- api_output$result$addressMatches$coordinates$x[1]
  }
  progress <- progress + 1
}
## save matches (rows that received non-zero coordinates)
match_address <- addresses %>%
  filter(lat != 0)
write.csv(match_address,"FloridaAddresses_gov.csv")
## extract uncompleted ones
unmatch_address <- addresses %>%
  filter(lat == 0)
# FIX: the original wrote `match_address` here, so the "fail" file was a
# duplicate of the successful matches instead of the unmatched rows.
write.csv(unmatch_address,"FloridaAddresses_fail.csv")
|
886a26196330cf164e56b877424d6c45703b1fec | cfa1cfb6b9a39102a0cc7df5abf974cec2d472cc | /helpers.R | c42356bca2f696c67da0cee01775ca11bfef2c2e | [] | no_license | MrMaksimize/ExData_Plotting1 | 78e3a17e0719ad46062d661f10d48093fb034c8b | edd6ec351daafca89aa1232f66f27f42012fe058 | refs/heads/master | 2021-01-22T01:44:06.048234 | 2015-06-06T00:10:53 | 2015-06-06T00:10:53 | 36,845,317 | 0 | 0 | null | 2015-06-04T03:18:04 | 2015-06-04T03:18:03 | null | UTF-8 | R | false | false | 1,097 | r | helpers.R |
# getData: download (if needed) the UCI household power consumption data set,
# extract the observations for 1-2 Feb 2007, and return them as a data frame
# with a combined POSIX date-time in the Date column.
getData <- function() {
  # Auto Install Packages that this function depends on.
  list.of.packages <- c("dplyr", "sqldf", "lubridate")
  new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
  if (length(new.packages) > 0) install.packages(new.packages)

  ## Bring in the libs.
  library(dplyr)
  library(sqldf)
  library(lubridate)

  ## Download and unpack the raw data set if it is not present yet.
  if (!file.exists('./household_power_consumption.txt')) {
    if (!file.exists('hcp.zip')) {
      fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
      # NOTE(review): method="curl" requires curl on the PATH; consider the
      # default download method for portability.
      download.file(fileUrl, destfile="./hcp.zip", method="curl")
    }
    unzip("./hcp.zip", exdir = ".", overwrite = TRUE)
  }

  # FIX: the original guarded this block with `if (!exists('pcons'))`.
  # `pcons` is only ever a local variable here, so the guard never cached
  # anything between calls; worse, an unrelated object named `pcons` on the
  # search path would have been returned untouched, skipping the date
  # parsing below.  Always build the data frame.
  # Filter to the two days of interest while reading, to keep memory low.
  pcons <- read.csv.sql(
    'household_power_consumption.txt',
    sql = "SELECT * FROM file WHERE Date = '1/2/2007' OR Date = '2/2/2007'",
    sep=";"
  )

  ## Data prep: fuse Date and Time into a single parsed date-time column.
  pcons <- pcons %>%
    mutate(Date = paste(Date, Time)) %>%
    mutate(Date = parse_date_time(Date, "%d/%m/%Y %H:%M:%S"))

  pcons
}
|
b8eeda5ed4db56d7877f983d16e3e9d0af1611a0 | 40fcdf019a47552d6d7df9e2fd5f15b3d71587ab | /R/plot_Box.R | 264a27082fb991f61f64455b5fba308db39fe0ee | [] | no_license | cran/grnnR | fa2f3f577abfb01fc25be10f263f64e4bc7c5bc2 | 6661951f61aef9067c73e97d799d06fb04b7eb92 | refs/heads/master | 2020-05-17T10:05:01.998635 | 2005-12-15T00:00:00 | 2005-12-15T00:00:00 | 17,718,845 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,549 | r | plot_Box.R | "plot_Box" <-
function (T, T_hat, NOVAL=NA) {
  # Box-and-whisker plot of the absolute prediction error |T - T_hat| for a
  # grnnR test set, annotated with the box statistics and notch limits.
  #   T:     observed target values
  #   T_hat: predicted values (same length as T)
  #   NOVAL: value substituted for missing errors; with the default NA the
  #          substitution is a no-op.

  # INITIALIZATION
  ERR <- abs(T - T_hat)
  ERR[is.na(ERR)] <- NOVAL

  # MAIN: compute the box statistics without drawing yet.
  box1 <- boxplot(ERR, plot=FALSE)
  l1 <- box1$n             # number of observations
  l2 <- box1$stats[1,1]    # lower whisker extreme
  l3 <- box1$stats[2,1]    # lower hinge
  l4 <- box1$stats[3,1]    # median
  l5 <- box1$stats[4,1]    # upper hinge
  l6 <- box1$stats[5,1]    # upper whisker extreme
  l7 <- box1$conf[1,1]     # lower notch
  l8 <- box1$conf[2,1]     # upper notch
  # FIX: max(box1$out) is -Inf (with a warning) when there are no outliers,
  # which broke the legend placement below; fall back to the upper whisker.
  l9 <- if (length(box1$out) > 0) max(box1$out) else l6

  box1stats1 <- prettyNum(c(l3, l4, l5, l6, max(T)), trim=TRUE,
    digits=3)
  box1text1 <- c("- lower hinge", "- median", "- upper hinge",
    "- upper wisker extreme")
  box1stats2 <- prettyNum(c(l1, l2, l7, l8), trim=TRUE, digits=3)
  box1text2 <- c("observations: ", "lwr wisk extrm: ", "lwr notch: ",
    "upr notch: ")

  # Draw the box plot, frame, annotated axis and in-plot labels.
  bxp(box1, notch=FALSE, boxwex=0.2, axes=FALSE)
  box()
  axis(2, at=box1stats1, labels=TRUE, tick=TRUE, cex.axis=0.6, las=2)
  mtext(paste("NOVAL parameter = ", NOVAL), side=3, adj=0.5, cex=0.6)
  text(x=c(1.1,1.1,1.1,1.1), y=c(l3,l4,l5,l6), labels=box1text1,
    cex=0.6, xpd=NA, pos=4)
  legMatrix <- matrix(t(c(box1text2, box1stats2)), nrow=4,
    ncol=2, byrow=FALSE)
  legend(1.1, y=l9, legend=legMatrix, pch=NULL, bty="n", ncol=2,
    xjust=1, y.intersp=0.9, cex=0.6)
  title("grnnR test-error box/whisker plot")
}
|
d10813f4d9d66ac52c98445e6f69ab700597b503 | 7b8e7e28104237e7e461db06cf637866e4edd773 | /man/transition_graph.Rd | 3724d1fd5c60103a3a6968c36566ecc74cc46c5d | [] | no_license | bayesball/PitchSequence | 3f3d7837bfe68b3721c7cda2f2fef54ce54bd794 | 2a4812795daf44ab87baeac364ea28cfcd770cca | refs/heads/main | 2023-02-17T01:31:52.769493 | 2021-01-18T17:41:12 | 2021-01-18T17:41:12 | 325,057,410 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 446 | rd | transition_graph.Rd | \name{transition_graph}
\alias{transition_graph}
\title{
Graph of Count Transitions
}
\description{
Constructs a graph of the count probability transition matrix
}
\usage{
transition_graph(S, title = "")
}
\arguments{
\item{S}{
Data frame giving the count transition probabilities
}
\item{title}{
Prefix string to add to the graph title
}
}
\value{
graph of the count transition probabilities
}
\author{
Jim Albert
}
|
6405ed433e3ae67dc285ff5784c42cbf76714c06 | c70c9dc1a49703e57498bb141084c5e430bb8e09 | /code/4_posterior_predictives.R | 1d03d5bb15fd45c6fb34a219e51ada01df3046a6 | [] | no_license | ballardtj/dynamic_modelling | 3dd8dac452805958f8de69acb86cc62b3a726025 | ae6f5c85b7faa5022dbb5e4a68a46fa1329a951f | refs/heads/master | 2021-10-10T23:37:58.573613 | 2019-01-19T05:28:07 | 2019-01-19T05:28:07 | 151,516,324 | 1 | 0 | null | 2018-11-01T00:04:15 | 2018-10-04T04:00:50 | R | UTF-8 | R | false | false | 4,029 | r | 4_posterior_predictives.R | #NOTE: This script is used to create the figure that displays the posterior predictives
#for the two mixture model and the sample level model (which is equivalent to a one-mixture model).
#clear workspace
rm(list=ls())
#load packages
library(tidyverse)
#load data (goal-pursuit observations; includes goal, condition, trial columns)
dat=read.csv("../raw_data/goal_data.csv")
#load fitted stan models (these .RData files provide `fit_sample` and
#`fit_two_mixture` -- TODO confirm the object names against the fitting scripts)
load(file="../output/1_sample_fit.RData")
load(file="../output/5_two_mixture_fit.RData")
#extract posterior draws from each fit object
parms_two_mixture =rstan::extract(fit_two_mixture)
parms_one_mixture =rstan::extract(fit_sample)
#Get mean mixture weight for each subject
#(assumes mix_weight is a draws x 60-subjects matrix -- TODO confirm)
mean_mix_weights = data.frame(subject=1:60,
                              mean_weight=apply(parms_two_mixture$mix_weight,2,mean))
#Join mean mix weights into data frame and label each subject by the mixture
#component with the larger posterior weight
dat2= left_join(dat,mean_mix_weights) %>%
  mutate(class = factor((mean_weight>0.5)*1,levels=c(1,0),labels=c('Mixture 1','Mixture 2')))
#Get 100 samples for posterior predictives
#NOTE(review): assumes 4000 retained draws and is not seeded, so the figure
#is not exactly reproducible across runs.
samples_used = sample(1:4000,size=100)
pp_list=list()
for(i in 1:100){
  #for each sampled draw: attach both models' predictions, then average
  #predicted and observed goals within each class x condition x trial cell
  pp_list[[i]]=dat2 %>%
    mutate(predicted_goal_2 = parms_two_mixture$sampled_goal[samples_used[i],],
           predicted_goal_1 = parms_one_mixture$sampled_goal[samples_used[i],]) %>%
    group_by(class,condition,trial) %>%
    summarise(predicted_goal_1 = mean(predicted_goal_1),
              predicted_goal_2 = mean(predicted_goal_2),
              observed_goal = mean(goal),
              observed_goal_hi = observed_goal + sd(goal)/sqrt(length(goal)),
              observed_goal_lo = observed_goal - sd(goal)/sqrt(length(goal)))
}
#Get mean of goal posterior predictive distribution for each model
pp_means = bind_rows(pp_list) %>%
  ungroup() %>%
  mutate(condition = factor(condition,levels=c('approach','avoidance'),labels=c('Approach','Avoidance'))) %>%
  group_by(class,condition,trial) %>%
  summarise(Predicted_1 = mean(predicted_goal_1),
            Predicted_2 = mean(predicted_goal_2),
            Observed = mean(observed_goal)) %>%
  gather(key=Source,value=goal_mean,Predicted_1:Observed)
#Get upper and lower bound of the 95% CI on the posterior predictive
#distribution for each model
pp_CIs = bind_rows(pp_list) %>%
  ungroup() %>%
  mutate(condition = factor(condition,levels=c('approach','avoidance'),labels=c('Approach','Avoidance'))) %>%
  group_by(class,condition,trial) %>%
  summarise(predicted_goal_hi_1 = quantile(predicted_goal_1,0.975),
            predicted_goal_lo_1 = quantile(predicted_goal_1,0.025),
            predicted_goal_hi_2 = quantile(predicted_goal_2,0.975),
            predicted_goal_lo_2 = quantile(predicted_goal_2,0.025))
#Get standard error of observed mean goal for each trial
pp_SEs = bind_rows(pp_list) %>%
  ungroup() %>%
  mutate(condition = factor(condition,levels=c('approach','avoidance'),labels=c('Approach','Avoidance'))) %>%
  group_by(class,condition,trial) %>%
  summarise(observed_goal_hi = mean(observed_goal_hi),
            observed_goal_lo = mean(observed_goal_lo))
# Replicates ggplot2's default discrete colour palette: n hues evenly spaced
# around the HCL colour wheel at chroma 100 and luminance 65.
gg_color_hue <- function(n) {
  # n + 1 points so the duplicate endpoint (375 deg == 15 deg) can be
  # dropped, keeping the n retained hues evenly spaced.
  # FIX: spell out `length.out` (the original's `length =` relied on partial
  # argument matching) and use `<-`/seq_len so n = 0 returns zero colours.
  hues <- seq(15, 375, length.out = n + 1)
  hcl(h = hues, l = 65, c = 100)[seq_len(n)]
}
#Generate plot: observed cell means (points + SE bars) against each model's
#posterior predictive mean (lines) and 95% interval (shaded ribbons),
#faceted by condition and mixture class
pp_fig = ggplot(data=pp_CIs,aes(x=factor(trial))) +
  facet_grid(condition~class) +
  geom_ribbon(aes(ymin=predicted_goal_lo_1,ymax=predicted_goal_hi_1),alpha=0.1,fill=gg_color_hue(3)[[2]]) +
  geom_ribbon(aes(ymin=predicted_goal_lo_2,ymax=predicted_goal_hi_2),alpha=0.1,fill=gg_color_hue(3)[[3]]) +
  geom_point(data=subset(pp_means,Source=="Observed"),aes(y=goal_mean,group=Source,colour=Source)) +
  geom_line(data=subset(pp_means,Source!="Observed"),aes(y=goal_mean,group=Source,colour=Source)) +
  geom_errorbar(data=pp_SEs,aes(ymin=observed_goal_lo,ymax=observed_goal_hi,group=1),width=0.2,col=gg_color_hue(3)[1]) +
  theme_minimal() +
  scale_color_manual(labels=c('Observed','One-mixture Model','Two-mixture Model'),values=gg_color_hue(3)) +
  labs(x="Trial",y="Goal Level")
#save figure
ggsave(file="../figures/posterior_predictive_panel.pdf",plot=pp_fig,height=5,width=7)
|
accb534d43dd61639eb14894510ebd81d9f0059e | 553907775f08a05b140c2d616bdfb695dc1f27d0 | /rcode/stats2.r | d4bafc173500651e358216a7305374ef3abcd0af | [] | no_license | OdessaR/android-runner | 3742acc15339943dba486df5fb1914fd716a57f8 | a2e739cade0a15f269b7d9c2c1878328dffeb02d | refs/heads/master | 2023-02-13T18:03:29.043922 | 2021-01-12T08:56:26 | 2021-01-12T08:56:26 | 291,691,667 | 0 | 0 | null | 2020-08-31T11:03:45 | 2020-08-31T11:03:45 | null | UTF-8 | R | false | false | 15,926 | r | stats2.r | library(magrittr) #to use %>% notation
library(tidyverse)
library(dplyr)
library(plyr)
library(ggplot2)
library(car)
library(gridExtra)
#get csv paths of test folder
#csv_paths_test <- list.files(path="./data/test",
# recursive=TRUE,
# pattern="^Joule.*\\.csv",
# full.names=TRUE)
# Paths to the aggregated Trepn profiler output for each treatment.
aggr_memoized_file <- 'data/memoized/Aggregated_Results_Trepn.csv'
aggr_nonmemoized_file <- 'data/nonmemoized/Aggregated_Results_Trepn.csv'
# "test" subjects, memoized treatment
m <- read_csv(aggr_memoized_file) %>%
  filter(grepl('test', subject)) %>%
  mutate(experiment="memoized",
         parameters="memoized-noparams",
         experiment_noparams="memoized-noparams"
  )
# "test" subjects, non-memoized treatment
n <- read_csv(aggr_nonmemoized_file) %>%
  filter(grepl('test', subject)) %>%
  mutate(experiment="nonmemoized",
         parameters="nonmemoized-noparams",
         experiment_noparams="nonmemoized-noparams"
  )
new_column_names <- c("device", "subject", "browser", "bp_delta_uw", "bp_raw_uw", "cpu_load", "memory_usage_kb", "experiment", "parameters","experiment_noparams" ) #bp = battery power
colnames(m) <- new_column_names
colnames(n) <- new_column_names
# Remaining (non-"test") subjects for both treatments
m1 <- read_csv(aggr_memoized_file) %>%
  filter(!grepl('test', subject)) %>%
  mutate(experiment="memoized",
         parameters="memoized-multipleparams",
         experiment_params="memoized-params")
n1 <- read_csv(aggr_nonmemoized_file) %>%
  filter(!grepl('test', subject)) %>%
  mutate(experiment="nonmemoized",
         parameters="nonmemoized-multipleparams",
         experiment_params="nonmemoized-params")
# Same column names as above except the last element (experiment_params here)
new_column_names <- c("device", "subject", "browser", "bp_delta_uw", "bp_raw_uw", "cpu_load", "memory_usage_kb", "experiment", "parameters","experiment_params" ) #bp = battery power
colnames(m1) <- new_column_names
colnames(n1) <- new_column_names
#Experimenting with data transformations: derive battery energy (Joule),
#memory in MB, and candidate log / sqrt / reciprocal transformations of each
#dependent variable, used later to probe normality.
#The pipeline is identical for both data sets, so it is factored into a
#helper instead of being duplicated verbatim as in the original.
add_transformations <- function(df) {
  df %>%
    mutate(memory_usage_mb = memory_usage_kb/1000,
           # uW -> W (1e6), then x 600 (presumably a 600 s measurement
           # window -- TODO confirm) to get Joule
           bp_delta_joule = (bp_delta_uw/1000000)*600,
           bp_delta_uw_log = log(bp_delta_uw),
           bp_delta_uw_sqrt = sqrt(bp_delta_uw),
           bp_delta_uw_reciprocal = 1/bp_delta_uw,
           cpu_load_log = log(cpu_load),
           cpu_load_sqrt = sqrt(cpu_load),
           cpu_load_reciprocal = 1/cpu_load,
           memory_usage_kb_log = log(memory_usage_kb),
           memory_usage_kb_sqrt = sqrt(memory_usage_kb),
           memory_usage_kb_reciprocal = 1/memory_usage_kb)
}

# NOTE(review): despite the suffixes, *_noparams holds the memoized data
# (m, m1) and *_params the non-memoized data (n, n1) -- names may mislead.
combined_data_noparams <- add_transformations(bind_rows(m,m1))
combined_data_params <- add_transformations(bind_rows(n,n1))

combined_data <- bind_rows(combined_data_noparams,combined_data_params)
#get data from csv files (kept for reference; superseded by the aggregated CSVs above)
#test_data <- csv_paths_test %>%
#  lapply(read_csv) %>%
#  bind_rows
#add new factor to data which are taken from the path (memoized an non_memoized)
#test_data['experiment'] <-csv_paths_test %>%
#  strsplit('/', fixed=TRUE) %>%
#  rapply(nth, n=4) %>%
#  factor()
#-Check assumptions
#--Assumption 1: Are the two samples independents? Yes
#--Assumption 2: Normal distribution in both groups?
# 2 x 2 panel layout so the three diagnostic plots share one device page.
par(mfrow=c(2,2))
# Visual + formal normality check for a numeric vector: draws a density
# plot, a normal Q-Q plot and a histogram on the current device, then
# returns the Shapiro-Wilk test result (p > 0.05 suggests normality).
check_normality <- function(data) {
  plot(density(data))
  qqnorm(data)
  hist(data)
  shapiro.test(data)
}
#Shapiro-Wilk normality test. The p-value should be greater than 0.05, then it is a normal distribution.
a <- combined_data %>%
  filter(experiment == 'memoized') %>%
  select(bp_delta_joule)
#check_normality()
#$############################# Battery power (Joule): density + Q-Q by treatment
mu <- ddply(combined_data, "experiment", summarise, grp.mean=mean(bp_delta_joule))
to_string <- as_labeller(c(`memoized` = "Memoized", `nonmemoized` = "Non-Memoized"))
p<-ggplot(combined_data, aes(x=bp_delta_joule))+ labs(title="Density Curves: Battery Power in Joule",x="Battery Power (Joule)", y = "Density") +
  geom_density(fill="gray")+facet_grid(experiment ~ ., labeller = to_string)
# Add mean lines
# NOTE(review): color="red" inside aes() maps a constant to the colour
# scale (the line gets the default palette colour, not red); to draw a red
# line it should be a geom_vline() argument outside aes().
plot1 <- p+geom_vline(data=mu, aes(xintercept=grp.mean, color="red"),
                      linetype="dashed")+theme(legend.position="none")
plot2 <- ggplot(combined_data, aes(sample=bp_delta_joule)) +labs(title="Q-Q Plots: Battery Power in Joule",x="Theoretical Quantiles", y = "Sample Quantiles") +
  stat_qq() +stat_qq_line(color="red") +facet_grid(experiment ~ ., labeller = to_string)
grid.arrange(plot1, plot2, ncol=2, nrow = 1)
#$############################# CPU load (%): same panels as above
mu <- ddply(combined_data, "experiment", summarise, grp.mean=mean(cpu_load))
to_string <- as_labeller(c(`memoized` = "Memoized", `nonmemoized` = "Non-Memoized"))
p<-ggplot(combined_data, aes(x=cpu_load))+ labs(title="Density Curves: CPU load in Percentage",x="CPU load (%)", y = "Density") +
  geom_density(fill="gray")+facet_grid(experiment ~ ., labeller = to_string)
# Add mean lines (same aes(color=...) caveat as noted above)
plot1 <- p+geom_vline(data=mu, aes(xintercept=grp.mean, color="red"),
                      linetype="dashed")+theme(legend.position="none")
plot2 <- ggplot(combined_data, aes(sample=cpu_load)) +labs(title="Q-Q Plots: CPU load in Percentage",x="Theoretical Quantiles", y = "Sample Quantiles") +
  stat_qq() +stat_qq_line(color="red") +facet_grid(experiment ~ ., labeller = to_string)
grid.arrange(plot1, plot2, ncol=2, nrow = 1)
#$#############################
mu <- ddply(combined_data, "experiment", summarise, grp.mean=mean(memory_usage_mb))
to_string <- as_labeller(c(`memoized` = "Memoized", `nonmemoized` = "Non-Memoized"))
p<-ggplot(combined_data, aes(x=memory_usage_mb))+ labs(title="Density Curves: Memory Usage in mb",x="Memory Usage (mb)", y = "Density") +
geom_density(fill="gray")+facet_grid(experiment ~ ., labeller = to_string)
# Add mean lines
plot1 <- p+geom_vline(data=mu, aes(xintercept=grp.mean, color="red"),
linetype="dashed")+theme(legend.position="none")
plot2 <- ggplot(combined_data, aes(sample=memory_usage_mb)) +labs(title="Q-Q Plots: Memory Usage in mb",x="Theoretical Quantiles", y = "Sample Quantiles") +
stat_qq() +stat_qq_line(color="red") +facet_grid(experiment ~ ., labeller = to_string)
grid.arrange(plot1, plot2, ncol=2, nrow = 1)
#Experimenting with data transformations
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(bp_delta_uw_log) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(bp_delta_uw_log) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(bp_delta_uw_sqrt) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(bp_delta_uw_sqrt) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(bp_delta_uw_reciprocal) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(bp_delta_uw_reciprocal) %>%
# unlist() %>%
# check_normality
#CPU LOAD
# Shapiro-Wilk + diagnostic plots for CPU load, one run per experiment group.
combined_data %>%
  filter(experiment == 'memoized') %>%
  select(cpu_load) %>%
  unlist() %>%
  check_normality
combined_data %>%
  filter(experiment == 'nonmemoized') %>%
  select(cpu_load) %>%
  unlist() %>%
  check_normality
#Experimenting with data transformations
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(cpu_load_log) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(cpu_load_log) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(cpu_load_sqrt) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(cpu_load_sqrt) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(cpu_load_reciprocal) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(cpu_load_reciprocal) %>%
# unlist() %>%
# check_normality
#MEMORY USAGE
# Shapiro-Wilk + diagnostic plots for memory usage, one run per experiment group.
combined_data %>%
  filter(experiment == 'memoized') %>%
  select(memory_usage_kb) %>%
  unlist() %>%
  check_normality
combined_data %>%
  filter(experiment == 'nonmemoized') %>%
  select(memory_usage_kb) %>%
  unlist() %>%
  check_normality
#Experimenting with data transformations
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(memory_usage_kb_log) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(memory_usage_kb_log) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(memory_usage_kb_sqrt) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(memory_usage_kb_sqrt) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'memoized') %>%
# select(memory_usage_kb_reciprocal) %>%
# unlist() %>%
# check_normality
#
# combined_data %>%
# filter(experiment == 'nonmemoized') %>%
# select(memory_usage_kb_reciprocal) %>%
# unlist() %>%
# check_normality
# mann whitney tests (Wilcoxon rank-sum): non-parametric two-sample
# comparison, used because the normality checks above did not support
# normally distributed samples
wilcox.test(n$bp_delta_uw, m$bp_delta_uw)
wilcox.test(n$memory_usage_kb, m$memory_usage_kb)
wilcox.test(n$cpu_load, m$cpu_load)
# different syntax same result -- formula interface on the combined data
# (the redundant "combined_data$" prefixes inside the formulas were removed)
wilcox.test(bp_delta_joule ~ experiment, data = combined_data, exact = FALSE)
wilcox.test(memory_usage_mb ~ experiment, data = combined_data, exact = FALSE)
wilcox.test(cpu_load ~ experiment, data = combined_data, exact = FALSE)
#--Assumption 3: Homogeneity in variances? (F test of equal variances)
res.ftest <- var.test(bp_delta_uw ~ experiment, data = combined_data)
res.ftest #p-value should be greater than 0.05
res.ftest <- var.test(cpu_load ~ experiment, data = combined_data)
res.ftest #p-value should be greater than 0.05
res.ftest <- var.test(memory_usage_kb ~ experiment, data = combined_data)
res.ftest #p-value should be greater than 0.05
#added check for difference in parameters
res.ftest <- var.test(bp_delta_uw ~ experiment_noparams, data = combined_data)
res.ftest #p-value should be greater than 0.05
res.ftest <- var.test(bp_delta_uw ~ experiment_params, data = combined_data)
res.ftest #p-value should be greater than 0.05
#-T-test is used to compare two means. var.equal is set to TRUE when the variance is equal.
res.ttest <- t.test(bp_delta_uw ~ experiment, data = combined_data, var.equal=TRUE)
res.ttest
res.ttest <- t.test(cpu_load ~ experiment, data = combined_data, var.equal=TRUE)
res.ttest
res.ttest <- t.test(memory_usage_kb ~ experiment, data = combined_data, var.equal=TRUE)
res.ttest
#added check for difference in parameters
res.ttest <- t.test(bp_delta_uw ~ experiment_noparams, data = combined_data, var.equal=TRUE)
res.ttest
res.ttest <- t.test(bp_delta_uw ~ experiment_params, data = combined_data, var.equal=TRUE)
res.ttest
#effect size to see how big the effect is when the t.test resulting p-value is below 0.05
# Vargha-Delaney A measure from the effsize package.
# library() (was require()) fails loudly if the package is missing.
library(effsize)
VD.A(bp_delta_joule ~ experiment, data = combined_data)
VD.A(cpu_load ~ experiment, data = combined_data)
VD.A(memory_usage_mb ~ experiment, data = combined_data)
VD.A(bp_delta_joule ~ experiment_noparams, data = combined_data)
# VISUALIZATION
# FOR BP_DELTA_UW - copy this for the other variables
# Violin + boxplot of energy consumption (joule) by experiment.
# BUG FIX: the chain previously ended with a dangling "+" (its stat_summary
# line is commented out below), which made R parse the NEXT top-level
# assignment as part of this plot expression and fail at runtime.
ggplot(combined_data, aes(y=bp_delta_joule, x=experiment, fill=experiment)) +
  #limits are possible
  #ylim(50, 55) +
  #add labels
  xlab("Experiments") + ylab("Battery Power Delta in uW") +
  #interesting looking shape represents the distribution
  geom_violin(trim=FALSE, alpha=1, show.legend = FALSE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE)
  #add points
  #stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE)
#battery
# Three boxplot views of energy consumption, stacked by grid.arrange below:
# all data, no-parameter functions only, multi-parameter functions only.
# NOTE(review): the objects are named ggplot1/ggplot2/ggplot3 -- `ggplot2`
# shadows the package name, which is confusing (though harmless here).
ggplot1 <- ggplot(combined_data, aes(y=bp_delta_joule, x=experiment, fill=experiment)) +
  labs(title="Boxplots: Comparison of Energy Consumption in Joule between experiments") +
  xlab("Combined") +
  ylab("") +
  #points
  geom_jitter(width=.1, show.legend = FALSE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE) +
  #add points
  stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE)
# NOTE(review): na.rm is not an aesthetic -- passing it inside aes() (and as
# an argument to ggplot()) is ignored; the na.rm in the geoms is what matters.
# Also, ylim(0,800) removes out-of-range observations from the computed stats,
# it does not merely crop the view.
ggplot2 <- ggplot(subset(combined_data, !is.na(experiment_noparams)), aes(y=bp_delta_joule, x=experiment_noparams, fill=experiment_noparams, na.rm = TRUE), na.rm = TRUE) +
  xlab("No Parameter Functions") + ylim(0,800) +
  ylab("") +
  #points
  geom_jitter(width=.1, show.legend = FALSE, na.rm = TRUE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE, na.rm = TRUE) +
  #add points
  stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE, na.rm = TRUE)
ggplot3 <- ggplot(subset(combined_data, !is.na(experiment_params)), aes(y=bp_delta_joule, x=experiment_params, fill=experiment_params, na.rm = TRUE)) +
  xlab("Multiple Parameter Functions") +
  ylab("") +
  #points
  geom_jitter(width=.1, show.legend = FALSE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE) +
  #add points
  stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE)
grid.arrange(ggplot1, ggplot2, ggplot3, ncol=1, nrow = 3, left= "Energy Consumption (Joule)")
#cpu_load
# Boxplot + jittered points + mean marker of CPU load by experiment.
ggplot(combined_data, aes(y=cpu_load, x=experiment, fill=experiment)) +
  xlab("Experiments") + ylab("CPU Load (%)") +
  labs(title="Boxplots: Comparison of CPU Load in Percentage between experiments") +
  #points
  geom_jitter(width=.1, show.legend = FALSE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE) +
  #add points
  stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE)
#memory_usage_kb
# Same view for memory usage (plotted in MB, despite the _kb label above).
ggplot(combined_data, aes(y=memory_usage_mb, x=experiment, fill=experiment)) +
  xlab("Experiments") + ylab("Memory Usage (mb)") +
  labs(title="Boxplots: Comparison of Memory Usage in mb between experiments") +
  #points
  geom_jitter(width=.1, show.legend = FALSE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE) +
  #add points
  stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE)
#qqplot with beautiful line
#ggplot(combined_data, aes(sample=bp_delta_joule))+stat_qq(color="blue")+geom_qq_line(color="black")
##density plot
#ggplot(combined_data, aes(x=bp_delta_joule)) +
#  geom_density()
# Comparison of multiple-parameter vs no-parameter functions, one panel per
# metric.  (The original "#memory_usage_kb" labels above each plot were
# copy-paste leftovers; corrected below.)
#battery power (joule)
plot1 <- ggplot(combined_data, aes(y=bp_delta_joule, x=parameters, fill=parameters)) +
  xlab("") + ylab("Battery Power (joule)") +
  labs(title="Boxplots: Comparison of multiple parameters against no parameters") +
  #points
  geom_jitter(width=.1, show.legend = FALSE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE) +
  #add points
  stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE)
#cpu load (%)
plot2 <- ggplot(combined_data, aes(y=cpu_load, x=parameters, fill=parameters)) +
  xlab("") + ylab("CPU load (%)") +
  #points
  geom_jitter(width=.1, show.legend = FALSE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE) +
  #add points
  stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE)
#memory usage (mb)
plot3 <- ggplot(combined_data, aes(y=memory_usage_mb, x=parameters, fill=parameters)) +
  xlab("Parameters") + ylab("Memory Usage (mb)") +
  #points
  geom_jitter(width=.1, show.legend = FALSE) +
  #add boxplots
  geom_boxplot(show.legend = FALSE) +
  #add points
  stat_summary(fun=mean, color='black', geom ='point', show.legend = FALSE)
grid.arrange(plot1, plot2, plot3, ncol=1, nrow = 3)
|
eeff9c0ff0372ac22f78c50087c3453509a47e58 | a66de884ff4e8c5c983d9b30c6ed46da34d82250 | /APD_Mapping.R | 0ad39c7eafb8e810a846f92c775fcf9dac9d671d | [] | no_license | furuutsuponchisamurai/APD-Analysis | c93dc2bfbfc3bfdefa021460c6aecc4e6cc5390f | 9e30f5b406d925a8114635f067cac022d074673d | refs/heads/master | 2020-03-08T04:54:29.593563 | 2018-08-05T15:20:41 | 2018-08-05T15:20:41 | 127,933,964 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,408 | r | APD_Mapping.R | library(tidyverse)
library(reshape2)
library(leaflet)
library(jsonlite)
# NOTE(review): `data` shadows base::data(); renaming (e.g. apd_data) would
# be cleaner.  read_csv() comes from tidyverse/readr loaded above this block.
data <- read_csv("APD-DATA-CLEAN.csv")
# Drop rows without coordinates, then keep only points inside an Austin-area
# bounding box (approx. lon [-97.94, -97.56], lat [30.09, 30.52]).
clean_data <- data %>% filter(!is.na(LONGITUDE))
austin_data <- filter(clean_data, -97.94 <= LONGITUDE, LATITUDE <= 30.52) %>% filter(LONGITUDE <= -97.56, 30.09 <= LATITUDE)
# geoJSON <- fromJSON("districts-processed-geoJSON.json")
# geojson <- readLines("austin-council-processed.geojson", warn = FALSE) %>%
#   paste(collapse = "\n") %>%
#   fromJSON(simplifyVector = FALSE)
# NOTE(review): geojson_read() is exported by the geojsonio package, which is
# never loaded in this script -- this line will fail unless geojsonio is
# attached elsewhere.
gg <- geojson_read("austin-council-wound.geojson", method="local")
# Council-district polygons (topo palette) plus clustered point markers.
leaflet(austin_data) %>% setView(lng = -97.7341, lat = 30.2849, zoom = 10) %>%
  addTiles() %>% addGeoJSON(gg, fillColor = topo.colors(10, alpha = NULL)) %>% addTiles() %>% addMarkers(clusterOptions = markerClusterOptions())
# Use below leaflet commands with leafletR
# sty <- styleCat(prop="district_id", val=c("1","2","3","4","5","6","7","8","9","10"),
# style.val=c('#8dd3c7','#ffffb3','#bebada','#fb8072','#80b1d3','#fdb462','#b3de69','#fccde5','#d9d9d9','#bc80bd'),
# leg="Council ID")
# map <- leaflet(data="austin-council-processed.geojson", title="District_id",
# style=sty)
# leaflet(aus_data) %>% addTiles() %>% addMarkers(clusterOptions = markerClusterOptions())
# {
# "type": "Feature",
# "properties": {"district_id": "7", "council_representative": "Leslie Poole"},
# "geometry": {
# "type": "MultiPolygon",
# "coordinates": [[
# [-104.05, 48.99],
# [-97.22, 48.98],
# [-96.58, 45.94],
# [-104.03, 45.94],
# [-104.05, 48.99]
# ]]
# }
# }
# geodata <- read_csv("city-council.csv")
# substrRight <- function(x, n){
# substr(x, nchar(x)-n+1, nchar(x))
# }
# rexp <- "^(\\w+)\\s?(.*)$"
# nexp <- "(-[0-9]{2}\\.[0-9]+)\\s([0-9]{2}\\.[0-9]+)"
# gdata <- tibble(DISTRICT=geodata$COUNCIL_DISTRICT, SHAPE=sub(rexp,"\\1",geodata$the_geom), COORDS=sub(rexp,"\\2",geodata$the_geom))
# gdata <- mutate(gdata, locs = gsub("\\,", "]\\,", COORDS))
# gdata <- mutate(gdata, locs = gsub(nexp, "\\1, \\2", locs))
# gdata <- mutate(gdata, locs = gsub("\\(", "[", locs))
# gdata <- mutate(gdata, locs = gsub(")", "]", locs))
# gdata <- mutate(gdata, locs = gsub("-", "[-", locs))
# gdata <- mutate(gdata, locs = paste(locs, "]", sep = ""))
# d <- substrRight(gdata$locs[1], 6)
#
# js <- gdata$locs[3]
# gdata <- mutate(gdata, locs = fromJSON(locs)) |
3211af211a4b09c8445c96b49585d2a3e3c6b2be | a30d89a181b8f4e9d49989058fc5ae2724883974 | /src/01.basic/4.function.R | 23c6acdc5f68005e867ae552b52a67988a86a315 | [] | no_license | imdangun/r_grammar.20.12 | d9b120e9f6a54b0172d09599285423288f1fb6c7 | 4f83ed51ad227716c48cf5adfa1212dd02c72767 | refs/heads/master | 2023-01-30T11:00:23.288349 | 2020-12-15T05:33:32 | 2020-12-15T05:33:32 | 320,449,625 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,705 | r | 4.function.R | #4.function
## 1. math function
x = c(5.415, -5.415)
ceiling(x) # [1] 6 -5
floor(x) # [1] 5 -6
trunc(x) # [1] 5 -5
round(x, 2) # [1] 5.42 -5.42
signif(x, 3) # [1] 5.42 -5.42
round(sqrt(1:10), 3) # [1] 1.000 1.414 1.732 2.000 2.236 2.449 2.646 2.828 3.000 3.162
## 2. statistics function
# na.rm: drop NA values before computing (comment typo "na.ram" fixed)
(x = c(1:3, NA)) # [1] 1 2 3 NA
mean(x) # [1] NA
mean(x, na.rm = T) # [1] 2
quantile(x) # Error in quantile.default(x)
quantile(x, na.rm = T)
# 0% 25% 50% 75% 100%
# 1.0 1.5 2.0 2.5 3.0
# argument signatures differ: sum(...) takes many vectors, mean(x, ...) only one
x = 1:3; y = 4:6
sum(x, y) # [1] 21
mean(x, y) # error
mean(c(x, y)) # [1] 3.5
# similar-looking but different functions
min(x, y) # [1] 1
pmin(x, y) # [1] 1 2 3, element-wise (parallel) minimum
# cum~(): cumulative functions
cumsum(x) # [1] 1 3 6
cumprod(x) # [1] 1 2 6, cumulative product
cummin(c(3:1, 2:0)) # [1] 3 2 1 1 1 0
cummax(c(3:1, 2:0)) # [1] 3 3 3 3 3 3
#
diff(c(1, 5, 2)) # [1] 4 -3, difference = following value - preceding value
## 3. table function
x = mtcars; names(x)
# [1] "mpg" "cyl" "disp" "hp" "drat" "wt" "qsec" "vs" "am" "gear" "carb"
(t1 = table(x$cyl))
# 4 6 8
# 11 7 14 , 11 cars with 4 cylinders, 7 with 6 cylinders, 14 with 8 cylinders
(t2 = table(x$am, x$cyl))
# 4 6 8
# 0 3 4 12
# 1 8 3 2, am: 0 = automatic, 1 = manual (per ?mtcars; original comment had these reversed)
addmargins(t1) # appends a Sum margin
# 4 6 8 Sum
# 11 7 14 32
addmargins(t2)
# 4 6 8 Sum
# 0 3 4 12 19
# 1 8 3 2 13
# Sum 11 7 14 32
prop.table(t2) # cell proportions (relative frequencies)
# 4 6 8
# 0 0.09375 0.12500 0.37500
# 1 0.25000 0.09375 0.06250
addmargins(prop.table(t2))
# 4 6 8 Sum
# 0 0.09375 0.12500 0.37500 0.59375
# 1 0.25000 0.09375 0.06250 0.40625
# Sum 0.34375 0.21875 0.43750 1.00000
##4. string function
x = c(123, 456); x # [1] 123 456
substr(x, 1, 2) # [1] "12" "45"
(x = as.character(x)) # [1] "123" "456"
substr(x, 1, 2) # [1] "12" "45"
substring('hello', 1:5, 1:5) # [1] "h" "e" "l" "l" "o"
substr('hello', 1:5, 2:5) # [1] "he", substr() uses only the first start/stop pair
#
x = c('최한석', '한아름', '최인한')
grep('최', x) # [1] 1 3
grepl('최', x) # [1] TRUE FALSE TRUE
grep('최', x, value = T) # [1] "최한석" "최인한"
#
x = c('최한석최', '한아름', '최인한최')
sub('최', '박', x) # [1] "박한석최" "한아름" "박인한최"  (replaces first match only)
x = c('최한석최', '한아름', '최인한최')
gsub('최', '박', x) # [1] "박한석박" "한아름" "박인한박"  (replaces all matches)
#
strsplit(x, ' ') # returns a list
# [[1]]
# [1] "최한석최"
#
# [[2]]
# [1] "한아름"
#
# [[3]]
# [1] "최인한최"
##5. probability distribution function
# d~ : probability density function
# p~ : cumulative probability (CDF)
# q~ : quantile function
# r~ : random number (random variate) generation
|
7067aa804774d0da8776da3aa07aea312b995e60 | 7a57aebb4ac8e0ae102839297adf3b654e386789 | /Rscript/GI_Distribution_sim_vs_theory_4.R | c24fcc16667375427b33bac1a4834d7cf6bd3571 | [] | no_license | davidchampredon/tmpGI | 3ea971b044b248d6d3e43a5f1e20141c4ed82a91 | fb239730f1f96ee6e185f88ff5426acb2076a01d | refs/heads/master | 2016-08-11T13:05:58.144478 | 2015-11-10T22:21:07 | 2015-11-10T22:21:07 | 44,702,143 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,144 | r | GI_Distribution_sim_vs_theory_4.R | #################################################################
###
### COMPARE EMPIRICAL & THEORETICAL GI DISTRIBUTIONS
###
### Created 2015-07-03 by David Champredon
###
#################################################################
source("calc_theoretical_GI.R")
source("read_simul_GI_FCT.R")
source("figures_ms.R")
# Output switches.  NOTE(review): F/T are reassignable shorthands; the
# spelled-out FALSE/TRUE are safer.
save.to.file <- TRUE
add.info.filename <- F #TRUE
info.in.title <- F #TRUE
# Path to the C++ model generating simulations
path.model <- "../Gillespie_SEmInR/"
# Read the simulation parameter values:
simprm.list <- as.character(read.csv(paste0(path.model,"param_all_list.csv"),header=F)[,1])
file.prm <- simprm.list[1]
simprm <- read.csv(paste0(path.model,file.prm),header=F)
# Pull individual parameters out of the two-column (name V1, value V2) table:
R0 <- simprm[simprm$V1=="R0",2]
nE <- simprm[simprm$V1=="nE",2]
nI <- simprm[simprm$V1=="nI",2]
mc <- simprm[simprm$V1=="mc_iter",2]
popSize <- simprm[simprm$V1=="popSize",2]
latent_mean <- simprm[simprm$V1=="latent_mean",2]
infectious_mean <- simprm[simprm$V1=="infectious_mean",2]
horiz <- simprm[simprm$V1=="horizon",2]
# Parameter summary string, used in file names / plot titles when enabled.
prm.info = paste0("_R0_",R0,"_nE_",nE,"_nI_",nI,
                  "_lat_",latent_mean,"_inf_",infectious_mean,
                  "_pop_",popSize/1000,"k_MC_",mc)
# File name for output plots
fname.fwd <- ifelse(add.info.filename,paste0("plot_fwd_dist",prm.info,".pdf"),"plot_fwd_dist.pdf")
fname.bck <- ifelse(add.info.filename,paste0("plot_bck_dist",prm.info,".pdf"),"plot_bck_dist.pdf")
# Mean intrinsic GI
# (latent_mean plus infectious_mean*(nI+1)/(2*nI) -- presumably the
# Erlang-stage correction; confirm against calc_theoretical_GI.R)
mean.gi.intrinsic = latent_mean + infectious_mean*(nI+1)/nI/2
# Slim data frames
t.bucket = 0.002
### Retrieve generation intervals data from simulations:
GIbck.sim <- get.GI.bck.sim(doParallel=FALSE, file.prm,t.bucket,path.model)
GIbck.sim$t <- ceiling(GIbck.sim$time.infectee)
GIfwd.sim <- get.GI.fwd.sim.melt(doParallel=FALSE,file.prm,t.bucket,path.model)
GIfwd.sim$t <- round(GIfwd.sim$time.infector)
max.horizon <- round(max(GIbck.sim$time.infectee))+1
### Calculate theoretical (using a SEmInR model)
### forward & backward generation intervals
theo.GI <- calc.theoretical.GI(file.prmset = paste0(path.model,file.prm),
                               n.points.GI.crv = min(200,max.horizon),
                               horizon = 1.02*max.horizon,
                               do.plot = FALSE)
# Unpack the components returned by calc.theoretical.GI():
GI.ODE <- theo.GI[["GI.ODE"]]
theo.gi.fwd <- theo.GI[["GI.fwd.theo"]]
theo.gi.bck <- theo.GI[["GI.bck.theo"]]
theo.gi.fwd.time <- theo.GI[["GI.fwd.theo.time"]]
theo.gi.bck.time <- theo.GI[["GI.bck.theo.time"]]
theo.time <- theo.GI[["time.vec"]]
###############
###  PLOTS  ###
###############
# Calendar times where we look:
tsvec.fwd <- c(5,40,60)
tsvec.bck <- c(5,40,60)
plot.w <- 10
plot.h <- 5
# One figure: large mean-GI comparison on top, three per-time distribution
# comparisons below.  Shared by the forward and backward cases (the two
# copies of this code were previously duplicated inline).
plot.gi.panel <- function(fname, dat.gil, fwdOrBck, title.base,
                          tsvec, theo.gi, theo.gi.time, label) {
  if(save.to.file) pdf(fname, width = plot.w, height = plot.h)
  layout(matrix(c(1,1,1,2,3,4),
                nrow = 2, ncol = 3, byrow = TRUE),
         widths = c(1,1,1), heights = c(1,1))
  thetitle <- title.base
  if(info.in.title) thetitle <- paste(thetitle, "\n", prm.info)
  plot.theo.vs.sim(dat.gil = dat.gil,
                   fwdOrBck = fwdOrBck,
                   dat.ode = GI.ODE,
                   n.times = 50,
                   title = thetitle,
                   mean.gi.intrinsic = mean.gi.intrinsic,
                   min.mc = mc/4,
                   tsvec = tsvec)
  # Per-time-point comparison of empirical vs theoretical distributions
  # (`g` and `I` come from the sourced helper scripts / base R).
  sapply(tsvec, FUN = compare.sim.theo.distrib,
         dat.gil, g, I, theo.gi, theo.gi.time, label)
  if(save.to.file) dev.off()
}
plot.gi.panel(fname.fwd, GIfwd.sim, "fwd",
              "Mean forward GI: theory vs. simulations",
              tsvec.fwd, theo.gi.fwd, theo.gi.fwd.time, "Forward")
plot.gi.panel(fname.bck, GIbck.sim, "bck",
              "Mean backward GI: theory vs. simulations",
              tsvec.bck, theo.gi.bck, theo.gi.bck.time, "Backward")
|
99f39715547aa07ddf9ef457ffdb9d9dc705888f | 437ea30837d0068b8bca815f500396f30cd2ff74 | /man/phenodata.Rd | c0572f54566d8e40b850ec3889a76bb7c66d6087 | [] | no_license | hummelma/GlobalAncova | 0d51390638a353a3d2732a962f05b9a2a73606fc | f2512c80850a0b0ebb6d5ee53c6ed9228b85b74c | refs/heads/master | 2021-06-15T19:42:03.969139 | 2021-01-31T09:39:31 | 2021-01-31T09:39:31 | 123,418,216 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 954 | rd | phenodata.Rd | \name{phenodata}
\alias{phenodata}
\docType{data}
\title{Covariate information for the van t'Veer data}
\description{
Covariate data for the van t'Veer example:
\describe{
\item{Sample}{Sample number.}
\item{metastases}{Development of distant metastases within five years (\code{0}-no/\code{1}-yes).}
\item{grade}{Tumor grade (three ordere levels).}
\item{ERstatus}{Estrogen receptor status (\code{pos}-positive/\code{neg}-negative).}
}
}
\usage{data(phenodata)}
\format{
The format is:
\describe{
\item{\code{'data.frame'}:}{96 obs. of 4 variables:}
\item{\code{$Sample}:}{int 1 2 3 4 5 6 7 8 9 10 ...}
\item{\code{$metastases}:}{int 0 0 0 0 0 0 0 0 0 0 ...}
\item{\code{$grade}:}{int 2 1 3 3 3 2 1 3 3 2 ...}
\item{\code{$ERstatus}:}{Factor w/ 2 levels "neg","pos": 2 2 1 2 2 2 2 1 2
2 ...}
}
}
\examples{
data(phenodata)
#str(phenodata)
}
\keyword{datasets}
|
8b78fcd808fa07eda3fdaedd6c3b2345e38b4cae | 62ebe734f294c073e9f9d3044ab07c7d71e18808 | /logit-regression.r | b4ef5ecf2c656e7cd86d9765667c858e261c8deb | [] | no_license | hubbard/web-r | 9dbd629828131aac299fc0804c1f320cedd1dc19 | af73a51aa1f587737cd9d1ac99702c35d262c364 | refs/heads/main | 2023-03-14T15:56:49.887956 | 2021-03-04T00:31:00 | 2021-03-04T00:31:00 | 318,614,199 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 879 | r | logit-regression.r | # Thanks to the UCLA IDRE for this example http://www.ats.ucla.edu/stat/r/dae/logit.htm
library(aod)
library(ggplot2)
# if you are ever missing a package, start R as root (i.e. sudo R) and use install.packages("packagename") to get it
cat("logit is for a binary outcome (e.g., do or do not get accepted to grad school, based on GRE, GPA, and ")
cat("undergrad school prestige ranking)\n")
mydata <- read.csv("https://stats.idre.ucla.edu/stat/data/binary.csv")
head(mydata)
summary(mydata)
sapply(mydata, sd)
mydata$rank <- factor(mydata$rank)
mylogit <- glm(admit ~ gre + gpa + rank, data = mydata, family = "binomial")
summary(mylogit)
cat("I just realized that I do not know how to use these results. Please call/text if you can teach me 804-313-9894.\n")
# CIs using profiled log-likelihood
confint(mylogit)
wald.test(b = coef(mylogit), Sigma = vcov(mylogit), Terms = 4:6)
|
869c0477ef508a8057146af9f258aa95b56693fa | 2cf5744042a9802bc019c0557848db8fbfda0d39 | /man/MRIaggr-calcSmoothMask.Rd | 18106e3c49dddabb80c3e113453fe935d4c3e134 | [] | no_license | cran/MRIaggr | bcc874f1253ab7b168e4a6d68bc66e8556b7d330 | 099c3227ac60fdad71aa5c1b79bf53b91a92e177 | refs/heads/master | 2021-01-21T21:47:16.132229 | 2015-12-23T23:44:19 | 2015-12-23T23:44:19 | 31,946,742 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,752 | rd | MRIaggr-calcSmoothMask.Rd | \name{calcSmoothMask}
\title{Spatial regularization}
\alias{calcSmoothMask}
\alias{calcSmoothMask,MRIaggr-method}
\description{
Perform a spatial regularization of a binary mask.
}
\usage{
\S4method{calcSmoothMask}{MRIaggr}(object, mask = "mask", numeric2logical = FALSE,
size_2Dgroup = 50, Neighborhood_2D = "3D_N8", rm.2Dhole = FALSE,
size_3Dgroup = "unique", Neighborhood_3D = "3D_N10", rm.3Dhole = TRUE,
erosion.th = 0.75, Vmask_min = 0.25, Vbackground_max = 0.75,
Neighborhood_V = "3D_N10", verbose = optionsMRIaggr("verbose"),
update.object = FALSE, overwrite = FALSE)
}
\arguments{
\item{object}{an object of class \code{\linkS4class{MRIaggr}}. REQUIRED.}
\item{mask}{the binary contrast parameter that should be smoothed. \emph{character}.}
\item{numeric2logical}{should \code{mask} be convert to logical ? \emph{logical}.}
\item{size_2Dgroup}{the minimum size of the 2D groups. \emph{positive integer} or \code{"unique"}.}
\item{Neighborhood_2D}{the type of 2D neighbourhood. \emph{character}.}
\item{rm.2Dhole}{should the 2D wholes inside the mask be removed ? \emph{logical}.}
\item{size_3Dgroup}{the minimum size of the 3D groups. \emph{positive integer} or \code{"unique"}.}
\item{Neighborhood_3D}{the type of 3D neighbourhood. \emph{character}.}
  \item{rm.3Dhole}{should the 3D holes inside the mask be removed ? \emph{logical}.}
\item{erosion.th}{the threshold below which the observations will be removed by the erosion. \emph{numeric between 0 and 1}.}
\item{Vmask_min}{mask observations with a proportion of neighbors belonging to the mask lower than \code{Vmask_min} are attributed to the background. \emph{numeric between 0 and 1}.}
\item{Vbackground_max}{background observations with a proportion of neighbors belonging to the mask higher than \code{Vbackground_max} are attributed to the mask. \emph{numeric between 0 and 1}.}
\item{Neighborhood_V}{the type of neighbourhood to use for the spatial regularization. \emph{character}.}
\item{verbose}{should the execution of the function be traced ? \emph{logical}.}
\item{update.object}{should the resulting regularized mask be stored in \code{object} ? \emph{logical}.}
\item{overwrite}{if a mask is already stored in \code{object@data}, can it be overwritten ? \emph{logical}.}
}
\details{
ARGUMENTS: \cr
the \code{Neighborhood_2D} or \code{Neighborhood_3D} arguments can be a \emph{matrix} or an \emph{array} defining directly the neighbourhood to use (i.e the weight of each neighbor)
or a name indicating which type of neighbourhood should be used (see the details section of \code{\link{initNeighborhood}}).
FUNCTION: \cr
This function applies 6 smoothing steps :
\itemize{
\item exclusion of the small 2D groups from the mask (to skip set \code{size_2Dgroup} to \code{FALSE}). Note that \code{size_2Dgroup = "unique"} leads to keeping the largest 2D group of each slice.
\item filling of the small 2D holes in the mask (to skip set \code{rm.2Dhole} to \code{FALSE}).
\item exclusion of the small 3D groups from the mask (to skip set \code{size_3Dgroup} to \code{FALSE}). Note that \code{size_3Dgroup = "unique"} leads to keeping only the largest 3D group.
\item erosion that first temporarily remove observations from the mask that have less than \code{erosion.th} percent of their neighbourhood in the mask.
Then it computes the new 3D groups and remove permanently all the new 3D groups from the mask. To skip set \code{erosion.th} to \code{FALSE}.
\item filling of the small 3D holes in the mask (to skip set \code{rm.3Dhole} to \code{FALSE}).
\item spatial regularization that homogenize the local neighbourhood (to skip set both \code{Vmask_min} and \code{Vbackground_max} to \code{FALSE}).
}
}
\seealso{
\code{\link{selectContrast}} to select the smoothed mask.
\code{\link{calcBrainMask}} to compute an indicator of the brain observations.
}
\value{
An \emph{data.frame} containing the mask and the coordinates in columns.
}
\examples{
## load data and build MRIaggr
path.Pat1 <- system.file(file.path("nifti"), package = "MRIaggr")
ls.array <- list(readMRI(file.path(path.Pat1,"T2_GRE_t0"), format = "nifti"))
MRIaggr.Pat1 <- constMRIaggr(ls.array,identifier="Pat1", param = "T2_GRE_t0")
## create the cerebral mask
res <- calcBrainMask(MRIaggr.Pat1, param = "T2_GRE_t0", type = "kmeans",
kmeans.n_groups = 2:4,
update.object = TRUE, overwrite = TRUE)
## smooth the cerebral mask
res <- calcSmoothMask(MRIaggr.Pat1, update.object = TRUE, overwrite = TRUE)
## display
multiplot(MRIaggr.Pat1,param = "mask", legend = FALSE)
}
\concept{calc.}
\keyword{methods}
|
338b3ba23b50d975fea74bb4325c6188c07b5a1b | f53f54c5420cde05e685c93dff279c644c03b9bc | /rf_percentCover_BRTE2.R | 87088fc6d7d34f6ce1cfaa04f5b38e6cfc94a776 | [] | no_license | cacurtis/random_forest | ff3b724c0306845da021474df28d09bbe9a64add | 9f67a982910a9c53b20ad2f30b0c1ffe680dde63 | refs/heads/master | 2021-01-09T20:42:02.348521 | 2016-07-01T19:05:07 | 2016-07-01T19:05:07 | 62,414,257 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,734 | r | rf_percentCover_BRTE2.R | #############################################################################
# This script reads training data from the CSV file created using the "percentCoverResample.R
# script. The script then uses the X and Y coordinates from the training data file to select
# the pixel values (predictor values) for each sample point in the input image. The predictor
# values and the percent cover data from the training data file (response variable) are
# combined and used as input to the random forests model. After the model is created percent
# cover predictions are made on the input image to create an output image with percent cover
# values ranging from 0 to 1.
#
# Set the variables below in the "SET VARIABLES HERE" section of the script.
#
# This script was written by Ned Horning [horning@amnh.org]
# Support for writing and maintaining this script comes from The John D. and
# Catherine T. MacArthur Foundation.
#
# This script is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software Foundation
# either version 2 of the License, or ( at your option ) any later version.
#
#############################################################################
#Load libraries
require(maptools)
require(sp)
require(randomForest)
require(raster)
require(rgdal)
#
############################# SET VARIABLES HERE ###################################
############################# point data #######################################
# setwd("C:/Users/Bethany/Documents/My Dropbox/Bethany/Research/Invasive_mapping/Cheatgrass_RS_2015")
setwd("E:/artr_obl_spp")
# The CSV file containing X, Y, and percent cover point data created by the percentCoverResample.R script.
artr_dat <- read.csv('artr_pts/artr_clip2gb.csv', header=TRUE)
long <-artr_dat$long_proj
lat <-artr_dat$lat_proj
pctcov <-artr_dat$pctcov
#create pointdata from csv
pointData <- cbind(long, lat, pctcov)
############################# raster data - choose one option #########################
############################# option 1: bioclm data with mask #######################
#load raster layers (in this case, 19 bioclim layers) and turn them into a single raster stack.
files <- list.files(("_pvs"), pattern = 'tif$', full.names=TRUE)
# files <-list.files(("clim/Daymet_monthly/dymt_ppt20inc_bioclm_rmvcorrelated/TIFS"), pattern = 'tif$', full.names=TRUE)
bioclm <- stack(files)
plot(bioclm)
#import raster mask
r_mask<-raster("sb_outlineNEW/sbrush_mask.tif")
plot(r_mask)
#apply the mask to the raster layers.
#set maskvalue = to whatever value you defined as the NA value for the tif
x <- mask(bioclm, r_mask, maskvalue=0)
plot(x)
#write the masked file to save it
DaymetBioclim <- writeRaster(x, "Daymet_monthly/dymt_curr_bioclm/dymt_bioclm_tifs/19bclm_masked.tif", format='GTiff')
############################## option 2: bioclim data no mask ##########################
# create raster stack of PV layers
files <-list.files(("Daymet_monthly/dymt_curr_bioclm/dymt_bioclm_tifs"), pattern = 'tif$', full.names=TRUE)
predictors <- stack(files)
# predictors <- dropLayer(x=predictors, i=1, 3) #remove any unwated layers
predictors
s <- stack(predictors)
############################ option 3: single raster or satellite layer ################
# Name and path for the input satellite image
inImage <-'clim/stacks/annual_gb.bil'
############################ define output ###########################################
# Name and path of the output GeoTiff image
outImage <- 'rf_output/artr_23PVs2.tif'
# No data value for satellite image or raster stack
# nd <- -9999
nd <- -1.7e+308
######################################################################################
#
# Start processing
print("Set variables and start processing")
startTime <- Sys.time()
cat("Start time", format(startTime),"\n")
# pointTable <- read.csv(pointData, header=TRUE)
xy <- SpatialPoints(pointData[,1:2])
response <- as.numeric(pointData[,3])
# Load the moderate resolution image
satImage <- stack(x)
#don't use if using stacked raster ('s')
# for (b in 1:nlayers(satImage)) { NAvalue(satImage@layers[[b]]) <- nd }
# Get pixel DNs from the input image for each sample point
print("Getting the pixel values under each point")
trainvals <- cbind(response, extract(satImage, xy))
# Remove NA values from trainvals
trainvals_no_na <- na.omit(trainvals)
#####################################################################
##see http://www.statistik.uni-dortmund.de/useR-2008/slides/Strobl+Zeileis.pdf for other options
## also, varSelRF for backward elimination
library (varSelRF)
rf.vs1 <- varSelRF(x, cl, ntree = 200, ntreeIterat = 100,
vars.drop.frac = 0.2)
rf <- randomForest(x, cl, ntree = 200, importance = TRUE)
rf.rvi <- randomVarImpsRF(x, cl,
rf,
numrandom = 20,
usingCluster = FALSE)
randomVarImpsRFplot(rf.rvi, rf)
#OR
##add rfcv and tuneRF code here - figure out which PVs to drop
head(trainvals)
alldat <- read.csv("_pvs/pvs_extractto_artrGB.csv", head=TRUE)
pvs <- cbind(alldat[10:32])
response <- cbind(alldat[5])
result <-rfcv(pvs, alldat$pctcov, cv.fold=5)
with(result, plot(n.var, error.cv, log="x", type="o", lwd=2))
#############################################################################
# Run Random Forest
print("Starting to calculate random forest object")
randfor <- randomForest(response ~. , data=trainvals_no_na, importance=TRUE)
#save randfor model
# save(randfor, file = "rf_output/randfor_model_curr.rda") #only needed if projecting onto different layers than what the model was trained on
#if projecting onto "future" climate, change satImage to future layers now.
# Start predictions
print("Starting predictions")
predict(satImage, randfor, filename=outImage, progress='text', format='GTiff', datatype='FLT4S', type='response', overwrite=TRUE)
#
# Calculate processing time
timeDiff <- Sys.time() - startTime
cat("Processing time", format(timeDiff), "\n")
#Variable importance plotted in decreasing order (most important at bottom)
varImpPlot(randfor, sort=TRUE)
importance(randfor) #increase in node impurity = 'residual sum of squares'
#Plot error rates vs. number of trees
plot(randfor)
#Plot response curves of individual predictor variables to regression
partialPlot(randfor,trainvals_no_na, bio1_mskd) #Change BRTE_predictors.X accordingly
|
03d4afd4a114480516d40dd4c3e7dbbe3e4a98b8 | 5b5144b60245ccb475709617c7f6002077ef171a | /R/package.r | d1b5fdcfb1f5cea77b7b63c49dd933c347a2500c | [] | no_license | Guillermogsjc/scimple | 88ae92b5114ea30144aed251d8068fd4652b100d | 31ff2a217d235bc2ece5ffc4d46d48203d2959ee | refs/heads/master | 2021-06-11T01:06:34.621066 | 2017-03-04T05:25:41 | 2017-03-04T05:25:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,048 | r | package.r | #' Simultaneous Confidence Intervals for Multinomial Proportions
#'
#' Methods for obtaining simultaneous confidence intervals for multinomial proportions have
#' been proposed by many authors, and the present study includes a variety of widely
#' applicable procedures. Seven classical methods (Wilson, Quesenberry and Hurst, Goodman,
#' Wald with and without continuity correction, Fitzpatrick and Scott, Sison and Glaz)
#' and Bayesian Dirichlet models are included in the package. The advantage of MCMCpack
#' has been exploited to derive the Dirichlet posterior directly, and this also helps in
#' handling the Dirichlet prior parameters. This package is prepared to have equal and
#' unequal values for the Dirichlet prior distribution that will provide better scope for
#' data analysis and associated sensitivity analysis.
#'
#' @name scimple
#' @docType package
#' @author Dr M.Subbiah [primary], Bob Rudis (bob@@rud.is) [tidy version]
#' @import tibble stats MCMCpack
#' @importFrom dplyr mutate select
#' @importFrom purrr map map_df
NULL
|
55429827c21af255883f2dbcf5fc00362e25933e | f81a5e8e2523934d39d24199b42654ecc09f9223 | /Recommender System/Network Analysis.R | e920229a8902d460b7622affc13a584666660ae4 | [] | no_license | Bl7tzcrank/DataInt-Recommender-System | 161f8940bd7e8b5023de3f435ff63d19ea793180 | 916c34d953e5ead1d319caa4e992a2f536dd5fa7 | refs/heads/master | 2021-05-14T11:14:06.369340 | 2018-01-30T22:20:00 | 2018-01-30T22:20:00 | 116,374,783 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,503 | r | Network Analysis.R | # Network analysis based on the 4Play Database
# ------------------------------------ HowTo ------------------------------------ #
# 1. First of all, run the function definitions (and package installations / requirements / library calls)
# 2. create a database connection by running the respective commands in the "Database connection" part
# 3. select the database tables you need by running the respective commands in the "Get Graph Data" part
# 4. follow the steps in the "Create the graph" part to generate a network graph
# ------------------------------------ Package installations ------------------------------------ #
install.packages("igraph")
library("igraph")
install.packages("RPostgreSQL")
require("RPostgreSQL")
install.packages("RMySQL")
require("RMySQL")
# ------------------------------------ Database connection ------------------------------------ #
# choose your settings and set up the respective connection (simply run the lines that fit)
# NOTE: the assignments below are ALTERNATIVES that overwrite each other -- run only
# the line matching your setup (PostgreSQL vs. MySQL), not the whole section.
driver = dbDriver("PostgreSQL")
driver = MySQL()
dbname = 'DataIntegration'
dbname = '4Play'
dbname = '4PlayNetwork'
host = '127.0.0.1'
# PostgreSQL default port
port = 5432
# NOTE(review): MySQL's default port is 3306 -- 3006 may be a typo; confirm. (The
# MySQL dbConnect() call below does not pass `port`, so the driver default is used.)
port = 3006
user = 'postgres'
user = 'root'
password = 'pw'
password = ''
# PostgreSQL connection (uses the explicit port)
con = dbConnect(driver, dbname = dbname,
                host = host, port = port,user = user, password = password)
# MySQL connection (no port argument; the driver default applies)
con = dbConnect(driver, dbname = dbname,
                host = host, user = user, password = password)
# ------------------------------------ Helper Function declaration ------------------------------------ #
# Builds a weighted, undirected co-occurrence graph from the first two columns of a
# dataframe (for example user_favourited songs): two distinct column-1 entities are
# connected when they share at least one column-2 value, and the edge weight counts
# how many values they share. Example: if userid 1 and userid 10 both favourited
# songid 2 (and nothing else in common), the edge 1 - 10 gets weight 1.
#
# Returns an igraph graph whose edge attribute 'weight' holds the counts. If no
# co-occurring pair exists, an empty graph is returned (the previous implementation
# left a dummy (0,0,0) entry behind, producing a spurious 0-0 edge of weight 0).
createWeightedGraph = function(data){
  # Output column names follow the original convention: "<col1 name>1"/"<col1 name>2".
  cname1 = paste0(colnames(data)[1], "1")
  cname2 = paste0(colnames(data)[1], "2")
  # Hashed environment keyed on the canonically ordered pair, so (a,b) and (b,a)
  # accumulate into the same counter. O(1) lookup replaces the original linear scan
  # over all previously recorded pairs.
  pair_counts = new.env(hash = TRUE, parent = emptyenv())
  for(i in 1:(nrow(data)-1)){
    for(j in (i+1):nrow(data)){
      if(data[i,2] == data[j,2] && data[i,1] != data[j,1]){
        a = as.character(data[i,1])
        b = as.character(data[j,1])
        # "\t" cannot occur in the numeric IDs this function was written for,
        # so it is a safe key separator.
        key = if(a < b) paste(a, b, sep = "\t") else paste(b, a, sep = "\t")
        cur = pair_counts[[key]]
        pair_counts[[key]] = if(is.null(cur)) 1 else cur + 1
      }
    }
  }
  keys = ls(pair_counts)
  if(length(keys) == 0){
    # No shared column-2 values anywhere: empty edge list, empty graph.
    df = data.frame(c1 = numeric(0), c2 = numeric(0), weight = numeric(0))
  } else {
    parts = strsplit(keys, "\t", fixed = TRUE)
    # as.numeric matches the original behaviour, which assumed numeric IDs.
    df = data.frame(
      c1 = as.numeric(vapply(parts, `[[`, character(1), 1)),
      c2 = as.numeric(vapply(parts, `[[`, character(1), 2)),
      weight = vapply(keys, function(k) as.numeric(pair_counts[[k]]), numeric(1),
                      USE.NAMES = FALSE)
    )
  }
  colnames(df) = c(cname1, cname2, "weight")
  graph = graph_from_data_frame(df[1:2], directed = FALSE)
  E(graph)$weight = as.numeric(as.vector(df[,3]))
  return(graph)
}
# Computes a plotting size for every vertex of `graph` and returns the resulting
# V(graph)$size vector. With default=TRUE every vertex gets a flat size of 5;
# otherwise the size is the number of incident edges times `factor`, optionally
# natural-log-scaled when log=TRUE (a vertex with no incident edges then yields
# -Inf, since log(0) is -Inf).
setVertexSizes = function(graph, factor, log = FALSE, default = FALSE){
  if(default){
    V(graph)$size = 5
    return(V(graph)$size)
  }
  vertex_ids = as.numeric(V(graph))
  edge_counts = vapply(
    vertex_ids,
    function(v) length(incident(graph, v, "all")),
    numeric(1)
  )
  scaled = edge_counts * factor
  if(log){
    scaled = base::log(scaled)
  }
  V(graph)$size = scaled
  return(V(graph)$size)
}
# ------------------------------------ Get Graph Data ------------------------------------ #
# Define db table names here to use them later in the code
# NOTE: the two blocks below are alternatives (MySQL uses capitalised table names,
# Postgres lowercase); running the whole section leaves the Postgres names in effect.
# MySQL
table_song_production = 'Song_production'
table_user_favourited_song = 'User_favourited_song'
table_user_follower = 'User_follower'
table_artist_genre = 'Artist_genre'
table_user = 'User'
table_song = 'Song'
# Postgres
table_song_production = 'song_production'
table_user_favourited_song = 'user_favourited_song'
table_user_follower = 'user_follower'
table_artist_genre = 'artist_genre'
table_user = 'user'
table_song = 'song'
# get the data for our graph (each query loads the ENTIRE table into memory)
song_production <- dbGetQuery(conn = con, paste0("SELECT * FROM ", table_song_production))
user_song <- dbGetQuery(conn = con, paste0("SELECT * FROM ", table_user_favourited_song))
user_follower <- dbGetQuery(conn = con, paste0("SELECT * FROM ", table_user_follower))
artist_genre <- dbGetQuery(conn = con, paste0("SELECT * FROM ", table_artist_genre))
# create weighted graph (co-occurrence graphs; see createWeightedGraph above)
user_song_weighted_graph = createWeightedGraph(user_song)
song_production_weighted_graph = createWeightedGraph(song_production)
artist_genre_weighted_graph = createWeightedGraph(artist_genre)
# information to the graph design
# http://kateto.net/networks-r-igraph
# https://cran.r-project.org/web/packages/igraph/igraph.pdf
# ------------------------------------ Create the graph ------------------------------------ #
# Test Graph (duplicate "A","B" edges stand in for a heavily weighted connection)
g1 <- graph(edges = c("A","B", "A","B", "A","B", "A","B", "A","B", "A","B", "A","B", "A","B", "A","B", "A","B", "A","B", "A","B", "B","C", "B","C", "C","A", "B","D",
            "D","E", "D","G", "D","F", "E","F",
            "F","G"), directed = FALSE)
# NOTE(review): g1 was built with graph() and has no 'weight' edge attribute, so
# E(g1)$weight is NULL here and this call deletes no edges -- confirm intent.
g1 <- delete.edges(g1, E(g1)[E(g1)$weight <= 3])
plot(g1)
# choose the graph you want to plot (alternatives; the last line run wins)
graph_to_plot = song_production_weighted_graph
graph_to_plot = user_song_weighted_graph
graph_to_plot = artist_genre_weighted_graph
graph_to_plot = g1
# Create different network algorithms -> choose one of them to be plotted later
# 1.Newman-Girvan
newman_girvan <- cluster_edge_betweenness(graph_to_plot)
# 2.Label propagation
label_propagation <- cluster_label_prop(graph_to_plot)
# 3.Fast greedy
fast_greedy <- cluster_fast_greedy(graph_to_plot)
# 4.Walktrap
walktrap <- cluster_walktrap(graph_to_plot)
# 5.leading eigenvector
leading_eigenvector <- cluster_leading_eigen(graph_to_plot)
# 6.Spinglass
spinglass <- cluster_spinglass(graph_to_plot)
# 7.Infomap
infomap <- cluster_infomap(graph_to_plot)
# show edge betweenness
edge_betweenness(graph_to_plot)
# set the algorithm and plot (each algorithm/name pair overwrites the previous one;
# run only the pair you want before plotting)
algorithm = newman_girvan
name = 'Communities based on "Edge-Betweenness"'
algorithm = label_propagation
name = 'Communities based on "Label propagation"'
algorithm = fast_greedy
name = 'Communities based on "Fast greedy"'
algorithm = walktrap
name = 'Communities based on "Walktrap"'
algorithm = leading_eigenvector
name = 'Communities based on "Leading eigenvector"'
algorithm = spinglass
name = 'Communities based on "Spinglass"'
algorithm = infomap
name = 'Communities based on "Infomap"'
# Layout options
plot(
  algorithm,
  graph_to_plot,
  #graph_to_plot_simplified,
  vertex.color = "grey",
  vertex.size = setVertexSizes(graph_to_plot, 15, log=TRUE, default = FALSE),
  #vertex.size = setVertexSizes(graph_to_plot, .2, log=FALSE, default = FALSE),
  #vertex.size = setVertexSizes(graph_to_plot, 0.1, default = TRUE),
  vertex.label.cex = 0.5,
  vertex.label.color ="black",
  vertex.label.dist=0,
  vertex.shape="square",
  # NOTE(review): E(graph_to_plot)$weight is NULL for the unweighted test graph g1;
  # the weighted graphs from createWeightedGraph carry this attribute.
  edge.width=E(graph_to_plot)$weight * .3,
  arrow.mode=1,
  layout = layout.auto,
  main = name
)
|
00c01f31cd2ba38ae636bef6b99c78db32b6b80e | 0d5f02f6ab16924115aa2cc3b446a80ff87a6395 | /man/fqr.Rd | 844e8c4629a4e650bd5cc18a1c66930b7a472baa | [
"MIT"
] | permissive | MohsinFuzzy/FuzzReg | a6b8c86b56e94756854f2dba9e351567c0d7c1ea | 4df04534ddb1deccae499bab51606213586122f2 | refs/heads/master | 2021-04-06T14:06:26.225717 | 2018-03-15T15:10:02 | 2018-03-15T15:10:02 | 119,653,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,371 | rd | fqr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fqr.R
\name{fqr}
\alias{fqr}
\title{Fuzzy Quantile Regression}
\usage{
fqr(X,y_left,y_centre,y_right,t,type)
}
\arguments{
\item{t}{is a specified quantile ranging from 0 to 1, i.e. t = [0,1]}
\item{type}{specifies the model (1 or 2)}
\item{X}{is the input fuzzy number}
\item{y_left}{is the left value of the output fuzzy number}
\item{y_centre}{is the centre value of the output fuzzy number}
\item{y_right}{is the right value of the output fuzzy number}
\item{1}{"Fuzzy output, Fuzzy input and Fuzzy Parameters"}
\item{2}{"Fuzzy output, Crisp input and Fuzzy Parameters"}
}
\description{
It gives the estimates of fuzzy quantile regression using the method of Weighted Least Absolute Deviation (WLAD). It converts the input variables into a Linear Programming Problem (LPP) and uses the Simplex Algorithm to solve the LPP.
}
\examples{
# Example with a given Triangular Fuzzy Number
library ("lpSolve")
x_left<-c(1.5,3.0,4.5,6.5,8.0,9.5,10.5,12.0)
x_centre<-c(2.0,3.5,5.5,7.0,8.5,10.5,11.0,12.5)
x_right<-c(2.5,4.0,6.5,7.5,9.0,11.5,11.5,13.0)
y_left<-c(3.5,5.0,6.5,6.0,8.0,7.0,10.0,9.0)
y_centre<-c(4.0,5.5,7.5,6.5,8.5,8.0,10.5,9.5)
y_right<-c(4.5,6.0,8.5,7.0,9.0,9.0,11.0,10.0)
X<-cbind(x_left,x_centre,x_right)
t<-0.5
fqr(X,y_left,y_centre,y_right,t,type=1)
}
\author{
Mohsin Shahzad
}
|
0567c030e0d4197a3522d714ff7cf07c0dd0ca4e | 523ba4df759398ac3386dd9e723a5dd9f7981df9 | /scripts/4plotbuilderv2.R | 25e0902b792e254c8d3d646b46e29042acd13bea | [
"MIT"
] | permissive | Jake1Egelberg/DEARV2 | 4431e068de0618e24e39c60a2a2577ead5262214 | 201782f2abbf7484f648620c34b0d9755689fe08 | refs/heads/main | 2023-07-10T22:45:07.257164 | 2023-07-08T10:40:25 | 2023-07-08T10:40:25 | 541,267,149 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,677 | r | 4plotbuilderv2.R |
#--------------------RUN REGARDLESS
#Path to the sample annotation ("design matrix") produced by the annotation step
.GlobalEnv$design_path<-paste(exp_directory,"/Metadata.csv",sep="")
if(file.exists(design_path)==TRUE){
  #Read design matrix
  design_raw<-read.csv(paste(exp_directory,"/Metadata.csv",sep=""),header=TRUE,fileEncoding = 'UTF-8-BOM')
  #Derive a short sample name from each FASTQ file name
  #NOTE(review): the pattern is a regular expression, so the dots match any character;
  #consider fixed=TRUE if literal ".fastq.gz" matching is intended
  design_raw$Seq<-gsub(".fastq.gz","",design_raw$Reads)
  .GlobalEnv$design_raw<-design_raw
  #Get design matrix for significance fitting (intercept column + GROUP column)
  .GlobalEnv$design<-data.frame(Intercept=1,
                                GROUP=design_raw$GROUP)
  .GlobalEnv$seqs<-design_raw$Seq
  .GlobalEnv$seqs_var<-tclVar(seqs)
  .GlobalEnv$classes<-unique(design_raw$Classification)
  .GlobalEnv$class_num<-length(classes)
  #Cached analysis results; delete this file to force a full re-analysis
  .GlobalEnv$save_file<-paste(fastq_dir,"/analysisdata.Rdata",sep="")
  if(file.exists(save_file)==FALSE){
    #"auto" = derive the CPM filter threshold from the data (see filter_genes)
    .GlobalEnv$thresh<-"auto"
    #--------------------FUNCTIONS
    #Start prog bar
    .GlobalEnv$an_prog<-winProgressBar(title="DEAR Analysis and Visualization",
                                       label="Loading analysis functions...",
                                       initial=10,min=0,max=100,width=300)
.GlobalEnv$read_inputs<-function(){
tryCatch(setWinProgressBar(an_prog,value=20,label="Reading raw feature counts..."),
error=function(e)print("no prog"))
#Load count data
countdata_raw<-read.csv(paste(fastq_dir,"/rawfeaturecounts.csv",sep=""),header=TRUE,fileEncoding = 'UTF-8-BOM')
#Read first col as rownames
rownames(countdata_raw)<-countdata_raw$X
countdata_raw<-countdata_raw[,-1,drop=FALSE]
#GET COUNTDATA
.GlobalEnv$countdata<-countdata_raw
return(countdata)
}
    #Filter lowly expressed genes: returns the rows of `countdata` whose CPM exceeds
    #the threshold in at least one sample. `thresh` is either a numeric CPM cutoff or
    #"auto", in which case the cutoff is the mean, over samples, of the CPM that
    #corresponds to ~10 raw counts. Side effects: caches cpm_data and use_thresh in
    #the global environment and prints diagnostics.
    .GlobalEnv$filter_genes<-function(countdata,thresh){
      tryCatch(setWinProgressBar(an_prog,value=30,label="Removing lowly expressed genes..."),
               error=function(e)print("no prog"))
      #Get CPM
      .GlobalEnv$cpm_data<-cpm(countdata)
      if(thresh=="auto"){
        #Determine thresh automatically, cpm that corresponds to counts of 10
        #For each sequence (column, get cpm that corresponds to 10)
        #Finds the row whose raw count is closest to 10 (ties: first match) and
        #returns that row's CPM for sample x
        diff_to_10<-function(x){
          x_dat<-countdata[,x]
          vec<-abs(abs(x_dat)-10)
          ind<-which(vec==min(vec))[1]
          cor_cpm<-cpm_data[ind,x]
          return(cor_cpm)
        }
        counts_cor<-unlist(lapply(1:ncol(countdata),diff_to_10))
        #Get threshhold as mean of the per-sample CPM equivalents, rounded to 1 dp
        use_thresh<-as.numeric(round(mean(counts_cor),1))
      } else{
        use_thresh<-as.numeric(thresh)
      }
      .GlobalEnv$use_thresh<-use_thresh
      #Get well expressed genes: keep a gene if CPM > use_thresh in >= 1 sample
      cpm_data_thresh<-cpm_data>use_thresh
      good_gene_inds<-which(apply(cpm_data_thresh,1,sum)>=1)
      #Diagnostics: threshold used, gene count before and after filtering
      print(use_thresh)
      print(nrow(cpm_data))
      print(nrow(countdata[good_gene_inds,]))
      return(countdata[good_gene_inds,])
    }
    #Process data fun: runs the edgeR differential-expression pipeline on the filtered
    #count matrix with the given model matrix. Returns list(raw DGEList, data.frame of
    #all genes annotated with logFC/FDR from a likelihood-ratio test).
    .GlobalEnv$process_data<-function(countdata_cur,design){
      #Remove features with 0 reads (any gene with a zero count in >= 1 sample)
      .GlobalEnv$zero_count_features<-rownames(countdata_cur[rowSums(countdata_cur==0)>=1,])
      if(length(zero_count_features)>0){
        #Remove genes
        #NOTE(review): sep="" here is passed to print(), not paste() -- presumably it
        #was meant as a paste() argument; harmless but confirm
        print(paste("removing ",length(zero_count_features)," zero counts features"),sep="")
        countdata_cur<-countdata_cur[-which(rownames(countdata_cur)%in%zero_count_features),]
        .GlobalEnv$countdata_cur<-countdata_cur
      }
      tryCatch(setWinProgressBar(an_prog,value=40,label="Converting to DGEList..."),
               error=function(e)print("no prog"))
      #Convert to a DGE obj
      dgeObj<-DGEList(countdata_cur)
      tryCatch(setWinProgressBar(an_prog,value=50,label="Normalizing"),
               error=function(e)print("no prog"))
      #Normalize
      dgeObj_norm<-calcNormFactors(dgeObj)
      tryCatch(setWinProgressBar(an_prog,value=60,label="Calculating between-group variance..."),
               error=function(e)print("no prog"))
      #Get between-group (total dataset) variation
      dgeObj_bwvar<-estimateCommonDisp(dgeObj_norm)
      tryCatch(setWinProgressBar(an_prog,value=70,label="Calculating within-group variance..."),
               error=function(e)print("no prog"))
      #Get within-group (within gene) variation
      dgeObj_wivar<-estimateGLMTrendedDisp(dgeObj_bwvar)
      dgeObj_tag<-estimateTagwiseDisp(dgeObj_wivar)
      tryCatch(setWinProgressBar(an_prog,value=80,label="Fitting linear model..."),
               error=function(e)print("no prog"))
      #Fit GLM
      fit<-glmFit(dgeObj_tag,design)
      #Conduct likelihood ratio test for significance
      lrt<-glmLRT(fit)
      #Calculate FDR for all genes (topTags over the full table, not just the top n)
      top_genes<-topTags(lrt,n=nrow(lrt$table))
      out_list<-list()
      out_list[[length(out_list)+1]]<-dgeObj
      out_list[[length(out_list)+1]]<-as.data.frame(top_genes)
      return(out_list)
    }
    #--------------------RUN ON OPEN
    #Read countdata
    countdata<-read_inputs()
    #Relabel count columns with the short sample names from the design matrix
    #NOTE(review): assumes the column order of rawfeaturecounts.csv matches the row
    #order of Metadata.csv -- confirm
    names(countdata)<-design_raw$Seq
    #Get well expressed genes
    .GlobalEnv$countdata_cur<-filter_genes(countdata,thresh)
    #Get significance
    .GlobalEnv$sig_list<-process_data(countdata_cur,design)
    .GlobalEnv$raw_dge<-sig_list[[1]]
    .GlobalEnv$annot_genes<-sig_list[[2]]
    #Order by feature for volcano plot
    .GlobalEnv$annot_genes_ord<-annot_genes[order(rownames(annot_genes)),]
    #Get all features for volcano plot
    .GlobalEnv$annot_fts<-rownames(annot_genes_ord)
    tryCatch(setWinProgressBar(an_prog,value=90,label="Formatting data..."),
             error=function(e)print("no prog"))
    #Aggregate data for reads/cpm and library distribution plot: one long data.frame
    #with per-sample reads, CPM and log2(CPM) for every feature
    quality_control_list<-lapply(1:ncol(countdata_cur),function(x){
      tmp_counts<-countdata_cur[,x,drop=FALSE]
      seq=names(tmp_counts)
      features=rownames(tmp_counts)
      reads=tmp_counts[,1]
      #Manual CPM; note log2(0) is -Inf for zero-count features
      cpm<-(reads/sum(reads))*1000000
      log2cpm<-log2(cpm)
      class<-design_raw[which(design_raw$Seq==seq),]$Classification
      df<-data.frame(Seq=seq,
                     Feature=features,
                     Reads=reads,
                     CPM=cpm,
                     Log2CPM=log2cpm,
                     Class=class)
      return(df)
    })
    quality_control_df<-bind_rows(quality_control_list)
    quality_control_df$ClassSeq<-paste(quality_control_df$Class,quality_control_df$Seq,sep="_")
    quality_control_df$FeatureClass<-paste(quality_control_df$Feature,quality_control_df$Class,sep="_")
    .GlobalEnv$quality_control_df<-quality_control_df
    #Get data for library sizes plot
    .GlobalEnv$lib_sizes<-data.frame(Seq=rownames(raw_dge$samples),
                                     Size=raw_dge$samples$lib.size,
                                     Class=design_raw[match(rownames(raw_dge$samples),design_raw$Seq),]$Classification)
    #Match data for heatmap plot
    #Feature, Class, Log2CPM, logFC, FDR
    #Get LogFC and FDR
    .GlobalEnv$all_features<-unique(quality_control_df$Feature)
    #NOTE(review): rep(..., times=nrow(annot_genes)) assumes nrow(annot_genes) equals
    #length(all_features); process_data may have dropped zero-count features -- confirm
    heat_data<-data.frame(Feature=rep(all_features,class_num),
                          Class=unlist(lapply(classes,function(x){rep(x,times=nrow(annot_genes))})))
    heat_data$FeatureClass<-paste(heat_data$Feature,heat_data$Class,sep="_")
    #Get feature, class, Log2CPM
    #Average data over feature class
na.rm.mean<-function(x){
return(mean(x,na.rm=TRUE))
}
    #Mean CPM / log2(CPM) per Feature_Class combination
    means<-tapply(quality_control_df$CPM,quality_control_df$FeatureClass,na.rm.mean)
    logmeans<-tapply(quality_control_df$Log2CPM,quality_control_df$FeatureClass,na.rm.mean)
    #Match to LogFC and FDR data
    heat_data$CPM<-means[match(heat_data$FeatureClass,names(means))]
    #NOTE(review): unlike CPM above, logmeans is assigned positionally, relying on the
    #tapply output order matching heat_data's row order -- confirm
    heat_data$Log2CPM<-logmeans
    annot_matches<-match(heat_data$Feature,rownames(annot_genes_ord))
    heat_data$LogFC<-annot_genes_ord[annot_matches,]$logFC
    heat_data$FDR<-annot_genes_ord[annot_matches,]$FDR
    #Sort by logFC (largest absolute fold change first)
    heat_data_ord<-heat_data[order(abs(heat_data$LogFC),decreasing=TRUE),]
    .GlobalEnv$heat_data_ord<-heat_data_ord
    tryCatch(setWinProgressBar(an_prog,value=95,label="Saving analysis..."),
             error=function(e)print("no prog"))
    #Save dfs needed for analysis in Rdata (the cache checked at the top of this file)
    save(list=c("use_thresh",
                "quality_control_df",
                "lib_sizes",
                "heat_data_ord",
                "all_features",
                "annot_genes_ord",
                "annot_fts"),file=save_file)
  } else{
    #Cached analysis found: skip recomputation and load the saved objects
    #Start prog bar
    .GlobalEnv$an_prog<-winProgressBar(title="DEAR Analysis and Visualization",
                                       label="Loading analysis...",
                                       initial=50,min=0,max=100,width=300)
    load(save_file,envir=.GlobalEnv)
  }
  #Get variables for plotting from dfs
  #Annotation vars for volcano plot
  .GlobalEnv$annot_fts_var<-tclVar(annot_fts)
  #All features for heatmap
  .GlobalEnv$all_features_var<-tclVar(all_features)
  #--------------------PLOT FUNCTIONS
  #Current plot type shown in the combobox; defaults to the Reads/CPM diagnostic
  .GlobalEnv$plot_type_var<-tclVar("Reads/CPM")
  .GlobalEnv$show_raw_cpm_var<-tclVar("1")
  .GlobalEnv$show_raw_cpm_val<-"1"
  #First 10 features shown in the heatmap (the "displated" typo is used consistently)
  .GlobalEnv$displated_features<-all_features[1:10]
  .GlobalEnv$cur_volc_point<-""
  #Shared ggplot2 text styling added to every rendered plot before drawing
  .GlobalEnv$plot_theme<-function(){
    theme(axis.text = element_text(size=15),
          axis.title = element_text(size=17,face="bold"),
          strip.text = element_text(size=15,face="bold"),
          legend.text = element_text(size=12),
          legend.title = element_text(size=12,face="bold"))
  }
  #Save the plot: writes the current global `plot` to <plot_dir>/<plot type>.png
  #(the "/" in "Reads/CPM" is replaced with "_") and opens the file with the default
  #viewer (shell.exec is Windows-only). Side effect: setwd() to plot_dir.
  .GlobalEnv$save_plot_function<-function(){
    setwd(plot_dir)
    ggsave(paste(str_replace_all(cur_plot_type,"/","_"),".png",sep=""),plot=plot,width=15,height=10)
    shell.exec(paste(plot_dir,"/",paste(str_replace_all(cur_plot_type,"/","_"),".png",sep=""),sep=""))
  }
  #Create the plot: builds a ggplot for the currently selected plot type (global
  #cur_plot_type), stores it in the global `plot`, and renders it into the GUI via
  #tkrplot. Called on every relevant GUI event.
  .GlobalEnv$create_plot<-function(){
    print("rendering plot")
    if(cur_plot_type=="Reads/CPM"){
      #Raw reads vs CPM for the samples selected in the listbox; falls back to the
      #first sample when nothing is selected
      .GlobalEnv$selected_seq_inds<-tkcurselection(seq_listbox)
      if(""%in%tclvalue(selected_seq_inds)){
        selected_seq_inds<-0
        tkselection.set(seq_listbox,0)
      }
      .GlobalEnv$selected_seqs<-seqs[as.numeric(selected_seq_inds)+1]
      print(selected_seqs)
      if(length(selected_seqs)>0){
        tmp_seqs<-quality_control_df[which(quality_control_df$Seq%in%selected_seqs),]
        #Red dashed line = CPM filter threshold; blue dashed line = 10 raw reads;
        #y axis is clipped to 0-15 reads
        .GlobalEnv$plot<-ggplot(tmp_seqs,aes(x=CPM,y=Reads))+
          geom_point()+
          xlab("Counts Per Million (CPM)")+
          scale_x_continuous(n.breaks=5,limits=c(0,use_thresh*2))+
          scale_y_continuous(limits=c(0,15),n.breaks=6)+
          geom_vline(xintercept=use_thresh,size=0.3,lty="dashed",col="red")+
          geom_hline(yintercept=10,size=0.3,lty="dashed",col="blue")+
          facet_grid(rows=vars(Seq))+
          theme_classic()
        if(min(tmp_seqs$Reads)>=10){
          .GlobalEnv$plot<-plot+
            geom_text(label="No reads below 10 to remove",x=use_thresh,y=12)
        }
      }
    } else if(cur_plot_type=="Library Sizes"){
      #Generate library size plot (per-sample totals from the DGEList)
      .GlobalEnv$plot<-ggplot(lib_sizes,aes(x=Seq,y=Size,fill=Class))+
        geom_col(col="black")+
        theme_classic()+
        scale_fill_manual(values=c("gray60","gray40"))+
        ylab("Library Size")+
        xlab("")+
        scale_y_continuous(n.breaks=10)+
        theme(axis.text.x = element_text(angle=45,hjust=1,vjust=1))
    } else if(cur_plot_type=="Library Distribution"){
      #Generate library distribution plot (log2 CPM boxplot per sample)
      .GlobalEnv$plot<-ggplot(quality_control_df,aes(x=Seq,y=Log2CPM))+
        geom_boxplot(size=0.1,outlier.size=0.1,outlier.alpha = 1,outlier.color="red",width=0.7)+
        xlab("")+
        theme_classic()+
        theme(axis.text.x = element_text(angle=45,hjust=1,vjust=1))
    } else if(cur_plot_type=="Volcano Plot"){
      #Volcano plot: red points are genes with FDR < 0.05
      .GlobalEnv$plot<-ggplot(annot_genes_ord,aes(x=logFC,y=-log10(FDR)))+
        geom_point(col=ifelse(annot_genes_ord$FDR<0.05,"red","black"),size=1)+
        scale_y_continuous(n.breaks=10)+
        scale_x_continuous(n.breaks=10)+
        geom_hline(yintercept=-log10(0.05),col="blue",lty="dashed")+
        xlab("Log(FC)")+
        ylab("-log10(False Discovery Rate (FDR))")+
        theme_classic()
      if(cur_volc_point!=""){
        #Highlight and annotate the feature picked in the listbox
        sel_pt_dat<-annot_genes_ord[which(rownames(annot_genes_ord)==cur_volc_point),]
        sel_pt_label<-paste("Feature: ",rownames(sel_pt_dat),"\n",
                            "LogFC: ",round(sel_pt_dat$logFC,3),"\n",
                            "FDR: ",round(sel_pt_dat$FDR,6),sep="")
        .GlobalEnv$plot<-plot+
          geom_point(data=sel_pt_dat,col="purple",shape=21,fill="yellow",size=6)+
          geom_label(data=sel_pt_dat,fill="yellow",col="purple",size=6,aes(x=min(annot_genes_ord$logFC),y=0),label=sel_pt_label,vjust=-0.5,hjust=0,label.padding=unit(0.15,"in"))+
          geom_label(data=sel_pt_dat,label=cur_volc_point,col="purple",fill="yellow",hjust=-0.2,vjust=1.2)
      }
    } else if(cur_plot_type=="Heatmap"){
      #Plot top 10 (the 10-feature window currently scrolled to; see update_graph_parms)
      .GlobalEnv$tops<-heat_data_ord[which(heat_data_ord$Feature%in%displated_features),]
      .GlobalEnv$plot<-ggplot(tops,aes(x=Class,y=factor(Feature,levels=rev(displated_features)),fill=Log2CPM))+
        geom_tile()+
        scale_fill_gradient(low="yellow",high="red",name="Log2(CPM)",n.breaks=5)+
        ylab("Feature")+
        xlab("")+
        theme_classic()
      if(show_raw_cpm_val=="1"){
        .GlobalEnv$plot<-plot+
          geom_text(aes(label=round(Log2CPM,3)))
      }
    }
    plot_fun<-function(){
      #Apply the shared text theme and draw; tkrplot captures the drawn result
      .GlobalEnv$plot_to_plot<-plot+plot_theme()
      return(plot(plot_to_plot))
    }
    #Render the plot
    plot_frame<-tkframe(analyze_gui)
    tkgrid(plot_frame,column=2,row=1,sticky="w",rowspan=1000)
    plot_widg<-tkrplot(plot_frame,fun=plot_fun,hscale=2.6,vscale=2.2)
    tkgrid(plot_widg)
  }
  #Update graph parms fun: rebuilds the left-hand "Graph Parameters" panel for the
  #plot type currently selected in the combobox, wires up its widgets, and triggers
  #an initial create_plot() for that type.
  .GlobalEnv$update_graph_parms<-function(){
    .GlobalEnv$cur_plot_type<-tclvalue(plot_type_var)
    #Remove the previous panel if one exists (first call has nothing to remove)
    tryCatch(tkgrid.remove(graph_parm_frame),error=function(e)print("not rendered yet"))
    render_title<-function(){
      graph_parms_ttl<-tklabel(graph_parm_frame,text="Graph Parameters",font=underline_font)
      tkgrid(graph_parms_ttl,row=1,column=1)
    }
    .GlobalEnv$graph_parm_frame<-tkframe(analyze_gui)
    tkgrid(graph_parm_frame,column=1,row=3)
    if(cur_plot_type=="Reads/CPM"){
      #Multi-select sample listbox with select/deselect-all buttons
      render_title()
      .GlobalEnv$deselect_seqs<-function(){
        lapply(1:length(seqs),function(x){
          tkselection.clear(seq_listbox,x-1)
        })
      }
      .GlobalEnv$select_seqs<-function(){
        lapply(1:length(seqs),function(x){
          tkselection.set(seq_listbox,x-1)
        })
        create_plot()
      }
      #Select sequence
      #Scrollbar
      scroll<-tkscrollbar(graph_parm_frame,repeatinterval=1,command=function(...)tkyview(seq_listbox,...))
      tkgrid(scroll,row=2,sticky="nsw",padx=0,column=1)
      .GlobalEnv$seq_listbox<-tklistbox(graph_parm_frame,listvariable=seqs_var,width=20,height=6,selectmode="multiple",exportselection=FALSE,yscrollcommand=function(...)tkset(scroll,...))
      tkgrid(seq_listbox,row=2,column=1,padx=20)
      tkbind(seq_listbox,"<<ListboxSelect>>",create_plot)
      sel_but<-tkbutton(graph_parm_frame,text="Select All",command=select_seqs)
      tkgrid(sel_but,row=3,column=1,pady=5)
      desel_but<-tkbutton(graph_parm_frame,text="Deselect All",command=deselect_seqs)
      tkgrid(desel_but,row=4,column=1,pady=0)
      create_plot()
    } else if(cur_plot_type=="Library Sizes"){
      #No parameters for this plot type
      create_plot()
    } else if(cur_plot_type=="Library Distribution"){
      #No parameters for this plot type
      create_plot()
    } else if(cur_plot_type=="Heatmap"){
      render_title()
      #Toggles the raw log2(CPM) text labels on the heatmap tiles; the checkbutton
      #fires before the tcl variable updates, hence the inverted value handling
      check_cpms<-function(){
        .GlobalEnv$raw_cpm_val<-tclvalue(show_raw_cpm_var)
        if(raw_cpm_val=="0"){
          .GlobalEnv$show_raw_cpm_val<-"1"
        } else if(raw_cpm_val=="1"){
          .GlobalEnv$show_raw_cpm_val<-"0"
        }
        print(show_raw_cpm_val)
        create_plot()
      }
      #Slides the 10-feature heatmap window to the listbox's current top row
      get_displayed_fts<-function(){
        .GlobalEnv$top_ft<-as.numeric(tknearest(feature_list,1))
        .GlobalEnv$displayed_vec<-c(top_ft:(top_ft+9))+1
        .GlobalEnv$displated_features<-all_features[displayed_vec]
        create_plot()
      }
      #Pops up the stats (LogFC/FDR/per-class log2 CPM) for the clicked feature
      select_a_feature<-function(){
        .GlobalEnv$cur_selected<-all_features[(as.numeric(tkcurselection(feature_list))+1)]
        .GlobalEnv$cur_selected_dat<-heat_data_ord[which(heat_data_ord$Feature==cur_selected),]
        message=paste("Feature: ",unique(cur_selected_dat$Feature),"\n",
                      "LogFC: ",round(unique(cur_selected_dat$LogFC),3),"\n",
                      "FDR: ",round(unique(cur_selected_dat$FDR),6),"\n",
                      "Group/Log2(CPM): ",paste(cur_selected_dat$Class,round(cur_selected_dat$Log2CPM,3),collapse=" "),sep="")
        tk_messageBox(message=message)
      }
      #Define scroll bar functions
      scroll_command<-function(...){
        tkset(scroll_fts,...)
        get_displayed_fts()
      }
      also_scroll_command<-function(...){
        tkyview(feature_list,...)
      }
      show_cpms<-tkcheckbutton(graph_parm_frame,text='Show Raw CPM',variable=show_raw_cpm_var)
      tkgrid(show_cpms,column=1,pady=5,row=1)
      tkbind(show_cpms,"<Button>",check_cpms)
      feature_ttl<-tklabel(graph_parm_frame,text="Features")
      tkgrid(feature_ttl,column=1,sticky="w",row=2)
      scroll_fts<-tkscrollbar(graph_parm_frame,repeatinterval=1,command=also_scroll_command)
      tkgrid(scroll_fts,column=1,row=3,sticky="nsw")
      feature_list<-tklistbox(graph_parm_frame,listvariable=all_features_var,height=10,width=20,selectmode="single",exportselection=FALSE,yscrollcommand=scroll_command)
      tkgrid(feature_list,column=1,sticky="w",padx=16,row=3)
      tkbind(feature_list,"<<ListboxSelect>>",select_a_feature)
      create_plot()
    } else if(cur_plot_type=="Volcano Plot"){
      #Single-select feature listbox; clicking highlights the point on the volcano
      select_volc_point<-function(){
        .GlobalEnv$cur_volc_point<-annot_fts[(as.numeric(tkcurselection(volc_feature_list))+1)]
        print(cur_volc_point)
        create_plot()
      }
      render_title()
      feature_ttl<-tklabel(graph_parm_frame,text="Features")
      tkgrid(feature_ttl,column=1,sticky="w",row=2)
      volcscroll<-tkscrollbar(graph_parm_frame,repeatinterval=1,command=function(...)tkyview(volc_feature_list,...))
      tkgrid(volcscroll,column=1,row=3,sticky="nsw")
      volc_feature_list<-tklistbox(graph_parm_frame,listvariable=annot_fts_var,height=10,width=20,selectmode="single",exportselection=FALSE,yscrollcommand=function(...)tkset(volcscroll,...))
      tkgrid(volc_feature_list,row=3,column=1,padx=16)
      tkbind(volc_feature_list,"<<ListboxSelect>>",select_volc_point)
      create_plot()
    }
  }
  #Analysis finished: close the progress bar (if it was ever created)
  tryCatch(close(an_prog),error=function(e)print("no prog"))
  #--------------------RENDER GUI
  analyze_gui<-tktoplevel()
  tkwm.geometry(analyze_gui,"900x600+200+20")
  tkwm.title(analyze_gui,"DEAR Plot Builder")
  #Top frame
  first_frame<-tkframe(analyze_gui)
  tkgrid(first_frame,pady=10,padx=10,column=1,row=1)
  title_lbl<-tklabel(first_frame,text="DEAR Plot Builder",font=title_font)
  tkgrid(title_lbl)
  #Plot lbl (placeholder; create_plot() grids its own frame over this area)
  plot_frame<-tkframe(analyze_gui)
  tkgrid(plot_frame,column=2,row=2,sticky="n",rowspan=1000,padx=20,columnspan=100)
  plot_display<-tklabel(plot_frame,text="")
  tkgrid(plot_display)
  #Type frame
  type_frame<-tkframe(analyze_gui,borderwidth=3,relief="raised")
  tkgrid(type_frame,pady=10,padx=10,column=1,row=2)
  #Select plot type
  type_lbl<-tklabel(type_frame,text="Select plot",justify="left",font=header_font)
  tkgrid(type_lbl,padx=35,row=1)
  type_sel<-ttkcombobox(type_frame,values=c("Reads/CPM","Library Sizes","Library Distribution","Heatmap","Volcano Plot"),textvariable=plot_type_var,width=15)
  tkgrid(type_sel,padx=35,pady=10,row=2)
  tkbind(type_sel,"<<ComboboxSelected>>",update_graph_parms)
  #Generate plot
  gen_frame<-tkframe(analyze_gui)
  tkgrid(gen_frame,column=1,row=4,pady=10)
  #Save plot
  save_but<-tkbutton(gen_frame,text="Save plot",font=header_font,command=save_plot_function)
  tkgrid(save_but,pady=5,padx=15,row=1,column=2)
  #Build the initial parameter panel / plot, then block until the window is closed
  update_graph_parms()
  tkwait.window(analyze_gui)
} else{
tk_messageBox(message=paste("Design matrix file not found in ",exp_directory,".\n\nEnsure you have annotated your reads!",sep=""))
}
|
76581bdf1337db52321a78100599709b62388042 | 371ce0d6d5ed76bd45298c82f5af6c80b44df6c0 | /man/get_pubmed_ids.Rd | 03f6dcb41fb7aa22e4525879aceab3e28e9ff73d | [] | no_license | dami82/easyPubMed | 87003b93f549b5a20ecf3d4b7c9899ba68e2ed82 | 2826071e5764edc5c36e604b894a3d380350fbde | refs/heads/master | 2023-06-07T19:50:23.415789 | 2023-05-29T20:32:42 | 2023-05-29T20:32:42 | 81,876,896 | 13 | 11 | null | 2023-05-29T20:32:43 | 2017-02-13T22:06:32 | R | UTF-8 | R | false | true | 2,150 | rd | get_pubmed_ids.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/easyPubMed_src.R
\name{get_pubmed_ids}
\alias{get_pubmed_ids}
\title{Simple PubMed Record Search}
\usage{
get_pubmed_ids(pubmed_query_string, api_key = NULL)
}
\arguments{
\item{pubmed_query_string}{is a string (character vector of length 1) that is used
for querying PubMed (standard PubMed syntax, see reference for details).}
\item{api_key}{String (character vector of length 1): user-specific API key to
increase the limit of queries per second. You can obtain your key from NCBI.}
}
\value{
The function returns a list. The list includes the number of records found on PubMed and
the first 20 PubMed IDs (UID) retrieved by the query. The list also includes QueryKey and WebEnv
that are required for a subsequent fetch_pubmed_data() call.
}
\description{
Query PubMed (Entrez) in a simple way via the PubMed API eSearch function.
Calling this function results in posting the query results on the PubMed History Server.
This allows later access to the resulting data via the fetch_pubmed_data() function,
or other easyPubMed functions.
}
\details{
This function will use the String provided as argument for querying PubMed via the eSearch
function of the PubMed API. The Query Term can include one or multiple words, as well as the standard
PubMed operators (AND, OR, NOT) and tags (i.e., [AU], [PDAT], [Affiliation], and so on). ESearch will
post the UIDs resulting from the search operation onto the History server so that they can be used directly
in a subsequent fetch_pubmed_data() call.
}
\examples{
try({
## Search for scientific articles written by Damiano Fantini
## and print the number of retrieved records to screen.
## Also print the retrieved UIDs to screen.
##
dami_on_pubmed <- get_pubmed_ids("Damiano Fantini[AU]")
print(dami_on_pubmed$Count)
print(unlist(dami_on_pubmed$IdList))
}, silent = TRUE)
}
\references{
\url{https://www.data-pulse.com/dev_site/easypubmed/}
\url{https://www.ncbi.nlm.nih.gov/books/NBK3827/#_pubmedhelp_Search_Field_Descriptions_and_}
}
\author{
Damiano Fantini \email{damiano.fantini@gmail.com}
}
|
d48a42cc09b625ce416d24982a42a578cdb1f1b0 | 99b03f6b5e20cf0a04aa4e4edc68759f65d7005b | /man/sentenceParse.Rd | 3c9563b18960945422c09e725b74ded8f51fd5ee | [] | no_license | jiunnguo/lexRankr | 521ba9546efc396483a09b82303f2a9ed0e4284c | 8239cd7698a62342b1dfdf7a20f88efe2a767e12 | refs/heads/master | 2021-01-13T14:58:46.923964 | 2017-01-13T19:57:02 | 2017-01-13T19:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,136 | rd | sentenceParse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sentenceParse.R
\name{sentenceParse}
\alias{sentenceParse}
\title{Parse text into sentences}
\usage{
sentenceParse(text, docId = "create")
}
\arguments{
\item{text}{Character vector to be parsed into sentences}
\item{docId}{A vector of document IDs with length equal to the length of \code{text}. If \code{docId == "create"} then doc IDs will be created as an index from 1 to \code{n}, where \code{n} is the length of \code{text}.}
}
\value{
A data frame with 3 columns and \code{n} rows, where \code{n} is the number of sentences found by the routine. Column 1: \code{docId} document id for the sentence. Column 2: \code{sentenceId} sentence id for the sentence. Column 3: \code{sentence} the sentences found in the routine.
}
\description{
Parse the elements of a character vector into a dataframe of sentences with additional identifiers.
}
\examples{
sentenceParse(c("Bill is trying to earn a Ph.D.", "You have to have a 5.0 GPA."))
sentenceParse(c("Bill is trying to earn a Ph.D.", "You have to have a 5.0 GPA."),
docId=c("d1","d2"))
}
|
bb65856e985a04c3043afe40b7a8e02d9046f292 | 38d64d099cfef6f39fa08aa6364b0464a988102d | /bipartite/R/plotPAC.R | 3579a6c883a0123367aff786beb56dce473bbe77 | [] | no_license | biometry/bipartite | 004b458f73c25f64de5bda3c4c9e2c861aec983a | 2fb52577d297480a3a1c1c707a3549ac97e5d08c | refs/heads/master | 2023-06-23T12:37:01.423686 | 2023-03-01T15:22:14 | 2023-03-01T15:22:14 | 24,846,853 | 37 | 16 | null | 2020-05-27T11:07:11 | 2014-10-06T13:26:44 | R | UTF-8 | R | false | false | 2,997 | r | plotPAC.R | plotPAC <- function(web, scaling=1, plot.scale=1, fill.col=rgb(.2,.2,.2,.5), arrow.col=rgb(.5,.5,.5,.5), outby=1, label=TRUE, text=TRUE, circles=FALSE, radius=1, text.cex=1){
  # function to draw a circular PAC-plot, as in Morris et al. 2005
  # PAC is the "Potential for Apparent Competition" and is computed using the
  # function with the same name (PAC) from this package
  # by default, this function yields a plot for the lower trophic level
  # author: Carsten Dormann, 07 Sept 2009
  #
  # Arguments:
  # web        a community matrix with two trophic levels (rows = species drawn
  #            on the circle)
  # scaling    multiplier for the point/polygon sizes
  # plot.scale multiplier for the plotting region
  # fill.col   colour used to fill the self-loop (diagonal PAC) discs
  # arrow.col  colour(s) of the PAC polygons, recycled to NROW(web) if shorter
  # outby      radial position of the labels relative to the circle
  # label/text draw labels; species names if text=TRUE, indices otherwise
  # circles    draw a circle of radius 0.07*radius around each label position
  # text.cex   size of the label text
  #
  # Returns nothing useful; called for its side effect of drawing the plot.
  toCartesian <- function (t1, rP) {
    # polar (angle t1, radius rP) -> cartesian (x, y); adapted from package
    # fisheyeR (not worth a dependency for three lines of code)
    x1 = rP * cos(t1)
    y1 = rP * sin(t1)
    return(cbind.data.frame(x = x1, y = y1))
  }
  toPolar <- function (xy){
    # cartesian (x, y) -> polar (angle t1, radius rP); same source as
    # toCartesian, vectorised
    xy <- t(as.matrix(xy))
    t1 = atan2(xy[,2], xy[,1])
    rP = sqrt(xy[,1]^2 + xy[,2]^2)
    return(c(t1 = t1, rP = rP))
  }
  pointsoncircle <- function(N){
    # positions of N equidistant points on the unit circle; the duplicated
    # closing point (angle 2*pi) is dropped, and columns are taken in (2:1)
    # order before being relabelled x/y
    rhos <- seq(0, 2*pi, length=N+1)
    out <- as.matrix(toCartesian(rhos, 1)[-(N+1),2:1])
    colnames(out) <- c("x", "y")
    out
  }
  coords <- pointsoncircle(NROW(web))
  # species "abundance" = row sums; used to scale symbol sizes throughout
  rs <- rowSums(web)
  # plot position and size of species:
  par(mar=c(0,0,0,0)+.1)
  plot(coords, cex=sqrt(rs)*0.75*scaling, xlab="", ylab="", axes=FALSE, xlim=c(-1, 1)*1.25*plot.scale, ylim=c(-1, 1)*1.25*plot.scale,asp=1)
  # compute PACs (PAC() is defined elsewhere in this package):
  PV <- PAC(web)
  # plot self-loop (i.e. diagonals) as filled disc inside each species' circle:
  D <- diag(PV)
  points(coords, cex=sqrt(rs)*0.75*scaling*D, pch=16, col=fill.col)
  # recycle the polygon colours so indexing by species i below is safe
  if (length(arrow.col) < NROW(web)) arrow.col <- rep(arrow.col, len=NROW(web))
  # draw PAC-triangles (polygons!); both directions (i->j and j->i) are drawn
  # as the two ends of one quadrilateral:
  for (i in (1:NROW(PV))[order(rs)]){
    for (j in (1:NROW(PV))[order(rs)]){
      if (i <= j) next # dAB and dBA are drawn simultaneously
      arrow.direction <- toPolar(coords[j,] - coords[i,])[1] #arrow from j to i
      # direction orthogonal to the connecting line, used to give the band width
      orthog <- arrow.direction + pi/2
      # a cex=1 is 0.05 units diameter
      # to scale the absolute width to cex-equivalents, we need to multiply with 0.05:
      width.i <- PV[j, i]/2*0.025 *sqrt(rs[i]) *0.75*scaling # /2 because the width goes in both directions later
      width.j <- PV[i, j]/2*0.025 *sqrt(rs[j]) *0.75*scaling
      upper.i <- coords[i,] + toCartesian(orthog, width.i)
      lower.i <- coords[i,] - toCartesian(orthog, width.i)
      upper.j <- coords[j,] + toCartesian(orthog, width.j)
      lower.j <- coords[j,] - toCartesian(orthog, width.j)
      polygon(rbind(upper.i, lower.i, lower.j, upper.j), col=arrow.col[i], border=NA) #from j to i
    }
  }
  if (label){
    if (text) {
      text(coords*1.25*outby, rownames(web), cex=text.cex)
    } else {
      text(coords*1.25*outby, as.character(1:NROW(web)), cex=text.cex)
    }
  }
  if (circles) symbols(coords*1.25*outby, circles=rep(0.07*radius, NROW(web)), add=TRUE, inches=FALSE)
}
|
a0f9d3e7141a1510dcf539ebe3aa610ef0a2a565 | 4b100d411187c8e0718d176e8f51846e514b272e | /R/GC.content.R | a269918a1f1c90c180f4b413f41fbe9d78e863f3 | [] | no_license | cran/vhcub | 4897cd01406ddbcb426e2ed395bd9944faf81364 | d7ea958270d0b68d118109f60612cb664ec9385a | refs/heads/master | 2020-12-22T23:05:44.804924 | 2019-11-15T11:00:02 | 2019-11-15T11:00:02 | 236,957,357 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,393 | r | GC.content.R | #' GC content
#'
#' Calculates overall GC content as well as GC at first, second, and third codon positions.
#'
#' @usage GC.content(df.virus)
#'
#' @param df.virus data frame with seq_name and its DNA sequence.
#'
#' @return A data.frame with overall GC content as well as GC at first, second, and third codon positions of all DNA sequence from df.virus.
#'
#' @import seqinr
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#'
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#'
#' # Calculate GC content
#' gc.df <- GC.content(fasta.v)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{ali.mo.anwar@std.agr.cu.edu.eg} and Mohmed Soudy \email{MohmedSoudy2009@gmail.com}
GC.content <- function(df.virus) {
  # Build one row of GC statistics per sequence, collecting the rows in a
  # list and binding them once at the end: rbind() inside a loop is O(n^2),
  # and seq_along() (unlike the previous 1:length(...)) handles an empty
  # input correctly instead of iterating over c(1, 0).
  rows <- lapply(seq_along(df.virus$seq_name), function(i_seq) {
    sequence <- as.character(df.virus$sequence[[i_seq]])
    seq.chars <- s2c(sequence)        # split into single characters once, reused below
    data.frame(
      gene.name = df.virus$seq_name[[i_seq]],
      GC  = GC(seq.chars),            # overall GC fraction
      GC1 = GCpos(seq.chars, "1"),    # GC at codon position 1
      GC2 = GCpos(seq.chars, "2"),    # GC at codon position 2
      GC3 = GCpos(seq.chars, "3")     # GC at codon position 3
    )
  })
  # Preserve the historical empty-input result (an empty data.frame).
  if (length(rows) == 0) {
    return(data.frame())
  }
  do.call(rbind, rows)
}
|
a36c72286af93a88ed2b596fbde3993090ec4e43 | 5bac3ce8fa5ce7921b2c318d46500020b5b4d3d1 | /man/show.Rd | bf294129a4830b69fd7eee7730d83dee181d928a | [
"Apache-2.0"
] | permissive | CDK-R/fingerprint | ce621309e28d00e18e1a284795418e228c507895 | 8da6b320856538a05d5502b8be5191193d714e34 | refs/heads/master | 2022-10-26T09:01:21.536490 | 2022-10-16T23:08:52 | 2022-10-16T23:08:52 | 156,985,877 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 529 | rd | show.Rd | \name{show}
\alias{show,fingerprint-method}
\alias{show,featvec-method}
\alias{show,feature-method}
\title{
String Representation of a Fingerprint or Feature
}
\description{
Simply summarize the fingerprint or feature
}
\usage{
\S4method{show}{fingerprint}(object)
\S4method{show}{featvec}(object)
\S4method{show}{feature}(object)
}
\arguments{
\item{object}{
An object of class \code{fingerprint}, \code{featvec} or \code{feature}
}
}
\author{Rajarshi Guha \email{rajarshi.guha@gmail.com}}
\keyword{logic}
|
2a3af8b6f154aa7a7e096ccace49bdc7da65029e | 6dc2d9ef6198ede44345bdea09aad12107e4d5d9 | /functions/generaPdfTablas.R | ad91d15880edc6f9694a7f44dcf7e13cadc7d25b | [] | no_license | laparcela/modelo_red_booleana_milpa_rafa | 0785af645855f393712c69fb26ceb6b4447cd75f | 2c593eebe211f9af2443e74b33446583a276e049 | refs/heads/master | 2021-07-17T01:28:33.787325 | 2017-10-23T21:12:18 | 2017-10-23T21:12:18 | 108,016,086 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,373 | r | generaPdfTablas.R | ############################################################
### Function for generating a pdf from stargazer outputs ###
############################################################
#
# Wraps one or more stargazer tables in a minimal Spanish LaTeX document and
# compiles it with pdflatex.
#
# x        model(s)/data passed straight through to stargazer()
# nombre   output file name (used for the .tex file and the pdflatex run)
# width    pdf page width in inches
# height   pdf page height in inches
# columnas column labels forwarded to stargazer()
# var_resp dependent-variable labels forwarded to stargazer()
# titulo   table title
# summary  forwarded to stargazer(summary.logical=)
# texdir   directory prefix used when invoking pdflatex; defaults to the
#          previously hard-coded location so existing callers are unaffected
generaPdfTablas <- function(x=NULL, nombre=NULL, width=8, height=11, columnas=NULL,
                            var_resp=NULL, titulo="", summary=F,
                            texdir="~/Dropbox/Chido/") {
  require(stargazer)
  # First pass: let stargazer write the bare table body to `nombre`.
  stargazer(x, out=nombre, column.labels=columnas, dep.var.caption="Variable dependiente",
            dep.var.labels=var_resp, title=titulo, notes.label="", summary.logical=summary)
  # Re-read the table and wrap it in a standalone preamble, overwriting the
  # same file (kept in a separate variable instead of clobbering `x`).
  tabla <- readLines(nombre)
  cat(paste0(
    "\\documentclass[spanish,11pt]{article}\n",
    "\\pdfpagewidth ", width, "in\n",
    "\\pdfpageheight ", height, "in\n",
    "\\usepackage[spanish]{babel}\n",
    "\\selectlanguage{spanish}\n",
    "\\usepackage[utf8]{inputenc}\n",
    "\\begin{document}\n",
    paste0(tabla, collapse="\n"),
    "\n\\end{document}",
    collapse="\n"),
    file=nombre)
  # Compile: expand `~` before quoting so tilde paths still work, and quote
  # the path so file names containing spaces no longer break the command.
  system(paste0("pdflatex ", shQuote(path.expand(paste0(texdir, nombre)))))
}
#notes="Los resultados se muestran en el formato usual de los resúmenes para modelos de regresión realizados en R. Estos se leen\\ considerando el tratamiento de referencia (Milpa con desyerbe manual sin perturbaciones) <y observando los coeficientes\\asociados a cada covariable, si el coeficiente es signifi<cativo (p<0.05) entonces el valor correspondiente \\tendrá uno o más *, de no ser así, el coeficiente será igual al valor de referencia"
|
79c041de0bc16978bb172ae360c31208cca15b09 | 21a76218b0882cf35e85c2297b5ee5d58e4da64e | /R/center.R | 3d32e598fe9cc7e83cd4ae5cb476f7b4219d896d | [
"MIT"
] | permissive | Zack-83/maRgheRitaR | 29ca2df425472f11845c1069d1db5e149fb79d9c | 45d10a7854dd9ee53ef2745501d24df03137292c | refs/heads/master | 2020-03-22T21:50:31.213511 | 2018-07-12T14:55:28 | 2018-07-12T14:55:28 | 140,716,790 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 514 | r | center.R | #' Centering datasets
#'
#' Shifts the values of a numeric vector so that their mean equals a chosen
#' midpoint, leaving all pairwise differences between values unchanged.
#' The classic use is mean-centering (the default midpoint of 0) before a
#' regression or principal component analysis.
#'
#' @param dataset The numeric vector to be centered
#' @param desired The numeric midpoint value around which the data will be centered (default: 0)
#'
#' @return A new vector containing the original data centered around the desired value
#'
#' @examples
#' center(c(1, 2, 3))    # returns -1 0 1
#' center(c(4, 5, 6), 2) # returns 1 2 3
center <- function(dataset, desired = 0) {
  # Subtracting the mean recenters at 0; adding `desired` shifts to the target.
  dataset - mean(dataset) + desired
}
|
e0524668b2ed606db29bda2ef3b7f9fbb270acef | fe872a4ad8d46e7df60dd19617fb14e988f07ed8 | /R/m2-mixt-three-sided-v2.R | 56c6909a2e7bde2b5b95d16748c42d5b8d484e93 | [
"MIT"
] | permissive | chaudhary-amit/acblm | 638aa75273f6f4522279634e67b3b831036d0a03 | b6aa44163c1f2782becbbef6b6f71d5fe4b85f62 | refs/heads/master | 2023-04-25T06:25:22.124853 | 2021-05-18T15:49:43 | 2021-05-18T15:49:43 | 368,360,787 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,450 | r | m2-mixt-three-sided-v2.R | # This is an em for the exognous mobility case.
# It will estimate a non-stationary model, and will be able to impose monotonicity
# ------------- Initializing functions ---------------------
#' Draw a random starting model for the three-sided EM with
#' endogenous mobility and multinomial type probabilities
#' @export
m2.mixt.new <- function(nk, nf, nb, fixb = F, stationary = F) {
  # Random draws happen in the same order as always (A1, S1, A2, S2, pk1,
  # pk0) so that seeded runs reproduce the historical starting values.
  mod <- list()

  # Period-1 wage means/dispersions, indexed (market b, firm class l, worker type k).
  mod$A1 <- array(0.9 * (1 + 0.5 * rnorm(nb * nf * nk)), c(nb, nf, nk))
  mod$S1 <- 0.3 * array(1 + 0.5 * runif(nb * nf * nk), c(nb, nf, nk))

  # Period-2 wage means/dispersions.
  mod$A2 <- array(0.9 * (1 + 0.5 * rnorm(nb * nf * nk)), c(nb, nf, nk))
  mod$S2 <- 0.3 * array(1 + 0.5 * runif(nb * nf * nk), c(nb, nf, nk))

  # Type proportions Pr(k | b1,b2,l1,l2) for movers, drawn from a flat Dirichlet.
  mod$pk1 <- rdirichlet(nb * nb * nf * nf, rep(1, nk))
  dim(mod$pk1) <- c(nb * nb, nf * nf, nk)

  # Type proportions Pr(k | b,l) for stayers.
  mod$pk0 <- rdirichlet(nb * nf, rep(1, nk))
  dim(mod$pk0) <- c(nb, nf, nk)

  # Cell counts: movers follow a Toeplitz profile, stayers are uniform.
  mod$NNm <- nf * nb * toeplitz(ceiling(seq(1000, 100, l = nf * nb)))
  dim(mod$NNm) <- c(nb, nb, nf, nf)
  mod$NNs <- array(300000 / (nf * nb), c(nb, nf))

  mod$nb <- nb
  mod$nk <- nk
  mod$nf <- nf

  # Sort the means within each (b, l) cell so worker types are ordered.
  for (b in seq_len(nb)) {
    for (l in seq_len(nf)) {
      mod$A1[b, l, ] <- sort(mod$A1[b, l, ])
      mod$A2[b, l, ] <- sort(mod$A2[b, l, ])
    }
  }

  if (fixb) {
    # Restrict period-2 means to the period-1 means plus a type-independent shift.
    mod$A2 <- spread(rowMeans(mod$A2), 2, nk) + mod$A1 - spread(rowMeans(mod$A1), 2, nk)
  }
  if (stationary) {
    mod$A2 <- mod$A1
  }
  mod
}
# ------------- Simulating functions ---------------------
#' Using the model, simulates a dataset of movers
#'
#' For every cell (b1, l1, b2, l2) -- origin market/class and destination
#' market/class -- NNm[b1,b2,l1,l2] movers are drawn: first a latent worker
#' type k from the mover type proportions pk1, then the two period wages
#' from the matching normal components of the model.
#'
#' @param model model list (as built by m2.mixt.new); uses A1, S1, A2, S2,
#'   pk1 and the dimensions nb, nf, nk
#' @param NNm array (nb x nb x nf x nf) of mover counts per cell
#' @return data.table with columns k, y1, y2, m1, m2, j1, j2 (one row per mover)
#' @export
m2.mixt.simulate.movers <- function(model,NNm=NA) {
  # preallocate one slot per simulated mover
  J1 = array(0,sum(NNm))
  J2 = array(0,sum(NNm))
  M1 = array(0,sum(NNm))
  M2 = array(0,sum(NNm))
  Y1 = array(0,sum(NNm))
  Y2 = array(0,sum(NNm))
  K = array(0,sum(NNm))
  A1 = model$A1
  A2 = model$A2
  S1 = model$S1
  S2 = model$S2
  pk1 = model$pk1
  nb = model$nb
  nk = model$nk
  nf = model$nf
  i =1
  for (b1 in 1:nb) for (l1 in 1:nf) for (b2 in 1:nb) for (l2 in 1:nf) {
    # I indexes the rows belonging to the current (b1, l1, b2, l2) cell
    I = i:(i+NNm[b1,b2,l1,l2]-1)
    ni = length(I)
    # mm/jj are the flattened (origin, destination) indices used to look up pk1
    mm = b1 + nb*(b2 -1)
    jj = l1 + nf*(l2 -1)
    M1[I] = b1
    M2[I] = b2
    J1[I] = l1
    J2[I] = l2
    # draw the latent worker type k
    Ki = sample.int(nk,ni,T,pk1[mm,jj,])
    K[I] = Ki
    # draw the period-1 wage in the origin cell and the period-2 wage in the
    # destination cell, conditional on the imputed type
    Y1[I] = A1[b1,l1,Ki] + S1[b1,l1,Ki] * rnorm(ni)
    Y2[I] = A2[b2,l2,Ki] + S2[b2,l2,Ki] * rnorm(ni)
    i = i + NNm[b1,b2,l1,l2]
  }
  jdatae = data.table(k=K,y1=Y1,y2=Y2,m1=M1,m2=M2,j1=J1,j2=J2)
  return(jdatae)
}
#' Using the model, simulates a dataset of stayers.
#'
#' For every (market b1, firm class l1) cell, NNs[b1,l1] stayers are drawn:
#' a latent type k from the stayer proportions pk0, then both period wages
#' from the same (b1, l1, k) components. Stayers do not change cell, so the
#' output sets m2 = m1 and j2 = j1.
#'
#' @param model model list; uses A1, S1, A2, S2, pk0 and dimensions nb, nf, nk
#' @param NNs matrix (nb x nf) of stayer counts per cell
#' @return data.table with columns k, y1, y2, m1, m2, j1, j2, x (x is fixed at 1)
#' @export
m2.mixt.simulate.stayers <- function(model,NNs) {
  # preallocate one slot per simulated stayer
  # NOTE(review): M2 and J2 are allocated but never filled; the output below
  # reuses M1/J1 for the period-2 columns instead.
  M1 = array(0,sum(NNs))
  M2 = array(0,sum(NNs))
  J1 = array(0,sum(NNs))
  J2 = array(0,sum(NNs))
  Y1 = array(0,sum(NNs))
  Y2 = array(0,sum(NNs))
  K = array(0,sum(NNs))
  A1 = model$A1
  A2 = model$A2
  S1 = model$S1
  S2 = model$S2
  pk0 = model$pk0
  nb = model$nb
  nk = model$nk
  nf = model$nf
  # ------ draw K, Y1, Y2 cell by cell ------- #
  i =1
  for (b1 in 1:nb) for (l1 in 1:nf) {
    # I indexes the rows belonging to the current (b1, l1) cell
    I = i:(i+NNs[b1,l1]-1)
    ni = length(I)
    M1[I] = b1
    J1[I] = l1
    # draw the latent worker type k
    Ki = sample.int(nk,ni,T,pk0[b1,l1,])
    K[I] = Ki
    # draw both period wages from the same (b1, l1, k) component
    Y1[I] = A1[b1,l1,Ki] + S1[b1,l1,Ki] * rnorm(ni)
    Y2[I] = A2[b1,l1,Ki] + S2[b1,l1,Ki] * rnorm(ni)
    i = i + NNs[b1,l1]
  }
  sdatae = data.table(k=K,y1=Y1,y2=Y2,m1=M1,m2=M1,j1=J1,j2=J1,x=1)
  return(sdatae)
}
#' Using the model, simulates a dataset of stayers.
#'
#' Variant of m2.mixt.simulate.stayers for a model without the worker-market
#' dimension: counts are given per (observable x, firm class l1) cell and the
#' type proportions pk0 are indexed by x.
#'
#' NOTE(review): A1/S1/A2/S2 are indexed 2-dimensionally here ([l1, k]), so
#' this expects a model whose wage arrays are nf x nk, unlike the 3-D arrays
#' produced by m2.mixt.new -- confirm which model variant callers pass in.
#'
#' @param model model list with 2-D wage arrays and pk0 indexed (x, l1, k)
#' @param NNsx matrix (nx x nf) of stayer counts per (x, l1) cell
#' @return data.table with columns k, y1, y2, j1, j2, x
#' @export
m2.mixt.simulate.stayers.withx <- function(model,NNsx) {
  # preallocate one slot per simulated stayer
  J1 = array(0,sum(NNsx))
  J2 = array(0,sum(NNsx))
  Y1 = array(0,sum(NNsx))
  Y2 = array(0,sum(NNsx))
  K = array(0,sum(NNsx))
  X = array(0,sum(NNsx))
  A1 = model$A1
  A2 = model$A2
  S1 = model$S1
  S2 = model$S2
  pk0 = model$pk0
  nk = model$nk
  nf = model$nf
  nx = nrow(NNsx)
  # ------ draw K, Y1, Y2 cell by cell ------- #
  i =1
  for (l1 in 1:nf) for (x in 1:nx) {
    # I indexes the rows belonging to the current (x, l1) cell
    I = i:(i+NNsx[x,l1]-1)
    ni = length(I)
    J1[I] = l1
    # draw the latent worker type k
    Ki = sample.int(nk,ni,T,pk0[x,l1,])
    K[I] = Ki
    X[I] = x
    # draw both period wages from the same (l1, k) component
    Y1[I] = A1[l1,Ki] + S1[l1,Ki] * rnorm(ni)
    Y2[I] = A2[l1,Ki] + S2[l1,Ki] * rnorm(ni)
    i = i + NNsx[x,l1]
  }
  sdatae = data.table(k=K,y1=Y1,y2=Y2,j1=J1,j2=J1,x=X)
  return(sdatae)
}
#' Impute types and wages for movers
#'
#' For each (j1, j2) cell of jdatae, draws a latent type k_imp from the mover
#' proportions pk1 and simulated wages y1_imp/y2_imp from the model
#' components, attached as new columns; the original columns are untouched.
#'
#' NOTE(review): A1/S1/A2/S2 are indexed as [j, k] and pk1 as [jj, k], i.e.
#' this expects the 2-D (no worker-market) model variant -- confirm.
#'
#' @param jdatae movers data.table with columns j1 and j2
#' @param model model list providing A1, S1, A2, S2, pk1, nk, nf
#' @return a copy of jdatae with added columns k_imp, y1_imp, y2_imp
#' @export
m2.mixt.impute.movers <- function(jdatae,model) {
  A1 = model$A1
  S1 = model$S1
  pk1 = model$pk1
  A2 = model$A2
  S2 = model$S2
  nk = model$nk
  nf = model$nf
  # work on a copy so the caller's data.table is not modified by reference
  jdatae.sim = copy(jdatae)
  jdatae.sim[, c('k_imp','y1_imp','y2_imp') := {
    ni = .N
    # flattened (j1, j2) index into the mover type proportions
    jj = j1 + nf*(j2-1)
    Ki = sample.int(nk,.N,prob = pk1[jj,],replace=T)
    # draw the two period wages, origin class for y1 and destination for y2
    Y1 = rnorm(ni)*S1[j1,Ki] + A1[j1,Ki]
    Y2 = rnorm(ni)*S2[j2,Ki] + A2[j2,Ki]
    list(Ki,Y1,Y2)
  },list(j1,j2)]
  return(jdatae.sim)
}
#' Impute types and wages for stayers
#'
#' For each (j1, x) cell of sdatae, draws a latent type k_imp from the
#' stayer proportions pk0 and simulated wages y1_imp/y2_imp, attached as
#' new columns; the original columns are untouched.
#'
#' @param sdatae stayers data.table with columns j1 and x
#' @param model model list providing A1, S1, A2, S2, pk0, nk, nf
#' @return a copy of sdatae with added columns k_imp, y1_imp, y2_imp
#' @export
m2.mixt.impute.stayers <- function(sdatae,model) {
  A1 = model$A1
  S1 = model$S1
  pk0 = model$pk0
  A2 = model$A2
  S2 = model$S2
  nk = model$nk
  nf = model$nf
  # work on a copy so the caller's data.table is not modified by reference
  sdatae.sim = copy(sdatae)
  sdatae.sim[, c('k_imp','y1_imp','y2_imp') := {
    ni = .N
    Ki = sample.int(nk,.N,prob = pk0[x,j1,],replace=T)
    # both wages are drawn from the period-1 class j1, which is only correct
    # for stayers (a mover's period-2 wage would need j2)
    Y1 = A1[j1,Ki] + S1[j1,Ki] * rnorm(ni)
    Y2 = A2[j1,Ki] + S2[j1,Ki] * rnorm(ni) # false for movers
    list(Ki,Y1,Y2)
  },list(j1,x)]
  return(sdatae.sim)
}
#' Simulates data (movers and stayers) and attached firms ids. Firms have all same expected size.
#'
#' Builds a full simulated sample: stayers and movers are drawn from the
#' model, then synthetic firm ids (f1/f2) and worker-market group ids
#' (g1/g2) are generated for the stayers and sampled for the movers from the
#' firms/groups observed among stayers of the same class/market. True class
#' and market labels are kept in j1true/j2true and g1true/g2true.
#'
#' @param model model list used by the simulate functions
#' @param fsize expected number of stayers per firm
#' @param msize expected number of stayers per worker-market group
#' @param smult multiplier applied to the stayer counts model$NNs
#' @param mmult multiplier applied to the mover counts model$NNm
#' @return list(sdata=stayers, jdata=movers) with id columns attached
#' @export
m2.mixt.simulate.sim <- function(model,fsize,msize,smult=1,mmult=1) {
  jdata = m2.mixt.simulate.movers(model,model$NNm*mmult)
  sdata = m2.mixt.simulate.stayers(model,model$NNs*smult)
  sim = list(sdata=sdata,jdata=jdata)
  # create firm ids for stayers: within each class j1, .N/fsize firms are
  # drawn uniformly, so firms have the same expected size
  sdata <- sdata[,f1 := paste("F",j1 + model$nf*(sample.int(.N/fsize,.N,replace=T)-1),sep=""),j1]
  sdata <- sdata[,j1b:=j1]
  sdata <- sdata[,j1true := j1]
  # movers inherit firm ids by sampling among the stayer firms of the same class
  jdata <- jdata[,j1true := j1][,j2true := j2]
  jdata <- jdata[,j1c:=j1]
  jdata <- jdata[,f1:=sample( unique(sdata[j1b %in% j1c,f1]) ,.N,replace=T),j1c]
  jdata <- jdata[,j2c:=j2]
  jdata <- jdata[,f2:=sample( unique(sdata[j1b %in% j2c,f1]) ,.N,replace=T),j2c]
  # drop the temporary matching columns
  jdata$j2c=NULL
  jdata$j1c=NULL
  sdata$j1b=NULL
  # stayers keep the same firm in both periods
  sdata[,f2:=f1]
  # same construction for the worker-market group ids g1/g2
  sdata <- sdata[,g1 := paste("M",m1 + model$nb*(sample.int(.N/msize,.N,replace=T)-1),sep=""),m1]
  sdata <- sdata[,g1b:=m1]
  sdata <- sdata[,g1true := m1]
  jdata <- jdata[,g1true := m1][,g2true := m2]
  jdata <- jdata[,g1c:=m1]
  jdata <- jdata[,g1:=sample( unique(sdata[g1b %in% g1c,g1]) ,.N,replace=T),g1c]
  jdata <- jdata[,g2c:=m2]
  jdata <- jdata[,g2:=sample( unique(sdata[g1b %in% g2c,g1]) ,.N,replace=T),g2c]
  jdata$g2c=NULL
  jdata$g1c=NULL
  sdata$g1b=NULL
  sdata[,g2:=g1]
  sim = list(sdata=sdata,jdata=jdata)
  return(sim)
}
#' Simulates data (movers and stayers)
#'
#' Clustered variant of m2.mixt.simulate.sim: firm ids are generated per firm
#' class and group ids per worker market, and movers then sample their ids
#' from the stayers that share both the same market and the same class.
#'
#' NOTE(review): the first per-(b1,l1) loop below assigns f1 and g1 but both
#' are immediately overwritten by the following per-l1 and per-b1 loops, so
#' it only consumes random draws; b1_2d/l1_2d are computed but unused.
#' Kept as-is to preserve the RNG stream of seeded simulations.
#'
#' @param model model list used by the simulate functions
#' @param fsize expected number of stayers per firm
#' @param msize expected number of stayers per worker-market group (currently fsize is used for groups too)
#' @param smult multiplier applied to the stayer counts model$NNs
#' @param mmult multiplier applied to the mover counts model$NNm
#' @return list(sdata=stayers, jdata=movers) with id columns attached
#' @export
m2.mixt.simulate.sim.clust <- function(model,fsize,msize,smult=1,mmult=1) {
  jdata = m2.mixt.simulate.movers(model,model$NNm*mmult)
  sdata = m2.mixt.simulate.stayers(model,model$NNs*smult)
  sim = list(sdata=sdata,jdata=jdata)
  # create some firm ids
  #sdata <- sdata[,f1 := paste("F",j1 + model$nf*(sample.int(.N/fsize,.N,replace=T)-1),sep=""),c("m1","j1")]
  # loop change
  # mapping from the (market, class) pair to a single flattened index
  mapping = array(0,c(model$nb*model$nf,3))
  # create the mappings
  ci=1
  for (b1 in 1:model$nb) for (l1 in 1:model$nf){
    mapping[ci,1]= b1
    mapping[ci,2]= l1
    mapping[ci,3]= ci
    ci=ci+1
  }
  map2d = data.table(b_3d = mapping[,1],f_3d = mapping[,2],b_2d=1,f_2d=mapping[,3])
  # dead loop: f1/g1 set here are overwritten by the two loops that follow
  for (b1 in 1:model$nb) for (l1 in 1:model$nf) {
    b1_2d = map2d[b_3d == b1 & f_3d == l1, b_2d]
    l1_2d = map2d[b_3d == b1 & f_3d == l1, f_2d]
    #sdata[(m1==b1) & (j1==l1), f1 := paste("F",l1_2d + model$nf*model$nb*(sample.int(.N/fsize,.N,replace=T)-1),sep="")]
    sdata[(m1==b1) & (j1==l1), f1 := paste("F",l1 + model$nf*model$nb*(sample.int(.N/fsize,.N,replace=T)-1),sep="")]
    sdata[(m1==b1) & (j1==l1), g1 := paste("M",b1 + model$nf*model$nb*(sample.int(.N/fsize,.N,replace=T)-1),sep="")]
  }
  # effective firm ids: one pool of firms per firm class
  for (l1 in 1:model$nf) {
    sdata[(j1==l1), f1 := paste("F",l1 + model$nf*model$nb*(sample.int(.N/fsize,.N,replace=T)-1),sep="")]
  }
  # effective group ids: one pool of groups per worker market
  for (b1 in 1:model$nb) {
    sdata[(m1==b1), g1 := paste("M",b1 + model$nf*model$nb*(sample.int(.N/fsize,.N,replace=T)-1),sep="")]
  }
  sdata <- sdata[,j1b:=j1]
  sdata <- sdata[,j1true := j1]
  sdata <- sdata[,g1b:=m1]
  sdata <- sdata[,g1true := m1]
  jdata <- jdata[,j1true := j1][,j2true := j2]
  jdata <- jdata[,j1c:=j1]
  jdata <- jdata[,g1true := m1][,g2true := m2]
  jdata <- jdata[,g1c:=m1]
  jdata <- jdata[,g2c:=m2]
  # movers sample their firms among stayers with the same (market, class)
  jdata <- jdata[,f1:=sample( unique(sdata[(g1b %in% g1c) & (j1b %in% j1c) ,f1]) ,.N,replace=T),.(g1c,j1c)]
  jdata <- jdata[,j2c:=j2]
  jdata <- jdata[,f2:=sample( unique(sdata[(g1b %in% g2c) & (j1b %in% j2c) ,f1]) ,.N,replace=T),.(g2c,j2c)]
  jdata$j2c=NULL
  jdata$j1c=NULL
  sdata$j1b=NULL
  sdata[,f2:=f1]
  #sdata <- sdata[,g1:= paste("M",model$nb*model$nf*as.numeric(substr(f1,2,length(f1))),sep="")]
  #sdata <- sdata[,g1 := paste("M",m1 + model$nb*(sample.int(.N/msize,.N,replace=T)-1),sep=""),m1]
  # movers sample their groups among stayers from the same market
  jdata <- jdata[,g1:=sample( unique(sdata[g1b %in% g1c,g1]) ,.N,replace=T),g1c]
  jdata <- jdata[,g2:=sample( unique(sdata[g1b %in% g2c,g1]) ,.N,replace=T),g2c]
  jdata$g2c=NULL
  jdata$g1c=NULL
  sdata$g1b=NULL
  sdata[,g2:=g1]
  sim = list(sdata=sdata,jdata=jdata)
  return(sim)
}
# -------------------- Estimating functions -----------------------------
#' Estimates the static model parameters for movers
#'
#' EM estimation of the movers' mixture model. Each iteration runs an E-step
#' that computes the posterior type probabilities taum from the current
#' parameters, and an M-step that updates the means A1/A2 and dispersions
#' S1/S2 by constrained weighted least squares (via slm.wfitc) and the type
#' proportions pk1 by weighted cell averages with a Dirichlet prior.
#'
#' @param jdatae movers data.table with wages y1/y2, worker markets m1/m2 and firm classes j1/j2
#' @param model starting model (A1, S1, A2, S2, pk1 and dimensions nb, nk, nf)
#' @param ctrl control list from em.control (maxiter, tol, dprior, cstr_type/cstr_val, fixb, fixm, model_var, sd_floor, posterior_reg, ncat, nplot, textapp, ...)
#' @return list with the updated model, the likelihood, the number of EM steps, the last relative change dlik, timing information and the control list
#' @export
m2.mixt.movers <- function(jdatae,model,ctrl) {
  start.time <- Sys.time()
  tic <- tic.new()
  # dprior is the Dirichlet prior on pk1; model0 is an optional reference
  # model used only for the fit plots; taum may carry posteriors supplied by
  # the caller to skip the first E-step
  dprior = ctrl$dprior
  model0 = ctrl$model0
  taum = ctrl$tau
  ### ----- GET MODEL ---
  nb = model$nb
  nk = model$nk
  nf = model$nf
  A1 = model$A1
  S1 = model$S1
  A2 = model$A2
  S2 = model$S2
  pk1 = model$pk1
  # ----- GET DATA
  # movers
  Y1m = jdatae$y1
  Y2m = jdatae$y2
  M1m = jdatae$m1
  M2m = jdatae$m2
  J1m = jdatae$j1
  J2m = jdatae$j2
  # flattened (origin, destination) cell indices for classes and markets
  JJm = J1m + nf*(J2m-1)
  MMm = M1m + nb*(M2m-1)
  Nm = jdatae[,.N]
  # get the constraints on the period-1 and period-2 means (cons.* helpers
  # are defined elsewhere in the package)
  CS1 = cons.pad(cons.get(ctrl$cstr_type[1],ctrl$cstr_val[1],nk,nb*nf),nk*nb*nf*0, nk*nb*nf*1)
  CS2 = cons.pad(cons.get(ctrl$cstr_type[2],ctrl$cstr_val[2],nk,nb*nf),nk*nb*nf*1,0)
  # combine them
  CS = cons.bind(CS1,CS2)
  # create the stationary contraints (period-2 means tied to period-1)
  if (ctrl$fixb==T) {
    CS2 = cons.fixb(nk,nb*nf,2)
    CS = cons.bind(CS2,CS)
  }
  # create a constraint for the variances: either unconstrained, or monotone
  # in k (enforced as equalities via meq)
  if (ctrl$model_var==T) {
    CSw = cons.none(nk,nb*nf*2)
  } else{
    CS1 = cons.pad(cons.mono_k(nk,nb*nf),nk*nb*nf*0, nk*nb*nf*3)
    CS2 = cons.pad(cons.mono_k(nk,nb*nf),nk*nb*nf*1, nk*nb*nf*2)
    CSw = cons.bind(CS1,CS2)
    CSw$meq = length(CSw$H)
  }
  # prepare design matrices aggregated at the type level
  Dkj1f = diag(nb*nf) %x% rep(1,nb*nf) %x% diag(nk) # A[k,l] coefficients for j1
  Dkj2f = rep(1,nb*nf) %x% diag(nb*nf) %x% diag(nk) # A[k,l] coefficients for j2
  # regression matrix stacking the two periods (rBind/cBind: Matrix package)
  XX = rBind(
    cBind( Dkj1f, 0*Dkj2f),
    cBind( 0*Dkj1f, Dkj2f)
  )
  ## --- prepare regressions covariates --- #
  # likelihood bookkeeping and per-observation log-density buffers
  lik_old = -Inf
  lik = -Inf
  lik_best = -Inf
  liks = 0
  likm=0
  lpt1 = array(0,c(Nm,nk))
  lpt2 = array(0,c(Nm,nk))
  lp = array(0,c(Nm,nk))
  tic("prep")
  # NOTE(review): `stop` is initialized but never set to TRUE, so the
  # `if (stop) break` below is currently dead.
  stop = F;
  for (step in 1:ctrl$maxiter) {
    # NOTE(review): model1 is built but never used; note also the apparent
    # typo nf=nk in its construction -- harmless while unused.
    model1 = list(nb=nb,nk=nk,nf=nk,A1=A1,A2=A2,S1=S1,S2=S2,
                  pk1=pk1,dprior=dprior)
    ### ---------- E STEP ------------- #
    # compute the tau probabilities and the likelihood (skipped at step 1 if
    # posteriors were supplied via ctrl$tau)
    if (is.na(taum[1]) | (step>1)) {
      # for efficiency we want to group by (l1,l2)
      for (b1 in 1:nb) for (b2 in 1:nb) for (l1 in 1:nf) for (l2 in 1:nf) {
        I = which((M1m==b1) & (M2m==b2) & (J1m==l1) & (J2m==l2))
        bb = b1 + nb*(b2-1)
        ll = l1 + nf*(l2-1)
        if (length(I)==0) next;
        for (k in 1:nk) {
          lpt1[I,k] = lognormpdf(Y1m[I] , A1[b1,l1,k], S1[b1,l1,k])
          lpt2[I,k] = lognormpdf(Y2m[I] , A2[b2,l2,k], S2[b2,l2,k])
          # joint log-density: log prior share plus both period log-densities
          lp[I,k] = log(pk1[bb,ll,k]) + lpt1[I,k] + lpt2[I,k]
          if (k==1) {
            #flog.info("b1=%3i b2=%3i l1=%3i l2=%3i A1=%3.3f A2=%3.3f",b1,b2,l1,l2,A1[b1,l1,k],A2[b2,l2,k])
          }
          #browser()
        }
      }
      liks = sum(logRowSumExp(lp))
      taum = exp(lp - spread(logRowSumExp(lp),2,nk)) # normalize the k probabilities Pr(k|Y1,Y2,Y3,Y4,l)
      #browser()
      # add the log of the Dirichlet prior on pk1 to the likelihood
      lik_prior = (dprior-1) * sum(log(pk1))
      lik = liks + lik_prior
      #print(liks)
    } else {
      cat("skiping first max step, using supplied posterior probabilities\n")
    }
    tic("estep")
    if (stop) break;
    # ---------- MAX STEP ------------- #
    # taum = makePosteriorStochastic(tau = taum,m = ctrl$stochastic) # if we want to implement stochastic EM
    # NOTE(review): rwm is computed but not used below.
    rwm = c(t(taum + ctrl$posterior_reg))
    if (ctrl$fixm==F) {
      # weighted cell means (DYY) and weights (WWT) feeding the constrained
      # weighted least squares update of A1/A2
      DYY = array(0,c(nk,nf,nf,nb,nb,2))
      WWT = array(1e-7,c(nk,nf,nf,nb,nb,2))
      for (b1 in 1:nb) for (b2 in 1:nb) for (l1 in 1:nf) for (l2 in 1:nf) {
        I = which((M1m==b1) & (M2m==b2) & (J1m==l1) & (J2m==l2))
        if (length(I)==0) next;
        for (k in 1:nk) {
          # compute the posterior weight, it's not time specific
          ww = sum(taum[I,k] + ctrl$posterior_reg)
          # construct dependent for each time period k,l2,l1,
          DYY[k,l2,l1,b2,b1,1] = sum( Y1m[I] * (taum[I,k] + ctrl$posterior_reg) )/ww
          DYY[k,l2,l1,b2,b1,2] = sum( Y2m[I] * (taum[I,k] + ctrl$posterior_reg) )/ww
          # Scaling the weight by the time specific variance
          WWT[k,l2,l1,b2,b1,1] = ww/pmax(ctrl$sd_floor,S1[b1,l1,k]^2)
          WWT[k,l2,l1,b2,b1,2] = ww/pmax(ctrl$sd_floor,S2[b2,l2,k]^2)
        }
      }
      WWT = WWT/sum(WWT)
      fit = slm.wfitc(XX,as.numeric(DYY),as.numeric(WWT),CS)$solution
      # unpack the stacked solution into the A1 and A2 arrays
      is = 1
      A1solver = (rdim(fit[is:(is + nk*nb*nf-1)],nk,nf,nb)); is = is+nk*nb*nf
      A2solver = (rdim(fit[is:(is + nk*nb*nf-1)],nk,nf,nb)); is = is+nk*nb*nf
      for (b1 in 1:nb) {
        A1[b1,,] = t(A1solver[,,b1])
        A2[b1,,] = t(A2solver[,,b1])
      }
      # same machinery for the variances: weighted squared residuals around
      # the fitted means, regressed under the variance constraints CSw
      DYY_bar = array(0,c(nk,nf,nf,nb,nb,2))
      DYY_bar[] = XX%*%fit
      DYYV = array(0,c(nk,nf,nf,nb,nb,2))
      for (b1 in 1:nb) for (b2 in 1:nb) for (l1 in 1:nf) for (l2 in 1:nf) {
        I = which((M1m==b1) & (M2m==b2) & (J1m==l1) & (J2m==l2))
        if (length(I)==0) next;
        for (k in 1:nk) {
          # construct dependent for each time period k,l2,l1,
          ww = sum(taum[I,k] + ctrl$posterior_reg)
          DYYV[k,l2,l1,b2,b1,1] = sum( (Y1m[I] - DYY_bar[k,l2,l1,b2,b1,1])^2 * (taum[I,k] + ctrl$posterior_reg) )/ww
          DYYV[k,l2,l1,b2,b1,2] = sum( (Y2m[I] - DYY_bar[k,l2,l1,b2,b1,2])^2 * (taum[I,k] + ctrl$posterior_reg) )/ww
        }
      }
      fitv = slm.wfitc(XX,as.numeric(DYYV),as.numeric(WWT),CSw)$solution
      is = 1
      S1solver = sqrt((rdim(fitv[is:(is + nk*nb*nf-1)],nk,nf,nb))); is = is+nk*nb*nf
      S2solver = sqrt((rdim(fitv[is:(is + nk*nb*nf-1)],nk,nf,nb))); is = is+nk*nb*nf
      for (b1 in 1:nb) {
        S1[b1,,] = t(S1solver[,,b1])
        S2[b1,,] = t(S2solver[,,b1])
      }
      S1[S1<ctrl$sd_floor]=ctrl$sd_floor # having a variance of exacvtly 0 creates problem in the likelihood
      S2[S2<ctrl$sd_floor]=ctrl$sd_floor
    }
    tic("mstep-ols")
    ## -------- PK probabilities ------------ #
    ## --- movers: pk1 is the posterior-weighted cell average, smoothed by
    ## the Dirichlet prior --- #
    for (b1 in 1:nb) for (b2 in 1:nb) for (l1 in 1:nf) for (l2 in 1:nf) {
      mm = b1 + nb*(b2 -1)
      jj = l1 + nf*(l2-1)
      I = which((MMm==mm) & (JJm == jj))
      if (length(I)>1) {
        pk1[mm,jj,] = colSums(taum[I,])
      } else if (length(I)==0) { # this deals with the case where the cell is empty
        pk1[mm,jj,] = 1/nk
      } else {
        pk1[mm,jj,] = taum[I,]
      }
      pk1[mm,jj,] = (pk1[mm,jj,] + dprior-1 )/(sum(pk1[mm,jj,] + dprior -1 ))
    }
    #check_lik = computeLik(Y1m,Y2m,Y3m,Y4m,A12,B12,S12,A43,B43,S43,A2ma,A2mb,S2m,A3ma,A3mb,B32m,S3m)
    #if (check_lik<lik) cat("lik did not go down on pk1 update\n")
    # checking model fit: every nplot steps, plot estimates against the
    # reference model model0 when it is available, else the wage plot
    if ((!any(is.na(model0))) & ((step %% ctrl$nplot) == (ctrl$nplot-1))) {
      I1 = order(colSums(A1))
      I2 = order(colSums(model0$A1))
      rr = addmom(A2[,I1],model0$A2[,I2],"A2")
      rr = addmom(A1[,I1],model0$A1[,I2],"A1",rr)
      rr = addmom(S2[,I1], model0$S2[,I2], "S2", rr,type="var")
      rr = addmom(S1[,I1], model0$S1[,I2], "S1", rr,type="var")
      rr = addmom(pk1,model0$pk1,"pk1",rr,type="pr")
      print(ggplot(rr,aes(x=val2,y=val1,color=type)) + geom_point() + facet_wrap(~name,scale="free") + theme_bw() + geom_abline(linetype=2))
    } else {
      if ((step %% ctrl$nplot) == (ctrl$nplot-1)) {
        wplot(A1)
      }
    }
    # -------- check convergence (relative likelihood change) ------- #
    dlik = (lik - lik_old)/abs(lik_old)
    lik_old = lik
    lik_best = pmax(lik_best,lik)
    if ( (step %% ctrl$ncat) == 0) flog.info("[%3i][%s] lik=%4.4f dlik=%4.4e liks=%4.4e likm=%4.4e",step,ctrl$textapp,lik,dlik,liks,likm);
    if (step>10) if (abs(dlik)<ctrl$tol) break;
    tic("loop-wrap")
  }
  flog.info("[%3i][%s][final] lik=%4.4f dlik=%4.4e liks=%4.4e likm=%4.4e",step,ctrl$textapp,lik,dlik,liks,likm);
  # store the updated parameters back into the model
  model$A1 = A1
  model$S1 = S1
  model$A2 = A2
  model$S2 = S2
  ## --movers --
  model$pk1 = pk1
  model$NNm = acast(jdatae[,.N,list(j1,j2)],j1~j2,fill=0,value.var="N")
  model$likm = lik
  end.time <- Sys.time()
  time.taken <- end.time - start.time
  return(list(tic = tic(), model=model,lik=lik,step=step,dlik=dlik,time.taken=time.taken,ctrl=ctrl,liks=liks,likm=likm))
}
m2.mixt.rdim.pk1 <-function(pk1) {
  # NOTE(review): empty stub -- it never touches pk1 and implicitly returns
  # NULL. Confirm whether it can be removed or still needs an implementation.
}
#' use the marginal distributions to extract type distributions
#' within each cluster and observable characteristics
#'
#' Keeps the wage components (A1, S1) fixed and re-estimates the stayer type
#' proportions pk0 by EM on the period-1 wages: the E-step computes posterior
#' type weights from the normal densities, the M-step averages them within
#' each (x, j1) cell. Iterates until the proportions stop moving by more than
#' ctrl$tol or ctrl$maxiter is reached.
#'
#' @param sdata stayers data.table with columns y1 (wage), j1 (firm class) and x (observable category)
#' @param model model list; A1[1,,] / S1[1,,] provide the component means/sds and pk0 the starting proportions
#' @param ctrl estimation control list (maxiter, tol, ncat, textapp)
#' @return the input model with updated pk0 (dim 1 x nf x nk), the final likelihood in liks and stayer counts in NNs
#' @export
m2.mixt.stayers <- function(sdata,model,ctrl) {
  # We iterate a fixed-point (EM) update that maximizes the likelihood
  # subject to the proportions being non-negative and summing to one; the
  # objective weights are the normal densities evaluated at each type k.
  nk = model$nk
  nf = model$nf
  Y1 = sdata$y1 # wage in period 1
  J1 = sdata$j1 # firm class in period 1
  X = sdata$x # observable category
  # @todo add code in case X is missing, just set it to one
  nx = length(unique(X))
  N = length(Y1)
  # component means and sds of the first worker-market slice, transposed to nk x nf
  Wmu = t(model$A1[1,,])
  Wsg = t(model$S1[1,,])
  # joint (x, j1) cell index and its sparse indicator matrix (N x nf*nx);
  # tot_count holds the cell sizes replicated across the nk rows
  J1x = X + nx*(J1-1)
  J1s <- Matrix(0, nrow = N, ncol = nf * nx, sparse = TRUE)
  II = 1:N + N*( J1x -1 ); J1s[II]=1
  tot_count = t(spread(Matrix::colSums(J1s),2,nk))
  empty_cells = (tot_count[1,]==0)
  # starting proportions, flattened to (nf*nx) x nk
  PI = rdim(model$pk0,nf*nx,nk)
  PI_old = PI
  lik_old = Inf
  iter_start = 1
  for (count in iter_start:ctrl$maxiter) {
    # E-step: posterior type weights tau proportional to PI times the normal density
    norm1 = dnorm(spread(Y1,2,nk),t(Wmu[,J1]),t(Wsg[,J1]))
    tau = PI[J1x,]*norm1
    tsum = Matrix::rowSums(tau)
    tau = tau / spread( tsum ,2,nk )
    lik = - sum(log(tsum))
    # M-step: cell averages of the posterior weights; empty cells get uniform shares
    PI = t.default( as.array( t(tau) %*% J1s / tot_count ))
    PI[empty_cells,] = array(1/nk,c(sum(empty_cells),nk))
    dPI = abs(PI - PI_old)
    max_change = max(dPI)
    mean_change = mean(dPI)
    PI_old = PI
    if (!is.finite(lik)) { status = -5; break; }
    prg = (lik_old - lik)/lik
    lik_old = lik
    if ((count %% ctrl$ncat)==(ctrl$ncat-1)) {
      flog.info("[%3i][%s] lik=%4.4e inc=%4.4e max-pchg=%4.4e mean-pchg=%4.4e",count,ctrl$textapp,lik,prg,max_change,mean_change)
      flush.console()
    }
    if (max_change<ctrl$tol) {
      status = 1;
      msg = "converged";
      break;
    }
  }
  # reshape the proportions back and store them (collapsed to one market slice)
  model$pk0 = rdim(PI,nx,nf,nk)
  dim(model$pk0) = c(1,nf,nk)
  model$liks = lik
  model$NNs = sdata[,.N,j1][order(j1)][,N]
  return(model)
}
#' Estimates the static mixture model on 2 periods
#'
#' This estimator uses multiple starting values to try to find the global
#' maximum. Movers are re-estimated ctrl$est_rep times from randomized
#' starting values (optionally in parallel); the best run is selected by
#' likelihood and connectedness, the stayers' pk0 is then estimated on a
#' subsample, and a variance decomposition is computed on simulated data.
#'
#' @param sim list with sdata (stayers) and jdata (movers)
#' @param nk number of latent worker types
#' @param ctrl control list from em.control()
#' @param cl optional parallel cluster; when supplied, repetitions run via parLapply
#' @param nbb number of manager classes used in the three-sided mapping
#' @return the best estimation result augmented with vdec, ctrl and time.taken
#' @export
m2.mixt.estimate.all <- function(sim,nk=6,ctrl,cl=NA,nbb=1) {
  start.time <- Sys.time()
  sdata = sim$sdata
  jdata = sim$jdata
  # moments of the stayers' wages, used to randomize starting values
  mm = mean(sdata$y1)
  ms = 2*sd(sdata$y1)
  # check that sdata has an x column
  if (!("x" %in% names(sdata))) {
    flog.info("creating an x column in sdata and set it to 1")
    sdata$x=1
  } else if (length(unique(sdata$x)) >= 50 ) {
    stop("likely too many values in the x column of sdata")
  }
  nf = max(sdata$j1);
  nb = max(sdata$m1)
  # initial parameters and a first constrained ("para") pass on the movers
  model_start = m2.mixt.new(nk,nf,nb)
  res_para = m2.mixt.movers(jdata,model_start,ctrl=em.control(ctrl,cstr_type="para",textapp="para0",fixb=F))
  flog.info("res para : value at model start")
  #print(res_para)
  # use cluster if available
  if (!any(is.na(cl))) {
    flog.info("cluster -- exporting objects to nodes")
    # export environment to nodes
    clusterExport(cl,c("res_para","jdata","ctrl"),environment())
    mylapply <- function(...) parLapply(cl,...)
    nnodes=length(cl)
  } else {
    mylapply <- function(...) lapply(...)
    nnodes=1
  }
  flog.info("starting repetitions with %i nodes",nnodes)
  rr = mylapply(1:ctrl$est_rep, function(i) {
    res_mixt = list()
    tryCatch({
      # randomize the intercepts A1 around the stayers' wage distribution
      for (b1 in 1:nb) {
        res_para$model$A1[b1,,] = spread(sort(rnorm(nk))*ms+mm,1,nf)
      }
      #res_para$model$A1[1,,] = seq(0.1,1,l=model$nf) %o% seq(0.1,1,l=model$nk)
      #res_para$model$A1[2,,] = seq(0.2,1,l=model$nf) %o% seq(0.3,.9,l=model$nk)
      res_para$model$A2 = res_para$model$A1
      # two constrained passes, then the full movers estimation
      res_para_fixm = m2.mixt.movers(jdata,res_para$model,ctrl=em.control(ctrl,cstr_type="para",textapp=sprintf("paraf (%i/%i)",i,ctrl$est_rep),fixm=T,fixb=F))
      res_para_new = m2.mixt.movers(jdata,res_para_fixm$model,ctrl=em.control(ctrl,textapp=sprintf("para1 (%i/%i)",i,ctrl$est_rep),cstr_type="para",fixm=F,fixb=F))
      #print(res_para_new$model$A1[1,,])
      #print(res_para_new$model$A1[2,,])
      res_mixt = m2.mixt.movers(jdata,res_para_new$model,ctrl=em.control(ctrl,textapp=sprintf("move1 (%i/%i)",i,ctrl$est_rep)))
      # ------ compute connectedness ----- #
      res_mixt$connectedness = 0
      #res_mixt$connectedness = model.connectiveness(res_mixt$model)
      res_mixt$rep_id = i
    }, error = function(e) {catf("error in rep %i!\n",i);print(e);})
    flog.info("done with reptitions %i/%i",i,ctrl$est_rep)
    res_mixt
  })
  # backing up to disk
  #save(rr,ctrl,file=paste(ctrl$file_backup_prefix,"data",sep="."))
  # extract likelihoods and connectedness
  rrd = ldply(rr,function(r) {
    data.frame(lik_mixt = r$model$likm,connectedness = r$connectedness,i=r$rep_id)
  })
  # selecting best starting value: keep est_nbest by likelihood (sel=0),
  # then pick the most connected among those (sel=1)
  rrd = data.table(rrd)
  rrd[, sel:=-1]
  rrd.sub = rrd[order(-lik_mixt)][1:ctrl$est_nbest]
  rrd[i %in% rrd.sub$i, sel:=0]
  Ibest = rrd.sub[order(-connectedness)][1,i]
  res_mixt = rr[[Ibest]]
  rrd[i==Ibest, sel:=1]
  # sub-sample the stayers for computational reasons (if too large)
  if (ctrl$sdata_subredraw==TRUE) {
    sim$sdata[,sample := rank(runif(.N))/.N<=ctrl$sdata_subsample,j1]
    flog.info("drawing %f from the stayers",ctrl$sdata_subsample)
  }
  flog.info("selecting best model")
  #return(res_mixt)
  #print(res_mixt$model)
  # estimate pk0 on the stayer subsample, mover parameters held fixed
  res_mixt$model = m2.mixt.stayers(sim$sdata[sample==1],res_mixt$model,ctrl = em.control(ctrl,textapp="stayers"))
  res_mixt$second_stage_reps = rrd
  res_mixt$second_stage_reps_all = rr
  #return(res_mixt)
  # ------ compute linear decomposition ------- #
  # rescale counts to ctrl$vdec_sim_size keeping the movers/stayers shares
  # (stayer counts re-inflated for the subsample)
  NNm = res_mixt$model$NNm
  NNs = res_mixt$model$NNs/ctrl$sdata_subsample
  NNm[!is.finite(NNm)]=0
  NNs[!is.finite(NNs)]=0
  share_s = sum(NNs)/(sum(NNm) + sum(NNs))
  share_m = sum(NNm)/(sum(NNm) + sum(NNs))
  NNs = round(NNs*ctrl$vdec_sim_size*share_s/sum(NNs))
  NNm = round(NNm*ctrl$vdec_sim_size*share_m/sum(NNm))
  flog.info("drawing here")
  # we simulate from the model both movers and stayers;
  # fix the dimension of the count arrays for the three-sided model
  dim(NNs) = c(nb,nf)
  dim(NNm) = c(nb,nb,nf,nf)
  sdata.sim = m2.mixt.simulate.stayers(res_mixt$model,NNs)
  jdata.sim = m2.mixt.simulate.movers(res_mixt$model,NNm)
  sdata.sim.2d = rbind(sdata.sim[,list(j1,k,y1)],jdata.sim[,list(j1,k,y1)])
  vdec = lin.proj(sdata.sim.2d,y_col = "y1",k_col="k",j_col = "j1")
  sdata.sim.3d = rbind(sdata.sim[,list(m1,j1,k,y1)],jdata.sim[,list(m1,j1,k,y1)])
  # mapping from the flat 2d class index back to (manager, firm) pairs
  manager_3d = nbb
  firm_3d = nf/manager_3d
  mapping = array(0,c(manager_3d*firm_3d,3))
  # create the mappings
  ci=1
  for (b1 in 1:manager_3d) for (l1 in 1:firm_3d){
    mapping[ci,1]= b1
    mapping[ci,2]= l1
    mapping[ci,3]= ci
    ci=ci+1
  }
  map2d = data.table(b_3d = mapping[,1],f_3d = mapping[,2],b_2d=1,f_2d=mapping[,3])
  #print(map2d)
  # relabel the simulated stayers with the three-sided (manager, firm) indices
  for (b1 in 1:nb) for (l1 in 1:nf) {
    b1_3d = map2d[b_2d == b1 & f_2d == l1, b_3d]
    l1_3d = map2d[b_2d == b1 & f_2d == l1, f_3d]
    #sdata[(m1==b1) & (j1==l1), f1 := paste("F",l1_2d + model$nf*model$nb*(sample.int(.N/fsize,.N,replace=T)-1),sep="")]
    sdata.sim.3d[(m1==b1) & (j1==l1), `:=` (m1=b1_3d, j1=l1_3d)]
  }
  # the reported decomposition is the three-sided one (overwrites the 2d vdec)
  vdec = lin.proj.three(sdata.sim.3d,y_col = "y1",k_col="k",j_col = "j1",m_col="m1")
  res_mixt$vdec = vdec
  res_mixt$ctrl = ctrl
  end.time <- Sys.time()
  res_mixt$time.taken <- end.time - start.time
  return(res_mixt)
}
#' Computes the variance decomposition by simulation
#'
#' Simulates nsim observations (split between stayers and movers according
#' to stayer_share) from the fitted model and runs the linear projection
#' decomposition on the period-1 outcome.
#'
#' @param model fitted mixture model (uses NNm, NNs)
#' @param nsim total number of simulated observations
#' @param stayer_share fraction of the simulated sample drawn as stayers
#' @param ydep outcome to use; only "y1" is implemented
#' @return the lin.proj() decomposition
#' @export
m2.mixt.vdec <- function(model,nsim,stayer_share=1,ydep="y2") {
  # Only the period-1 outcome is supported; anything else falls back to y1.
  # NOTE(review): the default ydep="y2" therefore always triggers this
  # warning — confirm the default is intended.
  if (ydep != "y1") flog.warn("ydep other than y1 is not implemented, using y1")
  # Sanitize the mover/stayer counts and rescale them to the requested
  # simulation size, split according to stayer_share.
  counts_m <- model$NNm
  counts_s <- model$NNs
  counts_m[!is.finite(counts_m)] <- 0
  counts_s[!is.finite(counts_s)] <- 0
  counts_s <- round(counts_s * nsim * stayer_share / sum(counts_s))
  counts_m <- round(counts_m * nsim * (1 - stayer_share) / sum(counts_m))
  flog.info("computing var decomposition with ns=%i nm=%i", sum(counts_s), sum(counts_m))
  # Draw stayers and movers from the fitted model, stack the period-1
  # outcomes, and run the linear projection decomposition.
  stayers_sim <- m2.mixt.simulate.stayers(model, counts_s)
  movers_sim <- m2.mixt.simulate.movers(model, counts_m)
  combined <- rbind(stayers_sim[, list(j1, k, y1)], movers_sim[, list(j1, k, y1)])
  lin.proj(combined, "y1", "k", "j1")
}
#' Compute mean effects
#'
#' Simulates a large population from the fitted model and computes wage
#' statistics by firm class under the estimated type proportions ("pk0"),
#' then re-simulates with sorting shut down (the type distribution made
#' identical across classes, "pku") and returns both sets of statistics
#' stacked.
#'
#' @param model fitted mixture model (uses pk0, pk1, NNm, NNs, nf, nk)
#' @return rbind of sample.stats() under estimated and uniform sorting
#' @export
m2.mixt.meaneffect <- function(model) {
  # NOTE(review): the comment says 10% sample but the factor is 100 — confirm.
  NNs = model$NNs*100 # used 10% sample
  NNm = model$NNm
  share_s = sum(NNs)/(sum(NNm) + sum(NNs))
  share_m = sum(NNm)/(sum(NNm) + sum(NNs))
  # rescale to a total simulated population of 1e6, keeping the shares
  NNs = round(NNs*1e6*share_s/sum(NNs))
  NNm = round(NNm*1e6*share_m/sum(NNm))
  # we simulate from the model both movers and stayers
  sdata = m2.mixt.simulate.stayers(model,NNs)
  jdata = m2.mixt.simulate.movers(model,NNm)
  sdata = rbind(sdata[,list(j1,k,y1)],jdata[,list(j1,k,y1)])
  # compute decomposition
  #vdec = lin.proj(sdata,y_col = "y1",k_col="k",j_col = "j1")
  #res_bs$mixt_all[[nn]]$vdec_1m = vdec
  rt = sample.stats(sdata,"y1","j1", "pk0")
  # then we make the type distribution identical across firm classes:
  # aggregate type shares (weighted by class sizes) spread to all classes
  model_nosort = copy(model)
  #print(model_nosort$pk0[1,,])
  #print(NNs/(sum(NNs)))
  #print(model_nosort$pk0[1,,] * spread(NNs/(sum(NNs)),2,model_nosort$nk))
  #print("p2")
  #print(colSums(model_nosort$pk0[1,,] * spread(NNs/(sum(NNs)),2,model_nosort$nk)))
  model_nosort$pk0[1,,] = spread(colSums(model_nosort$pk0[1,,] * spread(NNs/(sum(NNs)),2,model_nosort$nk)),1,model_nosort$nf)
  print(model_nosort$pk0[1,,])
  # leftover hand-crafted alternative pk0 (currently unused)
  vec1 <- c( 1, 0, 0 , 0, 0 )
  vec2 <- c( 0, 1, 0 , 0, 0 )
  vec3 <- c( 0, 0, 1 , 0, 0 )
  vec4 <- c( 0, 0, 0 , 1, 1 )
  #model_nosort$pk0[1,,] = array(c(vec1,vec2,vec3,vec4),dim= c(5,4))
  #print(model_nosort$pk0[1,,])
  # movers: replace pk1 with the marginal type distribution
  dpk1 = m2.get.pk1(model)
  pk = dpk1[,pr_k[1],k][,V1]
  model_nosort$pk1 = spread(pk,1,model$nf * model$nf)
  # simulate from uniform
  sdata = m2.mixt.simulate.stayers(model_nosort,NNs)
  jdata = m2.mixt.simulate.movers(model_nosort,NNm)
  sdata = rbind(sdata[,list(j1,k,y1)],jdata[,list(j1,k,y1)])
  rt2 = sample.stats(sdata,"y1","j1","pku")
  return(rbind(rt,rt2))
}
# ------------- Testing functions ---------------------
# for more tests, look at tests/testthat/test_model_mixt2.R
# here we want to check a bunch of properties for the EM steps;
# model1 and model2 should be 2 consecutive steps
#
# EM monotonicity check: apply a parameter change (passed via ...) to model1
# and verify that both Q(theta|theta^t) and H(theta|theta^t) weakly increase,
# which together guarantee the EM objective does not decrease.
m2.mixt.check <- function(Y1,Y2,J1,J2,JJ,nk,Nm,model1,...) {
  change = list(...)
  # compute posterior for model1
  res1 = with(model1,{
    taum = array(0,c(Nm,nk))
    lpm = array(0,c(Nm,nk))
    likm = 0
    for (i in 1:Nm) {
      # log joint density of the two wages plus the prior type weight
      ltau = log(pk1[JJ[i],])
      lnorm1 = lognormpdf(Y1[i], A1[J1[i],], S1[J1[i],])
      lnorm2 = lognormpdf(Y2[i], A2[J2[i],], S2[J2[i],])
      lall = ltau + lnorm2 + lnorm1
      lpm[i,] = lall
      likm = likm + logsumexp(lall)
      taum[i,] = exp(lall - logsumexp(lall))
    }
    # compute prior
    lik_prior = (dprior-1) * sum(log(pk1)) # dirichlet distribution
    lik = likm + lik_prior
    list(taum = taum, lpm =lpm, lik=likm,lik_prior=lik_prior,post=lik)
  })
  # model2 = model1 with the requested fields overwritten
  model2 = copy(model1)
  model2[names(change)] = change[names(change)]
  # compute posterior for model2 (same computation as above)
  res2 = with(model2,{
    taum = array(0,c(Nm,nk))
    lpm = array(0,c(Nm,nk))
    likm = 0
    for (i in 1:Nm) {
      ltau = log(pk1[JJ[i],])
      lnorm1 = lognormpdf(Y1[i], A1[J1[i],], S1[J1[i],])
      lnorm2 = lognormpdf(Y2[i], A2[J2[i],], S2[J2[i],])
      lall = ltau + lnorm2 + lnorm1
      lpm[i,] = lall
      likm = likm + logsumexp(lall)
      taum[i,] = exp(lall - logsumexp(lall))
    }
    # compute prior
    lik_prior = (dprior-1) * sum(log(pk1)) # dirichlet distribution
    lik = likm + lik_prior
    list(taum = taum, lpm =lpm, lik=likm,lik_prior=lik_prior,post=lik)
  })
  # do the analysis, Evaluate Q(theta | theta^t) , Q(theta^t | theta^t), H(theta | theta^t) and H(theta^t | theta^t)
  Q1 = sum( ( (res1$taum) * res1$lpm ))
  Q2 = sum( ( (res1$taum) * res2$lpm ))
  H1 = - sum( (res1$taum) * log(res1$taum))
  H2 = - sum( (res1$taum) * log(res2$taum))
  warn_str=""
  test = TRUE
  # the EM step guarantees Q and H are non-decreasing; flag any violation
  if (( Q2<Q1) | (H2<H1)) {
    warn_str = "!!!!!!!!!";
    test=FALSE
  }
  catf("[emcheck] %s Qd=%4.4e Hd=%4.4e %s\n",paste(names(change),collapse = ","), Q2-Q1,H2-H1,warn_str)
  return(test)
}
# Smoke test for the mixture estimator: simulate movers from a fresh model
# and run the estimation routines on them. Scratch/interactive code; it is
# not exported and not called elsewhere in this file.
m2.mixt.test <- function() {
  nf = 10
  nk = 6
  model = m2.mixt.new(nk,nf)
  # roughly 30000 movers spread evenly over the nf x nf firm-class pairs
  NNm = floor(array(30000/(nf^2),c(nf,nf)))
  jdata = m2.mixt.simulate.movers(model,NNm)
  ctrl = em.control(nplot=10,check_lik=F,fixb=F,est_rho=F,model0=model,dprior=1.05,maxiter=100)
  ctrl$posterior_reg=0
  ctrl$fixm=FALSE
  ctrl$ncat=5
  ctrl$check_lik=FALSE
  res = m2.mixt(jdata,model,ctrl)
  # trying the non-parametric estimator from there with 3 components
  ctrl$model0=NA
  model_np = step2.static.em.np.new.from.ns(res$model,nm=3)
  res = model_np = step2.static.em.np.movers.estimate(jdata,model_np,ctrl)
  # try to plot the outcome.
  res$model$W1
  res = m2.mixt.fixed(jdata,model)
}
# Load a saved grid of rho evaluations, pick the run with the highest
# likelihood, and simulate a combined stayers+movers data set from it
# (stayers reweighted to 30,000 observations).
#
# Returns a data.table with stayers (m=0) stacked on movers (m=1).
em.endo.simulatebest <- function() {
  # load the grid of evaluations (provides `rr`)
  load("inst/figures/src/em-endo-full_rhogrid-halton-6x10.dat",verbose=F)
  # collect rho/likelihood info for every evaluation and find the best
  dd = data.frame()
  for (r in rr) {
    dd = rbind(dd,data.frame(rho=r$model$B32m,lik=r$lik,time=r$time.taken,step=r$step,dlik=r$dlik))
  }
  rbest = rr[[which.max(dd$lik)]]
  cat(sprintf("%i evaluations, best value is %f\n",length(rr),rbest$lik))
  # get number of movers (provides NNs, NNm)
  load("../figures/src/em-endo-info.dat",verbose=F)
  # reweight the stayers to 30,000
  tot = NNs[,sum(ni)]
  NNs[,ni := round(ni * 30000 /sum(ni)) ]
  setkey(NNs,j)
  NNs = NNs[,ni]
  # get the movers matrix
  NNm = acast(NNm,j1~j2,value.var="ni")
  # ----- simulate ------ #
  nk = rbest$model$nk;
  nf = rbest$model$nf;
  model = rbest$model
  jdatae = em.endo.full.simulate.movers(model,NNm)
  sdatae = em.endo.full.simulate.stayers(model,NNs)
  # tag movers (m=1) and stayers (m=0); stayers carry the population weight
  jdatae[,m:=1][,w:=1]
  sdatae[,m:=0][,w:=tot/.N]
  sdatae[,j2:=j1]
  # BUG FIX: previously `rbind(sdatae,sdatae)` bound the stayers to
  # themselves and dropped the movers entirely, contradicting the message
  # printed below; combine stayers with movers instead.
  adatae = rbind(sdatae,jdatae)
  cat(sprintf("simulated data with %i stayers and %i movers \n",sdatae[,.N],jdatae[,.N]))
  return(adatae)
}
#' Posterior type probabilities implied by the mover mixture weights
#'
#' Reshapes pk1 into long (j1, j2, k) form and weights each cell by the
#' empirical share of the corresponding (j1, j2) mover flow, yielding joint
#' probabilities pr_j1j2k and the marginal type probability pr_k.
#' @export
m2.get.pk1 <- function(model) {
  # long-format mixture weights: one row per (j1, j2, k) combination
  pk1_long <- data.table(melt(rdim(model$pk1, model$nf, model$nf, model$nk), c('j1','j2','k')))
  # empirical distribution of mover flows across firm-class pairs
  flow_share <- model$NNm / sum(model$NNm)
  pk1_long[, pr_j1j2 := flow_share[j1, j2], list(j1, j2)]
  pk1_long[, pr_j1j2k := pr_j1j2 * value]
  pk1_long[, pr_k := sum(pr_j1j2k), k]
  pk1_long
}
#' Returns the unconditional type probability in the cross-section
#'
#' Mixes the stayers' type proportions (pk0) with the movers' marginal type
#' distribution, weighting by the share of stayers in the population.
#'
#' @param model fitted mixture model (uses pk0, NNs, NNm, nf, nk)
#' @param supersample fraction of stayers kept when NNs was computed; the
#'   stayer counts are re-inflated by round(1/supersample) (default 0.1,
#'   i.e. a 10% sample)
#' @return nf x nk matrix of unconditional type probabilities
#' @export
m2.get.pk_unc <- function(model,supersample=0.1) {
  # movers' marginal type distribution by firm class
  dpk1 = m2.get.pk1(model)
  pk_m = acast(dpk1[,sum(pr_j1j2k),list(j1,k)],j1~k,value.var = "V1")
  NNs = model$NNs*round(1/supersample) # re-inflate the stayer subsample
  NNm = model$NNm
  share_s = sum(NNs)/(sum(NNm) + sum(NNs))
  # BUG FIX: previously read pk0 from an undefined global `res_main` and
  # indexed it as pk0[,,I] with the base function `I`, which errors; use
  # the function's own `model` argument (pk0 has dim c(1, nf, nk)).
  pk_unc = share_s*rdim(model$pk0[1,,],model$nf,model$nk) +
    (1- share_s) * pk_m
  pk_unc
}
#' check the fit in the movers using imputed data
#'
#' For every (j1, j2) firm-class pair, compares moments (means, mean wage
#' change, variances, covariance) of the observed wages (y1, y2) with the
#' imputed wages (y1_imp, y2_imp).
#'
#' @param jdata mover data.table with columns y1, y2, y1_imp, y2_imp, j1, j2
#' @return a wide table of moments with "data" and "imp" columns
#' @export
m2.movers.checkfit <- function(jdata) {
  # observed vs imputed moments within each (j1, j2) cell
  dd = jdata[, {
    d1=data.frame(src="data",
                  m1=mean(y1),m2=mean(y2),
                  d12=mean(y1-y2),
                  cov12=cov(y1,y2),v1=var(y1),v2=var(y2))
    d2=data.frame(src="imp",
                  m1=mean(y1_imp),m2=mean(y2_imp),
                  d12=mean(y1_imp-y2_imp),
                  cov12=cov(y1_imp,y2_imp),v1=var(y1_imp),v2=var(y2_imp))
    rbind(d1,d2)
  },list(j1,j2)]
  # long then wide: one row per (j1, j2, moment), data vs imp side by side
  ddm = melt(dd,id.vars = c("j1","j2","src"))
  ddm = cast(ddm,j1+j2+variable~src,value = "value")
  # NOTE(review): this ggplot object is built but neither printed nor
  # returned, so inside a function it is silently discarded — confirm
  # whether a print() is missing here.
  ggplot(ddm,aes(x=data,y=imp)) + geom_point() + facet_wrap(~variable,scales = "free") + theme_bw() +
    geom_abline(linetype=2)
  ddm
}
#' check the fit in the stayers using imputed data
#'
#' For every firm class j1, compares moments (means, mean wage change,
#' variances, covariance) of the observed wages (y1, y2) with the imputed
#' wages (y1_imp, y2_imp).
#'
#' @param sdata stayer data.table with columns y1, y2, y1_imp, y2_imp, j1
#' @param r1,r4 currently unused; kept for backward compatibility
#' @return a wide table of moments with "data" and "imp" columns
#' @export
m2.stayers.checkfit <- function(sdata,r1,r4) {
  # BUG FIX: the body previously referenced `jdata` (a leftover from the
  # movers version of this function, undefined in this scope) instead of
  # the `sdata` argument.
  dd = sdata[, {
    d1=data.frame(src="data",
                  m1=mean(y1),m2=mean(y2),
                  d12=mean(y1-y2),
                  cov12=cov(y1,y2),v1=var(y1),v2=var(y2))
    d2=data.frame(src="imp",
                  m1=mean(y1_imp),m2=mean(y2_imp),
                  d12=mean(y1_imp-y2_imp),
                  cov12=cov(y1_imp,y2_imp),v1=var(y1_imp),v2=var(y2_imp))
    rbind(d1,d2)
  },list(j1)]
  # long then wide: one row per (j1, moment), data vs imp side by side
  ddm = melt(dd,id.vars = c("j1","src"))
  ddm = cast(ddm,j1+variable~src,value = "value")
  # NOTE(review): as in m2.movers.checkfit, this ggplot is built but
  # neither printed nor returned — confirm whether a print() is missing.
  ggplot(ddm,aes(x=data,y=imp)) + geom_point() + facet_wrap(~variable,scales = "free") + theme_bw() +
    geom_abline(linetype=2)
  ddm
}
|
67722df43988495cfadf4b87d7eb95d28f5798cc | e7a62f2bbd1ca228200304f5577239556336fd81 | /Sensitivity Analysis/sensitivity_analysis_f.R | 9d8b79a95aaae553acaef958e86af246e04675e8 | [] | no_license | pakdamie/codlingmothdiapause | 7bf11bc87f253f6937eb4c0569f5121f8dd1dd98 | a38fc061cb361443b76d07a06f25ccead4fdc72e | refs/heads/master | 2022-06-16T16:58:44.966296 | 2020-05-06T15:14:47 | 2020-05-06T15:14:47 | 261,789,576 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,454 | r | sensitivity_analysis_f.R | ###############################
###SENSITIVITY ANALYSIS#######
############################
library(pracma)
library(pomp2)
################################################
###I'm running it only for the first 10 Years###
################################################
#############################
###Smoother and peak finders#
#############################
###10 years
#Jan 1st, 1984 - Dec 31st, 1993
DATES <- seq.Date(as.Date('01-01-1984',format = '%m-%d-%Y'),
as.Date('12-31-2016', format = '%m-%d-%Y'),'days')
# Sensitivity analysis for the phenology ----------------------------------
##THE CONTROL, assuming no changes to the parameters-
###again only running it for 10 years
CONTROL_MODEL <- trajectory(
POMP_CM,
PARAMETERS,
times = seq(1, 12054),
format = 'data.frame',
method = 'bdf'
)
#Only looking at the phenology of adults
CONTROL_R.Adult <- log((0.5*(rowSums(CONTROL_MODEL
[,(nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+1):
(nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+nAr)]))+1))
###MAKING IT INTO A DATAFRAME WITH YEARS
CONTROL_MODEL_R.Adult = cbind.data.frame(DATES,CONTROL_R.Adult )
CONTROL_MODEL_R.Adult$Year <- as.numeric(format(CONTROL_MODEL_R.Adult$DATES,'%Y'))
SPLITTED_CONTROL <- split(CONTROL_MODEL_R.Adult, CONTROL_MODEL_R.Adult$Year)
#############################
###THIS IS A FOR LOOP THAT CALCULATES THE PEAK
DOY_FIRST_PEAK_CONTROL = NULL #This is the collector
for(k in seq(1,33)){ #For every year
tmp <-SPLITTED_CONTROL [[k]] # THIS GETS THE ORIGINAL (UNCHANGED)
tmp$day <- as.numeric(format(tmp$DATES,'%j')) # get the DOY
tmp2<- subset(tmp, tmp$day > 100)
tmp_smooth <- smooth.spline(x=tmp2$day, y=tmp2$CONTROL_R.Adult, spar = 0.5)
df_smooth <- cbind.data.frame(doy= tmp_smooth $x,abund = tmp_smooth $y)
peak =data.frame(findpeaks( df_smooth$abund,npeaks=3,
minpeakheight=2,
zero='+'))
index_peak <- peak $X2
DOY_PEAK= data.frame(tmp2 [index_peak ,])
#this is where the package comes in
closest=DOY_PEAK[which.min(abs(150 - DOY_PEAK$day)),]
DOY_FIRST_PEAK_CONTROL [[k]] = cbind.data.frame( day =closest$day,
year =unique(tmp$Year))
plot(tmp$day,tmp$CONTROL_R.Adult,main=unique(tmp$Year))
abline(v= closest$day,col='red')
}
DOY_FIRST_PEAK_CONTROL_F <- do.call(rbind,DOY_FIRST_PEAK_CONTROL)
###n=10
###plot(as.numeric(format(SPLITTED_CONTROL[[n]]$DATES,'%j')),
### log(SPLITTED_CONTROL[[n]]$CONTROL_R.Adult+1),type='l')
###abline(v=DOY_FIRST_PEAK_CONTROL [[n]]);DOY_FIRST_PEAK_CONTROL[[n]]
###abline(v = DOY_SECOND_PEAK_CONTROL[[n]]);DOY_SECOND_PEAK_CONTROL[[n]]
###Looks goods-
PARAMS_START <- PARAMETERS
TRAJ <- NULL
DOY_PEAK_PARAMS <- NULL
# Main sensitivity loop over all 50 parameters: run the model with each
# parameter at 95% and 105% of its baseline, store the adult trajectories
# (TRAJ) and, per year, the candidate flight peaks (DOY_PEAK_PARAMS).
for ( i in seq(1,50)){
  ###HERE YOU GET THE 0.95 and 1.05 versions of parameter i
  PARAMS_2_95 <- PARAMS_START
  PARAMS_2_95[[i]] <- PARAMS_START[[i]]*0.95
  PARAMS_2_105 <- PARAMS_START
  PARAMS_2_105[[i]] <- PARAMS_START[[i]] * 1.05
  TRAJ_MODEL_95 <- trajectory(POMP_CM,
                              PARAMS_2_95 ,times=seq(1,12054),
                              format = 'data.frame')
  TRAJ_MODEL_105 <- trajectory(POMP_CM, PARAMS_2_105 ,
                               times = seq(1,12054),
                               format = 'data.frame')
  # log adult abundance under each perturbation
  R.adult_95<- log((0.5*rowSums(TRAJ_MODEL_95[,(nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+1):
                                                (nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+nAr)]+1)))
  R.adult_105<- log((0.5*rowSums(TRAJ_MODEL_105[,(nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+1):
                                                  (nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+nAr)]+1)))
  NEW_95 <- cbind.data.frame(DATES,R.adult_95)
  NEW_105 <- cbind.data.frame(DATES, R.adult_105)
  NEW_95$Year <- as.numeric(format( NEW_95$DATES,'%Y'))
  NEW_105 $Year <- as.numeric(format( NEW_105 $DATES,'%Y'))
  # one combined data.frame per parameter: dates, both series, years
  TRAJ_MODELS <- cbind(NEW_95[,-3], NEW_105[,-c(1)])
  TRAJ[[i]] <- TRAJ_MODELS
  SPLITTED_95 <- split( NEW_95, NEW_95$Year)
  SPLITTED_105 <- split( NEW_105, NEW_105$Year)
  DOY_PEAK =NULL
  # per-year peak detection, same smoothing/peak-finding as for the control
  for(k in seq(1,33)){
    tmp_95 <-SPLITTED_95[[k]]
    tmp_105 <- SPLITTED_105[[k]]
    tmp_95$day <- as.numeric(format(tmp_95$DATES,'%j'))
    tmp_105$day <- as.numeric(format(tmp_105$DATES,'%j'))
    tmp_95_100 <- subset(tmp_95, tmp_95$day > 100)
    tmp_105_100 <- subset(tmp_105, tmp_105$day > 100)
    # NOTE(review): $R.adult relies on data.frame partial matching to the
    # R.adult_95 / R.adult_105 columns — works but fragile.
    a_95 <- smooth.spline(x=tmp_95_100$day, y=tmp_95_100$R.adult, spar = 0.5)
    a_105 <- smooth.spline(x=tmp_105_100$day, y=tmp_105_100$R.adult, spar = 0.5)
    df_a_95 <- cbind.data.frame(doy= a_95$x,abund = a_95$y)
    df_a_105 <- cbind.data.frame(doy= a_105$x,abund = a_105$y)
    ind_95_DF =data.frame(findpeaks(df_a_95 $abund,npeaks=3,
                                    minpeakheight=2,
                                    zero='+'))
    ind_105_DF=data.frame(findpeaks( df_a_105 $abund,npeaks=3,
                                     minpeakheight=2,
                                     zero='+'))
    index_95 <- ind_95_DF$X2
    index_105 <- ind_105_DF$X2
    # record all candidate peaks, tagged by perturbation and parameter name
    DOY_PEAK_95 = data.frame(tmp_95_100 [index_95,], id ='95', param =names(PARAMS_2_95[i]))
    colnames(DOY_PEAK_95)[2]='adu'
    DOY_PEAK_105 = data.frame(tmp_105_100[index_105,], id ='105', param =names(PARAMS_2_95[i]))
    colnames(DOY_PEAK_105)[2]='adu'
    DOY_PEAK[[k]] <- rbind.data.frame(DOY_PEAK_95,
                                      DOY_PEAK_105)
  }
  DOY_PEAK_PARAMS[[i]] = DOY_PEAK
}
# For each parameter and year, keep the detected peak closest to DOY 150 in
# both the 95% and 105% runs.
PEAK_FINDER_F <- NULL
for (m in seq(1, length(DOY_PEAK_PARAMS))){
  tmp= DOY_PEAK_PARAMS[[m]]
  # NOTE(review): `split` the variable shadows base::split here; R still
  # resolves split(...) calls to the function, but the name is confusing.
  split = split(TRAJ[[m]],TRAJ[[m]]$Year)
  PEAK_FINDER <- NULL
  for (n in seq(1, 33)){
    temp_95 = subset(tmp[[n]], tmp[[n]]$id==95)
    temp_105= subset(tmp[[n]], tmp[[n]]$id==105)
    closest95=temp_95[which.min(abs(150 - temp_95$day)),]
    closest105=temp_105[which.min(abs(150 - temp_105$day)),]
    PEAK_FINDER[[n]]=cbind.data.frame(d_95 =
                                        closest95$day[1], d_105=closest105$day[1],
                                      year= unique( temp_95$Year),
                                      param= unique( temp_95$param))
  }
  PEAK_FINDER_F[[m]] = PEAK_FINDER
}
# for (k in seq(1,15)){
# #  plot(SPLITTED_Y[[k]]$Day, SPLITTED_Y[[k]]$R.adult_95,
# #       main = unique(SPLITTED_Y[[k]]$Year))
# #  abline(v=PEAK_PARAMS$d_95[[k]])
# #  points(SPLITTED_Y[[k]]$Day, SPLITTED_Y[[k]]$R.adult_105,col='blue')
# #  abline(v=PEAK_PARAMS$d_105[[k]],col='blue')
# #  title(outer = TRUE, unique(PEAK_PARAMS$param))
#
# Manual overrides: automatic peak detection misfires for alphaP_C
# (parameter 24) in 1992 (year index 9) and 1997 (year index 14); the values
# below were determined by hand in the "MANUALLY FIX" section further down.
PEAK_FINDER_F[[24]][[9]]$d_95 <- 168
PEAK_FINDER_F[[24]][[9]]$d_105 <- 168
PEAK_FINDER_F[[24]][[14]]$d_95 <- 170
PEAK_FINDER_F[[24]][[14]]$d_105 <- 170
# Sensitivity index for adult phenology: for each of the 50 parameters,
# average over years the shift in first-peak day between the +5% and -5%
# runs, normalized by 10% of the control peak day.
PERCENT_CHANGE=NULL
for(l in seq(1,50)){
  PEAK_PARAMS <- do.call(rbind,PEAK_FINDER_F[[l]])
  Current_Traj_Param <- TRAJ[[l]]
  Current_Traj_Param$Day <- as.numeric(format(Current_Traj_Param$DATES,'%j'))
  SPLITTED_Y <- split(Current_Traj_Param, Current_Traj_Param$Year)
  PERCENT_CHANGE [[l]]= mean((PEAK_PARAMS[,2]- PEAK_PARAMS[,1])/ (0.10*(DOY_FIRST_PEAK_CONTROL_F
                                                                        $day)))
}
# BUG FIX: the diagnostic plotting loop below was commented out but its two
# closing braces were left live, producing unmatched `}` tokens that made
# this file unparseable; the stray braces are now commented as well.
# for (k in seq(1,15)){
#   plot(SPLITTED_Y[[k]]$Day, SPLITTED_Y[[k]]$R.adult_95,
#        main = unique(SPLITTED_Y[[k]]$Year))
#   abline(v=PEAK_PARAMS$d_95[[k]])
#   points(SPLITTED_Y[[k]]$Day, SPLITTED_Y[[k]]$R.adult_105,col='blue')
#   abline(v=PEAK_PARAMS$d_105[[k]],col='blue')
#   title(outer = TRUE, unique(PEAK_PARAMS$param))
# }
###Trying to find the peak can be a bit picky at times -
###Eyeing it manually to make sure nothing is going wrong
#alphaP_C is wonky...(24th parameter) 1992 and 1997
###1992 - 9th year
##1997 - 14th year
###MANUALLY FIX THESE
# Re-run the model with only alphaP_C perturbed and inspect the 1992/1997
# adult series by hand; the resulting peak days (168 and 170) are the values
# patched into PEAK_FINDER_F above.
PARAMS_FIX_95 <- PARAMS_START
PARAMS_FIX_95[[24]] <- PARAMS_FIX_95[[24]]*0.95
PARAMS_FIX_105 <- PARAMS_START
PARAMS_FIX_105[[24]] <- PARAMS_FIX_105[[24]]*1.05
TRAJ_MODEL_FIX_95 <- trajectory(POMP_CM,
                                PARAMS_FIX_95 ,times=seq(1,12054),
                                format = 'data.frame')
TRAJ_MODEL_FIX_105 <- trajectory(POMP_CM, PARAMS_FIX_105,
                                 times = seq(1,12054),
                                 format = 'data.frame')
# NOTE(review): the two lines below read TRAJ_MODEL_95/_105 (leftovers from
# the last iteration of the main loop) rather than the TRAJ_MODEL_FIX_95/_105
# computed just above, which are otherwise unused — confirm; note the manual
# constants 168/170 were derived from whatever this produced.
R.adult_95_FIX<- log((0.5*rowSums(TRAJ_MODEL_95[,(nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+1):
                                                  (nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+nAr)]+1)))
R.adult_105_FIX<- log((0.5*rowSums(TRAJ_MODEL_105[,(nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+1):
                                                    (nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+nAr)]+1)))
NEW_95_F <- cbind.data.frame(DATES,R.adult_95_FIX)
NEW_105_F <- cbind.data.frame(DATES, R.adult_105_FIX)
NEW_95_F$Year <- as.numeric(format( NEW_95_F$DATES,'%Y'))
NEW_105_F$Year <- as.numeric(format( NEW_105_F $DATES,'%Y'))
TRAJ_MODELS_FIX <- cbind(NEW_95_F[,-3], NEW_105_F[,-c(1)])
# isolate the two problem years
TRAJ_MODEL_1992 <- subset(TRAJ_MODELS_FIX,
                          TRAJ_MODELS_FIX$Year==1992)
TRAJ_MODEL_1997 <- subset(TRAJ_MODELS_FIX,
                          TRAJ_MODELS_FIX$Year ==1997)
TRAJ_MODEL_1992$day <- as.numeric(format(TRAJ_MODEL_1992$DATES,'%j'))
TRAJ_MODEL_1997$day <- as.numeric(format(TRAJ_MODEL_1997$DATES,'%j'))
###ONLY THE 95th need to be fixed
TRAJ_MODEL_1992_100<- subset(TRAJ_MODEL_1992,
                             TRAJ_MODEL_1992$day > 100)
TRAJ_MODEL_1997_100<- subset(TRAJ_MODEL_1997,
                             TRAJ_MODEL_1997$day > 100)
# same smoothing as elsewhere, applied to each year/perturbation
a_95_1992 <- smooth.spline(x=TRAJ_MODEL_1992_100$day, y=TRAJ_MODEL_1992_100$R.adult_95_FIX, spar = 0.5)
a_105_1992 <-smooth.spline(x=TRAJ_MODEL_1992_100$day, y =TRAJ_MODEL_1992_100$R.adult_105_FIX, spar = 0.5)
a_95_1997 <- smooth.spline(x=TRAJ_MODEL_1997_100$day, y=TRAJ_MODEL_1997_100$R.adult_95_FIX, spar = 0.5)
a_105_1997 <-smooth.spline(x=TRAJ_MODEL_1997_100$day, y =TRAJ_MODEL_1997_100$R.adult_105_FIX, spar = 0.5)
df_a_95_92 <- cbind.data.frame(doy= a_95_1992 $x,abund = a_95_1992 $y)
df_a_105_92 <- cbind.data.frame(doy = a_105_1992 $x,abund = a_105_1992 $y)
df_a_95_97 <- cbind.data.frame(doy= a_95_1997 $x,abund = a_95_1997$y)
df_a_105_97 <- cbind.data.frame(doy= a_105_1997 $x,abund = a_105_1997$y)
ind_95_92_DF =data.frame(findpeaks(df_a_95_92 $abund,npeaks=3,
                                   minpeakheight=2,
                                   zero='+'))
ind_105_92_DF =data.frame(findpeaks(df_a_105_92 $abund,npeaks=3,
                                    minpeakheight=2,
                                    zero='+'))
# interactive inspection of the 1992 peaks (adds to the current plot)
points(a_105_1992,col='blue')
abline(v=df_a_105_92[ind_105_92_DF$X2[2],]$doy,col='blue')
###SO 95 for 1992 is DOY 168
###SO 105 for 1992 is DOY 168
ind_95_97_DF =data.frame(findpeaks(df_a_95_97 $abund,npeaks=3,
                                   minpeakheight=2,
                                   zero='+'))
ind_105_97_DF =data.frame(findpeaks(df_a_105_97 $abund,npeaks=3,
                                    minpeakheight=2,
                                    zero='+'))
###SO 95 for 1997 is DOY 170
###SO 105 for 1997 is DOY 170
#SENSITIVITY INDEX
# Attach parameter names and functional-group labels to the phenology
# sensitivities (one value per parameter; 3 parameters per rate function,
# plus the single 'C' and 'COMP' parameters).
SENSITIVITY_DEVELOPMENT_FIRST =
  cbind.data.frame(name = names(PARAMETERS),sensitivity= PERCENT_CHANGE )
# fine-grained label: which rate function each parameter belongs to
SENSITIVITY_DEVELOPMENT_FIRST$Function <- c(rep('birth',3),
                                            rep('dev_e',3),
                                            rep('dev_l1', 3),
                                            rep('dev_l2',3),
                                            rep('dev_l3',3),
                                            rep('dev_l4',3),
                                            rep('dev_l5',3),
                                            rep('dev_p',3),
                                            rep('dev_a',3),
                                            rep('dev_dl',3),
                                            rep('mort_e', 3),
                                            rep('mort_l',3),
                                            rep('mort_p',3),
                                            rep('mort_a',3),
                                            rep('mort_dl',3),
                                            rep('dia_induc',3),
                                            'C','COMP')
# coarse label: birth / development / mortality / diapause group
SENSITIVITY_DEVELOPMENT_FIRST$Function_G <- c(rep('birth',3),
                                              rep('dev',3),
                                              rep('dev', 3),
                                              rep('dev',3),
                                              rep('dev',3),
                                              rep('dev',3),
                                              rep('dev',3),
                                              rep('dev',3),
                                              rep('dev',3),
                                              rep('dia',3),
                                              rep('mort', 3),
                                              rep('mort',3),
                                              rep('mort',3),
                                              rep('mort',3),
                                              rep('mort',3),
                                              rep('dia',3),
                                              'birth','mort')
#############################
#Parameters related to Birth#
#############################
# Heatmap tiles of |sensitivity| per parameter, one panel per functional
# group; all panels share the c(0, 0.6) fill scale so they are comparable.
SENSITIVITY_DEVELOPMENT_FIRST_BIRTH_PARAMETERS <-
  subset(SENSITIVITY_DEVELOPMENT_FIRST, SENSITIVITY_DEVELOPMENT_FIRST$Function_G =='birth')
SENSITIVITY_DEVELOPMENT_FIRST_BIRTH_PARAMETERS$name <- as.character(SENSITIVITY_DEVELOPMENT_FIRST_BIRTH_PARAMETERS$name)
BIRTH <- ggplot(SENSITIVITY_DEVELOPMENT_FIRST_BIRTH_PARAMETERS,
                aes(x= 1, y = name, fill = abs(sensitivity)))+
  geom_tile(color='black')+coord_equal()+
  scale_y_discrete(limits =
                     rev(SENSITIVITY_DEVELOPMENT_FIRST_BIRTH_PARAMETERS$name))+
  theme_classic()+
  theme(axis.text.x=element_blank(),
        axis.ticks.x = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        axis.line = element_blank())+
  scale_fill_viridis(limits =c(0,0.6),guide=FALSE)+
  ggtitle("Fecundity")
###################################
#Parameters related to Development#
###################################
SENSITIVITY_DEVELOPMENT_FIRST_DEV_PARAMETERS <-
  subset(SENSITIVITY_DEVELOPMENT_FIRST, SENSITIVITY_DEVELOPMENT_FIRST$Function_G =='dev')
# rows = life stage, columns = the a/b/c parameters of each rate function
SENSITIVITY_DEVELOPMENT_FIRST_DEV_PARAMETERS$name2 <-
  c(rep("E",3),
    rep("L1",3),
    rep("L2",3),
    rep("L3",3),
    rep("L4",3),
    rep("L5",3),
    rep("P",3),
    rep("A",3))
SENSITIVITY_DEVELOPMENT_FIRST_DEV_PARAMETERS$name2 <- factor(SENSITIVITY_DEVELOPMENT_FIRST_DEV_PARAMETERS$name2,
                                                             levels=rev(c("E", "L1", "L2","L3","L4","L5","P","A")))
SENSITIVITY_DEVELOPMENT_FIRST_DEV_PARAMETERS$param <-rep(c('a','b','c'),8)
DEV<- ggplot(SENSITIVITY_DEVELOPMENT_FIRST_DEV_PARAMETERS,
             aes(x= param, y = name2, fill = abs(sensitivity)))+
  geom_tile(color='black')+coord_equal()+
  theme_classic()+
  theme(
    axis.ticks.x = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    axis.line = element_blank())+
  scale_fill_viridis(limits =c(0,0.6),guide=FALSE)+
  ggtitle("Development")
###################################
#Parameters related to MORTALITY#
###################################
SENSITIVITY_DEVELOPMENT_FIRST_MORT_PARAMETERS <-
  subset(SENSITIVITY_DEVELOPMENT_FIRST, SENSITIVITY_DEVELOPMENT_FIRST$Function_G =='mort')
SENSITIVITY_DEVELOPMENT_FIRST_MORT_PARAMETERS$name2 <-
  c(rep("E",3),
    rep("L",3),
    rep("P",3),
    rep("A",3),
    rep("DL",3),
    "COMP")
SENSITIVITY_DEVELOPMENT_FIRST_MORT_PARAMETERS$name2 <- factor(SENSITIVITY_DEVELOPMENT_FIRST_MORT_PARAMETERS$name2,
                                                              levels=rev(c("E", "L","P","A","DL","COMP")))
SENSITIVITY_DEVELOPMENT_FIRST_MORT_PARAMETERS$param <-c(rep(c('a','b','c'),5),'b')
MORT<- ggplot(SENSITIVITY_DEVELOPMENT_FIRST_MORT_PARAMETERS,
              aes(x= param, y = name2, fill = abs(sensitivity)))+
  geom_tile(color='black')+coord_equal()+
  theme_classic()+
  theme(
    axis.ticks.x = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    axis.line = element_blank())+
  scale_fill_viridis( limits =c(0,0.6),guide=FALSE)+
  ggtitle("Mortality")
###################################
#Parameters related to DIAPAUSE#
###################################
SENSITIVITY_DEVELOPMENT_FIRST_DIA_PARAMETERS <-
  subset(SENSITIVITY_DEVELOPMENT_FIRST, SENSITIVITY_DEVELOPMENT_FIRST$Function_G =='dia')
SENSITIVITY_DEVELOPMENT_FIRST_DIA_PARAMETERS$name2 <-
  c(rep("DIA_1",3),
    rep("DIA_2",3))
SENSITIVITY_DEVELOPMENT_FIRST_DIA_PARAMETERS$param <-rep(c('a','b','c'),2)
DIA<- ggplot(SENSITIVITY_DEVELOPMENT_FIRST_DIA_PARAMETERS,
             aes(x= param, y = name2, fill = abs(sensitivity )))+
  geom_tile(color='black')+coord_equal()+
  theme_classic()+
  theme(
    axis.ticks.x = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    axis.line = element_blank())+
  scale_fill_viridis(limits =c(0,0.6))+
  # NOTE(review): title says "Development" but this is the diapause panel —
  # probably should read "Diapause"; confirm.
  ggtitle("Development")
# combine the four panels (patchwork composition)
BIRTH + DEV + MORT+ DIA
#########################################################################
#########################################################################
########################
###ABUNDANCE- EGG#####
########################
# Sensitivity of the yearly summed (log) egg abundance to +/-5% parameter
# perturbations, averaged over years and normalized by 10% of the control.
###CONTROL
# EGG- ABUNDANCE ----------------------------------------------------------
CONTROL_MODEL<- trajectory(POMP_CM,
                           PARAMETERS ,times=seq(1,12054),
                           format = 'data.frame',method = 'bdf')
# egg compartments are the first nE state columns
CONTROL_EGG <- (rowSums(CONTROL_MODEL [,1:(nE)]))
CONTROL_MODEL_EGG = cbind.data.frame(DATES,Egg=log((0.5*CONTROL_EGG)+1))
CONTROL_MODEL_EGG$Year <- as.numeric(format(CONTROL_MODEL_EGG$DATES,'%Y'))
CONTROL_MODEL_EGG_SPLIT <- split(CONTROL_MODEL_EGG,CONTROL_MODEL_EGG$Year)
# yearly totals of log egg abundance for the control run
SUMMED_SPLIT <-unlist(lapply(CONTROL_MODEL_EGG_SPLIT, function(x) sum(x$Egg )))
###Looks good -
PARAMS_START <- PARAMETERS
PERCENT_CHANGE_EGG_ABUNDANCE<- NULL
for ( i in seq(1,50)){
  PARAMS_2_95 <- PARAMS_START
  PARAMS_2_95[[i]] <- PARAMS_START[[i]]*0.95
  PARAMS_2_105 <- PARAMS_START
  PARAMS_2_105[[i]] <- PARAMS_START[[i]] * 1.05
  TRAJ_MODEL_95 <- trajectory(POMP_CM,
                              PARAMS_2_95 ,times=seq(1,12054),
                              format = 'data.frame')
  TRAJ_MODEL_105 <- trajectory(POMP_CM, PARAMS_2_105 ,
                               times = seq(1,12054),
                               format = 'data.frame')
  #EGGS-0.95
  E_95<- data.frame(Date = DATES,
                    E_95 =log((0.5*rowSums(TRAJ_MODEL_95[,1:(nE)]))+1))
  E_95$Year <- as.numeric(format(E_95$Date, format = '%Y'))
  #EGGS-1.05
  E_105<- data.frame(Date = DATES,
                     E_105=log((0.5*rowSums(TRAJ_MODEL_105[,1:(nE)]))+1))
  E_105$Year <- as.numeric(format(E_105$Date, format = '%Y'))
  ###SPLIT- 0.95
  E_95_SPLIT <- split(E_95, E_95$Year)
  SUMMED_SPLIT_95 <-unlist(lapply( E_95_SPLIT , function(x) sum(x$E_95 )))
  #SPLIT-1.05
  E_105_SPLIT <- split(E_105, E_105$Year)
  SUMMED_SPLIT_105 <-unlist(lapply( E_105_SPLIT , function(x) sum(x$E_105 )))
  # normalized yearly change, averaged over years
  PERCENT_CHANGE_EGG_ABUNDANCE [[i]]= mean((SUMMED_SPLIT_105-SUMMED_SPLIT_95)/ (0.10*(
    SUMMED_SPLIT )))
}
# label each parameter with its life stage / rate group
SENSITIVITY_EGG_FIRST =
  cbind.data.frame(name = names(PARAMETERS),sensitivity= PERCENT_CHANGE_EGG_ABUNDANCE)
SENSITIVITY_EGG_FIRST $Name <- c(rep('E',3),
                                 rep('E',3),
                                 rep('L1', 3),
                                 rep('L2',3),
                                 rep('L3',3),
                                 rep('L4',3),
                                 rep('L5',3),
                                 rep('P',3),
                                 rep('A',3),
                                 rep('DL',3),
                                 rep('E', 3),
                                 rep('L',3),
                                 rep('P',3),
                                 rep('A',3),
                                 rep('DL',3),
                                 rep('DIA',3),
                                 'C','COMP')
# heatmap of absolute sensitivities by stage, then save to disk
ggplot(SENSITIVITY_EGG_FIRST, aes(y=name , x= 1,
                                  label =round(sensitivity,digits=2)))+
  geom_tile(aes(fill =abs((sensitivity)),width = 1,height =1), size = 0.8,
            color='black')+facet_grid(.~Name)+
  scale_fill_viridis()+coord_equal()
save(SENSITIVITY_EGG_FIRST, file = 'Sens_EGG_Abund.RData')
####################################################################
####################################################################
#########################
#######################
###DIAPAUSING LARVAE#####
#########################
#########################
# Same abundance sensitivity as above, now for the diapausing fifth-instar
# larvae (the nDL5 columns after the egg and larval compartments).
CONTROL_MODEL<- trajectory(POMP_CM,
                           PARAMETERS ,times=seq(1,12054),
                           format = 'data.frame',method = 'bdf')
CONTROL_DL <- (rowSums(CONTROL_MODEL [,(nE+nL1+nL2+nL3+nL4+nL5+1):
                                        (nE+nL1+nL2+nL3+nL4+nL5+nDL5)]))
CONTROL_MODEL_DL = cbind.data.frame(DATES,DL=log((0.5*CONTROL_DL)+1))
CONTROL_MODEL_DL$Year <- as.numeric(format(CONTROL_MODEL_DL$DATES,'%Y'))
CONTROL_MODEL_DL_SPLIT <- split(CONTROL_MODEL_DL,CONTROL_MODEL_DL$Year)
# yearly totals of log diapausing-larva abundance for the control run
SUMMED_SPLIT_DL <-unlist(lapply(CONTROL_MODEL_DL_SPLIT, function(x) sum(x$DL)))
###Looks good -
PARAMS_START <- PARAMETERS
PERCENT_CHANGE_DL_ABUNDANCE<- NULL
for ( i in seq(1,50)){
  PARAMS_2_95 <- PARAMS_START
  PARAMS_2_95[[i]] <- PARAMS_START[[i]]*0.95
  PARAMS_2_105 <- PARAMS_START
  PARAMS_2_105[[i]] <- PARAMS_START[[i]] * 1.05
  TRAJ_MODEL_95 <- trajectory(POMP_CM,
                              PARAMS_2_95 ,times=seq(1,12054),
                              format = 'data.frame')
  TRAJ_MODEL_105 <- trajectory(POMP_CM, PARAMS_2_105 ,
                               times = seq(1,12054),
                               format = 'data.frame')
  DL_95<- data.frame(Date = DATES,
                     DL_95 =log((0.5*rowSums(TRAJ_MODEL_95[,(nE+nL1+nL2+nL3+nL4+nL5+1):
                                                             (nE+nL1+nL2+nL3+nL4+nL5+nDL5)]))+1))
  DL_95$Year <- as.numeric(format(DL_95$Date, format = '%Y'))
  DL_105<- data.frame(Date = DATES,
                      DL_105=log((0.5*rowSums(TRAJ_MODEL_105[,(nE+nL1+nL2+nL3+nL4+nL5+1):
                                                               (nE+nL1+nL2+nL3+nL4+nL5+nDL5)]))+1))
  DL_105$Year <- as.numeric(format(DL_105$Date, format = '%Y'))
  DL_95_SPLIT <- split(DL_95, DL_95$Year)
  SUMMED_SPLIT_95_DL <-unlist(lapply(DL_95_SPLIT , function(x) sum(x$DL_95 )))
  DL_105_SPLIT <- split(DL_105, DL_105$Year)
  SUMMED_SPLIT_105_DL <-unlist(lapply(DL_105_SPLIT , function(x) sum(x$DL_105 )))
  # normalized yearly change, averaged over years
  PERCENT_CHANGE_DL_ABUNDANCE [[i]]= mean((SUMMED_SPLIT_105_DL-SUMMED_SPLIT_95_DL)/ (0.10*(
    SUMMED_SPLIT_DL)))
}
# label each parameter with its life stage / rate group
SENSITIVITY_DL_FIRST =
  cbind.data.frame(name = names(PARAMETERS),sensitivity= PERCENT_CHANGE_DL_ABUNDANCE)
SENSITIVITY_DL_FIRST $Name <- c(rep('E',3),
                                rep('E',3),
                                rep('L1', 3),
                                rep('L2',3),
                                rep('L3',3),
                                rep('L4',3),
                                rep('L5',3),
                                rep('P',3),
                                rep('A',3),
                                rep('DL',3),
                                rep('E', 3),
                                rep('L',3),
                                rep('P',3),
                                rep('A',3),
                                rep('DL',3),
                                rep('DIA',3),
                                'C','COMP')
# heatmap of absolute sensitivities by stage
# NOTE(review): unlike the egg and adult sections, this result is not
# save()d to disk — confirm whether that is intentional.
ggplot(SENSITIVITY_DL_FIRST, aes(y=name , x= 1,
                                 label =round(sensitivity,digits=2)))+
  geom_tile(aes(fill =abs((sensitivity)),width = 1,height =1), size = 0.8,
            color='black')+facet_grid(.~Name)+
  scale_fill_viridis()+coord_equal()
###############################
###############################
#############ADULT#############
###############################
################################
CONTROL_MODEL<- trajectory(POMP_CM,
PARAMETERS ,times=seq(1,12054),
format = 'data.frame',method = 'bdf')
CONTROL_A <- rowSums(CONTROL_MODEL[,(nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+1):
(nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+nAr)])
CONTROL_MODEL_A = cbind.data.frame(DATES,A=log((0.5*CONTROL_A)+1))
CONTROL_MODEL_A$Year <- as.numeric(format(CONTROL_MODEL_A$DATES,'%Y'))
CONTROL_MODEL_A_SPLIT <- split(CONTROL_MODEL_A,CONTROL_MODEL_A$Year)
SUMMED_SPLIT_A <-unlist(lapply(CONTROL_MODEL_A_SPLIT, function(x) sum(x$A)))
PARAMS_START <-PARAMETERS
# One-at-a-time local sensitivity of annual adult abundance: each of the 50
# parameters is perturbed by -5% and +5% and the two trajectories compared
# against the unperturbed control run.
PERCENT_CHANGE_A_ABUNDANCE <- NULL
for (i in seq(1, 50)) {
  PARAMS_2_95 <- PARAMS_START
  PARAMS_2_95[[i]] <- PARAMS_START[[i]] * 0.95
  PARAMS_2_105 <- PARAMS_START
  PARAMS_2_105[[i]] <- PARAMS_START[[i]] * 1.05
  # NOTE(review): unlike the control run these use the default ODE method,
  # not method = 'bdf' -- confirm this is intended.
  TRAJ_MODEL_95 <- trajectory(POMP_CM, PARAMS_2_95, times = seq(1, 12054),
                              format = 'data.frame')
  TRAJ_MODEL_105 <- trajectory(POMP_CM, PARAMS_2_105, times = seq(1, 12054),
                               format = 'data.frame')
  # FIX: removed two dead rowSums() computations whose results (A_95, A_105)
  # were immediately overwritten by the data frames built below.
  # Daily adult abundance, halved and log(x + 1)-transformed, for each run.
  A_95 <- data.frame(Date = DATES,
                     A_95 = log((0.5 * rowSums(
                       TRAJ_MODEL_95[, (nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+1):
                                       (nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+nAr)])) + 1))
  A_95$Year <- as.numeric(format(A_95$Date, format = '%Y'))
  A_105 <- data.frame(Date = DATES,
                      A_105 = log((0.5 * rowSums(
                        TRAJ_MODEL_105[, (nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+1):
                                         (nE+nL1+nL2+nL3+nL4+nL5+nDL5+nP+nAr)])) + 1))
  A_105$Year <- as.numeric(format(A_105$Date, format = '%Y'))
  # Annual totals for each perturbed run.
  A_95_SPLIT <- split(A_95, A_95$Year)
  SUMMED_SPLIT_95_A <- unlist(lapply(A_95_SPLIT, function(x) sum(x$A_95)))
  A_105_SPLIT <- split(A_105, A_105$Year)
  SUMMED_SPLIT_105_A <- unlist(lapply(A_105_SPLIT, function(x) sum(x$A_105)))
  # Normalized sensitivity: response difference across the +/-5% perturbation
  # (a 10% total change), scaled by the control run's annual totals.
  PERCENT_CHANGE_A_ABUNDANCE[[i]] <- mean((SUMMED_SPLIT_105_A - SUMMED_SPLIT_95_A) /
                                            (0.10 * (SUMMED_SPLIT_A)))
}
# Assemble the adult-abundance sensitivities into a plotting data frame.
SENSITIVITY_A_FIRST =
cbind.data.frame(name = names(PARAMETERS),sensitivity=
PERCENT_CHANGE_A_ABUNDANCE)
# Stage-group labels for facetting (same layout as the DL block above).
# NOTE(review): 'E' appears twice at the start -- confirm against
# names(PARAMETERS).
SENSITIVITY_A_FIRST $Name <- c(rep('E',3),
rep('E',3),
rep('L1', 3),
rep('L2',3),
rep('L3',3),
rep('L4',3),
rep('L5',3),
rep('P',3),
rep('A',3),
rep('DL',3),
rep('E', 3),
rep('L',3),
rep('P',3),
rep('A',3),
rep('DL',3),
rep('DIA',3),
'C','COMP')
# Heat-map of adult-abundance sensitivities, facetted by stage group.
ggplot(SENSITIVITY_A_FIRST, aes(y=name , x= 1,
label =round(sensitivity,digits=2)))+
geom_tile(aes(fill =abs((sensitivity)),width = 1,height =1), size = 0.8,
color='black')+facet_grid(.~Name)+
scale_fill_viridis()+coord_equal()
save(SENSITIVITY_A_FIRST, file = 'Sens_A_Abund.RData')
|
390944b2d79dc87b29f41a8607e64f5fc52175c9 | 01cf57814df3eb6ece3742d3af1702eed0794bb1 | /man/rasterize_cellplan.Rd | d8d8d95342c41ddd7fd8ce337034e607fb1a6cbe | [] | no_license | Flowminder/mobloc | bbb8fc6ed792f90bc9b2685f8de6e64885d792b3 | 6edb4ddfcb4c468b039d3f45532d0c2ae794ee8c | refs/heads/master | 2020-03-21T23:00:05.591714 | 2018-06-11T10:22:48 | 2018-06-11T10:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 453 | rd | rasterize_cellplan.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rasterize_cellplan.R
\name{rasterize_cellplan}
\alias{rasterize_cellplan}
\title{Rasterize cellplan}
\usage{
rasterize_cellplan(cp, cp_poly, raster, elevation, param)
}
\arguments{
\item{cp}{a cellplan object}
\item{cp_poly}{cellplan polygons}
\item{raster}{a raster with cell indices}
\item{elevation}{a raster with elevation data}
\item{param}{a list of parameter settings}
}
\description{
Rasterize cellplan
}
|
f2b5dafa1f7460ead630a395d6c7088fbd4bc7cd | 9c14a3f22704fdb9d794cb2eee40662d6174d162 | /src/junk/svmRuns.r | 50d2738fa47da43c094aa36b3af5b08b90048838 | [] | no_license | mutual-ai/kernel-methods | b35d31f9996b2bd8c5c1842fcc74e085e960817a | 73c8d5829b9d89d613c15a4eb0882557eafbe9a1 | refs/heads/master | 2020-03-19T23:31:51.668901 | 2016-12-06T18:17:05 | 2016-12-06T18:17:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,787 | r | svmRuns.r | #import dependencies
#libs
library('e1071')
library('kernlab')
library('randomForest')
#src
source('model.svm.r')
# FIX: removed a duplicated source('model.svm.r') call here.
# NOTE(review): given the later reference to a knn prediction, the duplicate
# may have been meant to be a knn model source -- confirm.
source('train.r')
#DATA SETUP
#----------
# Read the sample metadata, the OTU table, and the precomputed weighted
# UniFrac distance matrix.
map = read.table('../data/Yatsunenko_global_gut_study_850_mapping_file.txt',sep='\t',head=T,row=1,check=F,comment='')
otus = read.table('../data/Yatsunenko_global_gut_study_850_gg_ref_13_8_L7.txt',sep='\t',head=T,row=1,check=F,comment='')
uni.dist = read.table('../data/unifracDistances/weighted_unifrac_Yatsunenko_global_gut_study_850_gg_ref_13_8.txt',sep='\t',head=T,row=1,check=F,comment='')
# Transpose so samples are rows instead of columns.
otus = t(otus)
# Keep only samples present in both the metadata and the OTU table.
overlap = intersect(rownames(map),rownames(otus))
map = map[overlap,]
otus = otus[overlap,]
# Same filtering for the UniFrac distance matrix.
# NOTE(review): only the ROWS of uni.dist are subset here; for a square
# distance matrix the columns presumably need the same subset -- confirm.
uni.overlap = intersect(rownames(map),rownames(uni.dist))
uni.map = map[uni.overlap,]
uni.dist = as.matrix(uni.dist[uni.overlap,])
# Euclidean (L2) distances between samples in OTU space.
l2.dist = as.matrix(dist(otus))
# Random permutation of sample indices; the first 80% form the training set.
# NOTE(review): part can be non-integer; 1:part silently truncates.
sampler = sample(1:nrow(uni.dist),nrow(uni.dist),replace=FALSE)
part = 0.8 * nrow(uni.dist)
#L2 OTU SVM
#----------
# Split the L2 distance matrix into training and held-out rows.
l2.training.samples = l2.dist[sampler[1:part],]
l2.validation.samples = l2.dist[sampler[(part+1):nrow(l2.dist)],]
# Outcome: is the sample from the US or not?
l2.training.outcomes = (map[rownames(l2.training.samples),'COUNTRY', drop=F] == "GAZ:United States of America")
# Keep the sample names on the outcome factor.
outcome.names = rownames(l2.training.outcomes)
l2.training.outcomes = factor(l2.training.outcomes)
names(l2.training.outcomes) = outcome.names
# Fit the precomputed-kernel SVM on the training block.
l2.svm.model = svm.model(as.kernelMatrix(l2.training.samples),l2.training.outcomes,C=8, kernel='matrix')
# Predict on the validation block (returns a named character vector).
l2.svm.prediction = predict(l2.svm.model,as.kernelMatrix(l2.validation.samples))
# True outcomes for the validation samples, names kept.
l2.actual.outcomes = (map[rownames(l2.validation.samples),'COUNTRY', drop=F] == "GAZ:United States of America")
actual.outcome.names = rownames(l2.actual.outcomes)
l2.actual.outcomes = as.character(l2.actual.outcomes)
names(l2.actual.outcomes) = actual.outcome.names
# Synchronize list indices.
# FIX: this referenced l2.knn.prediction, which is undefined in this script
# (apparently copied from a knn variant) -- use the svm prediction instead.
# NOTE(review): `overlap` is not used below; confirm whether the confusion
# matrix should be restricted to it.
overlap = intersect(names(l2.actual.outcomes),names(l2.svm.prediction))
# Confusion matrix of predicted vs. actual labels.
l2.svm.confusion = table(l2.svm.prediction, l2.actual.outcomes)
#UNIFRAC SVM
#----------
# Training block of the UniFrac distance matrix; restrict the columns to the
# training samples as well so the kernel matrix is square.
uni.training.samples = uni.dist[sampler[1:part],]
uni.training.samples = uni.training.samples[,rownames(uni.training.samples)]
uni.validation.samples = uni.dist[sampler[(part+1):nrow(uni.dist)],rownames(uni.training.samples)]
# Outcome: is the sample from the US or not?
uni.training.outcomes = (uni.map[rownames(uni.training.samples),'COUNTRY', drop=F] == "GAZ:United States of America")
# Keep the sample names on the outcome factor.
outcome.names = rownames(uni.training.outcomes)
uni.training.outcomes = factor(uni.training.outcomes)
names(uni.training.outcomes) = outcome.names
# FIX: removed an empty make.uni.kern() stub and a dangling incomplete
# assignment (`training.kernel.matrix =`) that silently chained into the
# model assignment below.
# Fit the precomputed-kernel SVM on the training block.
uni.svm.model = svm.model(as.kernelMatrix(uni.training.samples),uni.training.outcomes,C=8, kernel='matrix')
# Predict on the validation block (returns a named character vector).
uni.svm.prediction = predict(uni.svm.model,as.kernelMatrix(uni.validation.samples))
# True outcomes for the validation samples, names kept.
uni.actual.outcomes = (uni.map[rownames(uni.validation.samples),'COUNTRY', drop=F] == "GAZ:United States of America")
actual.outcome.names = rownames(uni.actual.outcomes)
uni.actual.outcomes = as.character(uni.actual.outcomes)
names(uni.actual.outcomes) = actual.outcome.names
# Confusion matrix of predicted vs. actual labels.
uni.svm.confusion = table(uni.svm.prediction, uni.actual.outcomes)
#tune L2 svm
# Outcomes for ALL samples; mwas.evaluate cross-validates internally.
l2.outcomes = (map[rownames(l2.dist),'COUNTRY', drop=F] == "GAZ:United States of America")
# FIX: names were taken from l2.training.outcomes (the 80% training subset),
# whose length does not match the full outcome vector -- use the full
# vector's own rownames instead.
outcome.names = rownames(l2.outcomes)
l2.outcomes = factor(l2.outcomes)
names(l2.outcomes) = outcome.names
# Grid-search C over 2^(1:5) with the precomputed L2 kernel.
l2.svm.result = mwas.evaluate(as.kernelMatrix(l2.dist),l2.outcomes,model.type='SVM',svm.C=2^(1:5), svm.kernel='matrix')
#tune unifrac svm
# Outcomes for ALL samples; mwas.evaluate cross-validates internally.
uni.outcomes = (uni.map[rownames(uni.dist),'COUNTRY', drop=F] == "GAZ:United States of America")
# FIX: same subset/full length mismatch as above -- use the full vector's
# rownames.
outcome.names = rownames(uni.outcomes)
uni.outcomes = factor(uni.outcomes)
names(uni.outcomes) = outcome.names
# Grid-search C over 2^(1:5) with the precomputed UniFrac kernel.
uni.svm.result = mwas.evaluate(as.kernelMatrix(uni.dist),uni.outcomes,model.type='SVM',svm.C=2^(1:5),svm.kernel='matrix')
7967fde161cba28400d0c915091e541fae50d3e0 | fe5f50773bfac59ffad8c74c392628c7362a0e53 | /R/mfgTraits.r | cf76604cf789054b4c01871092b3a60ba3aaf083 | [] | no_license | cran/algaeClassify | d7b0dd9acb17f31050c9da4e686c03c3aa399661 | f661618feaaded028b66075d2dfee27edf1b074f | refs/heads/master | 2022-03-17T06:17:41.166073 | 2022-03-11T22:30:08 | 2022-03-11T22:30:08 | 208,512,920 | 4 | 3 | null | null | null | null | UTF-8 | R | false | false | 997 | r | mfgTraits.r | #' Functional Trait Database derived from Rimet et al.
#'
#' @format A data frame with columns:
#' \describe{
#' \item{phyto_name}{binomial scientific name}
#' \item{genus}{genus name}
#' \item{species}{species name}
#' \item{Mobility.apparatus}{1/0 indicates presence/absence of flagella or motility}
#' \item{Size}{character values 'large' or 'small'; based on 35 micrometer max linear dimension}
#' \item{Colonial}{1/0 indicates typical colonial growth form or not}
#' \item{Filament}{1/0 indicates filamentous growth form or not}
#' \item{Centric}{1/0 indicates diatoms with centric growth form}
#' \item{Gelatinous}{1/0 indicates presence/absence of mucilage}
#' \item{Aerotopes}{1/0 indicates presence/absence of aerotopes}
#' \item{Class}{Taxonomic class}
#' \item{Order}{Taxonomic order}
#' \item{MFG.fromtraits}{MFG (morpho-functional group) classification derived with the traits_to_mfg function}
#' }
#'
#' @docType data
#'
#' @usage data(mfgTraits)
#'
#' @keywords datasets
#'
"mfgTraits"
da6a6c9441083c1fc48b1a3745d2ae05537f8237 | 82d128a4b47a0a3a85e6c768b25df5848b7f3b54 | /CBMRtools/man/CBMRtools.Rd | a74ce6e7ea7eebf61c4e9b596869a913a73d74d8 | [] | no_license | tetomonti/CBMRtools | 37e19db6a23c46882ec850df64a0a664f39d57f6 | 6ea11896423219a1df91528eed7ba7744e1d27d0 | refs/heads/master | 2021-03-27T10:27:51.753463 | 2020-02-06T19:03:14 | 2020-02-06T19:03:14 | 35,168,993 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 253 | rd | CBMRtools.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CBMRtools.R
\docType{package}
\name{CBMRtools}
\alias{CBMRtools}
\title{CBMRtools}
\description{
A package providing access to general-use R scripts developed by montilab members.
}
|
41f0f676a5754ef2269f29956f28dfa8cb6fb4e2 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Miller_And_Freund_S_Probability_And_Statistics_For_Engineers_by_Richard_A._Johnson/CH5/EX5.44/EX5_44.R | 78ed2688162411f765e0784ce6c4db87c8be9233 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 95 | r | EX5_44.R | Data<-c(0.57,0.74,0.26,0.77,0.12)
# Weibull shape/scale parameters for Example 5.44.
alpha = 0.05
beta = 2.0
# Inverse-CDF transform of the probabilities in `Data` (defined above) to
# Weibull variates: x = ((-1/alpha) * ln(1 - u))^(1/beta).
((-1/alpha)*log(1-Data))^(1/beta)
a4dc209c056f012777b2e88772f13b2afe3cb0e9 | 9ced058004c19ba00d837a8e456817d56a565c9d | /tests/testthat/test-oc_key.R | 0c0723cb5fe1e853fa86d41aa069120d2f8d4303 | [] | no_license | cran/opencage | 84594102736a8d97869cceb15ec774c5d7af0f41 | 11a46b26ae7b13a3eca36a2b4a42fa3c998a4361 | refs/heads/master | 2021-05-15T01:06:06.777397 | 2021-02-20T00:00:02 | 2021-02-20T00:00:02 | 58,643,210 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,350 | r | test-oc_key.R | ## Test oc_check_key ##
test_that("oc_check_key checks key", {
expect_error(
oc_check_key(
key = 45
),
"`key` must be a character vector."
)
expect_error(
oc_check_key(
key = c(key_200, key_402)
),
"`key` must be a vector of length one."
)
expect_error(
oc_check_key(
key = NULL
),
"`key` must be provided."
)
expect_error(
oc_check_key(substr(key_200, 1L, 30L)),
"32 character long, alphanumeric string"
)
})
## Test oc_mask_key ##
test_that("oc_mask_key masks key", {
withr::local_envvar(c("OPENCAGE_KEY" = key_200))
expect_match(oc_mask_key(key_200), "OPENCAGE_KEY", fixed = TRUE)
})
test_that("oc_mask_key does nothing if no key present", {
withr::local_envvar(c("OPENCAGE_KEY" = ""))
expect_match(
oc_mask_key("no_key_available"),
"no_key_available",
fixed = TRUE
)
})
## Test oc_key_present ##
test_that("oc_key_present detects if key is present", {
withr::local_envvar(c("OPENCAGE_KEY" = key_200))
expect_true(oc_key_present())
})
test_that("oc_key_present detects if key is not present", {
withr::local_envvar(c("OPENCAGE_KEY" = ""))
expect_false(oc_key_present())
withr::local_envvar(c("OPENCAGE_KEY" = "string_but_no_key!!!11"))
expect_false(oc_key_present())
})
|
3c5ee7b8c6b9e0b325f90e588d48e3466c351ba9 | a9a9863005028343faba4634a8232e6736935ef3 | /data/airquality/air_quality_July4th.R | 69fca63ce68bd08e3b6a9a6cfb0119c5dd62bc2a | [
"MIT"
] | permissive | data-reef/data-reef.github.io | ca3db9f55c9a19d70e5818f0a9e4be1a6199ba90 | 73a7a2a3f9333e3b22c832c27daf8222233415c1 | refs/heads/master | 2019-07-19T07:44:52.482991 | 2017-11-05T21:31:58 | 2017-11-05T21:31:58 | 93,815,387 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,684 | r | air_quality_July4th.R | #Air Polution during July 4th. PM2.5 hourly data.
# Data Reef
library(ggplot2)
library(gridExtra)
library(ggthemes)
library(readr)
# Import the data downloaded from
# https://aqsdr1.epa.gov/aqsweb/aqstmp/airdata/download_files.html
hourly_88101_2016 <- read_csv("hourly_88101_2016.csv")
# Extract the hourly PM2.5 records for July 3rd, 4th, and 5th, 2016.
sub3 <- subset(hourly_88101_2016, hourly_88101_2016$`Date Local` == "2016-07-03")
sub4 <- subset(hourly_88101_2016, hourly_88101_2016$`Date Local` == "2016-07-04")
sub5 <- subset(hourly_88101_2016, hourly_88101_2016$`Date Local` == "2016-07-05")
# One shared helper replaces three copy-pasted ggplot calls: points above the
# 35 ug/m3 standard are drawn red, those at or below it blue.
plot_pm25_day <- function(day_data, day_title) {
  ggplot(day_data,
         aes(x = day_data[["Time Local"]],
             y = day_data[["Sample Measurement"]],
             color = day_data[["Sample Measurement"]])) +
    geom_point(aes(colour = cut(day_data[["Sample Measurement"]],
                                c(-Inf, 35, Inf))),
               shape = 18, size = 3) +
    coord_cartesian(ylim = c(0, 800)) +
    geom_hline(yintercept = 35) +
    annotate("text", 35, 60, label = "Standard") +
    scale_color_manual(name = "mu",
                       values = c("(-Inf,35]" = "blue", "(35, Inf]" = "red"),
                       guide = FALSE) +
    theme_solarized() +
    ggtitle(day_title) +
    labs(x = "Time", y = expression(PM[2.5]~(μg/m^{3}))) +
    theme(plot.title = element_text(size = 16, hjust = 0.5, face = "bold"),
          axis.text = element_text(size = 15),
          axis.title = element_text(size = 12, face = "bold"))
}
# Plot each day's graph.
sub3g <- plot_pm25_day(sub3, "Sunday, July 3,2016")
sub4g <- plot_pm25_day(sub4, "Monday, July 4,2016")
sub5g <- plot_pm25_day(sub5, "Tuesday, July 5,2016")
# Show all the graphs stacked in one plot.
grid.arrange(arrangeGrob(arrangeGrob(sub3g, sub4g, sub5g)))
|
bc88efbf3e12c49a0e14e528fd1221f1b864b979 | 5a5bc9e1b0d59859b4e213b092e19afe232819e1 | /R/raster.R | dcddaad9da8dfd9a73afab4e809c2fcd9f2ca441 | [] | no_license | jrmosedale/microclimates | bf469e07b688e9342c0a8d767db84ee428e778f3 | ae2e61969631506c523bd618c9106a61b00355dd | refs/heads/master | 2021-04-30T15:18:19.091728 | 2018-02-12T11:31:16 | 2018-02-12T11:31:16 | 121,236,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 194 | r | raster.R |
r<-raster(xmn=0, xmx=200, ymn=0, ymx=100, ncol=20, nrow=10)
r[] <- 1:ncell(r)
e <- extent(10, 220, 10, 100)
r <- extend(r, e)
ncol(r)
nrow(r)
res(r)
r
e<-extent(10,50,0,100)
r<-crop(r,e)
r |
68a519d59fba3fffcc7aaf9d1998cd8fa9e6030f | 5d63b292b8cdce7101fa0142d90f06c08c35b515 | /R/internal.R | 10892d0e89a7ef6c39218d8737de8e7290446d51 | [] | no_license | mayoverse/dq | b9ce7fe335a4186bb69c5821885175dececea8bc | 7e12f010572c884b9aed554baa4049374265ee53 | refs/heads/master | 2022-04-13T17:45:32.520154 | 2020-03-13T13:12:48 | 2020-03-13T13:12:48 | 203,206,478 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 369 | r | internal.R |
is.numericish <- function(x)
(is.numeric(x) || inherits(x, "difftime") || inherits(x, "Date") || inherits(x, "POSIXt")) && length(unique(x)) >= getOption("dq.min.unique", 10)
fix.dates <- function(dat)
{
idx <- vapply(dat, function(x) inherits(x, "Date") || inherits(x, "POSIXt") || inherits(x, "difftime"), NA)
dat[idx] <- lapply(dat[idx], as.numeric)
dat
}
|
fcab0334247f3510d63a56ae8bc89d995783485f | 43456e1928807abe61fc01c1027ded9b775000c4 | /R/dimModels.R | 21575c272bc9e73fd27b2bfdab78eeea74caa395 | [] | no_license | prosodylab/ArticleFocusPhrasingJphon | d3b27dc7c1b5ce15c5d682786e585e5a237867bb | 4141d02e41ac2a4850c2ce079caee5463e3a9fa9 | refs/heads/master | 2020-07-05T07:58:53.646765 | 2020-03-03T21:59:47 | 2020-03-03T21:59:47 | 202,580,391 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,293 | r | dimModels.R | #
# Models
#
library(texreg)
##
## Word B: per-syllable models of duration, mean intensity, and max F0.
## Each measure is fit separately on the phrase-initial (dd1) and
## phrase-final (dd2) data; fixed effects are the focus contrasts crossed
## with clause type and branching direction, with uncorrelated by-item and
## by-participant random slopes. Each pair of fits is exported to LaTeX.
##
## look at raw measures by syllable
modelDuration1 <- lmer(data = filter(dd1, Position == 'B'),
                       syllable_duration ~
                         (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                           Decl.vs.Inter * Left.vs.Right +
                         ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                            Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                         ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                            Decl.vs.Inter + Left.vs.Right || participant))
summary(modelDuration1)
print(modelDuration1, correlations = TRUE)
modelDuration2 <- lmer(data = filter(dd2, Position == 'B'),
                       syllable_duration ~
                         (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                           Decl.vs.Inter * Left.vs.Right +
                         ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                            Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                         ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                            Decl.vs.Inter + Left.vs.Right || participant))
summary(modelDuration2)
# Export both duration fits as one two-column LaTeX table.
sink("../Paper/Models/modelsDuration.tex", append = FALSE, split = FALSE)
texreg(list(modelDuration1, modelDuration2),
       label = "modelDuration",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the duration of word B (estimates in sec, SE in parentheses)",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values
       # (the SE warning can be ignored -- SEs in lmerTest are identical):
       override.pval = c(list(summary(modelDuration1)$coefficients[, 'Pr(>|t|)'],
                              summary(modelDuration2)$coefficients[, 'Pr(>|t|)'])))
sink()
#
# intensity model
#
modelIntensity1 <- lmer(data = filter(dd1, Position == 'B'),
                        Mean_Intensity ~
                          (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                            Decl.vs.Inter * Left.vs.Right +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || participant))
summary(modelIntensity1)
modelIntensity2 <- lmer(data = filter(dd2, Position == 'B'),
                        Mean_Intensity ~
                          (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                            Decl.vs.Inter * Left.vs.Right +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || participant))
summary(modelIntensity2)
# Export both intensity fits.
sink("../Paper/Models/modelsIntensity.tex", append = FALSE, split = FALSE)
texreg(list(modelIntensity1, modelIntensity2),
       label = "modelIntensity",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the mean intensity of word B (estimate in dB, SE in parentheses).",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values:
       override.pval = c(list(summary(modelIntensity1)$coefficients[, 'Pr(>|t|)'],
                              summary(modelIntensity2)$coefficients[, 'Pr(>|t|)'])))
sink()
#
# pitch model
#
modelPitch1 <- lmer(data = filter(dd1, Position == 'B'),
                    Max_F0 ~
                      (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                        Decl.vs.Inter * Left.vs.Right +
                      ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                         Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                      ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                         Decl.vs.Inter + Left.vs.Right || participant))
summary(modelPitch1)
modelPitch2 <- lmer(data = filter(dd2, Position == 'B'),
                    Max_F0 ~
                      (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                        Decl.vs.Inter * Left.vs.Right +
                      ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                         Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                      ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                         Decl.vs.Inter + Left.vs.Right || participant))
summary(modelPitch2)
# Export both max-F0 fits.
sink("../Paper/Models/modelsPitch.tex", append = FALSE, split = FALSE)
texreg(list(modelPitch1, modelPitch2),
       label = "modelPitch",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the Max F$_0$ of word B (estimate in Hz, SE in parentheses).",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values:
       override.pval = c(list(summary(modelPitch1)$coefficients[, 'Pr(>|t|)'],
                              summary(modelPitch2)$coefficients[, 'Pr(>|t|)'])))
sink()
##
## models for word A (same structure as the word-B models above)
modelDuration1A <- lmer(data = filter(dd1, Position == 'A'),
                        syllable_duration ~
                          (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                            Decl.vs.Inter * Left.vs.Right +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || participant))
summary(modelDuration1A)
modelDuration2A <- lmer(data = filter(dd2, Position == 'A'),
                        syllable_duration ~
                          (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                            Decl.vs.Inter * Left.vs.Right +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || participant))
summary(modelDuration2A)
# Export both duration fits.
sink("../Paper/Models/modelsDurationA.tex", append = FALSE, split = FALSE)
texreg(list(modelDuration1A, modelDuration2A),
       label = "modelDurationA",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the duration of word A (estimate in sec, SE in parentheses).",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values:
       override.pval = c(list(summary(modelDuration1A)$coefficients[, 'Pr(>|t|)'],
                              summary(modelDuration2A)$coefficients[, 'Pr(>|t|)'])))
sink()
#
# intensity model
#
modelIntensity1A <- lmer(data = filter(dd1, Position == 'A'),
                         Mean_Intensity ~
                           (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                             Decl.vs.Inter * Left.vs.Right +
                           ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                              Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                           ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                              Decl.vs.Inter + Left.vs.Right || participant))
summary(modelIntensity1A)
modelIntensity2A <- lmer(data = filter(dd2, Position == 'A'),
                         Mean_Intensity ~
                           (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                             Decl.vs.Inter * Left.vs.Right +
                           ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                              Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                           ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                              Decl.vs.Inter + Left.vs.Right || participant))
summary(modelIntensity2A)
# Export both intensity fits.
sink("../Paper/Models/modelsIntensityA.tex", append = FALSE, split = FALSE)
texreg(list(modelIntensity1A, modelIntensity2A),
       label = "modelIntensityA",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the mean intensity of word A (estimate in dB, SE in parentheses).",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values:
       override.pval = c(list(summary(modelIntensity1A)$coefficients[, 'Pr(>|t|)'],
                              summary(modelIntensity2A)$coefficients[, 'Pr(>|t|)'])))
sink()
#
# pitch model
#
modelPitch1A <- lmer(data = filter(dd1, Position == 'A'),
                     Max_F0 ~
                       (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                         Decl.vs.Inter * Left.vs.Right +
                       ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                          Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                       ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                          Decl.vs.Inter + Left.vs.Right || participant))
# FIX: this previously printed summary(modelPitch1), the word-B model,
# instead of the word-A model fit just above.
summary(modelPitch1A)
modelPitch2A <- lmer(data = filter(dd2, Position == 'A'),
                     Max_F0 ~
                       (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                         Decl.vs.Inter * Left.vs.Right +
                       ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                          Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                       ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                          Decl.vs.Inter + Left.vs.Right || participant))
summary(modelPitch2A)
# Export both max-F0 fits.
sink("../Paper/Models/modelsPitchA.tex", append = FALSE, split = FALSE)
texreg(list(modelPitch1A, modelPitch2A),
       label = "modelPitchA",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the Max F$_0$ of word A (estimate in Hz, SE in parentheses).",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values:
       override.pval = c(list(summary(modelPitch1A)$coefficients[, 'Pr(>|t|)'],
                              summary(modelPitch2A)$coefficients[, 'Pr(>|t|)'])))
sink()
#
# models for word C (same structure as the word-B models above)
#
modelDuration1C <- lmer(data = filter(dd1, Position == 'C'),
                        syllable_duration ~
                          (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                            Decl.vs.Inter * Left.vs.Right +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || participant))
summary(modelDuration1C)
modelDuration2C <- lmer(data = filter(dd2, Position == 'C'),
                        syllable_duration ~
                          (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                            Decl.vs.Inter * Left.vs.Right +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                          ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                             Decl.vs.Inter + Left.vs.Right || participant))
summary(modelDuration2C)
# Export both duration fits.
sink("../Paper/Models/modelsDurationC.tex", append = FALSE, split = FALSE)
texreg(list(modelDuration1C, modelDuration2C),
       label = "modelDurationC",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the duration of word C (estimate in sec, SE in parentheses).",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values:
       override.pval = c(list(summary(modelDuration1C)$coefficients[, 'Pr(>|t|)'],
                              summary(modelDuration2C)$coefficients[, 'Pr(>|t|)'])))
sink()
#
# intensity model
#
modelIntensity1C <- lmer(data = filter(dd1, Position == 'C'),
                         Mean_Intensity ~
                           (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                             Decl.vs.Inter * Left.vs.Right +
                           ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                              Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                           ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                              Decl.vs.Inter + Left.vs.Right || participant))
summary(modelIntensity1C)
modelIntensity2C <- lmer(data = filter(dd2, Position == 'C'),
                         Mean_Intensity ~
                           (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                             Decl.vs.Inter * Left.vs.Right +
                           ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                              Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                           ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                              Decl.vs.Inter + Left.vs.Right || participant))
summary(modelIntensity2C)
# Export both intensity fits.
sink("../Paper/Models/modelsIntensityC.tex", append = FALSE, split = FALSE)
texreg(list(modelIntensity1C, modelIntensity2C),
       label = "modelIntensityC",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the mean intensity of word C (estimate in dB, SE in parentheses).",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values:
       override.pval = c(list(summary(modelIntensity1C)$coefficients[, 'Pr(>|t|)'],
                              summary(modelIntensity2C)$coefficients[, 'Pr(>|t|)'])))
sink()
#
# pitch model
#
modelPitch1C <- lmer(data = filter(dd1, Position == 'C'),
                     Max_F0 ~
                       (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                         Decl.vs.Inter * Left.vs.Right +
                       ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                          Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                       ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                          Decl.vs.Inter + Left.vs.Right || participant))
summary(modelPitch1C)
modelPitch2C <- lmer(data = filter(dd2, Position == 'C'),
                     Max_F0 ~
                       (Broad.vs.Narrow + First.vs.Late + Second.vs.Third) *
                         Decl.vs.Inter * Left.vs.Right +
                       ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                          Decl.vs.Inter + Left.vs.Right || itemOriginal) +
                       ((Broad.vs.Narrow + First.vs.Late + Second.vs.Third) +
                          Decl.vs.Inter + Left.vs.Right || participant))
summary(modelPitch2C)
# Export both max-F0 fits.
sink("../Paper/Models/modelsPitchC.tex", append = FALSE, split = FALSE)
# FIX: the caption said "mean F$_0$" although the models fit Max_F0; aligned
# with the word-A/word-B pitch captions.
texreg(list(modelPitch1C, modelPitch2C),
       label = "modelPitchC",
       custom.model.names = c("Initial", "Final"),
       naive = TRUE, single.row = TRUE,
       include.aic = FALSE,
       include.deviance = FALSE,
       include.bic = FALSE,
       include.loglik = FALSE,
       include.variance = FALSE,
       dcolumn = TRUE,
       include.nobs = FALSE,
       include.groups = FALSE,
       caption = "Mixed Effects Regression Models for the Max F$_0$ of word C (estimate in Hz, SE in parentheses).",
       use.packages = FALSE, float.pos = "h!", fontsize = "footnotesize",
       # base stars on lmerTest Satterthwaite p-values.
       # FIX: the override previously used the word-B models modelPitch1 /
       # modelPitch2, so the C table carried the wrong p-value stars.
       override.pval = c(list(summary(modelPitch1C)$coefficients[, 'Pr(>|t|)'],
                              summary(modelPitch2C)$coefficients[, 'Pr(>|t|)'])))
sink()
|
f07e3568ec1e01f6bbf52320dc94c67195d694de | 874b22613ad34cc0cedeaca5002c7a9a58eba3c2 | /plot3.R | eea7b4e110d78d3adab548c038db05c0587a9f9f | [] | no_license | urubatan-pacheco/ExData_Plotting1 | 78b88a9643309b5ccc91b19b866b3779606dfe6d | 1c28fc681df57b441907c0b6ccf4b9c28eff7f34 | refs/heads/master | 2020-12-13T20:49:46.351723 | 2015-06-04T02:19:23 | 2015-06-04T02:19:23 | 36,840,520 | 0 | 0 | null | 2015-06-04T01:29:02 | 2015-06-04T01:29:02 | null | UTF-8 | R | false | false | 1,793 | r | plot3.R | library(sqldf)
library(gsubfn)
library(dplyr)
library(lubridate)
# Household power consumption, plot 3: energy sub-metering over
# 2007-02-01 .. 2007-02-02, one line per sub-meter.
# SQL filter applied while reading: rebuild the dd/mm/yyyy Date field as
# yyyy-mm-dd text so it can be range-compared.
query <- "select * from file where substr(Date, -4) || '-' || substr('0' || replace(substr(Date, instr(Date, '/') + 1, 2), '/', ''), -2) || '-' || substr('0' || replace(substr(Date, 1, 2), '/', ''), -2) between '2007-02-01' and '2007-02-02'"
data_file <- "/media/My Passport/work/materia/ds_eda/project/p1/household_power_consumption.txt"
hpc_df <- read.csv.sql(data_file,
                       sql = query,
                       stringsAsFactors = FALSE,
                       sep = ";", header = TRUE)
# Combine Date and Time into a single POSIXct datetime column.
hpc_tb_df <- (hpc_df
              %>% mutate(datetime = dmy_hms(paste(hpc_df$Date, hpc_df$Time)))
              %>% select(-Date, -Time))
# FIX: dropped an unused `op_restore <- par(no.readonly = TRUE)` -- the saved
# settings were never restored, and png()/dev.off() make it unnecessary.
png(file = "./data/figure/plot3.png", width = 480, height = 480, units = "px")
# Empty canvas sized to hold all three series, then one line per sub-meter.
plot(rep(hpc_tb_df$datetime, 3),
     c(hpc_tb_df$Sub_metering_1,
       hpc_tb_df$Sub_metering_2,
       hpc_tb_df$Sub_metering_3),
     type = "n", xlab = "",
     ylab = "Energy sub metering",
     bg = "transparent",
     ylim = range(pretty(c(0, 40))),
     yaxt = "n")
axis(2, seq(0, 30, 10))
lines(hpc_tb_df$datetime, hpc_tb_df$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black", bg = "transparent")
lines(hpc_tb_df$datetime, hpc_tb_df$Sub_metering_2, type = "l", xlab = "", ylab = "Energy sub metering", col = "red", bg = "transparent")
lines(hpc_tb_df$datetime, hpc_tb_df$Sub_metering_3, type = "l", xlab = "", ylab = "Energy sub metering", col = "blue", bg = "transparent")
legend("topright", col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 2)
dev.off()
|
aa15430c188d171f5f3b2137aa58a4f258a110e4 | e0051b6791a5fc11d75746f58968bbfbe6183e73 | /man/url_character.Rd | 6363c98ea8714607d7b3d2379284a6c19b1f8358 | [] | no_license | LuffyLuffy/baidumap | 1d735a634acc6b2c14922a073c98495a6b167c5d | 59656d2ff6d0e2749adb53032ca81d5f374860b0 | refs/heads/master | 2022-11-22T07:57:07.015625 | 2020-07-22T10:22:32 | 2020-07-22T10:22:32 | 281,645,479 | 4 | 0 | null | 2020-07-22T10:20:49 | 2020-07-22T10:20:48 | null | UTF-8 | R | false | true | 542 | rd | url_character.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getPlace.R
\name{url_character}
\alias{url_character}
\title{Transform the query character to raw character
Take in query and city, return the informations}
\usage{
url_character(x)
}
\arguments{
\item{x}{a character string to be transformed}
}
\value{
raw character with %. It's used in getPlace.
}
\description{
Transform the query character to raw character
Take in query and city, return the informations
}
\examples{
\dontrun{
url_character('北京')
# "\%e5\%8c\%97\%e4\%ba\%ac"
}
}
|
534cd631f56a6672245c46cb3308d83cf1d2879f | bf1d99664cde8686366907d92829b8d395ec0f6f | /Tree_Plot_Function.R | 3f74f57d13018d146dd4b5c7a4108f67f3ca07ee | [] | no_license | csheehan3/Sheehan_Biocomputing_Final | 30bd8217b4dba2130828379fca98e9733ae6e908 | 92de188d01b07ca0321101a8c83bb11dfca14a25 | refs/heads/master | 2021-04-08T13:05:25.493824 | 2020-04-02T22:14:14 | 2020-04-02T22:14:14 | 248,778,512 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,029 | r | Tree_Plot_Function.R | Tree_Plot_Function <- function(the_dataframe, the_gene, the_GO_term){
  # Lazily load the GO term -> Entrez-ID lookup list if it is not in scope.
  if(!exists("GO_metabolic_list", mode = "list")){
    print("Warning: need to load GO list for this function")
    load("GO_metabolic_list.Rdata")
  }
  # Expression vector for the gene of interest across the TCGA sample columns.
  # (the_gene is matched against entrez_dictionary, so it is assumed to be an
  # Entrez ID present in the_dataframe — TODO confirm with callers.)
  BACH1_row <- filter(the_dataframe, the_dataframe$entrez_dictionary==the_gene) %>%
    dplyr::select(matches("TCGA")) %>%
    as.numeric()
  filtered_frame <- filter(the_dataframe, the_dataframe$entrez_dictionary %in% GO_metabolic_list[[the_GO_term]]) ##filters frame to only genes part of this GO term
  # NOTE(review): these vectors are grown inside the loop with c(); fine for
  # small GO sets, but O(n^2) for large ones — consider preallocating.
  GO_single_spearman_values <- c()
  GO_single_p_values <- c()
  for (n in 1:nrow(filtered_frame)){ ##runs correlations for each gene in set against the gene-of-interest
    single_gene_row <- filtered_frame[n,] %>%
      dplyr::select(matches("TCGA")) %>%
      as.numeric()
    # Spearman rank correlation between the gene of interest and this gene.
    correlation_value <- cor.test(BACH1_row, single_gene_row, method=c("spearman"))
    GO_single_spearman_values <- c(GO_single_spearman_values, correlation_value$estimate)
    GO_single_p_values <- c(GO_single_p_values, correlation_value$p.value)
  }
  hgnc_conversion <- getBM(attributes = c('entrezgene_id', ###get all the gene symbols for the genes of the GO set for plotting
                                          'hgnc_symbol'),
                           filters = 'entrezgene_id',
                           values = filtered_frame$entrez_dictionary,
                           mart = mart)
  # Assemble results and order by ascending p-value for the bar plot below.
  OXPHOS_tibble <- tibble("Gene_Name"=hgnc_conversion$hgnc_symbol, "Spearman_Coefficient"=GO_single_spearman_values, "P-Value"=GO_single_p_values)
  OXPHOS_tibble <- arrange(OXPHOS_tibble, OXPHOS_tibble$`P-Value`)
  ####Generate the Tree Plots for each of the interesting terms
  # The ggplot object is the last expression, so it is the function's return
  # value; bars are sorted left-to-right by increasing p-value.
  ggplot(OXPHOS_tibble, aes(x=reorder(OXPHOS_tibble$Gene_Name, OXPHOS_tibble$`P-Value`), y=OXPHOS_tibble$Spearman_Coefficient)) + 
    geom_bar(stat="identity" , width=0.3, col="red") + 
    theme_light() + 
    theme(axis.text=element_text(size=3)) +
    labs(title="BACH1 correlations with OXPHOS Gene Set", y="Spearman Coefficient", x="P-Value")
}
|
e7c4197f4f84367ec82cafb17b3b6578c1e635b1 | 4af685718ce2c5f45a9b82e2f0e6527c17c97f5c | /R/ggplot2scales.R | fd911dd3065ddf950a27b4fb4bf4ce0c1330d68c | [] | no_license | liston/palettetown | c98684c62bd16c32c5569136954c7cf491586c0d | 4f772f78578d2ae9167ab76024b7ec8886d4567e | refs/heads/master | 2022-02-17T13:49:04.482462 | 2017-09-08T10:33:02 | 2017-09-08T10:33:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,316 | r | ggplot2scales.R |
#' Add a pokemon palette to a ggplot2 colour or fill scale.
#'
#' Get a pokemon palette by either giving a pokemon number or name.
#'
#'@inheritParams pokepal
#'@param ... Other arguments passed on to \code{discrete_scale} to control
#' name, limits, breaks, labels and so forth.
#'
#'@name scale_colour_poke
#'@details If \code{spread} is given an integer, the full palette is
#' clustered into that many groups (ward clustering in HSV space).
#' The most common colour in each cluster is then returned. It is
#' hoped this will give a good balance between reflecting the pokemons
#' colouring while giving relatively distinct colours.
#'@examples
#'library(ggplot2)
#'qplot(Sepal.Length, Sepal.Width, colour = Species, data=iris) +
#' scale_colour_poke(pokemon = 'Metapod')
#'@rdname scale_colour_poke
#'@export
scale_colour_poke <- function(..., pokemon = 1, spread = NULL){
  # Build the palette for the requested pokemon, then feed it to the
  # corresponding ggplot2 manual scale.
  palette_values <- pokepal(pokemon, spread)
  ggplot2::scale_colour_manual(..., values = palette_values)
}
#'@rdname scale_colour_poke
#'@export
scale_fill_poke <- function(..., pokemon = 1, spread = NULL){
  # Same as scale_colour_poke(), but for the fill aesthetic.
  palette_values <- pokepal(pokemon, spread)
  ggplot2::scale_fill_manual(..., values = palette_values)
}
#'@rdname scale_colour_poke
#'@export
scale_color_poke <- function(..., pokemon = 1, spread = NULL){
  # US-spelling alias of scale_colour_poke().
  palette_values <- pokepal(pokemon, spread)
  ggplot2::scale_color_manual(..., values = palette_values)
}
|
ece8fe8d4ed2602a4bad1b64503f4ad5cbe7bec2 | fee345e168c62f95576e24326f75463f5b349dd0 | /combine.R | 15038b70514c71b127295787c134c25df20f5cf4 | [] | no_license | ryanburge/tags_twitter | 1df03507d3521a191d339495bb363ccf97f681e9 | ab08060614388a237f8f3ffb3ca955ea6d73a194 | refs/heads/master | 2021-01-18T20:02:57.378195 | 2018-12-17T17:00:30 | 2018-12-17T17:00:30 | 86,930,310 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,404 | r | combine.R | library(tidyverse)
library(tidytext)
library(lubridate)
library(stringr)
library(httr)
library(dplyr)
library(wordcloud2)
library(extrafont)
library(scales)
# Load the two tweet captures (December and Feb.-Mar.), parse the textual
# timestamp, and derive a calendar-day column for daily aggregation.
tweets_dec <- read.csv("tweets_dec.csv")
tweets_dec <- tweets_dec %>%
  mutate(date = dmy_hms(time)) %>% mutate(day = as.Date(date))
count_tweets_dec <- tweets_dec %>% group_by(day) %>% count()
tweets <- read.csv("tweets.csv")
tweets <- tweets %>%
  mutate(date = dmy_hms(time)) %>% mutate(day = as.Date(date))
count_tweets <- tweets %>% group_by(day) %>% count()
# Daily tweet counts from both periods stacked into one table.
t_count <- bind_rows(count_tweets, count_tweets_dec) %>% mutate(date = factor(day))
# Shared ggplot2 bar-chart theme: bottom legend, white panel, serif fonts,
# rotated x labels. base_size is accepted but not referenced in the theme.
bar_rb <- function(base_size = 25, base_family = "IBM Plex Serif")
{theme(legend.position = "bottom",
       legend.title = element_blank(),
       legend.spacing.x = unit(1, "cm"),
       legend.spacing.y = unit(1, "cm"),
       panel.background = element_rect(fill = "white"),
       panel.grid.major.y = element_line(colour = "gray48", size = .25),
       panel.grid.minor.y = element_line(colour = "gray48", size = .25, linetype = "dashed"),
       text = element_text(base_family, size = 28),
       plot.title = element_text(family = "IBM Plex Serif", size = 40, vjust =2, face = "bold"),
       plot.subtitle = element_text(family = "IBM Plex Serif", size = 20, vjust =-1),
       plot.caption = element_text(family = "IBM Plex Serif", size =20),
       axis.title.x = element_text(family = "IBM Plex Serif", size =32),
       axis.title.y = element_text(family = "IBM Plex Serif", size =32),
       axis.text.x = element_text(family = "IBM Plex Serif", size =24, angle = 45, hjust = 1)
)
}
# Figure 1: tweets per day, with a dashed marker and shaded region over the
# final factor positions (xintercept 38.45 indexes the day factor levels).
t_count %>%
  na.omit() %>%
  ggplot(., aes(x=as.factor(day), y=n)) +
  geom_col(fill = "cornflowerblue", color = "black") +
  bar_rb() +
 # scale_x_date(breaks = date_breaks("weeks"), labels = date_format("%b. %d")) +
  labs(x= "Date", y = "Number of Tweets", title = "Explosion in the Use of 'Evangelical' on Twitter") +
  geom_vline(xintercept = 38.45, linetype = "dashed", color = "red", size = 2) +
  geom_rect(data=NULL,aes(xmin=38.45,xmax=44.5,ymin=0,ymax=Inf), fill="gray74", alpha = 0.015)
ggsave(file="D://tags_twitter/count_day_compare_small.png", type = "cairo-png", width = 18, height = 15)
# Tokenizer pattern that keeps #hashtags and @mentions together as one token.
reg_words <- "([^A-Za-z_\\d#@']|'(?![A-Za-z_\\d#@]))"
# Tokenize each capture: drop retweets, strip URLs/entities, split into words,
# and remove stop words / purely non-alphabetic tokens.
tidy_tweets_dec <- tweets_dec %>%
  filter(!str_detect(text, "^RT")) %>%
  mutate(text = str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
  unnest_tokens(word, text, token = "regex", pattern = reg_words) %>%
  filter(!word %in% stop_words$word,
         str_detect(word, "[a-z]"))
tidy_tweets <- tweets %>%
  filter(!str_detect(text, "^RT")) %>%
  mutate(text = str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
  unnest_tokens(word, text, token = "regex", pattern = reg_words) %>%
  filter(!word %in% stop_words$word,
         str_detect(word, "[a-z]"))
# Net daily sentiment (positive minus negative word counts, Bing lexicon).
a1 <- tidy_tweets %>%
  inner_join(get_sentiments("bing")) %>%
  count(day, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative)
a2 <- tidy_tweets_dec %>%
  inner_join(get_sentiments("bing")) %>%
  count(day, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative)
sent <- bind_rows(a1, a2)
# NOTE(review): the group labels are assigned positionally (38 Feb.-Mar. days
# then 6 December days) and will break silently if the day counts change.
sent <- sent %>%
  mutate(group = c("Feb. - Mar.", "Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.","Feb. - Mar.", "December", "December", "December","December","December","December" ))
sent$group_f = factor(sent$group, levels=c('Feb. - Mar.','December'))
# Figure 2: daily net sentiment, faceted by capture period.
ggplot(sent, aes(x=day, y=sentiment)) +
  geom_col(fill = "red4", color = "black") +
  bar_rb() +
  # scale_x_date(breaks = date_breaks("weeks"), labels = date_format("%b. %d")) +
  labs(x= "Date", y = "Total Daily Sentiment", title = "The Sentiment of Tweets Containing 'Evangelical'", subtitle = "Using the 'Bing' Lexicon") +
  facet_grid(~group_f, scale = "free_x")
ggsave(file="D://tags_twitter/sentiment_compare_facet_small.png", type = "cairo-png", width = 18, height = 15)
# Most frequent positive/negative words in the December capture.
positive <- tidy_tweets_dec %>%
  inner_join(get_sentiments("bing")) %>%
  filter(sentiment == "positive") %>% count(word) %>% arrange(-n)
negative <- tidy_tweets_dec %>%
  inner_join(get_sentiments("bing")) %>%
  filter(sentiment == "negative") %>% count(word) %>% arrange(-n)
# NOTE(review): flip_bar_rb() is not defined in this file — presumably a
# horizontal variant of bar_rb() defined elsewhere; confirm before running.
pos <- positive %>%
  filter(n >150) %>%
  ggplot(., aes(x=reorder(word,n), y=n)) +
  geom_col(color = "black", fill = "darkorchid") +
  coord_flip()+
  flip_bar_rb() +
  labs(x="", y="", title = "Most Common Positive Words")
neg <- negative %>%
  filter(n >150) %>%
  ggplot(., aes(x=reorder(word,n), y=n)) +
  geom_col(color = "black", fill = "black") +
  coord_flip()+
  flip_bar_rb() +
  labs(x="", y="", title = "Most Common Negative Words")
# Side-by-side composition of the two word plots (patchwork-style `+`).
pos + neg
ggsave(file="D://tags_twitter/sentiment_compare_words.png", type = "cairo-png", width = 18, height = 15)
|
cf42c0a65ee1e5f5c3dc0982f26914d2b373a812 | be8d70e60dd86be6f9f3b2e33d71226763053f6f | /Rpackage/man/plot_nyc_web_images.Rd | ed6387e75978eb22908cadbf9938feea4adcd513 | [
"MIT"
] | permissive | yoni/insta-sound | 0101c9d6352eea853b7655dc1a8d3a2fcd9a9b73 | c9ee4c6be98b065052a566ee9852e8fc0f463ae6 | refs/heads/master | 2021-01-18T07:15:01.917677 | 2014-01-27T00:00:34 | 2014-01-27T00:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 359 | rd | plot_nyc_web_images.Rd | \name{plot_nyc_web_images}
\alias{plot_nyc_web_images}
\title{Generates nyc plots.}
\usage{
plot_nyc_web_images(posts, path)
}
\arguments{
\item{posts}{instagram posts for all neighborhoods}
\item{path}{path in which to store the plots}
}
\description{
Generates nyc plots.
}
\examples{
data(posts_sample)
plot_nyc_web_images(posts_sample, tempdir())
}
|
d6cee00577f5504a7b690b431520b8f19c579d7b | 819a5f52b0bb3be25156257c214b0b18f8c2612c | /R/jhu_data.R | 63e82e8af37c3beb225e1ae85ebc2b0791a9f219 | [
"MIT"
] | permissive | kotliary/sars2pack | f605a81004535e382ee411a89fcae61af9753ee7 | 43abdafcc73ccbfb95c23b27cc20d3aadfd1fd9e | refs/heads/master | 2022-04-21T15:04:23.176685 | 2020-04-14T17:28:54 | 2020-04-14T17:28:54 | 255,723,912 | 1 | 0 | NOASSERTION | 2020-04-14T20:54:44 | 2020-04-14T20:54:44 | null | UTF-8 | R | false | false | 6,908 | r | jhu_data.R | # THIS CODE IS MODIFIED FROM MOREFIELD/MALLERY WITH SOME ADDITIONAL OPTIONS
# ORIGINAL CODE IS IN SOURCE PACKAGE sars2pack/inst/original
# RUN PATTERN DEVELOPED BY C. MOREFIELD and ABSTRACTED by J. Mallery
#' simple function to munge JHU data into long-form tibble
#'
#' This function takes one of three subsets--confirmed,
#' deaths, recovered--and munges.
#'
#' @param subset character(1) of Confirmed, Deaths, Recovered
#'
#' @importFrom readr read_csv cols
#' @importFrom tidyr pivot_longer
#'
#' @return a long-form tibble
#'
#' @keywords internal
.munge_data_from_jhu <- function(subset) {
    # Guard against typos: only the three published global series exist.
    stopifnot(
        subset %in% c('confirmed', 'deaths', 'recovered')
    )
    # Raw CSV on GitHub for the requested cumulative series.
    url = sprintf("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_%s_global.csv", subset)
    # Fetch through the package download cache to avoid re-downloading.
    rpath = s2p_cached_url(url)
    csv = readr::read_csv(rpath, col_types=cols(), guess_max=5000)
    # Wide (one column per date) -> long (date, count) form; the four
    # geography columns are kept as identifiers.
    csv = tidyr::pivot_longer(csv,-c('Province/State','Country/Region','Lat','Long'), names_to = 'date', values_to='count')
    # Rename the slash-containing columns to syntactic names.
    names(csv)[1] <- "ProvinceState"
    names(csv)[2] <- "CountryRegion"
    # Tag each row with its series; dates remain character here and are
    # parsed by the caller (jhu_data).
    csv$subset = tolower(subset)
    return(csv)
}
#' Global COVID-19 data from [JHU CSSEGIS](https://github.com/CSSEGISandData/COVID-19/)
#'
#' This function access and munges the cumulative time series confirmed,
#' deaths and recovered from the data in the repository for the 2019 Novel Coronavirus Visual
#' Dashboard operated by the Johns Hopkins University Center for
#' Systems Science and Engineering (JHU CSSE). Also, Supported by ESRI
#' Living Atlas Team and the Johns Hopkins University Applied Physics
#' Lab (JHU APL).
#'
#' @details
#' Data are updated daily by JHU. Each call to this function redownloads the data
#' from github. No data cleansing is performed. Data are downloaded and then munged
#' into long-form tidy `data.frame`.
#'
#' @importFrom dplyr bind_rows
#' @importFrom lubridate mdy
#'
#' @note Uses https://raw.githubusercontent.com/CSSEGISandData/... as data
#' source, then modifies column names and munges to long form table.
#'
#' @return
#' A tidy `data.frame` (actually, a `tbl_df`) with columns:
#'
#' - ProvinceState: Province or state. **Note**:
#' - CountryRegion: This is the main column for finding countries of interest
#' - Lat: Latitude
#' - Long: Longitude
#' - date: Date
#' - count: The cumulative count of cases for a given geographic area.
#' - subset: either `confirmed`, `deaths`, or `recovered`.
#'
#' @note
#'
#' - US States are treated different from other countries, so are not directly included right now.
#' - Although numbers are meant to be cumulative, there are instances where a day's count might
#' be less than the prior day due to a reclassification of a case. These are not currently corrected
#' in the source data
#'
#' @examples
#' res = jhu_data()
#' colnames(res)
#' head(res)
#' glimpse(res)
#'
#' @source
#' - \url{https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series,mGT, method=c('EG','TD'))}
#'
#' @family data-import
#'
#' @export
jhu_data <- function() {
    ## Fetch all three cumulative series, stack them into a single long
    ## table, and parse the character m/d/y dates into Date objects.
    series <- c('confirmed', 'deaths', 'recovered')
    combined <- dplyr::bind_rows(lapply(series, .munge_data_from_jhu))
    combined$date <- lubridate::mdy(combined$date)
    combined
}
#' simple function to munge JHU US counties data into long-form tibble
#'
#' This function takes one of two subsets--confirmed,
#' deaths--and munges.
#'
#' @param subset character(1) of Confirmed, Deaths
#'
#' @importFrom readr read_csv cols
#' @importFrom tidyr pivot_longer
#'
#' @return a long-form tibble
#'
#' @keywords internal
.munge_us_data_from_jhu <- function(subset) {
    # Only the two published US county-level series exist.
    stopifnot(
        subset %in% c('confirmed', 'deaths')
    )
    # Fetch through the package download cache, consistent with
    # .munge_data_from_jhu() (previously this opened a direct url()
    # connection and re-downloaded on every call); guess_max matches the
    # global munger so column types are guessed from the same sample size.
    url = sprintf("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_%s_US.csv", subset)
    rpath = s2p_cached_url(url)
    csv = readr::read_csv(rpath, col_types=cols(), guess_max=5000)
    # Some snapshots lack the Population column; add it as NA so the
    # pivot below always sees the same set of identifier columns.
    if (is.null(csv[["Population"]])) csv[["Population"]] <- NA_integer_
    # Wide (one column per date) -> long (date, count) form.
    csv = tidyr::pivot_longer(csv,-c("UID", "iso2", "iso3", "code3", "FIPS", "Admin2", "Province_State", "Country_Region", "Lat", "Long_", "Combined_Key", "Population"), names_to = 'date', values_to='count')
    # Normalise column names and zero-pad the FIPS codes.
    names(csv)[names(csv)=='FIPS'] <- 'fips'
    csv$fips = integer_to_fips(csv$fips)
    names(csv)[names(csv)=='Admin2'] <- 'county'
    names(csv)[names(csv)=='Province_State'] <- 'state'
    names(csv)[names(csv)=='Country_Region'] <- 'country'
    names(csv)[names(csv)=='Long_'] <- "Long"
    # Tag each row with its series; dates are parsed by jhu_us_data().
    csv$subset = tolower(subset)
    return(csv)
}
#' US counties COVID-19 data from [JHU CSSEGIS](https://github.com/CSSEGISandData/COVID-19/)
#'
#' This function access and munges the cumulative time series of confirmed,
#' and deaths from the US data in the repository for the 2019 Novel Coronavirus Visual
#' Dashboard operated by the Johns Hopkins University Center for
#' Systems Science and Engineering (JHU CSSE). Also, Supported by ESRI
#' Living Atlas Team and the Johns Hopkins University Applied Physics
#' Lab (JHU APL).
#'
#' @details
#' Data are updated daily by JHU. Each call to this function redownloads the data
#' from github. No data cleansing is performed. Data are downloaded and then munged
#' into long-form tidy `data.frame`.
#'
#' @importFrom dplyr bind_rows
#' @importFrom lubridate mdy
#'
#' @note Uses https://raw.githubusercontent.com/CSSEGISandData/... as data
#' source, then modifies column names and munges to long form table.
#'
#' @return
#' A tidy `data.frame` (actually, a `tbl_df`) with columns:
#'
#' - UID: Universal Identifier
#' - iso2: ISO 3166-1 alpha-2 code
#' - iso3: ISO 3166-1 alpha-3 code
#' - code3
#' - FIPS: Federal Information Processing Standard Publication code
#' - Admin2: County
#' - ProvinceState: Province or state.
#' - CountryRegion: US
#' - Lat: Latitude
#' - Long_: Longitude
#' - Combined_Key: Comma-separated combination of columns `Admin2`, `ProvinceState`, and `CountryRegion`
#' - date: Date
#' - count: The cumulative count of cases for a given geographic area.
#' - subset: either `confirmed` or `deaths`
#'
#' @note
#'
#' - US States are treated different from other countries, so are not directly included right now.
#' - Although numbers are meant to be cumulative, there are instances where a day's count might
#' be less than the prior day due to a reclassification of a case. These are not currently corrected
#' in the source data
#'
#' @examples
#' res = jhu_data()
#' colnames(res)
#' head(res)
#'
#' @source
#' - \url{https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series,mGT, method=c('EG','TD'))}
#'
#' @family data-import
#'
#' @export
jhu_us_data <- function() {
    ## Fetch both US county-level series, stack them into one long table,
    ## and parse the character m/d/y dates into Date objects.
    series <- c('confirmed', 'deaths')
    combined <- dplyr::bind_rows(lapply(series, .munge_us_data_from_jhu))
    combined$date <- lubridate::mdy(combined$date)
    combined
}
|
93e2846003f28dc2554ccfae7cf23c1adfc84f64 | e5bc58f03acbcbad4bcfadf58f242769623679c8 | /plot1.R | cec7af01ff06469b7f2c646e2843b7a7df69cf35 | [] | no_license | chrisgs77/ExData_Plotting1 | 860a24622bb55af6b9c8049400d4c5f2d18e2550 | 830f168633554d1580219bea6b563f8bcd379912 | refs/heads/master | 2022-05-24T23:04:47.682563 | 2020-04-29T15:41:49 | 2020-04-29T15:41:49 | 259,937,478 | 0 | 0 | null | 2020-04-29T13:47:33 | 2020-04-29T13:47:32 | null | UTF-8 | R | false | false | 649 | r | plot1.R |
#download data then read
#note that the file is semicolon (;) separated with ? as NA
plot_data<-read.table(file="household_power_consumption.txt",
                      header=TRUE,sep=';',stringsAsFactors = F,na.strings = "?")
#convert to date format
plot_data$Date<-as.Date(plot_data$Date,"%d/%m/%Y")
#subset to 2/1/2007 and 2/2/2007
# (i.e. keep only the two target days, 2007-02-01 and 2007-02-02)
plot_sub<-subset(plot_data,Date>=as.Date("2/1/2007","%m/%d/%Y")
                 & Date<=as.Date("2/2/2007","%m/%d/%Y"))
# histogram plot 1
# Write the histogram of Global Active Power to plot1.png (default 480x480).
png(filename="plot1.png")
hist(plot_sub$Global_active_power,col='red',main="Global Active Power",
     xlab="Global Active Power (kilowatts)",ylim=c(0,1200))
dev.off()
|
759f5604f167abb4cf90e1e89eb01942ee274237 | 6ab308e123a091936a86ca70f98372ccba2603b0 | /Session5/Session 4.R | 71a064e4ff54d0d47b6c3a04817d9e30ff9243ce | [] | no_license | laura-green/RepoLauraProgramming | 3089f86734716f3b0d260e38851424f3e8fd6adf | 035812fc1ad7f4494ee24732dbb2d8fcd8d50907 | refs/heads/master | 2020-04-15T00:49:40.211864 | 2016-12-01T16:52:49 | 2016-12-01T16:52:49 | 68,156,971 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,042 | r | Session 4.R | # Origin: Code for week 4 of programming class.
# Author: Laura Green
# Title: week_4_code.ipynb
# Last modified: 2016.10.05
# Load the package
# NOTE: Uncomment this line if need to install package
#install.packages("MASS", repos="http://cran.rstudio.com/")
require(MASS)
# Set the correlation parameter and mean
# (beta here is the off-diagonal of the 2x2 covariance matrix; it is
# reassigned further below as the OLS coefficient — two different uses.)
beta = 0.5
SIGMA = matrix(c(1,beta,beta,1), ncol=2)
MU = c(2.0, 1.0)
# Set the sample size
N = 50
# Draw your sample
# N draws from a bivariate normal with mean MU and covariance SIGMA.
out <- mvrnorm(N, mu = MU, Sigma = SIGMA)
# Look at a section of the data
dim(out)
out[1:10,]
# Plot the random variables in the x-y plane
plot(out)
# Add a regression line
plot(out)
abline(lm(out[,2]~out[,1]), col="red") # regression line (y~x)
# Our data set is named `out`, which we split into y and X
y <- out[, 2]
X <- out[, 1]
# Now carry out intermediate calculations
# Manual OLS via the normal equations: beta = (X'X)^{-1} X'y.
# NOTE(review): X has no intercept column, so this is a through-origin fit
# and will not match the lm() line above, which includes an intercept.
XT = t(X)
XTX = XT%*%X
invXTX = solve(XTX)
XTy = XT%*%y
beta = invXTX %*% XTy
beta
# Now add this line to the plot
# Blue line: the no-intercept fit (a=0); red line: lm() with intercept.
plot(out)
abline(lm(out[,2]~out[,1]), col="red") # regression line (y~x)
abline(a=0, b=beta, col="blue")
|
b2b335906062923cadca3943f563edbfbe0aa499 | f32dbf645fa99d7348210951818da2275f9c3602 | /R/GLUEseisMAT.R | 2badccefd78e4b24892d92ce9bf02d644cc1c831 | [] | no_license | cran/RSEIS | 68f9b760cde47cb5dc40f52c71f302cf43c56286 | 877a512c8d450ab381de51bbb405da4507e19227 | refs/heads/master | 2023-08-25T02:13:28.165769 | 2023-08-19T12:32:32 | 2023-08-19T14:30:39 | 17,713,884 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 497 | r | GLUEseisMAT.R | `GLUEseisMAT` <-
function(GFIL)
{
  ### find duplicated stations in a matrix and
  ### fill in the traces that are continuations
  ### return the new matrix and the vector duplicates
  ###
  ### GFIL$KNOTES: station labels, one per column of GFIL$JMAT
  ### GFIL$JMAT:   trace matrix; NA marks samples missing from a trace
  ### Returns (invisibly) list(JMAT = merged matrix, dpl = duplicate indices)
  dot = which(duplicated(GFIL$KNOTES))
  G = GFIL$JMAT
  ## seq_along() instead of 1:length(dot): the old form evaluated to 1:0
  ## when there were no duplicated stations, and the loop body then indexed
  ## G[, NA], raising an error on duplicate-free input.
  for(i in seq_along(dot))
    {
      ## Columns sharing this station's label; copy the non-NA samples of
      ## the continuation column (w[2]) into the first column (w[1]).
      w = which(!is.na(match(GFIL$KNOTES, GFIL$KNOTES[dot[i]])))
      a = G[,w[1]]
      a[!is.na(G[,w[2]])] = G[!is.na(G[,w[2]]), w[2]]
      G[,w[1]] = a
    }
  invisible(list(JMAT=G, dpl=dot) )
}
|
0142bf1830d4ff46cabeac8a7286b4a3eb67c7e2 | 02bf4177ad6159a427ec0a95850fa65b05e9af77 | /confinterval_tstudent_02.R | df980d2f45699b60ed1436424f77553e0f163917 | [] | no_license | vcwild/statinference | f6312e1dce8f6af23412f69f2c1bedf868bec4d9 | 0f17f35864865f35e3dd16d1a56a54a417431221 | refs/heads/master | 2022-07-12T18:18:28.751742 | 2020-05-13T00:08:25 | 2020-05-13T00:08:25 | 263,422,234 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 505 | r | confinterval_tstudent_02.R | "A diet pill is given to 9 subjects over six weeks.
The average difference in weight (follow up - baseline) is -2 pounds.
What would the standard deviation of the difference in weight have to be
for the upper endpoint of the 95% T confidence interval to touch 0?"
# Knowns from the problem statement above.
n <- 9       # number of subjects
xbar <- -2   # observed mean weight difference (pounds), follow up - baseline
mu <- 0      # value the upper confidence bound must reach
p <- 0.975   # 95% two-sided interval leaves 2.5% in each tail
# NOTE(review): the original also set t = 6 (weeks); it played no part in the
# calculation and shadowed base::t(), so it has been removed.

# The upper endpoint of the CI is xbar + t_{n-1,0.975} * s / sqrt(n).
# Setting it equal to mu and solving for s:
# mu = xbar +/- t_n-1 * s/sqrt(n)
# mu - xbar = t_n-1 * s/sqrt(n)
# s = (mu - xbar) * sqrt(n) / t_(n-1)
s <- (mu - xbar)*sqrt(n)/qt(p,df = n - 1)
#>[1] 2.601903
|
f760574fb18f7dcd8ca7ffde4737e053094fc473 | ec2d6f790c243428084c6c8f708955e31129a431 | /man/odds_ratio_test_description.Rd | 3d41bde3c5b58d1d3ad7e36b5d4aff7bdd93c97f | [] | no_license | jaropis/shiny-tools | a221a279c600ca46d3f73620dab80018329579fa | b3d4fdda883585e562d030adf8ac307907d5e8d7 | refs/heads/master | 2023-03-15T03:53:49.461990 | 2021-03-20T12:08:37 | 2021-03-20T12:08:37 | 220,004,701 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 457 | rd | odds_ratio_test_description.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\name{odds_ratio_test_description}
\alias{odds_ratio_test_description}
\title{adds the description of the RunTest_Fisher}
\usage{
odds_ratio_test_description(language)
}
\arguments{
\item{language}{the language in which the app will be written}
}
\value{
the information line for the program RunTest_Fisher
}
\description{
adds the description of the RunTest_Fisher
}
|
85089a94758b69bf3b2d6f69ba7914575699e4a8 | bee3492da4b235152794f3ec17485bc854f362a7 | /cgatshowcase/R/counts2tpm.R | 0eab417bfbe4f0918891fd1c59dab8cf1881992f | [
"MIT"
] | permissive | cgat-developers/cgat-showcase | e62547dee9967d69d21b0bf9c9ef8b972e50e595 | 1bb08796012be1859286e60483407bc34b4b8158 | refs/heads/master | 2020-03-30T20:15:09.053929 | 2019-03-11T13:32:02 | 2019-03-11T13:32:02 | 151,579,954 | 0 | 0 | NOASSERTION | 2019-02-21T16:07:34 | 2018-10-04T13:54:49 | Python | UTF-8 | R | false | false | 2,222 | r | counts2tpm.R | library(optparse)
# Command-line interface: all four options are required in practice (the
# "must_specify" defaults are sentinels, not usable values).
option_list <- list(
    make_option(c("--counts"), default="must_specify",
                help="Specify a counts table to convert to tpm's"),
    make_option(c("--genome"), default="must_specify",
                help="Specify a genome name in ensembl convention"),
    make_option(c("--meanfraglength"), default="must_specify",
                help="Specify a mean fragment length of your libaries"),
    make_option(c("--effectivelength"), default="must_specify",
                help="The effective lengths from kallisto"))
opt <- parse_args(OptionParser(option_list=option_list))
print("Running with the following options:")
print(opt)
# Read the per-sample effective lengths (first column X = gene IDs) and
# average them across samples to get one effective length per gene.
effectivelength <- read.csv(opt$effectivelength)
rownames(effectivelength) <- effectivelength$X
effectivelength$X <- NULL
effectivelength <- rowMeans(effectivelength)
################################
# tpm function
################################
counts_to_tpm <- function(counts, featureLength, meanFragmentLength) {
  # Convert a raw count matrix to TPM (transcripts per million).
  #
  # Args:
  #   counts: numeric matrix/data.frame of counts, genes in rows, samples
  #     in columns.
  #   featureLength: numeric vector, one length per row of counts.
  #   meanFragmentLength: numeric vector, one mean fragment length per
  #     column (library) of counts.
  #
  # Returns: matrix of TPM values (each column sums to 1e6); genes whose
  #   effective length would be < 1 in any library are dropped.

  # Ensure valid arguments.
  stopifnot(length(featureLength) == nrow(counts))
  stopifnot(length(meanFragmentLength) == ncol(counts))

  # Compute effective lengths of features in each library.
  effLen <- do.call(cbind, lapply(seq_len(ncol(counts)), function(i) {
    featureLength - meanFragmentLength[i] + 1
  }))

  # Exclude genes with length less than the mean fragment length.
  idx <- apply(effLen, 1, function(x) min(x) > 1)
  # drop = FALSE keeps the matrix shape when a single gene survives the
  # filter (the original subsetting collapsed a one-row matrix to a vector,
  # after which ncol(counts) was NULL and the function errored).
  counts <- counts[idx, , drop = FALSE]
  effLen <- effLen[idx, , drop = FALSE]
  featureLength <- featureLength[idx]

  # Process one column at a time: rate_i = count_i / effLen_i, normalised so
  # the column sums to 1e6; done in log space for numerical stability.
  tpm <- do.call(cbind, lapply(seq_len(ncol(counts)), function(i) {
    rate = log(counts[,i]) - log(effLen[,i])
    denom = log(sum(exp(rate)))
    exp(rate - denom + log(1e6))
  }))

  # Copy the row and column names from the original matrix.
  colnames(tpm) <- colnames(counts)
  rownames(tpm) <- rownames(counts)
  return(tpm)
}
# Read the counts table (first column X = gene IDs, matching the effective
# length table loaded above).
counts <- read.csv(opt$counts)
rownames(counts) <- counts$X
counts$X <- NULL
# calculating tpm values assuming a
# single user-supplied mean fragment length, repeated once per sample.
tpm <- counts_to_tpm(counts, effectivelength, rep(as.numeric(opt$meanfraglength), length(colnames(counts))))
# output the tpm as a table
write.table(tpm, "DEresults.dir/tpm.tsv", sep="\t")
|
5956d03b2da7401dfc0350ac1deb325a1a8a4ebb | 28540d37a1aa353193e6573391d27a98c9c378c9 | /MechaCarChallenge.RScript.R | f82438e6b025a8acf9de1293a92b0b52af767505 | [] | no_license | linb960/MechaCar_Statistical_Analysis | 8de594fa1fdcceb756c1ca813deb1b99d0e86016 | 88ccaf55e140ebaac103d206f65657ef94d6ee2f | refs/heads/main | 2023-06-04T14:58:54.374135 | 2021-06-22T20:38:32 | 2021-06-22T20:38:32 | 379,079,091 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,188 | r | MechaCarChallenge.RScript.R | # Deliverable 1
library(dplyr)
# Deliverable 1: multiple linear regression predicting mpg from the five
# vehicle design features.
MechaCar_table <- read.csv(file='MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
head(MechaCar_table)
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD ,data=MechaCar_table) #generate multiple linear regression model
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data=MechaCar_table))
# Deliverable 2
# Summary statistics of suspension-coil PSI, overall and per manufacturing lot.
SuspensionCoil_table <- read.csv(file='Suspension_Coil.csv',check.names=F,stringsAsFactors = F)
head(SuspensionCoil_table)
total_summary <- SuspensionCoil_table %>% summarize(Mean = mean(PSI), Median = median(PSI), Variance = var(PSI), SD = sd(PSI), .groups = 'keep')
total_summary
lot_summary <- SuspensionCoil_table %>% group_by(Manufacturing_Lot) %>% summarize(Mean = mean(PSI), Median = median(PSI), Variance = var(PSI), SD = sqrt(var(PSI)), .groups = 'keep')
lot_summary
# Deliverable 3
# One-sample t-tests against the 1500 PSI population mean: all lots combined,
# then each lot separately.
t.test(SuspensionCoil_table$PSI,mu=1500)
t.test(subset(SuspensionCoil_table, Manufacturing_Lot=="Lot1")$PSI,mu=1500)
t.test(subset(SuspensionCoil_table, Manufacturing_Lot=="Lot2")$PSI,mu=1500)
t.test(subset(SuspensionCoil_table, Manufacturing_Lot=="Lot3")$PSI,mu=1500)
|
f62ad87532e9d23b1a9d64755997ac0f4e2acfd1 | f1897fae82edc098385a75d60ee934691a1eddcd | /binomial/tests/testthat/test-check.R | 21b19db1d0224c9745a17fd5c065eb51693fe330 | [] | no_license | stat133-sp19/hw-stat133-Zehao1006 | 6bb9367077e24ee8926dd0af78c38314ba2767df | 17bf42c3b0d93d854fb56d3b46a8f85b3b8efe14 | refs/heads/master | 2020-04-28T14:52:57.081374 | 2019-05-01T02:23:01 | 2019-05-01T02:23:01 | 175,352,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 641 | r | test-check.R | context('check for checkers')
# Each checker is expected to stop() on invalid input; these tests only
# assert that an error is raised, not its message.
test_that("check_prob() works as expected",{
  expect_error(check_prob(2))
  expect_error(check_prob(c(0.5,0.3)))
  expect_error(check_prob('a'))
  expect_error(check_prob(TRUE))
})
test_that("check_trials() works as expected",{
  expect_error(check_trials(-2))
  expect_error(check_trials(c(1,2)))
  expect_error(check_trials('a'))
  expect_error(check_trials(TRUE))
})
# check_success(): rejects negative successes, successes > trials, and
# non-integer successes.
test_that("check_success() works as expected",{
  expect_error(check_success(success = c(-2,2),trials = 5))
  expect_error(check_success(success = c(5,4),trials = 3))
  expect_error(check_success(success = c(0.3,0.4),trials = 3))
})
|
13cd770c6e4eef39294790267b99a9e2acd8062c | 302d026524486f0ad386599fac8dd4f57278ba38 | /man/trueLength.Rd | b0695779af9e314c4a46901205d14e482f032a24 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | cwhitman/GenEst | 96d72e50eafe5e71c25a230c8046f80e152b1963 | 7c84c887b3f671fa8786eee8077512b8d80b7883 | refs/heads/master | 2020-03-30T18:03:28.168191 | 2018-10-11T07:04:03 | 2018-10-11T07:04:03 | 151,481,672 | 0 | 0 | NOASSERTION | 2018-10-03T21:17:44 | 2018-10-03T21:17:44 | null | UTF-8 | R | false | true | 431 | rd | trueLength.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{trueLength}
\alias{trueLength}
\title{Get the length of real things}
\usage{
trueLength(x)
}
\arguments{
\item{x}{vector of values}
}
\value{
integer of how many non-NA values in x
}
\description{
Length of non-missing values in a vector
}
\examples{
x <- c(1, 2, NA, 3)
length(x)
trueLength(x)
}
|
44c8dbde146b3cf8e4d028f65d1ff33b2f23cbcd | 07c0afbc28b2dc1db824add0b971ba87adabb793 | /tests/testthat/test_arg_validation.R | 85abac894203b78c2ff49fc4e576a24fd83d2347 | [
"BSD-2-Clause"
] | permissive | teaguesterling/receptormarker | 0169ddf77b4c5ea53b903c8c428d176441cf824e | 44450cb8a73c6790484a731abb36c67ea1daed48 | refs/heads/master | 2020-12-25T20:42:47.149120 | 2016-03-16T22:38:00 | 2016-03-16T22:38:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,214 | r | test_arg_validation.R | context("Test internal functions that validate other function arguments")
# validate_true_false() must reject any argument value that is not a logical
# TRUE/FALSE, naming the offending argument in the error message.
test_that("making sure arguments are TRUE/FALSE works properly", {
  arg_list <- list(fake_param=TRUE, another_param=23)
  expect_error(validate_true_false(arg_list), "another_param")
  arg_list <- list(fake_param=TRUE, another_param=1)
  expect_error(validate_true_false(arg_list), "another_param")
  arg_list <- list(fake_param=TRUE, another_param=0)
  expect_error(validate_true_false(arg_list), "another_param")
  # Bug fix: the expected-message string was previously inside the
  # validate_true_false() call (misplaced closing paren), so these two
  # expectations passed via an "unused argument" error instead of actually
  # exercising the NULL/NA validation path.
  expect_error(validate_true_false(list(fake_param=NULL)), "fake_param")
  expect_error(validate_true_false(list(fake_param=NA)), "fake_param")
  arg_list <- list(fake_param=TRUE, another_param=FALSE)
  expect_that(validate_true_false(arg_list), not(throws_error()))
})
test_that("making sure arguments are not NULL works properly", {
  # A NULL element should trigger an error that names the offending argument.
  null_args <- list(fake_param = NULL, another_param = 42)
  expect_error(validate_not_null(null_args), "fake_param")
  # A mix of non-NULL values of assorted types (NA included) should pass.
  ok_args <- list(
    fake_param = TRUE,
    another_param = 23,
    third_param = c(1:10),
    data_frame = data.frame(x = 1, y = 1:10, let = "abc"),
    fifth_param = "test characters",
    sixth_param = NA
  )
  expect_that(validate_not_null(ok_args), not(throws_error()))
})
|
993f863c661347c9a6ffc1ee15df617ff6de562c | a0aba1a0a7819f5f06d52108166913a04735a0d1 | /code/prediction/extern/R/ComputeGIES.R | 0c272dd0321839dd789fbde23b0d3a479c7aa1de | [
"BSD-2-Clause"
] | permissive | philipversteeg/validation-yeast | cebdc40908ce6fbda916f9df6b940aeee63e0234 | a977a2b038618530b75577495ade6b7a9e728da4 | refs/heads/master | 2021-05-04T22:14:28.638729 | 2018-02-20T23:00:21 | 2018-02-20T23:00:21 | 120,023,890 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,416 | r | ComputeGIES.R | ##########
# Author: Philip Versteeg (2017)
# Email: pjjpversteeg@gmail.com
# #######
# Compute Greedy Interventional Equivalence Search (GIES), callable from command line
# with arugments. Can be bootstrapped if and can use multiple processes if so.
#
# Args:
# input Input MicroArrayData hdf5 format file
# output Output MicroArrayData hdf5 format file
# maxDegree Parameter used to limit the vertex degree of the estimated graph. Possible values:
# 1. Vector of length 0 (default): vertex degree is not limited.
# 2. Real number r, 0 < r < 1: degree of vertex v is limited to r · nv, where nv
# denotes the number of data points where v was not intervened.
# 3. Single integer: uniform bound of vertex degree for all vertices of the graph.
# 4. Integer vector of length p: vector of individual bounds for the vertex degrees.
# selectCauses Integer vector of indices in 1:p that where causes will return
# Return:
# NULL Write to output hdf5 file 'data'
#
#
# Additional details on settings:
# phase: Character vector listing the phases that should be used;
# possible values: ‘forward’, ‘backward’, and ‘turning’ (cf.
# details).
#
# iterate: Logical indicating whether the phases listed in the argument
# ‘phase’ should be iterated more than once (‘iterate = TRUE’)
# or not.
#
# turning: Setting ‘turning = TRUE’ is equivalent to setting ‘phases =
# c("forward", "backward")’ and ‘iterate = FALSE’; the use of
# the argument ‘turning’ is deprecated.
#
#
##########
library(pcalg)
library(Matrix)
library(rhdf5)
library(foreach)
library(doMC)
source('../libs/LoadMicroArrayData.R')
source('../libs/LoadCausalArray.R')
# Run Greedy Interventional Equivalence Search (GIES, pcalg) on an input
# MicroArrayData HDF5 file and write the estimated p x p directed-edge matrix
# to `output` as HDF5 dataset 'data'.
# The object returned by LoadMicroArrayData is expected to provide: obs, int
# (expression matrices), p, genenames, mutants, intpos, nInt, nObs.
# With bootstraps > 1, GIES is run in parallel (doMC) on resampled
# intervention sets and the 0/1 edge matrices are summed across runs.
# ComputeGIES <- function(input='../data/kemmeren/Kemmeren.hdf5',
# ComputeGIES <- function(input='kemmeren_100ints_100obs/__input__gies.hdf5',
ComputeGIES <- function(input='testset/__input__gies_100_prescreened.hdf5',
output='testset/__output_gies_prescreened.hdf5',
maxDegree=NULL, # vertex-degree bound, passed straight to pcalg::gies (see header)
bootstraps=1, # number of bootstraps for 'bagging' (more like stability selection)
bootstrapFraction=.5, # fraction in [0, 1] to sample in each bootstrap
processes=1, # number of simultaneous threads to compute with
selectCauses=NULL, # list of indices of parents in 1:p that need to be considered.
prescreeningFile=NULL, # the hdf5 file containing the binary CausalArray
verbose=FALSE) # detailed printout
{
# load data
data <- LoadMicroArrayData(file.in=input, verbose=TRUE)
# set maxDegree default (integer(0) means "no degree limit" for pcalg::gies)
if (is.null(maxDegree)) {
maxDegree <- integer(0)
}
# used for both selectCauses and prescreening: TRUE at (i,j) means variable i
# may still be considered as a potential parent of variable j
variableSelMat <- matrix(TRUE, nrow=data$p, ncol=data$p)
rownames(variableSelMat) <- data$genenames
colnames(variableSelMat) <- data$genenames
# return only results for selectCauses.
if (!is.null(selectCauses)) {
cat('Selected causes only!\n-->\tWARNING: USE R-ARRAY ENCODING OF 1...length!\n')
# selectCauses arrives as a comma-separated string of 1-based indices
selectCauses <- sapply(strsplit(selectCauses,','), strtoi)[,1]
# --> need to fill a logical matrix variableSelMat with dimension p x p with TRUE for entry (i,j) if
# says that variable i should be considered as a potential parent for variable j and vice versa for false.
# variableSelMat <- matrix(FALSE, nrow=data$p, ncol=data$p)
variableSelMat[-selectCauses,] <- FALSE # put to false all the rows that are not in selectCauses
}
# optional pre-screening: forbid cause->effect pairs whose entry in the
# provided binary CausalArray is 0
if (is.character(prescreeningFile)) {
print(paste('Pre-screening file given:', prescreeningFile, '.'))
beforeScreeningSum <- sum(variableSelMat == TRUE)
selectCausalArray <- LoadCausalArray(prescreeningFile)
# check for all causes and effects...
if(!is.null(selectCauses)) {
causesGeneId <- intersect(data$genenames[selectCauses], selectCausalArray$causes)
} else {
causesGeneId <- intersect(data$genenames, selectCausalArray$causes)
}
effectsGeneId <- intersect(data$genenames, selectCausalArray$effects)
for (i in causesGeneId) {
for (j in effectsGeneId) {
# look up if each i,j pair exists in causalarray
if (selectCausalArray$array[i, j] == 0) {
variableSelMat[i, j] <- FALSE # put to zero terms
}
}
}
print(paste('Pre-screening removed', beforeScreeningSum - sum(variableSelMat == TRUE), 'pairs from computation.'))
}
# Result matrix (accumulates 0/1 edge matrices across bootstrap runs)
result <- matrix(0, nrow=data$p, ncol=data$p)
## bootstrap sample interventions
if (bootstraps > 1) {
registerDoMC(processes)
foreachResult = foreach(j=1:bootstraps, .inorder=FALSE) %dopar% {
# sample with replacements set of interventions
# intervention.bootstrap <- sample(data$intpos, floor(data$nInt * bootstrapFraction), replace=FALSE) # easier for now
mutants <- sample(data$mutants, floor(data$nInt * bootstrapFraction), replace=FALSE)
# and set of observations
# NOTE(review): `obs` is sampled here but never used below, and the score is
# built on the FULL data with the resampled target list -- verify that this
# is the intended resampling scheme.
obs <- sample(1:data$nObs, floor(data$nObs * bootstrapFraction), replace=FALSE)
intpos <- sapply(mutants,function(x) which(data$genenames == x))
interventions <- lapply(intpos, as.integer) # all interventions
targets <- unique(interventions) # unique interventions
target.index <- match(interventions, targets) # index in target list
cat('Performing bootstrap:', j, '\n')
cat(length(targets), 'unique interventions sampled out of', length(interventions), 'total.\n')
score <- new("GaussL0penIntScore", data=rbind(data$obs, data$int),
targets=targets, target.index=target.index)
if(is.null(selectCauses) & is.null(prescreeningFile)) {
fixedGaps <- NULL
} else {
# pcalg::gies expects TRUE where an edge is FORBIDDEN, hence the negation
fixedGaps <- !variableSelMat
}
str(fixedGaps)
tmp <- gies(score,
fixedGaps=fixedGaps,
phase=c("forward", "backward"),
iterate=FALSE,
maxDegree=maxDegree,
verbose=verbose)
# keep only directed edges: i->j kept only when the reverse edge is absent
tmp.result <- as(tmp$essgraph, "matrix")
tmp.result <- tmp.result & ! t(tmp.result) # result per j
}
# combine results (element-wise sum of the per-bootstrap edge matrices)
for (j in foreachResult) {
result <- result + j
}
## no bootstrap
} else {
# get intervention targets for Gaussian score
interventions <- lapply(data$intpos, as.integer)
targets <- unique(interventions)
target.index <- match(interventions, targets)
# compute gies
score <- new("GaussL0penIntScore", data=rbind(data$obs, data$int),
targets=targets, target.index=target.index)
tmp <- gies(score,
fixedGaps=if (is.null(selectCauses)) NULL else (!variableSelMat),
maxDegree=maxDegree,
phase='turning',
verbose=verbose)
# keep only directed edges: i->j kept only when the reverse edge is absent
result <- as(tmp$essgraph, "matrix")
result <- result & ! t(result)
}
# only take the selected causes as output (rows = causes)
if (!is.null(selectCauses)) {
result <- result[selectCauses,]
}
cat('Found', length(which(result != 0)), 'non-zero edges out of', ncol(result) * nrow(result), 'total.\n')
#####
# saving results
###
cat('Saving results.\n')
# recreate the output file if it already exists
if (!h5createFile(output)) {
unlink(output)
h5createFile(output)
}
# HAVE TO LOAD IT TRANSPOSED IN PYTHON TO GET THE CORRECT SHAPE WHEN LOADING IN!
# --> fixed by using libs/misc.load_array(..., load_from_r=True).
h5write(result, output, 'data', level=9)
# h5write(round(ida.rel.predictions, round.size), output, 'data', level=9)
H5close()
}
#############
# Wrapper code for executing by external bash/python script with arguments
# - need load and save data from disk.
# - use kwargs input= and output= to get input and output data location
# - if none are given, 1st argument is the input data location
# - rest is the keyword arguments
#############
func <- 'ComputeGIES'
#############
# Command-line arguments, e.g.:
#   Rscript ComputeGIES.R in.hdf5 output=out.hdf5 bootstraps=10
commandlineargs <- commandArgs(trailingOnly=TRUE)
# (1.) get argument and default values! (formals() of the target function;
# required arguments without defaults show up as empty symbols)
functionargs <- as.list(formals(func))
# (2.) fill in positional args (everything without an '=' sign, in order)
args <- commandlineargs[grep('=',commandlineargs, invert=TRUE)]
if (length(args) > 0) {
for (i in 1:length(args)) {
functionargs[[i]] <- args[i]
}
}
# (3.) fill in kwargs (if okay formatted)
kwargs <- commandlineargs[grep('=',commandlineargs)]
for (i in kwargs) {
# split on '='; everything after the first '=' is re-joined below, so values
# that themselves contain '=' survive
tmpvec <- unlist(strsplit(i, split='=', fixed=TRUE))
if (length(tmpvec) < 2) {
stop('** argument parse error ** invalid argument: ', i, '\n')
}
if (!tmpvec[1] %in% names(functionargs)) {
stop('** argument parse error ** argument not found: ', tmpvec[1], '\n')
}
functionargs[tmpvec[1]] <- paste(tmpvec[-1], collapse='=')
}
# (4.) check if all arguments are filled and parse strings to numeric, int, bool or NULL if possible
if(!all(sapply(functionargs, function(x) !is.symbol(x)))) {
stop('** argument parse error ** required argument(s) not filled: ', names(functionargs)[!sapply(functionargs, function(x) !is.symbol(x))], '\n')
}
for (i in names(functionargs)) {
# NOTE(review): single-bracket indexing makes x a length-1 sub-LIST, which is
# never NULL for existing names, so the is.null() guard below never skips;
# coercion failures are absorbed by the try() calls instead -- confirm this
# is intended.
x <- functionargs[i]
# the default NULL arguments are still here, skip these.
if (!is.null(x)) {
# check if argument value is numeric
is.num <- FALSE
try(is.num <- !is.na(suppressWarnings(as.numeric(x))), silent=TRUE)
if (is.num) {
functionargs[i] <- as.numeric(x)}
# check if argument value is integer
else {
is.int <- FALSE
try(is.int <- !is.na(suppressWarnings(as.integer(x))), silent=TRUE)
if (is.int) {functionargs[i] <- as.integer(x)}
}
# check if argument value is boolean
if (x == 'TRUE') {functionargs[i] <- TRUE}
if (x == 'FALSE') {functionargs[i] <- FALSE}
# check if argument value is 'NULL' using list syntax or it removes the element
if (x == 'NULL') {functionargs[i] <- list(NULL)}
}
}
# (5.) call function (echo the final argument list first for reproducibility)
cat('********************************\nCalling', func, 'with arguments:\n')
for (i in names(functionargs)) {
if (is.null(functionargs[[i]])) {
val <- 'NULL'
} else {
val <- functionargs[[i]]
}
cat(' ', i, '=', val, '\n')
}
cat('********************************\n')
do.call(func, functionargs)
5d95870af96af0f8b6bdc522e690265710b8cf4f | 608adcf47ef5c776429dfe2e555c20c0ef54547a | /man/Hst.sumup.Rd | cc835e69e804bb76e5ed2fbb3bac4cab8f8cb794 | [] | no_license | cran/widals | b722ad1e1e0938998461d8fe83e8b76437cbc031 | c431b52c0455ad4568072220838b571bacc3b6ba | refs/heads/master | 2021-05-15T01:43:27.321897 | 2019-12-07T21:20:02 | 2019-12-07T21:20:02 | 17,700,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,381 | rd | Hst.sumup.Rd | \name{Hst.sumup}
\alias{Hst.sumup}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create Covariance Matrix
}
\description{
Calculate the covariance matrix of all model covariates
}
\usage{
Hst.sumup(Hst.ls, Hs = NULL, Ht = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Hst.ls}{
Space-time covariates. A list of length \eqn{\tau}, each element containing an \eqn{n} x \eqn{p_st} numeric matrix.
}
\item{Hs}{
Spacial covariates. An \eqn{n} x \eqn{p_s} numeric matrix.
}
\item{Ht}{
Temporal covariates. An \eqn{\tau} x \eqn{p_t} numeric matrix.
}
}
\details{
Important: The order of the arguments in this function is NOT the same as in the returned covariance matrix. The order in the covariance matrix is the same as in other functions in this package: \code{Hs}, \code{Ht}, \code{Hst.ls}.
}
\value{
A \eqn{(p_s+p_t+p_st)} x \eqn{(p_s+p_t+p_st)} numeric, symmetric, non-negative definite matrix.
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\author{
%% ~~who you are~~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
tau <- 20
n <- 10
Ht <- cbind(sin(1:tau), cos(1:tau))
Hs <- cbind(rnorm(10), rnorm(n, 5, 49))
Hst.ls <- list()
for(tt in 1:tau) {
Hst.ls[[tt]] <- cbind(rnorm(n, 1, 0.1), rnorm(n, -200, 21))
}
Hst.sumup(Hst.ls, Hs, Ht)
########### standardize all covariates
x1 <- stnd.Hst.ls(Hst.ls, NULL)$sHst.ls
x2 <- stnd.Hs(Hs, NULL, FALSE)$sHs
x3 <- stnd.Ht(Ht, n)
Hst.sumup(x1, x2, x3)
## The function is currently defined as
function (Hst.ls, Hs = NULL, Ht = NULL)
{
tau <- length(Hst.ls)
if(tau < 1) { tau <- nrow(Ht) }
if(is.null(tau)) { tau <- 10 ; cat("tau assumed to be 10.", "\n") }
n <- nrow(Hst.ls[[1]])
if(is.null(n)) { n <- nrow(Hs) }
big.sum <- 0
for (i in 1:tau) {
if (!is.null(Ht)) {
Ht.mx <- matrix(Ht[i, ], n, ncol(Ht), byrow = TRUE)
}
else {
Ht.mx <- NULL
}
big.sum <- big.sum + crossprod(cbind(Hs, Ht.mx, Hst.ls[[i]]))
}
return(big.sum)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
51e201880416297708bf251a3776ea136f114419 | 4b10c2e443fcbec746cb8f5db8aedf0a0933a439 | /man/SphericalAngleForEquilateralTriangleFromGreatCircleSideLength.Rd | 163e7b8c04c9e403fe8a2b04b536e7fc6cf002c8 | [] | no_license | laurasoul/dispeRse | 81968d976ce9477f45584f62c9a7baa87bb42273 | 0f1316bc963fa8cea3ed3da0f7bb585e8acd7079 | refs/heads/master | 2021-06-05T09:02:45.991357 | 2021-05-24T21:15:14 | 2021-05-24T21:15:14 | 33,941,723 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 920 | rd | SphericalAngleForEquilateralTriangleFromGreatCircleSideLength.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in
% R/SphericalAngleForEquilateralTriangleFromGreatCircleSideLength.R
\name{SphericalAngleForEquilateralTriangleFromGreatCircleSideLength}
\alias{SphericalAngleForEquilateralTriangleFromGreatCircleSideLength}
\title{Finds the spherical angle for an equilateral triangle}
\usage{
SphericalAngleForEquilateralTriangleFromGreatCircleSideLength(
side_length,
EarthRad = 6367.4447
)
}
\arguments{
\item{side_length}{The great circle distance of a side of the triangle in kilometres.}
\item{EarthRad}{Radius of the Earth in kilometres.}
}
\value{
Spherical angle in degrees.
}
\description{
Returns the spherical angle in degrees for an equilateral triangle of known side length
}
\details{
Nothing yet.
}
\examples{
SphericalAngleForEquilateralTriangleFromGreatCircleSideLength(1000)
}
\author{
Graeme T. Lloyd \email{graemetlloyd@gmail.com}
}
|
a1f6c67a631035eff001396dc496cf6f7aa032d6 | e56078d8c6c239152fcb05828ce4ed956b9d7741 | /man/getHyperPathway.Rd | 5b989258fceb8cc24969b6a97d4115bb22e84448 | [] | no_license | sbwilson91/cellcall | 2d9dd5e22870d187daa06a5ba7e030cc0ce0b5d6 | d8e34033714611c0c325c6266aed7c01cf1f1b0a | refs/heads/master | 2023-08-16T18:48:15.400378 | 2021-08-13T03:38:33 | 2021-08-13T03:38:33 | 407,876,678 | 0 | 0 | null | 2021-09-18T14:04:57 | 2021-09-18T14:04:57 | null | UTF-8 | R | false | true | 779 | rd | getHyperPathway.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getSignificantPathway.R
\name{getHyperPathway}
\alias{getHyperPathway}
\title{enrich communication relation on the pathway}
\usage{
getHyperPathway(data, cella_cellb, IS_core = TRUE, Org = "Homo sapiens")
}
\arguments{
\item{data}{a dataframe of communication score with row LR and column cellA-cellB}
\item{cella_cellb}{explore the LR between sender cellA and receiver cellB, eg: "A-B"}
\item{IS_core}{Logical value; if \code{TRUE}, use only the core reference ligand-receptor dataset, otherwise also include the extended datasets}

\item{Org}{Character string giving the species source of the genes; only "Homo sapiens" is supported in this version.}
}
\value{
the dataframe with column: Pvalue, Jaccard, NES and pathway
}
\description{
enrich communication relation on the pathway
}
|
e470b1336f2c7c4d13f61c60ab267413d6d17920 | 1a63c15398a830a9447d86828e76cc2790e6af1e | /R/reports.R | 8534a180d558139ebd28b763c27c8750a4011913 | [
"MIT"
] | permissive | sckott/discgolf | 327fff393389b1f955f85ce50b88265263a49c95 | 7fd0de8878ddc2a014b358def8ba1580165be5e6 | refs/heads/master | 2021-07-12T09:01:07.138933 | 2021-02-26T21:02:48 | 2021-03-01T16:35:14 | 28,190,714 | 7 | 3 | NOASSERTION | 2021-03-01T16:35:14 | 2014-12-18T16:11:01 | R | UTF-8 | R | false | false | 611 | r | reports.R | #' Reports
#'
#' @name reports
#' @param start_date,end_date start and end dates
#' @param category_id a category id
#' @param group_id a group id
#' @param ... Named parameters passed on to [crul::verb-GET]
#' @examples \dontrun{
#' x <- reports_page_views(start_date = "2019-08-01", end_date = "2019-09-11")
#' }
#' @export
#' @rdname reports
reports_page_views <- function(start_date, end_date, category_id = NULL,
                               group_id = NULL, ...) {
  # Build the query, dropping NULL entries, then issue the GET request.
  query <- dc(list(
    start_date = start_date,
    end_date = end_date,
    category_id = category_id,
    group_id = group_id
  ))
  disc_GET("page_view_total_reqs", query, ...)
}
|
05c97ead98714801ad1b956478745950cd9a0d5d | 9eaf80388eab753441863c288c8c45967488894c | /MM1.R | 0075d1444729a8c092f12206e488aadb0ee86694 | [] | no_license | LillyannaHilario/Repositorio | f3cf78f7de853f7b807abf86d59c6a49dea544c9 | 9edc60ca55d011d9bccd31c49923bb766985f79a | refs/heads/master | 2020-06-16T12:07:56.285799 | 2016-11-29T18:39:31 | 2016-11-29T18:39:31 | 75,106,119 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,652 | r | MM1.R |
# Discrete-event simulation of an M/M/1 queue.
#
# Arrivals are Poisson with rate `lambda`, services are exponential with rate
# `mu`. While the queue is non-empty, the time to the next event is
# Exp(lambda + mu) and the event is an arrival with probability
# lambda / (lambda + mu); with an empty queue the next event is always an
# arrival after an Exp(lambda) wait. Progress is printed (in Spanish, as in
# the original output) after every event.
#
# Args:
#   lambda        Arrival rate (default 2, the original hard-coded value).
#   mu            Service rate (default 4, the original hard-coded value).
#   tiempo_limite Simulation time horizon (default 20, as before).
#
# Returns (invisibly): the time-averaged queue length, sum(L_i * dt_i) / T.
mm1 <- function(lambda = 2, mu = 4, tiempo_limite = 20) {
  # Report the mean interarrival and mean service times.
  promedio_lambda <- 1 / lambda
  cat("Tiempo promedio de llegada:", promedio_lambda, "\n")
  promedio_mu <- 1 / mu
  cat("Tiempo promedio del servicio:", promedio_mu, "\n")
  cat("\n")
  tiempo_actual <- 0
  total_cola <- 0   # total_cola[k] = queue length just before event k
  store <- 0        # accumulates queue-length x elapsed-time products
  # First event: an arrival after an Exp(lambda) wait.
  tiempo_exp <- rexp(1, lambda)
  cat("Tiempo que ocurre el evento:", tiempo_exp, "\n")
  cola <- 1
  tiempo_actual <- tiempo_exp
  numero_de_evento <- 1
  llegadas <- 1
  servicios <- 0
  cat("Numero de eventos:", numero_de_evento, "\n")
  cat("Cola actual:", cola, "\n")
  cat("\n")
  while (tiempo_actual < tiempo_limite) {
    numero_de_evento <- numero_de_evento + 1
    cat("Evento #", numero_de_evento, "\n")
    if (cola > 0) {
      # Competing exponentials: next event after Exp(lambda + mu) time; it is
      # an arrival with probability lambda / (lambda + mu).
      tiempo_exp <- rexp(1, lambda + mu)
      cat("Tiempo que ocurre el evento:", tiempo_exp, "\n")
      p <- runif(1, 0, 1)
      total_cola[numero_de_evento] <- cola
      cat("En cola antes del evento:", total_cola[numero_de_evento], "\n")
      # Decide the event type once (the original re-tested p in three
      # separate scalar ifelse() calls).
      es_llegada <- p < lambda / (lambda + mu)
      cola <- if (es_llegada) cola + 1 else cola - 1
      llegadas <- llegadas + as.integer(es_llegada)
      servicios <- servicios + as.integer(!es_llegada)
      cat("Cola actual:", cola, "\n")
    } else {
      # Empty system: the only possible next event is an arrival.
      tiempo_exp <- rexp(1, lambda)
      cat("Tiempo que ocurre el evento:", tiempo_exp, "\n")
      total_cola[numero_de_evento] <- cola
      cola <- 1
      llegadas <- llegadas + 1
      cat("Cola actual:", cola, "\n")
    }
    tiempo_actual <- tiempo_actual + tiempo_exp
    cat("Tiempo transcurrido:", tiempo_actual, "\n")
    # Queue length held during the elapsed interval, weighted by its duration.
    store <- store + tiempo_exp * total_cola[numero_de_evento]
    cat("\n")
  }
  cat("Total de llegadas:", llegadas, "\n")
  cat("Total de servicios completados:", servicios, "\n")
  cat("Longitud promedio de linea:", store / tiempo_actual, "\n")
  # Previously nothing useful was returned; now the summary statistic is
  # returned invisibly so callers may use it (printed output is unchanged).
  invisible(store / tiempo_actual)
}
|
820002e724c80833089fbf190f521741199aeec5 | 38747ebed43ead47e4f39c83e34c2810e3c87df8 | /code/one-time-processing.r | 4cddc7193191579f92ce8f82916de85caa94a989 | [] | no_license | affeder/SHIV-structure | 2ffb44cd6ff150615f7064993a743189ebfa2022 | f2c1b2d69ebf01b4c158f467b926958c6f813f93 | refs/heads/master | 2020-06-10T20:30:02.283918 | 2017-04-26T03:14:37 | 2017-04-26T03:14:37 | 75,883,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,323 | r | one-time-processing.r | #This file converts fasta files into the format usable for the analysis
require(ape)
require(foreach)
require(seqinr)
require(stringdist)
RNA <- read.dna("../dat/RT-SHIV-RNA.fa", format = "fasta")
DNA <- read.dna("../dat/RT-SHIV-DNA.fa", format = "fasta")
seqnames <- c(rownames(RNA), rownames(DNA))
rnanuc <- foreach(i = 1:nrow(RNA), .combine = 'rbind') %do% {
toupper(paste(RNA[i,]))
}
dnanuc <- foreach(i = 1:nrow(DNA), .combine = 'rbind') %do% {
toupper(paste(DNA[i,]))
}
bothnuc <- rbind(rnanuc, dnanuc)
colnames(bothnuc) <- paste("nuc", 1:900, sep = "")
rownames(bothnuc) <- NULL
aas <- matrix(data = NA, ncol = 299, nrow = nrow(bothnuc))
for(i in 1:nrow(bothnuc)){
aas[i,] <- (translate(bothnuc[i,1+3:(900-1)]))
}
colnames(aas) <- paste("AA", 1:299, sep = "")
rownames(aas) <- NULL
infnew <- foreach(nameval = seqnames, .combine = 'rbind') %do% {
strsplit(nameval, "-")[[1]][c(2, 1, 3:5)]
}
colnames(infnew) <- c("samp.loc", "monkid", "weeks", "pID", "f.id")
rownames(bothnuc) <- NULL
write.table(infnew, "../tmp/seqinfo.txt")
write.table(aas, "../tmp/aminoacids.txt")
write.table(bothnuc, "../tmp/nucleotides.txt")
#This is slow (hours)
haps <- apply(bothnuc[, 135:900], 1, paste, collapse = "")
allDists <- stringdistmatrix(haps)
distMat <- allDists
save(distMat, file = "../tmp/distmat")
|
27d0864ae7be3dfcc61e2dafcaa3986373b21447 | 2a87ad7ed0d4944a499fbc4ad174d2231e954938 | /R/Rice_geno_map.R | c682b99f96b1ea89eb463932fb40f37737415428 | [
"MIT"
] | permissive | KosukeHamazaki/RAINBOWR | cb5dcdcb051c90f643e6e1462c7d63b306a7df5d | 0af152aa7c0ea87caa628931135acbca6bf4d2b3 | refs/heads/master | 2023-08-04T20:05:47.833628 | 2023-07-25T08:18:44 | 2023-07-25T08:18:44 | 216,477,356 | 16 | 5 | null | null | null | null | UTF-8 | R | false | false | 808 | r | Rice_geno_map.R | #' Physical map of rice genome
#'
#' @name Rice_geno_map
#'
#' @description A dataset containing the information of the physical map of the rice genome (Zhao et al., 2010; PLoS One 5(5): e10780).
#'
#' @format A data frame with 1311 rows and 3 variables:
#' \describe{
#' \item{marker}{marker name for each marker, character}
#' \item{chr}{chromosome number for each marker, integer}
#' \item{pos}{physical position for each marker, integer, (b.p.)}
#' }
#' @source \url{http://www.ricediversity.org/data/}
#' @references
#' Zhao K, Wright M, Kimball J, Eizenga G, McClung A, Kovach M, Tyagi W, Ali ML, Tung CW, Reynolds A, Bustamante CD, McCouch SR (2010). Genomic Diversity and Introgression in O. sativa Reveal the Impact of Domestication and Breeding on the Rice Genome. PLoS One. 2010; 5(5): e10780.
NULL
|
669af81c08016f4421bc549c535c658d4896adf7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/NISTunits/examples/NISTacreFtTOcubMeter.Rd.R | c22755ab20621df91f2250e1fd3313f692a55011 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 204 | r | NISTacreFtTOcubMeter.Rd.R | library(NISTunits)
### Name: NISTacreFtTOcubMeter
### Title: Convert acre foot to cubic meter
### Aliases: NISTacreFtTOcubMeter
### Keywords: programming
### ** Examples
NISTacreFtTOcubMeter(10)
|
d25fb942b3e079f3dbf6d7b92130f13da94be800 | 907aaa2ef40dd8beeb9d533fa519fac0afaf8e37 | /man/aggreg.Rd | a1512c3c739c88bda067143de8d949873fe132d4 | [] | no_license | AndreasFischer1985/qqBaseX | eaee341155d66d4ff92ca00d6b4d419c3bf1f28a | 98bec0ce041666d09d2c89a4ddc6b84a2349fa53 | refs/heads/master | 2022-09-14T18:58:05.493380 | 2022-08-26T11:52:38 | 2022-08-26T11:52:38 | 189,703,556 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,156 | rd | aggreg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggreg.r
\name{aggreg}
\alias{aggreg}
\title{Function aggreg}
\usage{
aggreg(x, y = NULL, fun = NULL, verbose = F, y.is.dummy = F,
attr = F)
}
\arguments{
\item{x}{Numeric vector or matrix.}
\item{y}{Vector or matrix specifying the subsets to compute summary statistics for.}
\item{fun}{Function specifying the summary statistic to compute. If NULL (default), mean is calculated.}
\item{verbose}{Logical value specifying the verbosity of output. Defaults to FALSE.}
\item{y.is.dummy}{Logical value specifying whether y is already dummy-coded and thus does not have to be converted. Defaults to F.}
\item{attr}{Logical value specifying whether subset-sizes should be added as an attribute of the returned result.}
}
\description{
Splits a numeric vector or matrix into subsets, computes summary statistics for each, and returns the result in a convenient form.
}
\details{
Splits a numeric vector or matrix into subsets, computes summary statistics for each, and returns the result in a convenient form.
}
\examples{
aggreg(x=1:10,y=c(rep(1,5),rep(2,5)))
}
\keyword{modeling}
|
888143cb842f0e845819415ac2024f7a54b0ce50 | 58bc1dcc03e2d21e6649fcc03e9b3014c92d5360 | /man/set_confounds.Rd | 3fd9acbf6c94ce3c76cc790af2c6e555e88ad657 | [] | no_license | yadmasu1/CausalQueries | 58c6435bcd7a6cc09d37b8d9c2140228b6c20b94 | a137adf5ae5031562e56ed589e015fedc069b12f | refs/heads/master | 2022-11-11T03:30:12.983147 | 2020-07-03T22:23:37 | 2020-07-03T22:23:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 531 | rd | set_confounds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_confounds.R
\name{set_confounds}
\alias{set_confounds}
\title{Set confounds}
\usage{
set_confounds(...)
}
\arguments{
\item{...}{arguments passed to set_confound}
}
\value{
An object of class \code{causal_model}. It essentially returns a list containing the elements comprising
a model (e.g. 'statement', 'nodal_types' and 'DAG') with the parameter matrix updated according to `confound`.
}
\description{
alias for set_confound. See set_confound.
}
|
8434a8b794db62ed8e95e08e1cf2be7240a9fc72 | 5a7e12e77006ddd46c9cd69bbb3985945138894b | /man/mesh_triangle_integration.Rd | a7eea87bf116877a7318a0c8a7f3a011b4df160c | [] | no_license | cran/inlabru | ba002f0eb10ba85144dfbfeb1f3af4755b9d8acb | 77e5590164955a652e9af2d6a814fdf2c8d9a1f2 | refs/heads/master | 2023-07-12T01:38:40.674227 | 2023-06-20T13:10:02 | 2023-06-20T13:10:02 | 110,278,992 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,132 | rd | mesh_triangle_integration.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{mesh_triangle_integration}
\alias{mesh_triangle_integration}
\title{Integration scheme for mesh triangle interiors}
\usage{
mesh_triangle_integration(mesh, tri_subset = NULL, nsub = NULL)
}
\arguments{
\item{mesh}{Mesh on which to integrate}
\item{tri_subset}{Optional triangle index vector for integration on a subset
of the mesh triangles (Default \code{NULL})}
\item{nsub}{number of subdivision points along each triangle edge, giving
\code{(nsub + 1)^2} proto-integration points used to compute
the vertex weights
(default \code{NULL=9}, giving 100 integration points for each triangle)}
}
\value{
\code{list} with elements \code{loc} and \code{weight} with
integration points for the mesh
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} Use \code{\link[=fm_int_inla_mesh_core]{fm_int_inla_mesh_core()}} instead.
}
\author{
Finn Lindgren \email{finn.lindgren@gmail.com}
}
\keyword{internal}
|
32e43315f9beb5c83c30d0093d8c7e4a8e992aca | 6c47f3bee1b4d808141312a720b70d2705e5516c | /lectures/lecture10/problemset10_solutions.R | ced55d508405959234c5e552287b3575d5da358c | [] | no_license | ZhouLinli/rclass | 9c9d754b75637e8cbae21c5664dfd2b8c0a2a0ec | d13cca7d59549fb7719f1771b853dab647b3b337 | refs/heads/master | 2023-04-09T21:01:01.027776 | 2020-10-29T18:01:39 | 2020-10-29T18:01:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,376 | r | problemset10_solutions.R | #-------------------------------------------------------------------------------------------------------------------------------------------------------
#Grade (/20)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#=======================================================================================================================================================
#LOAD LIBRARIES
#options(max.print=999999)
library(tidyverse)
library(haven)
library(labelled)
#=======================================================================================================================================================
#=======================================================================================================================================================
#READ IN IC DIRECTORY DATA
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Question 1 (/2)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
# Confirm where files will be downloaded to; set the working directory first
# if needed (setwd() deliberately left commented out).
getwd()
#Set working directory before downloading
#setwd()
#Download the IPEDS 2017 institutional-characteristics header file (HD2017)
download.file("https://nces.ed.gov/ipeds/datacenter/data/HD2017.zip",
destfile = "hd2017", mode = "wb")
#unzip zip file and keep original name
unzip(zipfile = "hd2017" , unzip = "unzip")
#Review documentation before reading in data
hd <- read_csv("hd2017.csv")
#Change names to lowercase (IPEDS ships upper-case column names)
names(hd) <- tolower(names(hd))
names(hd)
#Subset dataframe to only a few columns (identifiers, location, and
#institution-type classifiers used below)
hd <- hd %>%
select(unitid, instnm, city, stabbr, zip, opeid, sector,
iclevel, control, hbcu, tribal, c15basic)
#Label variable and value labels
#Variable labels: attach a human-readable description (labelled package) to
#each retained column so the data frame is self-documenting.
hd <- hd %>%
set_variable_labels(
unitid = "Unique identification number of the institution",
instnm = "Institution name",
city = "City location of institution",
stabbr = "State abbreviation",
zip = "Zip code",
opeid = "Office of Postsecondary Education (OPE) ID Number",
sector = "Sector of institution",
iclevel = "Level of institution",
control = "Control of institution",
hbcu = "Historically Black College or University",
tribal = "Tribal college",
c15basic = "Carnegie Classification 2015: Basic"
)
# Inspect the raw codes before labelling them (hbcu/tribal use 1 = Yes,
# 2 = No; c15basic uses the 2015 Carnegie codes, with -2 meaning "not in the
# Carnegie universe" -- see the label mapping below).
hd %>%
count(hbcu)
hd %>%
count(tribal)
hd %>%
count(c15basic)
#Set value labels: map each integer code to its IPEDS label, producing
#haven_labelled columns.
hd <- hd %>%
set_value_labels(sector = c("Administrative Unit" = 0,
"Public, 4-year or above" = 1,
"Private not-for-profit, 4-year or above" = 2,
"Private for-profit, 4-year or above" = 3,
"Public, 2-year" = 4,
"Private not-for-profit, 2-year" = 5,
"Private for-profit, 2-year" = 6,
"Public, less-than 2-year" = 7,
"Private not-for-profit, less-than 2-year" = 8,
"Private for-profit, less-than 2-year" = 9,
"Sector unknown (not active)" = 99),
iclevel = c("Four or more years" = 1,
"At least 2 but less than 4 years" = 2,
"Less than 2 years (below associate)" = 3,
"{Not available}" = -3),
control = c("Public" = 1, "Private not-for-profit" = 2,
"Private for-profit" = 3,
"{Not available}" = -3),
hbcu = c("Yes" = 1,
"No" = 2),
tribal = c("Yes" = 1,
"No" = 2),
c15basic = c("Associate's Colleges: High Transfer-High Traditional" = 1,
"Associate's Colleges: High Transfer-Mixed Traditional/Nontraditional" = 2,
"Associate's Colleges: High Transfer-High Nontraditional" = 3,
"Associate's Colleges: Mixed Transfer/Career & Technical-High Traditional" = 4,
"Associate's Colleges: Mixed Transfer/Career & Technical-Mixed Traditional/Nontraditional" = 5,
"Associate's Colleges: Mixed Transfer/Career & Technical-High Nontraditional" = 6,
"Associate's Colleges: High Career & Technical-High Traditional" = 7,
"Associate's Colleges: High Career & Technical-Mixed Traditional/Nontraditional" = 8,
"Associate's Colleges: High Career & Technical-High Nontraditional" = 9,
"Special Focus Two-Year: Health Professions" = 10,
"Special Focus Two-Year: Technical Professions" = 11,
"Special Focus Two-Year: Arts & Design" = 12,
"Special Focus Two-Year: Other Fields" = 13,
"Baccalaureate/Associate's Colleges: Associate's Dominant" = 14,
"Doctoral Universities: Highest Research Activity" = 15,
"Doctoral Universities: Higher Research Activity" = 16,
"Doctoral Universities: Moderate Research Activity" = 17,
"Master's Colleges & Universities: Larger Programs" = 18,
"Master's Colleges & Universities: Medium Programs" = 19,
"Master's Colleges & Universities: Small Programs" = 20,
"Baccalaureate Colleges: Arts & Sciences Focus" = 21,
"Baccalaureate Colleges: Diverse Fields" = 22,
"Baccalaureate/Associate's Colleges: Mixed Baccalaureate/Associate's" = 23,
"Special Focus Four-Year: Faith-Related Institutions" = 24,
"Special Focus Four-Year: Medical Schools & Centers" = 25,
"Special Focus Four-Year: Other Health Professions Schools" = 26,
"Special Focus Four-Year: Engineering Schools" = 27,
"Special Focus Four-Year: Other Technology-Related Schools" = 28,
"Special Focus Four-Year: Business & Management Schools" = 29,
"Special Focus Four-Year: Arts, Music & Design Schools" = 30,
"Special Focus Four-Year: Law Schools" = 31,
"Special Focus Four-Year: Other Special Focus Institutions" = 32,
"Tribal Colleges" = 33,
"Not applicable, not in Carnegie universe (not accredited or nondegree-granting)" = -2)
)
#Check a few vars
#Sanity checks on the labelled columns: class should show the haven/labelled
#class, typeof the underlying storage; as_factor() swaps codes for labels.
class(hd$sector)
typeof(hd$sector)
hd %>%
count(sector) %>%
as_factor()
class(hd$c15basic)
typeof(hd$c15basic)
hd %>%
count(c15basic) %>%
as_factor()
#View all variable labels
var_label(hd)
#View all value labels
val_labels(hd)
#Investigate data/tidy
head(hd, n = 20)
hd %>%
filter(c15basic == 8)
#investigate data structure
#Confirm unitid uniquely identifies rows: every group should have n_per_group == 1.
hd %>% # start with data frame object
group_by(unitid) %>% # group by unitid
summarise(n_per_group=n()) %>% # create measure of number of obs per group
ungroup %>% # ungroup (otherwise frequency table [next step] created) separately for each group (i.e., separate frequency table for each value of unitid)
count(n_per_group)
#=======================================================================================================================================================
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> YOUR CODE STARTS HERE. FOLLOW THE INSTRUCTIONS FROM PDF >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#=======================================================================================================================================================
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Question 2 (/4.5)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
########################### READ IN FLAGS DATA
#Download the 2017 response-status flags file; mode = "wb" is required for a
#binary (zip) download on Windows.
download.file("https://nces.ed.gov/ipeds/datacenter/data/FLAGS2017.zip",
destfile = "flags2017", mode = "wb")
#unzip zip file and keep original name
unzip(zipfile = "flags2017" , unzip = "unzip")
#Review documentation before reading in data
flags <- read_csv("flags2017.csv") #No parsing errors
#Change names to lowercase
names(flags) <- tolower(names(flags))
names(flags)
#Subset dataframe to include unitid and stat_e12, read documentation
#contains("e12") keeps every 12-month-enrollment flag column, not only stat_e12.
flags <- flags %>%
select(unitid, contains("e12"))
names(flags)
#Label variable and value labels
#Variable labels for the 12-month-enrollment flag columns.
#(Typo fixed in lock_e12: "whe" -> "when".)
flags <- flags %>%
  set_variable_labels(
    unitid = "Unique identification number of the institution",
    stat_e12 = "Response status of institution - 12-month enrollment",
    lock_e12 = "Status of 12-month enrollment component when data collection closed",
    prch_e12 = "Parent/child indicator for 12-month enrollment",
    idx_e12 = "ID number of parent institution - 12-month enrollment",
    pce12_f = "Parent/child allocation factor - 12-month enrollment",
    imp_e12 = "Type of imputation method - 12 month enrollment"
  )
#Value labels; codes follow the IPEDS FLAGS2017 dictionary.
flags <- flags %>%
  set_value_labels(
    stat_e12 = c("Respondent" = 1,
                 "Non respondent, imputed" = 4,
                 "Nonrespondent hurricane-related problems, imputed" = 8,
                 "Nonrespondent not imputed" = 5,
                 "Not applicable" = -2,
                 "Not active" = -9),
    lock_e12 = c("No data submitted" = 0,
                 "Complete, final lock applied" = 8,
                 "Not applicable" = -2),
    imp_e12 = c("Nearest neighbor (NN)" = 2,
                "Not applicable" = -2))
#Verify the labelling took: class/typeof of the labelled column, then
#frequency tables in raw codes and with labels applied.
class(flags$stat_e12)
typeof(flags$stat_e12)
flags %>%
count(stat_e12)
flags %>%
count(stat_e12) %>%
as_factor()
#Cross-tab of the parent/child indicator columns.
flags %>%
group_by(prch_e12, idx_e12, pce12_f) %>%
count()
var_label(flags)
val_labels(flags)
#investigate data structure
#Confirm unitid uniquely identifies rows (expect n_per_group == 1 only).
flags %>% # start with data frame object
group_by(unitid) %>% # group by unitid
summarise(n_per_group=n()) %>% # create measure of number of obs per group
ungroup %>% # ungroup (otherwise frequency table [next step] created) separately for each group (i.e., separate frequency table for each value of unitid)
count(n_per_group)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Question 3 (/4)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Inner join to keep all obs for stat_e12
#Merge the institution characteristics (hd) with the response flags; the
#anti_join below shows whether any hd rows failed to match.
hd_flags <- hd %>%
inner_join(flags, by = "unitid")
hd_flags %>%
filter(is.na(stat_e12)) #No missing obs for stat_e12
anti_hd_flags <- hd %>%
anti_join(flags, by = "unitid") #all merged
#print anti merge dataset; note, no observations don't merge
anti_hd_flags
rm(anti_hd_flags)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Question 4 (/5)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Downloading file from the ipeds website
#12-month enrollment counts (EFFY2017); mode = "wb" for a binary download.
download.file("https://nces.ed.gov/ipeds/datacenter/data/EFFY2017.zip",
destfile = "effy2017", mode = "wb")
#unzip zip file and keep original name
unzip(zipfile = "effy2017" , unzip = "unzip")
#Review documentation before reading in data
#Drop any read_csv()-generated filler columns named X...
enroll <- read_csv("effy2017.csv") %>%
select(-starts_with("X")) #No parsing errors
#Change variable names to lower-case
names(enroll) <- tolower(names(enroll))
names(enroll)
#Subset dataframe, read documentation
#Keep institution id, student level, and the grand-total headcount.
enroll <- enroll %>% select(unitid, effylev, efytotlt)
enroll %>% head(n=10)
#Label variable and value labels
#Variable labels
enroll <- enroll %>%
set_variable_labels(
unitid = "Unique identification number of the institution",
effylev = "Level of student",
efytotlt = "Grand total"
)
#Value labels
#Per IPEDS docs effylev uses codes 1/2/4 (no code 3 in this file).
enroll <- enroll %>%
set_value_labels(
effylev = c("All students total" = 1,
"Undergraduate" = 2,
"Graduate" = 4)
)
#investigate data
class(enroll$effylev)
typeof(enroll$effylev)
enroll %>%
count(effylev)
enroll %>%
count(effylev) %>%
as_factor()
var_label(enroll)
val_labels(enroll)
#Investigate structure
#Here the unit of observation is unitid-by-effylev (up to 3 rows per school).
enroll %>% # start with data frame object
group_by(unitid,effylev) %>% # group by unitid
summarise(n_per_group=n()) %>% # create measure of number of obs per group
ungroup %>% # ungroup (otherwise frequency table [next step] created) separately for each group (i.e., separate frequency table for each value of unitid)
count(n_per_group)
enroll %>%
count(effylev)
#Tidy
#Reshape to one row per unitid with columns all/ug/grad holding the
#headcounts; recode() maps the effylev codes to short column names first.
enroll_v2 <- enroll %>%
mutate(level=recode(as.integer(effylev),
`1` = "all",
`2` = "ug",
`4` = "grad")
) %>% select(-effylev) %>% # drop variable effylev
pivot_wider(names_from = level, values_from = efytotlt)
names(enroll_v2)
enroll_v2
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Question 5 (/4.5)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Investigate structure
#After pivoting, unitid should uniquely identify rows in enroll_v2.
enroll_v2 %>% # start with data frame object
group_by(unitid) %>% # group by unitid
summarise(n_per_group=n()) %>% # create measure of number of obs per group
ungroup %>% # ungroup (otherwise frequency table [next step] created) separately for each group (i.e., separate frequency table for each value of unitid)
count(n_per_group)
#Left join
#Keep every hd_flags institution; schools absent from the enrollment file
#get NA enrollment columns.  The anti_join shows who those schools are,
#broken down by their response status.
hd_enroll <- hd_flags %>%
left_join(enroll_v2, by = "unitid")
hd_enroll %>% count(stat_e12) %>% as_factor()
anti_hd_enroll <- hd_flags %>%
anti_join(enroll_v2, by = "unitid")
anti_hd_enroll %>% count(stat_e12) %>% as_factor()
anti_hd_enroll %>% group_by(unitid) %>%
summarise(by_school_id = n()) %>%
ungroup() %>%
count(by_school_id)
#Bonus question
#Bonus question
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#Bonus (/4)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
## USAGE -----------------------------------------------------------------------
##
## (1) download relevant Stata data and label files from IPEDS (leave zipped)
##
## - Stata data: *_Data_Stata.zip
## - Stata labels: *_Stata.zip
##
## (2) change input/output directories below if desired
##
## (3) run
##
## NB: You can download zipped IPEDS files using < downloadipeds.r > script @
## https://github.com/btskinner/downloadipeds
## -----------------------------------------------------------------------------
## -----------------------------------------------------------------------------
## SET I/O DIRECTORIES (DEFAULT = everything in the current directory)
## -----------------------------------------------------------------------------
## If directory structure like this EXAMPLE:
##
## ./
## |__/r_data
## |
## |__/stata_data
## | |-- ADM2014_Data_Stata.zip
## | |-- ADM2015_Data_Stata.zip
## |
## |__/stata_labels
## | |-- ADM2014_Stata.zip
## | |-- ADM2015_Stata.zip
## |
## |-- label_ipeds.r
##
## Then:
##
## labs_ddir <- file.path('.', 'stata_labels')
## stata_ddir <- file.path('.', 'stata_data')
## r_ddir <- file.path('.', 'r_data')
getwd()
#NOTE(review): hard-coded, user-specific Dropbox paths — anyone else running
#this script must edit the three lines below (see the EXAMPLE layout above).
labs_ddir <- file.path('~/Dropbox/r_class_problem_sets/lecture8/r_data/stata_labels') # path to folder w/ zipped label files
stata_ddir <- file.path('~/Dropbox/r_class_problem_sets/lecture8/r_data/stata_data') # path to folder w/ zipped Stata data
r_ddir <- file.path('~/Dropbox/r_class_problem_sets/lecture8/r_data/r_files') # path to output folder for Rdata files
## -----------------------------------------------------------------------------
## WANT NOISIER OUTPUT? (DEFAULT = FALSE)
## -----------------------------------------------------------------------------
## allow readr::read_csv() messages?
noisy <- FALSE
## -----------------------------------------------------------------------------
## LIBRARIES & FUNCTIONS
## -----------------------------------------------------------------------------
## libraries -- load with library() so a missing dependency fails fast;
## the original lapply(libs, require, ...) only returns FALSE on failure
## and lets the script die later with obscure "could not find function"
## errors.  invisible() suppresses the printed list of return values.
libs <- c('tidyverse','labelled')
invisible(lapply(libs, library, character.only = TRUE))
read_zip <- function(zipfile, type, noisy) {
  ## Unzip `zipfile` into a throw-away directory and read the single data
  ## file it contains.
  ##
  ## zipfile: path to a zipped IPEDS file
  ## type:    'csv' parses with readr::read_csv(); anything else returns the
  ##          raw lines (used for the Stata .do label scripts)
  ## noisy:   when type == 'csv', allow read_csv() messages/progress
  ##
  ## Returns a tibble (type == 'csv') or a character vector of lines.
  ## create a name for the dir where we'll unzip
  zipdir <- tempfile()
  ## create the dir using that name
  dir.create(zipdir)
  ## guarantee the scratch dir is removed even if reading fails part-way
  on.exit(unlink(zipdir, recursive = TRUE), add = TRUE)
  ## unzip the file into the dir
  unzip(zipfile, exdir = zipdir)
  ## get the files into the dir
  files <- list.files(zipdir, recursive = TRUE)
  ## IPEDS ships revised ("_rv") files alongside originals; prefer the
  ## revision when the zip holds more than one file.  The original patterns
  ## ('*_rv_*', '*\\.csv') were shell globs, not valid regexes -- TRE only
  ## tolerated the dangling leading '*' -- and grep() on tolower(files)
  ## returned the LOWERCASED name, which can break file.path() on a
  ## case-sensitive filesystem.  Match case-insensitively but keep the
  ## original filename instead.
  if (length(files) > 1) {
    file <- files[grepl('_rv', tolower(files))]
    if (length(file) == 0) {
      file <- files[grepl('\\.csv', files)]
    }
  } else {
    file <- files[1]
  }
  ## get the full name of the file
  file <- file.path(zipdir, file)
  ## read the file
  if (type == 'csv') {
    if (noisy) {
      out <- read_csv(file)
    } else {
      out <- suppressMessages(suppressWarnings(read_csv(file,
                                                        progress = FALSE)))
    }
  } else {
    ## IPEDS label scripts are latin1-encoded
    out <- readLines(file, encoding = 'latin1')
  }
  ## return (cleanup handled by on.exit above)
  return(out)
}
read_labels <- function(zipfile) {
  ## Extract the zipped Stata .do label script and return its commands as a
  ## character vector, one command per line.
  do_lines <- read_zip(zipfile, 'do')
  ## everything up to and including the `insheet` line is header; drop it
  start <- grep('insheet', do_lines) + 1
  body <- do_lines[start:length(do_lines)]
  ## some label commands are commented out with a leading '*'; strip the
  ## asterisk so they are parsed like the rest
  gsub('^\\*(.+)$', '\\1', body)
}
assign_var_labels <- function(df, label_vec) {
  ## Apply Stata-style variable labels to `df`.  `label_vec` holds lines of
  ## the form:  label variable <var> "<label text>"
  lab_lines <- grep('^label variable', label_vec, value = TRUE)
  ## nothing to do when the script defines no variable labels
  if (length(lab_lines) == 0) {
    return(df)
  }
  ## third whitespace-separated token is the variable name
  var_names <- vapply(lab_lines,
                      function(ln) strsplit(ln, ' ')[[1]][[3]],
                      character(1), USE.NAMES = FALSE)
  ## the quoted remainder of each line is the label text
  lab_text <- gsub('label variable .+"(.+)"', '\\1', lab_lines)
  ## labelled::var_label<- takes a named list of variable -> label
  var_label(df) <- setNames(as.list(lab_text), var_names)
  df
}
## Parse Stata `label define` / `label values` commands out of `label_vec`
## and attach the resulting value labels to the matching columns of `df`
## via labelled::labelled().  Returns the labelled data frame.
## NOTE(review): relies on the exact IPEDS .do-file layout; if one variable
## appears in several `label values` lines only the first mapping is kept
## (see the !duplicated() step below).
assign_val_labels <- function(df, label_vec) {
## get value label lines
vallabs <- grep('^label define', label_vec, value = TRUE)
## if no labels, exit
if (length(vallabs) == 0) { return(df) }
## get unique defined labels
labdefs <- unique(gsub('^label define (\\w+).+', '\\1', vallabs))
## get label value lines
vars <- grep('^label values', label_vec, value = TRUE)
## make list of variable plus its value definition
## (list values = variable names, list names = label-definition names,
## so vardef[[<label name>]] yields the column to label)
vardef <- setNames(as.list(gsub('^label values (\\w+).+', '\\1', vars)),
gsub('^label values \\w+ (\\w+)\\*?.*', '\\1', vars))
## make unique b/c of some double labels
vardef <- vardef[!duplicated(vardef)]
## loop through each variable
for (i in 1:length(labdefs)) {
## get label
labdef <- labdefs[i]
## skip if missing
if (!is.null(vardef[[labdef]])) {
## subset lines with this definition (word-boundary match on its name)
pattern <- paste0('\\b', labdef, '\\b')
vallab <- grep(pattern, vallabs, value = TRUE)
## get values (possibly negative codes, e.g. -2 "Not applicable")
pattern <- paste0('label define ', labdef, ' +(-?\\w+).+')
values <- gsub(pattern, '\\1', vallab)
## convert values to class of variable...hacky fix here
## (silently leaves values as character when coercion warns)
suppressWarnings(class(values) <- class(df[[vardef[[labdef]]]]))
## get labels (quoted text, optionally followed by ", add")
pattern <- paste0('label define ', labdef, ' .+"(.+)" ?(, ?add ?)?')
labels <- gsub(pattern, '\\1', vallab)
## make list (labelled() wants values named by their label text)
labels <- setNames(values, labels)
## label values
df[[vardef[[labdef]]]] <- labelled(df[[vardef[[labdef]]]], labels)
}
}
## return dataframe
return(df)
}
assign_imp_labels <- function(df, label_vec) {
  ## Attach imputation-code value labels to the imputation-flag columns of
  ## `df` (IPEDS prefixes those columns with 'x').  The code/description
  ## pairs live in the .do file between a line mentioning
  ## "imputation ... variable(s)" and the first `tab` command.
  ## find line numbers surrounding imputation values
  line_no_start <- grep('imputation.*variable(s)?', label_vec) + 1
  ## if no imputation labels, exit unchanged
  if (length(line_no_start) == 0) { return(df) }
  line_no_stop <- grep('^tab\\b', label_vec)[[1]] - 1
  labs <- label_vec[line_no_start:line_no_stop]
  ## get variables starting with 'x'
  vars <- df %>% select(starts_with('x')) %>% names(.)
  ## each line is "<single-char code> <description>"; labelled() wants a
  ## vector of codes named by their descriptions
  values <- gsub('(\\w\\b).+', '\\1', labs)
  descriptions <- gsub('\\w\\b (.+)', '\\1', labs)
  labels <- setNames(values, descriptions)
  ## only label columns whose storage class matches the (character) codes.
  ## identical() replaces the original `class(x) == class(y)` comparison,
  ## which is vectorized and makes if() error for multi-class objects on
  ## R >= 4.2 (and previously relied on a silent length-1 assumption).
  for (v in vars) {
    if (identical(class(df[[v]]), class(values))) {
      df[[v]] <- labelled(df[[v]], labels)
    }
  }
  ## return dataframe
  return(df)
}
## -----------------------------------------------------------------------------
## RUN BY LOOPING THROUGH FILES
## -----------------------------------------------------------------------------
## get list of zip files.  The original patterns started with a bare '*'
## (shell-glob syntax that the TRE regex engine merely tolerated); use
## valid anchored regexes with the same effective meaning.
stata_zip <- grep('_Data_Stata\\.zip$', list.files(stata_ddir), value = TRUE)
stata_lab <- grep('_Stata\\.zip$', list.files(labs_ddir), value = TRUE)
## if stata_ddir and labs_ddir are the same directory, the data zips also
## match the label pattern; drop them from the label list
if (identical(stata_ddir, labs_ddir)) {
  stata_lab <- stata_lab[!(stata_lab %in% stata_zip)]
}
## loop: for every data zip, find its matching label zip, read + label the
## data, and save one .Rdata file per IPEDS survey under its own name.
## NOTE(review): 1:length(stata_zip) misbehaves when stata_zip is empty
## (iterates over 1:0); seq_along() would be safer.
for (i in 1:length(stata_zip)) {
f <- stata_zip[i]
## message
message(paste0('Working with: ', f))
## get basename (NB: the unescaped '.' in '.zip' matches any character,
## harmless here but worth knowing)
fname <- gsub('(^.+)_Data_Stata.zip', '\\1', f)
## get label file
lab_file <- grep(paste0('^', fname, '_Stata'), stata_lab, value = TRUE)
## skip if missing label file
if (length(lab_file) == 0) {
message(paste0(' NO LABEL FILE FOR: ', fname, ', skipping'))
next
}
## read in data (lower-case all column names to match the label script)
df <- read_zip(file.path(stata_ddir, f), 'csv', noisy) %>%
rename_all(tolower)
## get labels
labs <- read_labels(file.path(labs_ddir, lab_file))
## assign variable labels
df <- assign_var_labels(df, labs)
## assign value labels
df <- assign_val_labels(df, labs)
## assign imputation labels
df <- assign_imp_labels(df, labs)
## rename data frame to match file name (assign() binds df under e.g. "adm2014")
assign(tolower(fname), df)
## save
save(list = tolower(fname),
file = file.path(r_ddir, paste0(fname, '.Rdata')))
## garbage collect every 10 loops...may help...idk
if (i %% 10 == 0) { gc() }
}
## =============================================================================
## END SCRIPTstat
|
59906cff7bfc83840cee35012f7d3892b71d31b0 | 7f77551f86a4b5b9e6bacd39cacd6d170141c1fa | /1209/Lecture 7.R | 228100a3603955c5e0b46789822dcbc2e29a6c80 | [] | no_license | ocowchun/R_Computing-_for_Business_Data_Analytics | b96722ccd04c3d2a5c507694548c5764afb354e4 | 66276ec423f0cddf53215743019202546beef8dd | refs/heads/master | 2021-01-10T20:29:05.086711 | 2015-01-16T10:40:38 | 2015-01-16T10:40:38 | 24,643,703 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,597 | r | Lecture 7.R | #7.1
#
set.seed(3759)
b0=0.2
b1=0.5
n=1000
x=runif(n, -1, 1)
S=1000
par.est=matrix(NA, nrow=S, ncol=4)
for(s in 1:S){
y=rpois(n,exp(b0+b1*x))
model_glm=glm(y ~ x, family="poisson")
model_lm=lm(y ~ x)
par.est[s, 1]=model_glm$coef[1]
par.est[s, 2]=model_lm$coef[1]
par.est[s, 3]=model_glm$coef[2]
par.est[s, 4]=model_lm$coef[2]
}
#
dev.new(width=12, height=5)
par(mfrow=c(1,2))
hist(par.est[,3],main="Poisson Reg b1")
abline(v=b1,col='red',lwd=2)
hist(par.est[,4],main="Linear Reg b1")
abline(v=b1,col='red',lwd=2)
#Fit a Poisson regression of boating trips on all other covariates.
library(AER)
data(RecreationDemand)
head(RecreationDemand)
rd_Pois=glm(trips ~ ., data=RecreationDemand, family=poisson)
summary(rd_Pois)
#
library(arm)
#attach() puts the columns on the search path so the hand-written
#likelihood below can reference them directly (detached further down).
attach(RecreationDemand)
#Numeric 0/1 dummies for the two factor covariates.
SKI=ifelse(ski=="yes",1,0)
USERFEE=ifelse(userfee=="yes",1,0)
Poisloglikf <- function(par) {
  ## Negative Poisson log-likelihood for the trips model, for minimisation
  ## with nlminb().  `par` holds the 8 coefficients in order: intercept,
  ## quality, SKI, income, USERFEE, costC, costS, costH.  Relies on the
  ## attached RecreationDemand columns and the SKI/USERFEE dummies above.
  X <- cbind(1, quality, SKI, income, USERFEE, costC, costS, costH)
  lam <- exp(drop(X %*% par))
  ## vectorised replacement for the original per-row loop: identical
  ## likelihood, one matrix product + one dpois() call instead of n of each
  -sum(dpois(trips, lam, log = TRUE))
}
#Maximise the hand-written likelihood; estimates and log-likelihood should
#match the glm() fit above (est$objective is the NEGATIVE log-likelihood).
est=nlminb(c(1,rep(0.001,7)),Poisloglikf,control=list(trace=1))
#
summary(rd_Pois)
est$par
#
logLik(rd_Pois)
est$objective
#
#Overdispersion check: variance far exceeds the mean, and the formal test
#confirms the Poisson equidispersion assumption fails.
var(trips)
mean(trips)
dispersiontest(rd_Pois)
#Negative binomial accommodates the overdispersion.
library(MASS)
rd_NB=glm.nb(trips~.,data=RecreationDemand)
summary(rd_NB)
#
coeftest(rd_Pois)
coeftest(rd_NB)
#Zero-truncation
#Compare observed vs fitted count frequencies: both models badly under-
#predict the number of zeros (excess-zeros problem).
table(trips)[1:10]
table(round(fitted(rd_Pois)))[1:10]
table(round(fitted(rd_NB)))[1:10]
#Zero-inflated models; the ZINB uses quality + income in the zero part.
library(pscl)
rd_ziPois=zeroinfl(trips~.,data=RecreationDemand,dist="pois")
rd_ziNB=zeroinfl(trips~.|quality+income,data=RecreationDemand,dist="negbin")
round(colSums(predict(rd_ziPois,type="prob")[,1:10]))
round(colSums(predict(rd_ziNB,type="prob")[,1:10]))
table(trips)[1:10]
#Alternative approach: drop the zeros and fit zero-truncated models.
RD_NoZero=RecreationDemand[which(trips!=0),]
detach(RecreationDemand)
attach(RD_NoZero)
var(trips)/mean(trips)
table(trips)
#
library(VGAM)
rdnz_ztpois=vglm(trips~quality+ski+income+userfee+costC+costS+costH,
family=pospoisson,data=RD_NoZero)
rdnz_ztnb=vglm(trips~quality+ski+income+userfee+costC+costS+costH,
family=posnegbinomial,data=RD_NoZero)
summary(rdnz_ztnb)
#7.2
#Beta regression for a (0,1) response (gasoline yield fractions).
library(betareg)
data("GasolineYield",package="betareg")
head(GasolineYield)
gy.logit=betareg(yield~batch+temp,data=GasolineYield)
summary(gy.logit)
#Second model adds a precision submodel (phi depends on temp, after the "|").
gy.logit2=betareg(yield~batch+temp|temp,data=GasolineYield)
summary(gy.logit2)
#
#Same mean model under alternative link functions, compared by AIC below.
gy.probit=betareg(yield~batch+temp,data=GasolineYield,link="probit")
gy.loglog=betareg(yield~batch+temp,data=GasolineYield,link="loglog")
gy.cloglog=betareg(yield~batch+temp,data=GasolineYield,link="cloglog")
gy.cauchy=betareg(yield~batch+temp,data=GasolineYield,link="cauchit")
#
AIC(gy.logit,gy.logit2,gy.probit,gy.loglog,gy.cloglog,gy.cauchy)
#7.3
## Inverse logit (logistic CDF): maps any real p into (0, 1).
## Implemented via stats::plogis(), the same function in numerically
## stable form: the original exp(p)/(1 + exp(p)) overflows to
## Inf/Inf = NaN for p above ~710, whereas plogis() correctly returns 1.
inv.logit <- function(p) {
  plogis(p)
}
#Monte Carlo: binary outcome with P(y=1) = inv.logit(b0 + b1*x); compare
#the logistic-regression slope (col 1) against the OLS slope (col 2, the
#linear probability model) across S = 1000 replications.
set.seed(32945)
b0=0.2
b1=0.5
n=1000
x=runif(n, -1, 1)
S=1000
par.est=matrix(NA, nrow=S, ncol=2)
for(s in 1:S){
y=rbinom(n, 1, inv.logit(b0+b1*x))
model_glm=glm(y ~ x, family=binomial(link=logit))
model_lm=lm(y ~ x)
par.est[s, 1]=model_glm$coef[2]
par.est[s, 2]=model_lm$coef[2]
}
#Sampling distributions of the slope; red line marks the true b1.
dev.new(width=12, height=5)
par(mfrow=c(1,2))
hist(par.est[,1],main="Logistic Reg b1")
abline(v=b1,col='red',lwd=2)
hist(par.est[,2],main="Linear Reg b1",xlim=c(0,0.5))
abline(v=b1,col='red',lwd=2)
#
#Labor-force participation: logit and probit fits with a quadratic in age.
library(AER)
data("SwissLabor")
attach(SwissLabor)
plot(participation~age,ylevels=2:1)
plot(participation~education,ylevels=2:1)
#
swiss_logit=glm(participation~.+I(age^2),data=SwissLabor,
family=binomial(link="logit"))
swiss_probit=glm(participation~.+I(age^2),data=SwissLabor,
family=binomial(link="probit"))
#McFadden pseudo-R^2: 1 - logLik(full)/logLik(intercept-only).
swiss_logit0=update(swiss_logit,formula=.~1)
summary(swiss_logit0)
1-as.vector(logLik(swiss_logit)/logLik(swiss_logit0))
#Likelihood-ratio test of the full model against the null model.
anova(swiss_logit0,swiss_logit,test="Chisq")
#Confusion matrices at a 0.5 cutoff (round() of fitted probabilities).
table(true=SwissLabor$participation,pred=round(fitted(swiss_logit)))
table(true=SwissLabor$participation,pred=round(fitted(swiss_probit)))
#
library(ROCR)
pred=prediction(fitted(swiss_probit),SwissLabor$participation)
#Accuracy-vs-cutoff curve and ROC curve (diagonal = random classifier).
dev.new(width=12,height=5)
par(mfrow=c(1,2))
plot(performance(pred,"acc"))
plot(performance(pred,"tpr","fpr"))
abline(0,1,lty=2)
#Overlay the cloglog fit's ROC curve on the probit's for comparison.
#NOTE(review): add=T uses the reassignable shorthand; add=TRUE is safer.
swiss_cloglog=glm(participation~.+I(age^2),data=SwissLabor,
family=binomial(link="cloglog"))
pred2=prediction(fitted(swiss_cloglog),SwissLabor$participation)
plot(performance(pred2,"tpr","fpr"),col='red',lty=2,lwd=2)
plot(performance(pred,"tpr","fpr"),add=T)
#
#
#Complete separation
#The `southern` dummy perfectly predicts the outcome, so its MLE diverges:
#watch its coefficient/SE explode as the convergence tolerance tightens.
data("MurderRates")
murder_logit=glm(I(executions>0)~time+income+noncauc+lfp+southern,
data=MurderRates,family=binomial)
coeftest(murder_logit)
#Tighter epsilon and more iterations make the divergence obvious.
murder_logit2=glm(I(executions>0) ~ time + income + noncauc + lfp + southern,
data=MurderRates, family=binomial, control=list(epsilon=1e-15, maxit=50, trace=F))
coeftest(murder_logit2)
#Dropping the separating regressor restores a well-behaved fit.
murder_logit3=glm(I(executions>0)~time+income+noncauc+lfp,
data=MurderRates,family=binomial)
coeftest(murder_logit3)
#7.4
#Quantile regression of log wages (LAD = median regression) vs OLS.
library(quantreg)
data("CPS1988")
cps_f=log(wage) ~ experience + I(experience^2) + education
cps_lad=rq(cps_f, data=CPS1988)
cps_ols=lm(cps_f, data=CPS1988)
summary(cps_lad)
summary(cps_ols)
#First and third quartiles fitted jointly.
cps_rq=rq(cps_f, tau=c(0.25, 0.75), data=CPS1988)
summary(cps_rq)
#Test whether the 0.25 and 0.75 coefficient vectors differ.
cps_rq25=rq(cps_f, tau=0.25, data=CPS1988)
cps_rq75=rq(cps_f, tau=0.75, data=CPS1988)
anova(cps_rq25, cps_rq75)
#Full quantile process over tau = 0.05, ..., 0.95, plotted per coefficient.
cps_rqbig=rq(cps_f, tau=seq(0.05, 0.95, 0.05), data=CPS1988)
cps_rqbigs=summary(cps_rqbig)
plot(cps_rqbigs)
|
4e8abb9187d77aa051ad5d63b3e2f6d126a46bd5 | 77bfcf3f6d6e95df3dac88e31962e03182a5656d | /.R | b30f6a48fb5600ee0a8b6b185239aa78d029e63d | [] | no_license | ahaime/idealpointsElSalvador | be1924c625c36de024eccfd87b3195cfcb669e94 | b6749b31df4d39d433c1b4856992cc5747f76ea6 | refs/heads/master | 2021-01-23T21:23:49.121684 | 2018-02-28T22:21:35 | 2018-02-28T22:21:35 | 102,898,261 | 0 | 0 | null | 2018-02-28T21:47:25 | 2017-09-08T19:41:18 | R | UTF-8 | R | false | false | 3,197 | r | .R | library(pscl)
library (ggplot2)
##### Load the votes matrix (change the path accordingly in line 6) ##############
##### Load the votes matrix; drop the csv's first (index) column and
##### recode the 9s (treated here as missing votes) to NA.
##### NOTE(review): hard-coded user-specific path.
store<- read.csv("/Users/ah35/Google Drive/El Salvador roll calls/Data/Legislature 2012-2015/rollcalls_legislature2.csv")
store<- store[,-1]
store[ store == 9 ] <- NA
#### Dropping
#### Uncontested
#### Votes >2.5%
#Transpose so rows = roll calls; first two columns (legislator, party) dropped.
rollcall1<-data.frame(t(store[c(-1, -2)]))
#Count yeas (coded 1) and nays (coded 0) per roll call.
#NOTE(review): yea/nay grow inside the loop — preallocation would be faster.
yea<-nay<-c()
for(i in 1:length(rollcall1[,1])){
yea[i]<-sum(as.numeric(rollcall1[i,1:length(rollcall1[1,])]), na.rm=T)
nay[i]<-sum(as.numeric(rollcall1[i,1:length(rollcall1[1,])]==0), na.rm=T)
}
#Flag a roll call as contested when both sides got at least 2.5% of votes,
#then keep only contested roll calls and transpose back to rows = legislators.
uncontested<-ifelse(yea<(yea+nay)*.025 | nay<(yea+nay)*.025, yes=0, no=1)
uncontested<-cbind(uncontested, rollcall1)
uncontested<-subset(uncontested, uncontested==1)
uncontested<- uncontested[!names(uncontested) %in% c("uncontested")]
uncontested<-data.frame(t(uncontested))
rollcall<-data.frame(cbind(store[c(1,2)], uncontested), row.names=NULL); rm(rollcall1)
######
#### Ideal-Points Estimation
######
###### Legislature 2012-2015 ####
#NOTE(review): store already had its first column dropped at the top of the
#script; this second [,-1] removes ANOTHER column — confirm the intended
#column layout, since store$legislator is still referenced just below.
store<- store [,-1]
store[ store == 9 ] <- NA
#The data frame `rollcall` created above shadows pscl::rollcall, but R's
#function-call lookup skips non-function bindings, so this still calls pscl.
house.roll<-rollcall(data=store[,3:826], legis.names=store$legislator)
#### Exclude legislators with less than 25% votes ###
yea<-nay<-c()
for(i in 1:length(rollcall[,1])){
yea[i]<-sum(as.numeric(rollcall[i,3:length(rollcall[1,])])==1, na.rm=T)
nay[i]<-sum(as.numeric(rollcall[i,3:length(rollcall[1,])]==0), na.rm=T)
}
votes<-cbind(subset(store, select=c(legislator, party)), vote.count= yea+nay)
### Legislature 2012-2015
#Keep legislators with at least 13 recorded votes (the "25%" threshold per
#the comment above — TODO confirm 13 is 25% of the contested roll calls).
votes <- cbind(subset(votes, vote.count >= 13,
select=c(legislator, party)))
##### Getting eigenvalues and variance of each dimension for the legislature ###
library("wnominate")
#NOTE(review): the result object is named `ideal`, shadowing pscl::ideal;
#later calls to ideal(...) still resolve to the pscl function.
ideal<- wnominate(house.roll, trials=3, polarity=c(1,1), dims=2)
plot.scree(ideal)
#Scree data: dimension number vs eigenvalue; the horizontal line at 1 is
#the usual keep-dimension cutoff.
veigen<- cbind(1:length(ideal$eigenvalues), ideal$eigenvalues)
veigen<-data.frame(veigen)
ggplot(veigen, aes(X1, X2))+geom_point() +geom_line()+
geom_hline(yintercept=1)+ ylab("Eigenvalue")+ xlab("Number of Dimensions")+ ggtitle("12-15 Legislative Period")
ggsave("./images/eigenvalues_0813.pdf")
#### SHORT RUN Legislature 2012-2015
#Quick 1-dimensional MCMC run to sanity-check convergence behaviour.
ideal.house<-ideal(house.roll, d=1, maxiter = 100, thin = 10, burnin = 10, verbose = TRUE)
plot(ideal.house)
#### FULL Run (1M iterations; this overwrites the short-run object and can
#### take a long time)
ideal.house <- ideal(house.roll, d=1, maxiter = 1000000, thin = 1000, burnin = 100000, verbose = TRUE)
#### Saving Data in R format
#NOTE(review): setwd() to a hard-coded user path — side effect on the session.
setwd("/Users/ah35/Google Drive/El Salvador roll calls/Data/Legislature 2012-2015/ID Point Estimation")
######
#### Ideal Points: Store 2012-2015
######
#Assemble one row per legislator: identifiers plus posterior mean, SD, and
#the highest-density-region bounds from the ideal() summary.
rollcall.col<- votes[,c(1,2)]
id.points<-cbind(rollcall.col,
as.matrix(summary(ideal.house)[[c("xm")]]),
as.matrix(summary(ideal.house)[[c("xsd")]]),
matrix(summary(ideal.house)[[c("xHDR")]],ncol=2))
colnames(id.points)<-c("legislator", "party", "IP.mean","SD","CI.lower","CI.upper")
write.csv(id.points, file="/Users/ah35/Google Drive/El Salvador roll calls/Data/Legislature 2012-2015/ID Point Estimation/id.points.csv", na="", row.names=FALSE)
#NOTE(review): `rollcall.votes` is never defined anywhere in this script, so
#the next line errors at runtime — probably meant `votes` or `rollcall`.
write.csv(rollcall.votes, file="/Users/ah35/Google Drive/El Salvador roll calls/Data/Legislature 2012-2015/ID Point Estimation/votesestimation.csv")
132b0ad639b9bfbfc01ac18fe9129318a97959b2 | d354983f75228b3aa82eea518f42de95e3fa32f7 | /functions/sort.R | b0639055ba1905dca056f724f021f7aba509dc02 | [] | no_license | ReneNyffenegger/about-r | f1f1d1f6c52f0446207978e78436ccbd91b89d20 | ae511ae632e1f8827cab91d4e36c1a9349fda5ab | refs/heads/master | 2022-01-14T10:19:45.230836 | 2021-12-27T19:50:37 | 2021-12-27T19:50:37 | 22,269,629 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 321 | r | sort.R | v <- c (5, 3, 6, 1, NA, 2, 7, 4 )
sort(v)
# [1] 1 2 3 4 5 6 7
sort(v, decreasing=TRUE)
# [1] 7 6 5 4 3 2 1
sort(v, na.last=TRUE)
# [1] 1 2 3 4 5 6 7 NA
sort(v, na.last=FALSE)
# [1] NA 1 2 3 4 5 6 7
sort(v, na.last=NA)
# [1] 1 2 3 4 5 6 7
is.unsorted(v)
# [1] NA
w<-sort(v)
is.unsorted(w)
# [1] FALSE
|
ca4a61144e718cf2061c153ec5d553c9eb9d765f | 8577e9d70acc9b3383684b8aedd5bdc36b72751d | /R/PasolliE_2018.R | 9fb95415f28ace99ce882a21471b5a0c3c046a39 | [
"Artistic-2.0"
] | permissive | Liuyangying/curatedMetagenomicData | f6a9bffb45e2e3b71d3429d798a8bcd2fe309aa2 | 9c5f2bfa7c15c839f30049a9ec41ca57c6393d3f | refs/heads/master | 2020-05-20T05:13:08.761177 | 2019-04-25T14:34:05 | 2019-04-25T14:34:05 | 185,399,986 | 1 | 0 | Artistic-2.0 | 2019-05-07T12:43:53 | 2019-05-07T12:43:52 | null | UTF-8 | R | false | false | 1,631 | r | PasolliE_2018.R | ## generated by make_data_documentation(): do not edit by hand
## see source in data-raw/make_data_documentation.R
#' Data from the PasolliE_2018 study
#'
#' @aliases PasolliE_2018.genefamilies_relab.stool PasolliE_2018.marker_abundance.stool PasolliE_2018.marker_presence.stool PasolliE_2018.metaphlan_bugs_list.stool PasolliE_2018.pathabundance_relab.stool PasolliE_2018.pathcoverage.stool
#'
#' @section Datasets:
#'
#' \subsection{PasolliE_2018.genefamilies_relab.stool}{
#' An ExpressionSet with 112 samples and 1,242,950 features specific to the stool body site
#' }
#'
#' \subsection{PasolliE_2018.marker_abundance.stool}{
#' An ExpressionSet with 112 samples and 87,770 features specific to the stool body site
#' }
#'
#' \subsection{PasolliE_2018.marker_presence.stool}{
#' An ExpressionSet with 112 samples and 82,466 features specific to the stool body site
#' }
#'
#' \subsection{PasolliE_2018.metaphlan_bugs_list.stool}{
#' An ExpressionSet with 112 samples and 1,111 features specific to the stool body site
#' }
#'
#' \subsection{PasolliE_2018.pathabundance_relab.stool}{
#' An ExpressionSet with 112 samples and 13,695 features specific to the stool body site
#' }
#'
#' \subsection{PasolliE_2018.pathcoverage.stool}{
#' An ExpressionSet with 112 samples and 13,695 features specific to the stool body site
#' }
#'
#' @section Source:
#'
#' \subsection{Title}{
#' NA
#' }
#'
#' \subsection{Author}{
#' NA
#' }
#'
#' \subsection{Lab}{
#' NA
#' }
#'
#' \subsection{PMID}{
#' NA
#' }
#'
#' @examples `PasolliE_2018.metaphlan_bugs_list.stool`()
#'
#' @name PasolliE_2018
NULL
|
02a7359f3ed5c7ce72b9f5130fc3bffac9c3aba0 | 2edcd98d334212b5bb8d49a39f961ab22950daaf | /R/personal-savings.R | c76ab2235eb015a47ac9bd12f9ebdf8489e71d9a | [] | no_license | davidallen02/personal-income-spending | 392004e36372e2af3f3d0430534f990fda4ea5ce | b51a0355bb87900b34201f94931f1a61cab383fc | refs/heads/master | 2023-01-04T09:47:15.959500 | 2020-10-28T19:10:24 | 2020-10-28T19:10:24 | 270,889,368 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 496 | r | personal-savings.R | library(magrittr)
dat <-pamngr::join_sheets(c("pidss", "pidsdi")) %>%
dplyr::mutate(savings_rate = pidss/pidsdi * 100) %>%
dplyr::select(dates, savings_rate) %>%
reshape2::melt(id.vars = "dates") %>%
dplyr::filter(dates >= as.POSIXct("2017-01-01"))
p <- dat %>%
pamngr::lineplot() %>%
pamngr::pam_plot(
plot_title = "Personal Savings Rate",
plot_subtitle = "Percent of Disposable Personal Income",
show_legend = FALSE
)
p %>% pamngr::all_output("personal-savings")
|
9d5d4da149171e9e0e4892ac1ce324fccdcd893c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/imagerExtra/examples/RestoreHue.Rd.R | 9cc2d015f13ebb911b125670fca23123ec0a9cf1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 268 | r | RestoreHue.Rd.R | library(imagerExtra)
### Name: RestoreHue
### Title: restore hue of color image
### Aliases: RestoreHue
### ** Examples
g <- Grayscale(boats)
hue <- GetHue(boats)
layout(matrix(1:2, 1, 2))
plot(g, main = "Original")
RestoreHue(g, hue) %>% plot(main="Resotred")
|
173cefbcbfd2ae236e7897d61d3603f52a35ae32 | 4d90245de35bc2df919f0b607803b4071d4f9456 | /man/get_results.Rd | 99d81657936d49f0006ad1e8fda14700c374e887 | [] | no_license | musically-ut/typeform | 1c41b9c07f95c7c3fbe4e4bf6d1ad5540b0a94da | 89c7017a64ef2655dc405fcf6ee2bd8659479759 | refs/heads/master | 2020-05-25T15:46:54.944109 | 2016-03-26T21:03:13 | 2016-03-26T21:03:13 | 54,839,427 | 2 | 0 | null | 2016-03-27T17:14:21 | 2016-03-27T17:14:20 | null | UTF-8 | R | false | true | 1,611 | rd | get_results.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_results.R
\name{get_results}
\alias{get_results}
\title{Download questionnaire results}
\usage{
get_results(uid, api, completed = NULL, since = NULL, until = NULL,
offset = NULL, limit = NULL, order_by = NULL, simplify = TRUE)
}
\arguments{
\item{uid}{The UID (unique identifier) of the typeform you want the results for.}
\item{api}{Your private api key.}
\item{completed, }{default \code{NULL}, return all results.
Fetch only completed results (\code{TRUE}), or only not-completed results
(=\code{FALSE}). If \code{NULL} return all results.}
\item{since, }{default \code{NULL}. Fetch only the results after a specific date and
time. If \code{NULL} return all results.}
\item{until, }{default \code{NULL}. Fetch only the results before a specific date and
time. If \code{NULL} return all results.}
\item{offset}{Fetch all results except the first \code{offset}.
i.e. Start listing results from result #\code{offset} onwards.}
\item{limit, }{default \code{NULL}. Fetch only \code{limit} results.
If \code{NULL} return all results.}
\item{order_by}{One of "completed", "date_land_desc", "date_land_incr",
"date_submit_desc", or "date_submit_incr".}
\item{simplify, }{Logical. By default, \code{TRUE}, and returns only the questionnaire
responses as a data frame. If \code{FALSE} return all results from the API call.}
}
\description{
Download results for a particular typeform questionnaire.
}
\examples{
\dontrun{
uid = "XXXX"
api = "YYYY"
get_results(uid, api)
}
}
\seealso{
https://www.typeform.com/help/data-api/
}
|
48397d7e5e3b8c1653b2ff338e5d1ea63a4aaead | 097e93a460f8a449fd3d2f2a9211a95546a4b8ab | /man/dggev.Rd | 9556134b924cbe3608cec86df7304f06c2ca314c | [] | no_license | cran/MCMC4Extremes | c589f1419caacffd076e9cacaf3296fb45ef038d | f4ac4f621d1c2527402c2ad7398587e1b1c0ab22 | refs/heads/master | 2020-04-15T17:29:27.007122 | 2016-07-14T07:18:48 | 2016-07-14T07:18:48 | 33,276,883 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,151 | rd | dggev.Rd | \name{dggev}
\alias{dggev}
\alias{pggev}
\alias{qggev}
\alias{rggev}
\title{
Dual Gamma Generalized Extreme Value Distribution
}
\description{
Cumulative probability, quantiles, density and random generation from the dual gamma generalized extreme value distribution.
}
\usage{
pggev(q, xi, mu, sigma, delta)
qggev(p, xi, mu, sigma, delta)
dggev(x, xi, mu, sigma, delta)
rggev(n, xi, mu, sigma, delta)
}
\arguments{
\item{q}{
vector of quantiles
}
\item{p}{
vector of probabilities
}
\item{x}{
vector of values at which to evaluate density
}
\item{n}{
sample size
}
\item{xi}{
shape parameter
}
\item{mu}{
location parameter
}
\item{sigma}{
scale parameter
}
\item{delta}{
additional shape parameter of GGEV extension
}
}
\value{
Probability (\code{pggev}), quantile (\code{qggev}), density (\code{dggev}) or random sample (\code{rggev}) for the GGEV distribution.
}
\references{
Nascimento, F. F.; Bourguigon, M. ; Leao, J. S. (2015). Extended generalized extreme value distribution with applications in environmental data. \emph{HACET J MATH STAT}.
}
\seealso{
\code{\link{ggevp}}
}
|
87dd0fb378b4a12a5d9bccf054ce9ddc69cd1625 | d040574327243ddb8712f28b99d51c390257c896 | /man/intensity_plot.Rd | 91c9b110ee9c6d16c266af68d0685de1ffaaa108 | [
"MIT"
] | permissive | faye-yang/MSPTM | 646fbfce7e900628edd515f29a07cea7880f4a3c | b7974dd46450221480092a0ec51c490b3231673e | refs/heads/master | 2020-03-30T17:37:09.179322 | 2018-12-06T01:12:07 | 2018-12-06T01:12:07 | 151,462,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,043 | rd | intensity_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{intensity_plot}
\alias{intensity_plot}
\title{\code{intensity_plot} output graph of intensity of amino acid.}
\usage{
intensity_plot(data, modification, mZmarker_ions, search_engine)
}
\arguments{
\item{data}{mass spectrometry information for the peptide}
\item{modification}{contains modification information, intensity of ion, and the amino acid that is modified}
\item{mZmarker_ions}{marker ion}
\item{search_engine}{can be Mascot or Tandem}
}
\description{
\code{intensity_plot} output graph of intensity of amino acid.
}
\examples{
modification<-data.frame("type"=c("Carbamidomethyl","Oxidation"),
"monomass"=c(57.022, 16.0), "AA"=c("C","M"))
result.file <- "/Users/yufei/Desktop/2018fall/BCB410/MSPTM/inst/extdata/output_mouse.2018_12_04_19_57_17.t.xml"
uids<-c(12,2,731)
library(rTANDEM)
result <- GetResultsFromXML(result.file)
data<-tandem_get_data(result,modification,uids)
intensity_plot(data,modification,mZmarker_ions, search_engine="Tandem")
}
|
94a358108caa0fe6e73048cde5a737ed97a2ef38 | e60ceb3886665655c29a00571156050b2a8d813e | /milestone.R | 0d4891be151abbef3a7c3a32bf4a08cb9e8223ac | [] | no_license | LarionovaAnna/PredictNextWord | 22a964e8599f02213b672fb67b2611321accb3f7 | 029cda58b4b5c68ca6cd70aeb1b3e1c9cb2011dc | refs/heads/master | 2020-04-05T00:04:13.317424 | 2015-08-19T17:48:43 | 2015-08-19T17:53:11 | 41,050,150 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,098 | r | milestone.R | #Libraries loading
# ---- Libraries ----
library(tm)                     # text-mining framework: Corpus, tm_map, TDM
library(rJava)                  # Java bridge required by RWeka
library(RWeka)                  # NGramTokenizer / Weka_control
library(SnowballC)              # stemming backend for tm
library(wordcloud)              # word-cloud plotting (only used in commented code)
library(ggplot2)                # frequency bar charts
library(tm.plugin.webmining)    # provides removeNonASCII used below
library(stringr)
# ---- Data loading ----
# Read the first 50,000 lines from each of the three corpus sources.
news <- readLines("en_US/en_US.news.txt", warn = FALSE, n = 50000)
blogs <- readLines("en_US/en_US.blogs.txt", warn = FALSE, n = 50000)
twitter <- readLines("en_US/en_US.twitter.txt", warn = FALSE, n = 50000)
# ---- Data processing ----
# Wrap each character vector as a tm source and build a corpus from it.
news_vec <- VectorSource(news)
news_corpus <- Corpus(news_vec)
blogs_vec <- VectorSource(blogs)
blogs_corpus <- Corpus(blogs_vec)
twitter_vec <- VectorSource(twitter)
twitter_corpus <- Corpus(twitter_vec)
# Pool the three corpora (3 x 50,000 = 150,000 documents).
# NOTE(review): sampling 150,000 documents without replacement from the
# 150,000 pooled documents only permutes their order — confirm whether a
# smaller subsample was intended.
corpus <- sample(c(news_corpus, blogs_corpus, twitter_corpus), 150000, replace = FALSE)
## ---- Text normalization (each step rewrites `corpus` in place) ----
### Lower-case all text
corpus <- tm_map(corpus, content_transformer(tolower))
### Strip digits
corpus <- tm_map(corpus, removeNumbers)
### Strip punctuation
corpus <- tm_map(corpus, removePunctuation)
### Drop non-ASCII characters (removeNonASCII comes from tm.plugin.webmining)
corpus <- tm_map(corpus, removeNonASCII)
### Stopword removal left disabled — presumably kept because stopwords are
### useful for next-word prediction; confirm before re-enabling.
#corpus <- tm_map(corpus, removeWords, stopwords("english"))
### Profanity filtering: placeholder, not implemented yet
#
### Collapse runs of whitespace left over from the removals above
corpus <- tm_map(corpus, stripWhitespace)
### Conversion to PlainTextDocument kept for reference, currently unused
#corpus <- tm_map(corpus, PlainTextDocument)
## Document-term matrix of the corpus kept for reference, currently unused
#corpus_tdm <- DocumentTermMatrix(corpus)
# n-gram tokenizers for TermDocumentMatrix(), backed by RWeka.
# All four are now fully qualified with RWeka:: for consistency; the
# original UnigramTokenizer relied on NGramTokenizer/Weka_control being
# attached while the other three used explicit namespacing.
UnigramTokenizer <- function(x) RWeka::NGramTokenizer(x, RWeka::Weka_control(min = 1, max = 1))
BigramTokenizer <- function(x) RWeka::NGramTokenizer(x, RWeka::Weka_control(min = 2, max = 2))
TrigramTokenizer <- function(x) RWeka::NGramTokenizer(x, RWeka::Weka_control(min = 3, max = 3))
FourgramTokenizer <- function(x) RWeka::NGramTokenizer(x, RWeka::Weka_control(min = 4, max = 4))
# Build term-document matrices for 1- to 4-grams.
tdm_t1 <- TermDocumentMatrix(corpus, control = list(tokenize = UnigramTokenizer))
tdm_t2 <- TermDocumentMatrix(corpus, control = list(tokenize = BigramTokenizer))
tdm_t3 <- TermDocumentMatrix(corpus, control = list(tokenize = TrigramTokenizer))
tdm_t4 <- TermDocumentMatrix(corpus, control = list(tokenize = FourgramTokenizer))

# Keep only terms occurring at least this many times in the whole corpus.
freq_unig <- findFreqTerms(tdm_t1, 5000)
freq_big <- findFreqTerms(tdm_t2, 500)
freq_trig <- findFreqTerms(tdm_t3, 50)
freq_fourg <- findFreqTerms(tdm_t4, 50)

# Build a frequency table for the selected terms of a term-document matrix.
# Returns a data frame with one row per n-gram, its corpus-wide count
# (sorted in decreasing order) and its relative frequency among the kept
# terms.  Replaces four near-identical copies of the same code.
#
#   tdm       - a tm TermDocumentMatrix
#   terms     - character vector of terms (rows of `tdm`) to keep
#   ngram.col - name to give the n-gram column (e.g. "unigram")
ngramFreqTable <- function(tdm, terms, ngram.col) {
  counts <- rowSums(as.matrix(tdm[terms, ]))
  tbl <- data.frame(ngram = names(counts), frequency = counts)
  names(tbl)[1] <- ngram.col
  tbl <- tbl[order(tbl$frequency, decreasing = TRUE), ]
  # Probability of each n-gram, relative to the retained terms only.
  tbl$prob <- tbl$frequency / sum(tbl$frequency)
  tbl
}

frequency_unig <- ngramFreqTable(tdm_t1, freq_unig, "unigram")
frequency_big <- ngramFreqTable(tdm_t2, freq_big, "bigram")
frequency_trig <- ngramFreqTable(tdm_t3, freq_trig, "trigram")
frequency_fourg <- ngramFreqTable(tdm_t4, freq_fourg, "fourgram")
#wc_big <- wordcloud(freq_big, max.words = 200, random.order = FALSE, rot.per = 0.35, colors = brewer.pal(8, "Dark2"))
# ---- Plotting frequency of n-grams ----
# Bar chart of n-gram frequencies, most frequent first.  Replaces four
# near-identical copies of the same ggplot code.
#
#   freq_df    - frequency data frame with an n-gram column and `frequency`
#   ngram.col  - name of the n-gram column in `freq_df`
#   xlab.text  - x-axis label
#   title.text - plot title
plotNgramFreq <- function(freq_df, ngram.col, xlab.text, title.text) {
  # Reorder the n-gram factor by frequency so the bars are sorted.
  freq_df$term <- reorder(freq_df[[ngram.col]], freq_df$frequency)
  ggplot(freq_df, aes(x = term, y = frequency)) +
    geom_bar(stat = "identity") +
    xlab(xlab.text) + ylab("Frequency") +
    labs(title = title.text) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
}
plot_freq_unig <- plotNgramFreq(frequency_unig, "unigram", "Unigram", "Top Unigrams by Frequency")
print(plot_freq_unig)
plot_freq_big <- plotNgramFreq(frequency_big, "bigram", "Bigram", "Top Bigrams by Frequency")
print(plot_freq_big)
plot_freq_trig <- plotNgramFreq(frequency_trig, "trigram", "Trigram", "Top Trigrams by Frequency")
print(plot_freq_trig)
plot_freq_fourg <- plotNgramFreq(frequency_fourg, "fourgram", "Fourgram", "Top Fourgrams by Frequency")
print(plot_freq_fourg)
a957f1d54505439a39d10830967203759e001208 | 7bb3f64824627ef179d5f341266a664fd0b69011 | /Numerical_Methods_by_E_Balaguruswamy/CH6/EX6.7/Ex6_7.R | 8212cef0e10ea67575e641ef555908aa3cddb87b | [
"MIT"
] | permissive | prashantsinalkar/R_TBC_Uploads | 8bd0f71834814b1d03df07ce90b2eae3b7d357f8 | b3f3a8ecd454359a2e992161844f2fb599f8238a | refs/heads/master | 2020-08-05T23:06:09.749051 | 2019-10-04T06:54:07 | 2019-10-04T06:54:07 | 212,746,586 | 0 | 0 | MIT | 2019-10-04T06:03:49 | 2019-10-04T06:03:48 | null | UTF-8 | R | false | false | 1,322 | r | Ex6_7.R | # Example 7 Chapter 6 Page no.: 147
# Newton-Raphson Method
# Install numDeriv only when it is missing, instead of unconditionally
# re-installing it on every run of the script.
if (!requireNamespace("numDeriv", quietly = TRUE)) {
  install.packages("numDeriv")
}
library("numDeriv")
# Given function: f(x) = x^2 - 3x + 2 (roots at x = 1 and x = 2)
u <- function(x) {
  x^2 - 3 * x + 2
}
# Plot f(x) to locate sign changes (root brackets) visually.
curve(u, xlim = c(-5, 5), col = 'blue', lwd = 2, lty = 2, ylab = 'f(x)')
abline(h = 0)
abline(v = 0)
# From the curve, the root nearer to x = 0 is bracketed by [0, 1].
a <- 0
b <- 1
# Newton-Raphson root finder.
#
#   f   - function whose root is sought
#   a,b - lower/upper bounds bracketing the root; iteration starts at `a`
#   tol - convergence tolerance on successive iterates
#   n   - maximum number of iterations
#
# Returns `a` or `b` directly if either bound is already an exact root
# (kept for interface compatibility: these branches return a bare number,
# while convergence returns a list with elements 'root approximation' and
# 'iterations').  Prints a message if it fails to converge within n steps.
newton.raphson <- function(f, a, b, tol = 1e-5, n = 1000) {
  require(numDeriv) # Package for computing f'(x) via genD()
  x0 <- a # Set start value to supplied lower bound
  # Pre-allocate storage for the iterates.  (The original initialized k to
  # the scalar n — a meaningless value — and then grew the vector in the
  # loop, which copies on every iteration.)
  k <- numeric(n)
  # Check the upper and lower bounds to see if they are already roots.
  fa <- f(a)
  if (fa == 0.0) {
    return(a)
  }
  fb <- f(b)
  if (fb == 0.0) {
    return(b)
  }
  for (i in 1:n) {
    dx <- genD(func = f, x = x0)$D[1] # First-order derivative f'(x0)
    x1 <- x0 - (f(x0) / dx)           # Newton update
    k[i] <- x1
    # Converged when successive iterates are closer than the tolerance.
    if (abs(x1 - x0) < tol) {
      root.approx <- x1
      res <- list('root approximation' = root.approx,
                  'iterations' = k[seq_len(i)])
      return(res)
    }
    # Not converged yet: continue from the new estimate.
    x0 <- x1
  }
  print('Too many iterations in method')
}
N <- newton.raphson(u, a, b)
# N is a list; report only the converged root.  The original cat(..., N)
# also printed the entire iteration history.
cat("The root closer to the point x=0 is", N$`root approximation`)
aedda7703c31f93cd0a43e6c127994a4e0247288 | f317887c7d83e62235ba2cf19065dcef9244f645 | /man/summary.pltdTable.Rd | 6b00f4c527bf427203f65c73f4d09589fcca8dad | [] | no_license | rrprf/tablesgg | 3fec64842266f8a7f28e29899d31c673b5dad09c | 1a60f894869326b34eff1804c9378a1c05e78a79 | refs/heads/master | 2023-05-07T14:12:05.102317 | 2021-06-03T14:45:34 | 2021-06-03T14:45:34 | 318,291,905 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,322 | rd | summary.pltdTable.Rd | % Auto-generated documentation for function summary.pltdTable
% 2021-06-02 11:12:19
\name{summary.pltdTable}
\alias{summary.pltdTable}
\title{Summarize the Dimensions and Options of a Plotted Table }
\description{
Summarize the dimensions and display options of a plotted table.
}
\usage{
\method{summary}{pltdTable}(object, ...)
}
\arguments{
\item{object}{A \code{pltdTable} object, a plotted 2D data summary table.
}
\item{...}{Additional arguments, ignored with a warning. (Included for compatibility
with the generic.)
}
}
\value{
An object of S3 class \code{summary.pltdTable}. It is a list with
components
\item{adim}{Dimensions of the augmented row-column grid for the table. See
\code{?adim} for details about this grid.
}
\item{parts}{Data frame with one row for each table part, giving the dimensions of the
part, in columns \code{nr}, \code{nc}.
}
\item{mergeRuns, rowheadInside, rowgroupSize, scale, plot.margin, sizeAdjust}{Display options used by the table. See \code{plot.textTable} for their
meaning.
}
}
\details{
There is a print method for objects of the returned class.
}
\seealso{
\code{\link{adim}}, \code{\link{plot.textTable}}
}
\examples{
ttbl <- textTable(iris2_tab, title="Summary statistics for the iris data")
plt <- plot(ttbl, rowheadInside=TRUE)
summary(plt)
}
|
26d75666b91c1eabda23abd4c78bc0774f02ee29 | f9bc24751d593694fbc98648519df43c70d253ee | /inst/unitTests/test_synapseLogin.R | 0e0a41ef7c0129f2c5160525eaf368671374d58b | [] | no_license | brian-bot/rSynapseClient | cf607b242fa292902f832d6a5ecffceeba80eaef | cef1a6bb1f28034a9de826f3e92f1b1139e56c61 | refs/heads/master | 2020-04-05T22:52:30.912248 | 2017-04-28T17:45:58 | 2017-04-28T17:45:58 | 3,354,254 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,659 | r | test_synapseLogin.R | ## Unit test synapseLogin
##
## Author: Matthew D. Furia <matt.furia@sagebase.org>
###############################################################################
# RUnit fixture run before each test: stash the current session token and
# HMAC key in the cache (so .tearDown can restore them), then clear both
# so every test starts from a logged-out state.
.setUp <-
    function()
{
  synapseClient:::.setCache('oldSessionToken', synapseClient:::.getCache("sessionToken"))
  synapseClient:::.setCache('oldHmacKey', synapseClient:::.getCache("hmacSecretKey"))
  synapseClient:::sessionToken(NULL)
  hmacSecretKey(NULL)
  # Disable the welcome message printed on login
  synapseClient:::.mock(".doWelcome", function(...) {})
}
# RUnit fixture run after each test: restore the session token and HMAC
# key stashed by .setUp, and remove all mocks installed during the test.
.tearDown <- function() {
  synapseClient:::sessionToken(synapseClient:::.getCache('oldSessionToken'))
  hmacSecretKey(synapseClient:::.getCache('oldHmacKey'))
  synapseClient:::.unmockAll()
}
# Fetching an entity while logged out must raise an authentication error.
unitTestNotLoggedInHmac <- function() {
  sawAuthError <- FALSE
  # Error handler: record that an error occurred and check its message.
  onError <- function(e) {
    sawAuthError <<- TRUE
    checkTrue(grepl("Please authenticate", e))
  }
  tryCatch(getEntity(Project(list(id='bar'))), error = onError)
  checkTrue(sawAuthError)
}
# When username/password credentials are supplied, .doAuth should fetch a
# session token and derive the HMAC key, and should NOT fall back to the
# session cache, config file, or token refresh paths.
unitTestDoAuth_username_password <- function() {
  ## TODO: remove this test? It doesn't test much.
  credentials = list(username="foo", password="bar", sessionToken="", apiKey="")
  # These will be called if the logic is correct
  getSessionToken_called <- FALSE
  doHmac_called <- FALSE
  synapseClient:::.mock(".getSessionToken", function(...) {getSessionToken_called <<- TRUE})
  synapseClient:::.mock(".doHmac", function(...) {doHmac_called <<- TRUE})
  # These will not be called if the logic is correct
  hmacSecretKey_called <- FALSE
  refreshSessionToken_called <- FALSE
  readSessionCache_called <- FALSE
  synapseClient:::.mock("hmacSecretKey", function(...) {hmacSecretKey_called <<- TRUE})
  synapseClient:::.mock(".refreshSessionToken", function(...) {refreshSessionToken_called <<- TRUE})
  synapseClient:::.mock(".readSessionCache", function(...) {readSessionCache_called <<- TRUE})
  # Perform the call and check which code paths were exercised
  synapseClient:::.doAuth(credentials)
  checkTrue(getSessionToken_called)
  checkTrue(doHmac_called)
  checkTrue(!hmacSecretKey_called)
  checkTrue(!refreshSessionToken_called)
  checkTrue(!readSessionCache_called)
}
# With empty credentials, .doAuth should read the session cache, pick the
# user flagged as '<mostRecent>' and use that user's API key; it should
# not need to consult the config file (ConfigParser).
unitTestDoAuth_most_recent <- function() {
  credentials = list(username="", password="", sessionToken="", apiKey="")
  # These will be called if the logic is correct
  readSessionCache_called <- FALSE
  userName_called <- FALSE
  hmacSecretKey_called <- FALSE
  # Fake session cache: one user ("foo") with an API key, marked as the
  # most recently used account.
  synapseClient:::.mock(".readSessionCache",
      function(...) {
        readSessionCache_called <<- TRUE
        username <- "foo"
        json <- list()
        json[[username]] <- "api key"
        json[['<mostRecent>']] <- "foo"
        return(json)
      }
  )
  synapseClient:::.mock("userName", function(...) {userName_called <<- TRUE})
  synapseClient:::.mock("hmacSecretKey", function(...) {hmacSecretKey_called <<- TRUE})
  # These will not be called if the logic is correct
  configParser_called <- FALSE
  synapseClient:::.mock("ConfigParser", function(...) {configParser_called <<- TRUE})
  synapseClient:::.doAuth(credentials)
  checkTrue(readSessionCache_called)
  checkTrue(userName_called)
  checkTrue(hmacSecretKey_called)
  checkTrue(!configParser_called)
}
# With empty credentials and a session cache that has no '<mostRecent>'
# marker, .doAuth should fall back to the config file: parse it, look up
# the "authentication"/"username" option, and match it against the cached
# user ("foo").
unitTestDoAuth_session_and_config <- function() {
  credentials = list(username="", password="", sessionToken="", apiKey="")
  # These will be called if the logic is correct
  readSessionCache_called <- FALSE
  configParser_called <- FALSE
  hasOption_called_correctly <- FALSE
  # Fake session cache with a single user and NO '<mostRecent>' entry,
  # forcing the config-file lookup path.
  synapseClient:::.mock(".readSessionCache",
      function(...) {
        readSessionCache_called <<- TRUE
        return(list(foo="api key"))
      }
  )
  synapseClient:::.mock("ConfigParser", function(...) {configParser_called <<- TRUE})
  # Verify the config is queried for exactly the authentication username.
  synapseClient:::.mock("Config.hasOption",
      function(ignore, section, option) {
        hasOption_called_correctly <<- all(section == "authentication" && option == "username")
        return(hasOption_called_correctly)
      }
  )
  # Return "foo" only for the expected query; any other query is a bug.
  synapseClient:::.mock("Config.getOption",
      function(ignore, section, option) {
        if (all(section == "authentication" && option == "username")) {
          return("foo")
        }
        stop(sprintf("Incorrect arguments to Config.getOption: %s, %s", section, option))
      }
  )
  synapseClient:::.doAuth(credentials)
  checkTrue(readSessionCache_called)
  checkTrue(configParser_called)
  checkTrue(hasOption_called_correctly)
}
# synapseLogout should clear the user name, session token and HMAC key.
# The server-side session is deleted (synapseDelete) only when a session
# token exists; logging out without one must skip the delete call.
unitTest_logout <- function() {
  # Case 1: fully logged in (user name + session token + HMAC key).
  synapseClient:::userName("foo")
  synapseClient:::sessionToken("bar")
  synapseClient:::hmacSecretKey("baz")
  synapseDelete_called <- FALSE
  synapseClient:::.mock("synapseDelete", function(...) {synapseDelete_called <<- TRUE})
  synapseLogout(silent=TRUE)
  checkTrue(is.null(synapseClient:::userName()))
  checkTrue(is.null(synapseClient:::sessionToken()))
  # hmacSecretKey() errors once cleared, hence the try()/try-error check.
  checkTrue(class(try(synapseClient:::hmacSecretKey(), silent=TRUE)) == "try-error")
  checkTrue(synapseDelete_called)
  # Case 2: try again without the session token — no server delete expected.
  synapseClient:::userName("foo")
  synapseClient:::hmacSecretKey("baz")
  synapseDelete_called <- FALSE
  synapseLogout(silent=TRUE)
  checkTrue(is.null(synapseClient:::userName()))
  checkTrue(is.null(synapseClient:::sessionToken()))
  checkTrue(class(try(synapseClient:::hmacSecretKey(), silent=TRUE)) == "try-error")
  checkTrue(!synapseDelete_called)
}
# When the session cache is unusable and reading the config file fails,
# .doAuth should fall back to an interactive login prompt (Tk dialog or
# terminal, depending on the environment).
unitTest_loginNoConfigFile <- function() {
  credentials = list(username="", password="", sessionToken="", apiKey="")
  readSessionCache_called <- FALSE
  # Session cache whose contents cannot identify a user.
  synapseClient:::.mock(".readSessionCache",
      function(...) {
        readSessionCache_called <<- TRUE
        return(list(totally="ignored"))
      }
  )
  configParser_called <- FALSE
  # Record the ConfigParser call, then delegate to the real (mocked-over)
  # implementation so file reading is still attempted.
  synapseClient:::.mock("ConfigParser",
      function(...) {
        configParser_called <<- TRUE
        synapseClient:::.getMockedFunction("ConfigParser")()
      }
  )
  # Simulate a missing/unreadable config file.
  synapseClient:::.mock(".checkAndReadFile", function(...) {stop("Mwhaha! No config file here!")})
  doTkLogin_called <- FALSE
  doTerminalLogin_called <- FALSE
  synapseClient:::.mock(".doTkLogin", function(...) {doTkLogin_called <<- TRUE})
  synapseClient:::.mock(".doTerminalLogin", function(...) {doTerminalLogin_called <<- TRUE})
  synapseClient:::.doAuth(credentials)
  checkTrue(readSessionCache_called)
  checkTrue(configParser_called)
  # Either interactive login path is acceptable.
  checkTrue(doTkLogin_called || doTerminalLogin_called)
}
|
3496f42f06ed6ac3758c7cd508d655638fc0f0e3 | 3a5b24af385e8bd09526d4742c81bc3a2e01be4e | /R/visualization.R | 26ad81f99a064ef4b9de7d75263de9a09a3e8292 | [] | no_license | teryanarmen/CellChat | 019aa5099f53518aef45b3c1bf8a7cdc8370b2a2 | 56ac7b92718517ab5f9cddb80ca859d6ae29bf30 | refs/heads/master | 2023-03-29T19:42:54.226398 | 2021-04-08T19:05:21 | 2021-04-08T19:05:21 | 356,020,334 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 185,166 | r | visualization.R | #' ggplot theme in CellChat
#'
#' Builds the default ggplot2 theme used across CellChat plots: classic
#' theme with white strip/panel backgrounds, black axis lines, no grid
#' lines, blank legend keys, and a bold centered plot title.
#'
#' @return A ggplot2 theme object, to be added to a plot with `+`.
#' @export
#'
#' @importFrom ggplot2 theme_classic element_rect theme element_blank element_line element_text
CellChat_theme_opts <- function() {
  # NOTE(review): theme_classic() is a complete theme and likely overrides
  # the strip.background setting added just before it — confirm the
  # intended ordering.
  theme(strip.background = element_rect(colour = "white", fill = "white")) +
    theme_classic() +
    theme(panel.border = element_blank()) +
    theme(axis.line.x = element_line(color = "black")) +
    theme(axis.line.y = element_line(color = "black")) +
    theme(panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank()) +
    theme(panel.grid.major.x = element_blank(), panel.grid.major.y = element_blank()) +
    theme(panel.background = element_rect(fill = "white")) +
    theme(legend.key = element_blank()) + theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5))
}
#' Generate ggplot2 colors
#'
#' Emulates ggplot2's default discrete color scale: `n` equally spaced
#' hues around the HCL color wheel (chroma = 100, luminance = 65).
#'
#' @param n number of colors to generate
#' @return A character vector of `n` hex color codes.
#' @importFrom grDevices hcl
#' @export
#'
ggPalette <- function(n) {
  # n + 1 hues from 15 to 375 degrees: the first and last coincide
  # (15 == 375 mod 360), so only the first n are used.
  # Fixed: use `<-` (not `=`) and the full argument name `length.out`
  # (the original relied on partial matching of `length`).
  hues <- seq(15, 375, length.out = n + 1)
  grDevices::hcl(h = hues[seq_len(n)], l = 65, c = 100)
}
#' Generate colors from a custom color palette
#'
#' Draws from a fixed set of 26 visually distinct colors; when more than
#' 26 colors are requested, that set is interpolated to the requested
#' length with `colorRampPalette()`.
#'
#' @param n number of colors
#'
#' @return A character vector of `n` colors for plotting
#' @importFrom grDevices colorRampPalette
#'
#' @export
#'
scPalette <- function(n) {
  base.colors <- c('#E41A1C','#377EB8','#4DAF4A','#984EA3','#F29403','#F781BF','#BC9DCC','#A65628','#54B0E4','#222F75','#1B9E77','#B2DF8A',
                   '#E3BE00','#FB9A99','#E7298A','#910241','#00CDD1','#A6CEE3','#CE1261','#5E4FA2','#8CA77B','#00441B','#DEDC00','#B3DE69','#8DD3C7','#999999')
  if (n > length(base.colors)) {
    # More colors requested than available: interpolate the base palette.
    return(grDevices::colorRampPalette(base.colors)(n))
  }
  base.colors[1:n]
}
#' Visualize the inferred cell-cell communication network
#'
#' Automatically save plots in the current working directory.
#'
#' @param object CellChat object
#' @param signaling a signaling pathway name
#' @param signaling.name alternative signaling pathway name to show on the plot
#' @param color.use the character vector defining the color of each cell group
#' @param vertex.receiver a numeric vector giving the index of the cell groups as targets in the first hierarchy plot
#' @param top the fraction of interactions to show (0 < top <= 1)
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param remove.isolate whether to remove isolated nodes in the communication network
#' @param weight.scale whether scale the edge weight
#' @param vertex.weight The weight of vertex: either a scale value or a vector
#' @param vertex.weight.max the maximum weight of vertex; default = max(vertex.weight)
#' @param vertex.size.max the maximum vertex size for visualization
#' @param edge.weight.max.individual the maximum weight of edge when plotting the individual L-R network; default = max(net)
#' @param edge.weight.max.aggregate the maximum weight of edge when plotting the aggregated signaling pathway network
#' @param edge.width.max The maximum edge width for visualization
#' @param layout "hierarchy", "circle" or "chord"
#' @param height height of plot
#' @param thresh threshold of the p-value for determining significant interaction
#' @param pt.title font size of the text
#' @param title.space the space between the title and plot
#' @param vertex.label.cex The label size of vertex in the network
#' @param out.format the format of output figures: svg, png and pdf
#' @param from,to,bidirection Deprecated. Use `sources.use`,`targets.use`
#' @param vertex.size Deprecated. Use `vertex.weight`
#'
#' Parameters below are set for "chord" diagram. Please also check the function `netVisual_chord_cell` for more parameters.
#' @param group A named group labels for making multiple-group Chord diagrams. The sector names should be used as the names in the vector.
#' The order of group controls the sector orders and if group is set as a factor, the order of levels controls the order of groups.
#' @param cell.order a char vector defining the cell type orders (sector orders)
#' @param small.gap Small gap between sectors.
#' @param big.gap Gap between the different sets of sectors, which are defined in the `group` parameter
#' @param scale scale each sector to same width; default = FALSE; however, it is set to be TRUE when remove.isolate = TRUE
#' @param reduce if the ratio of the width of certain grid compared to the whole circle is less than this value, the grid is removed on the plot. Set it to value less than zero if you want to keep all tiny grid.
#' @param show.legend whether show the figure legend
#' @param legend.pos.x,legend.pos.y adjust the legend position
#' @param nCol number of columns when displaying the network mediated by ligand-receptor using "circle" or "chord"
#'
#' @param ... other parameters (e.g.,vertex.label.cex, vertex.label.color, alpha.edge, label.edge, edge.label.color, edge.label.cex, edge.curved)
#' passing to `netVisual_hierarchy1`,`netVisual_hierarchy2`,`netVisual_circle`. NB: some parameters might be not supported
#' @importFrom svglite svglite
#' @importFrom grDevices dev.off pdf
#'
#' @return
#' @export
#'
#' @examples
#'
netVisual <- function(object, signaling, signaling.name = NULL, color.use = NULL, vertex.receiver = NULL, sources.use = NULL, targets.use = NULL, top = 1, remove.isolate = FALSE,
vertex.weight = NULL, vertex.weight.max = NULL, vertex.size.max = 15,
weight.scale = TRUE, edge.weight.max.individual = NULL, edge.weight.max.aggregate = NULL, edge.width.max=8,
layout = c("hierarchy","circle","chord"), height = 5, thresh = 0.05, pt.title = 12, title.space = 6, vertex.label.cex = 0.8,from = NULL, to = NULL, bidirection = NULL,vertex.size = NULL,
out.format = c("svg","png"),
group = NULL,cell.order = NULL,small.gap = 1, big.gap = 10, scale = FALSE, reduce = -1, show.legend = FALSE, legend.pos.x = 20,legend.pos.y = 20, nCol = NULL,
...) {
layout <- match.arg(layout)
if (!is.null(vertex.size)) {
warning("'vertex.size' is deprecated. Use `vertex.weight`")
}
if (is.null(vertex.weight)) {
vertex.weight <- as.numeric(table(object@idents))
}
pairLR <- searchPair(signaling = signaling, pairLR.use = object@LR$LRsig, key = "pathway_name", matching.exact = T, pair.only = F)
if (is.null(signaling.name)) {
signaling.name <- signaling
}
net <- object@net
pairLR.use.name <- dimnames(net$prob)[[3]]
pairLR.name <- intersect(rownames(pairLR), pairLR.use.name)
pairLR <- pairLR[pairLR.name, ]
prob <- net$prob
pval <- net$pval
prob[pval > thresh] <- 0
if (length(pairLR.name) > 1) {
pairLR.name.use <- pairLR.name[apply(prob[,,pairLR.name], 3, sum) != 0]
} else {
pairLR.name.use <- pairLR.name[sum(prob[,,pairLR.name]) != 0]
}
if (length(pairLR.name.use) == 0) {
stop(paste0('There is no significant communication of ', signaling.name))
} else {
pairLR <- pairLR[pairLR.name.use,]
}
nRow <- length(pairLR.name.use)
prob <- prob[,,pairLR.name.use]
pval <- pval[,,pairLR.name.use]
if (is.null(nCol)) {
nCol <- min(length(pairLR.name.use), 2)
}
if (length(dim(prob)) == 2) {
prob <- replicate(1, prob, simplify="array")
pval <- replicate(1, pval, simplify="array")
}
# prob <-(prob-min(prob))/(max(prob)-min(prob))
if (is.null(edge.weight.max.individual)) {
edge.weight.max.individual = max(prob)
}
prob.sum <- apply(prob, c(1,2), sum)
# prob.sum <-(prob.sum-min(prob.sum))/(max(prob.sum)-min(prob.sum))
if (is.null(edge.weight.max.aggregate)) {
edge.weight.max.aggregate = max(prob.sum)
}
if (layout == "hierarchy") {
if (is.element("svg", out.format)) {
svglite::svglite(file = paste0(signaling.name, "_hierarchy_individual.svg"), width = 8, height = nRow*height)
par(mfrow=c(nRow,2), mar = c(5, 4, 4, 2) +0.1)
for (i in 1:length(pairLR.name.use)) {
#signalName_i <- paste0(pairLR$ligand[i], "-",pairLR$receptor[i], sep = "")
signalName_i <- pairLR$interaction_name_2[i]
prob.i <- prob[,,i]
netVisual_hierarchy1(prob.i, vertex.receiver = vertex.receiver, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.individual, edge.width.max=edge.width.max, title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
netVisual_hierarchy2(prob.i, vertex.receiver = setdiff(1:nrow(prob.i),vertex.receiver), sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.individual, edge.width.max=edge.width.max, title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
}
dev.off()
}
if (is.element("png", out.format)) {
grDevices::png(paste0(signaling.name, "_hierarchy_individual.png"), width = 8, height = nRow*height, units = "in",res = 300)
par(mfrow=c(nRow,2), mar = c(5, 4, 4, 2) +0.1)
for (i in 1:length(pairLR.name.use)) {
signalName_i <- pairLR$interaction_name_2[i]
prob.i <- prob[,,i]
netVisual_hierarchy1(prob.i, vertex.receiver = vertex.receiver, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.individual, edge.width.max=edge.width.max, title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
netVisual_hierarchy2(prob.i, vertex.receiver = setdiff(1:nrow(prob.i),vertex.receiver), sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max =edge.weight.max.individual, edge.width.max=edge.width.max, title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
}
dev.off()
}
if (is.element("pdf", out.format)) {
# grDevices::pdf(paste0(signaling.name, "_hierarchy_individual.pdf"), width = 8, height = nRow*height)
grDevices::cairo_pdf(paste0(signaling.name, "_hierarchy_individual.pdf"), width = 8, height = nRow*height)
par(mfrow=c(nRow,2), mar = c(5, 4, 4, 2) +0.1)
for (i in 1:length(pairLR.name.use)) {
signalName_i <- pairLR$interaction_name_2[i]
prob.i <- prob[,,i]
netVisual_hierarchy1(prob.i, vertex.receiver = vertex.receiver, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.individual, edge.width.max=edge.width.max, title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
netVisual_hierarchy2(prob.i, vertex.receiver = setdiff(1:nrow(prob.i),vertex.receiver), sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max =edge.weight.max.individual, edge.width.max=edge.width.max, title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
}
dev.off()
}
if (is.element("svg", out.format)) {
svglite::svglite(file = paste0(signaling.name, "_hierarchy_aggregate.svg"), width = 7, height = 1*height)
par(mfrow=c(1,2), ps = pt.title)
netVisual_hierarchy1(prob.sum, vertex.receiver = vertex.receiver, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max,title.name = NULL, vertex.label.cex = vertex.label.cex,...)
netVisual_hierarchy2(prob.sum, vertex.receiver = setdiff(1:nrow(prob.sum),vertex.receiver), sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max,title.name = NULL, vertex.label.cex = vertex.label.cex,...)
graphics::mtext(paste0(signaling.name, " signaling pathway network"), side = 3, outer = TRUE, cex = 1, line = -title.space)
dev.off()
}
if (is.element("png", out.format)) {
grDevices::png(paste0(signaling.name, "_hierarchy_aggregate.png"), width = 7, height = 1*height, units = "in",res = 300)
par(mfrow=c(1,2), ps = pt.title)
netVisual_hierarchy1(prob.sum, vertex.receiver = vertex.receiver, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max, title.name = NULL, vertex.label.cex = vertex.label.cex,...)
netVisual_hierarchy2(prob.sum, vertex.receiver = setdiff(1:nrow(prob.sum),vertex.receiver), sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max,title.name = NULL, vertex.label.cex = vertex.label.cex,...)
graphics::mtext(paste0(signaling.name, " signaling pathway network"), side = 3, outer = TRUE, cex = 1, line = -title.space)
dev.off()
}
if (is.element("pdf", out.format)) {
# grDevices::pdf(paste0(signaling.name, "_hierarchy_aggregate.pdf"), width = 7, height = 1*height)
grDevices::cairo_pdf(paste0(signaling.name, "_hierarchy_aggregate.pdf"), width = 7, height = 1*height)
par(mfrow=c(1,2), ps = pt.title)
netVisual_hierarchy1(prob.sum, vertex.receiver = vertex.receiver, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max, title.name = NULL, vertex.label.cex = vertex.label.cex,...)
netVisual_hierarchy2(prob.sum, vertex.receiver = setdiff(1:nrow(prob.sum),vertex.receiver), sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max, title.name = NULL, vertex.label.cex = vertex.label.cex,...)
graphics::mtext(paste0(signaling.name, " signaling pathway network"), side = 3, outer = TRUE, cex = 1, line = -title.space)
dev.off()
}
} else if (layout == "circle") {
if (is.element("svg", out.format)) {
svglite::svglite(file = paste0(signaling.name,"_", layout, "_individual.svg"), width = height, height = nRow*height)
# par(mfrow=c(nRow,1))
par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
for (i in 1:length(pairLR.name.use)) {
#signalName_i <- paste0(pairLR$ligand[i], "-",pairLR$receptor[i], sep = "")
signalName_i <- pairLR$interaction_name_2[i]
prob.i <- prob[,,i]
netVisual_circle(prob.i, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.individual, edge.width.max=edge.width.max, title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
}
dev.off()
}
if (is.element("png", out.format)) {
grDevices::png(paste0(signaling.name,"_", layout, "_individual.png"), width = height, height = nRow*height, units = "in",res = 300)
# par(mfrow=c(nRow,1))
par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
for (i in 1:length(pairLR.name.use)) {
#signalName_i <- paste0(pairLR$ligand[i], "-",pairLR$receptor[i], sep = "")
signalName_i <- pairLR$interaction_name_2[i]
prob.i <- prob[,,i]
netVisual_circle(prob.i, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.individual, edge.width.max=edge.width.max, title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
}
dev.off()
}
if (is.element("pdf", out.format)) {
# grDevices::pdf(paste0(signaling.name,"_", layout, "_individual.pdf"), width = height, height = nRow*height)
grDevices::cairo_pdf(paste0(signaling.name,"_", layout, "_individual.pdf"), width = height, height = nRow*height)
# par(mfrow=c(nRow,1))
par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
for (i in 1:length(pairLR.name.use)) {
#signalName_i <- paste0(pairLR$ligand[i], "-",pairLR$receptor[i], sep = "")
signalName_i <- pairLR$interaction_name_2[i]
prob.i <- prob[,,i]
netVisual_circle(prob.i, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.individual, edge.width.max=edge.width.max,title.name = signalName_i, vertex.label.cex = vertex.label.cex,...)
}
dev.off()
}
# prob.sum <- apply(prob, c(1,2), sum)
# prob.sum <-(prob.sum-min(prob.sum))/(max(prob.sum)-min(prob.sum))
if (is.element("svg", out.format)) {
svglite(file = paste0(signaling.name,"_", layout, "_aggregate.svg"), width = height, height = 1*height)
netVisual_circle(prob.sum, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max,title.name = paste0(signaling.name, " signaling pathway network"), vertex.label.cex = vertex.label.cex,...)
dev.off()
}
if (is.element("png", out.format)) {
grDevices::png(paste0(signaling.name,"_", layout, "_aggregate.png"), width = height, height = 1*height, units = "in",res = 300)
netVisual_circle(prob.sum, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max,title.name = paste0(signaling.name, " signaling pathway network"), vertex.label.cex = vertex.label.cex,...)
dev.off()
}
if (is.element("pdf", out.format)) {
# grDevices::pdf(paste0(signaling.name,"_", layout, "_aggregate.pdf"), width = height, height = 1*height)
grDevices::cairo_pdf(paste0(signaling.name,"_", layout, "_aggregate.pdf"), width = height, height = 1*height)
netVisual_circle(prob.sum, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max.aggregate, edge.width.max=edge.width.max, title.name = paste0(signaling.name, " signaling pathway network"), vertex.label.cex = vertex.label.cex,...)
dev.off()
}
} else if (layout == "chord") {
if (is.element("svg", out.format)) {
svglite::svglite(file = paste0(signaling.name,"_", layout, "_individual.svg"), width = height, height = nRow*height)
par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
# gg <- vector("list", length(pairLR.name.use))
for (i in 1:length(pairLR.name.use)) {
title.name <- pairLR$interaction_name_2[i]
net <- prob[,,i]
netVisual_chord_cell_internal(net, color.use = color.use, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate,
group = group, cell.order = cell.order,
lab.cex = vertex.label.cex,small.gap = small.gap, big.gap = big.gap,
scale = scale, reduce = reduce,
title.name = title.name, show.legend = show.legend, legend.pos.x = legend.pos.x,legend.pos.y=legend.pos.y)
}
dev.off()
}
if (is.element("png", out.format)) {
grDevices::png(paste0(signaling.name,"_", layout, "_individual.png"), width = height, height = nRow*height, units = "in",res = 300)
par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
# gg <- vector("list", length(pairLR.name.use))
for (i in 1:length(pairLR.name.use)) {
title.name <- pairLR$interaction_name_2[i]
net <- prob[,,i]
netVisual_chord_cell_internal(net, color.use = color.use, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate,
group = group, cell.order = cell.order,
lab.cex = vertex.label.cex,small.gap = small.gap, big.gap = big.gap,
scale = scale, reduce = reduce,
title.name = title.name, show.legend = show.legend, legend.pos.x = legend.pos.x,legend.pos.y=legend.pos.y)
}
dev.off()
}
if (is.element("pdf", out.format)) {
# grDevices::pdf(paste0(signaling.name,"_", layout, "_individual.pdf"), width = height, height = nRow*height)
grDevices::cairo_pdf(paste0(signaling.name,"_", layout, "_individual.pdf"), width = height, height = nRow*height)
par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
# gg <- vector("list", length(pairLR.name.use))
for (i in 1:length(pairLR.name.use)) {
title.name <- pairLR$interaction_name_2[i]
net <- prob[,,i]
netVisual_chord_cell_internal(net, color.use = color.use, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate,
group = group, cell.order = cell.order,
lab.cex = vertex.label.cex,small.gap = small.gap, big.gap = big.gap,
scale = scale, reduce = reduce,
title.name = title.name, show.legend = show.legend, legend.pos.x = legend.pos.x,legend.pos.y=legend.pos.y)
}
dev.off()
}
# prob.sum <- apply(prob, c(1,2), sum)
if (is.element("svg", out.format)) {
svglite(file = paste0(signaling.name,"_", layout, "_aggregate.svg"), width = height, height = 1*height)
netVisual_chord_cell_internal(prob.sum, color.use = color.use, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate,
group = group, cell.order = cell.order,
lab.cex = vertex.label.cex,small.gap = small.gap, big.gap = big.gap,
scale = scale, reduce = reduce,
title.name = paste0(signaling.name, " signaling pathway network"), show.legend = show.legend, legend.pos.x = legend.pos.x,legend.pos.y=legend.pos.y)
dev.off()
}
if (is.element("png", out.format)) {
grDevices::png(paste0(signaling.name,"_", layout, "_aggregate.png"), width = height, height = 1*height, units = "in",res = 300)
netVisual_chord_cell_internal(prob.sum, color.use = color.use, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate,
group = group, cell.order = cell.order,
lab.cex = vertex.label.cex,small.gap = small.gap, big.gap = big.gap,
scale = scale, reduce = reduce,
title.name = paste0(signaling.name, " signaling pathway network"), show.legend = show.legend, legend.pos.x = legend.pos.x,legend.pos.y=legend.pos.y)
dev.off()
}
if (is.element("pdf", out.format)) {
# grDevices::pdf(paste0(signaling.name,"_", layout, "_aggregate.pdf"), width = height, height = 1*height)
grDevices::cairo_pdf(paste0(signaling.name,"_", layout, "_aggregate.pdf"), width = height, height = 1*height)
netVisual_chord_cell_internal(prob.sum, color.use = color.use, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate,
group = group, cell.order = cell.order,
lab.cex = vertex.label.cex,small.gap = small.gap, big.gap = big.gap,
scale = scale, reduce = reduce,
title.name = paste0(signaling.name, " signaling pathway network"), show.legend = show.legend, legend.pos.x = legend.pos.x,legend.pos.y=legend.pos.y)
dev.off()
}
}
}
#' Visualize the inferred signaling network of signaling pathways by aggregating all L-R pairs
#'
#' @param object CellChat object
#' @param signaling a signaling pathway name
#' @param signaling.name alternative signaling pathway name to show on the plot
#' @param color.use the character vector defining the color of each cell group
#' @param vertex.receiver a numeric vector giving the index of the cell groups as targets in the first hierarchy plot
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param remove.isolate whether remove the isolate nodes in the communication network
#' @param top the fraction of interactions to show
#' @param weight.scale whether scale the edge weight
#' @param vertex.weight The weight of vertex: either a scale value or a vector
#' @param vertex.weight.max the maximum weight of vertex; default = max(vertex.weight)
#' @param vertex.size.max the maximum vertex size for visualization
#' @param edge.weight.max the maximum weight of edge; default = max(net)
#' @param edge.width.max The maximum edge width for visualization
#' @param layout "hierarchy", "circle" or "chord"
#' @param thresh threshold of the p-value for determining significant interaction
#' @param pt.title font size of the text
#' @param title.space the space between the title and plot
#' @param vertex.label.cex The label size of vertex in the network
#' @param from,to,bidirection Deprecated. Use `sources.use`,`targets.use`
#' @param vertex.size Deprecated. Use `vertex.weight`
#'
#' Parameters below are set for "chord" diagram. Please also check the function `netVisual_chord_cell` for more parameters.
#' @param group A named group labels for making multiple-group Chord diagrams. The sector names should be used as the names in the vector.
#' The order of group controls the sector orders and if group is set as a factor, the order of levels controls the order of groups.
#' @param cell.order a char vector defining the cell type orders (sector orders)
#' @param small.gap Small gap between sectors.
#' @param big.gap Gap between the different sets of sectors, which are defined in the `group` parameter
#' @param scale scale each sector to same width; default = FALSE; however, it is set to be TRUE when remove.isolate = TRUE
#' @param reduce if the ratio of the width of certain grid compared to the whole circle is less than this value, the grid is removed on the plot. Set it to value less than zero if you want to keep all tiny grid.
#' @param show.legend whether show the figure legend
#' @param legend.pos.x,legend.pos.y adjust the legend position
#'
#' @param ... other parameters (e.g.,vertex.label.cex, vertex.label.color, alpha.edge, label.edge, edge.label.color, edge.label.cex, edge.curved)
#' passing to `netVisual_hierarchy1`,`netVisual_hierarchy2`,`netVisual_circle`. NB: some parameters might be not supported
#' @importFrom grDevices recordPlot
#'
#' @return an object of class "recordedplot"
#' @export
#'
#'
netVisual_aggregate <- function(object, signaling, signaling.name = NULL, color.use = NULL, vertex.receiver = NULL, sources.use = NULL, targets.use = NULL, top = 1, remove.isolate = FALSE,
                                vertex.weight = NULL, vertex.weight.max = NULL, vertex.size.max = 15,
                                weight.scale = TRUE, edge.weight.max = NULL, edge.width.max=8,
                                layout = c("hierarchy","circle","chord"), thresh = 0.05, from = NULL, to = NULL, bidirection = NULL, vertex.size = NULL,
                                pt.title = 12, title.space = 6, vertex.label.cex = 0.8,
                                group = NULL,cell.order = NULL,small.gap = 1, big.gap = 10, scale = FALSE, reduce = -1, show.legend = FALSE, legend.pos.x = 20,legend.pos.y = 20,
                                ...) {
  layout <- match.arg(layout)
  # Warn about all deprecated arguments (previously only `vertex.size` triggered a warning,
  # while `from`/`to`/`bidirection` were silently ignored).
  if (!is.null(vertex.size)) {
    warning("'vertex.size' is deprecated. Use `vertex.weight`")
  }
  if (!is.null(from) || !is.null(to) || !is.null(bidirection)) {
    warning("'from', 'to' and 'bidirection' are deprecated. Use `sources.use`,`targets.use`")
  }
  # Default vertex weight: number of cells per cell group.
  if (is.null(vertex.weight)) {
    vertex.weight <- as.numeric(table(object@idents))
  }
  # All ligand-receptor pairs annotated to the queried pathway.
  pairLR <- searchPair(signaling = signaling, pairLR.use = object@LR$LRsig, key = "pathway_name", matching.exact = TRUE, pair.only = TRUE)
  if (is.null(signaling.name)) {
    signaling.name <- signaling
  }
  net <- object@net
  pairLR.use.name <- dimnames(net$prob)[[3]]
  # Restrict to pairs that were actually inferred in the communication network.
  pairLR.name <- intersect(rownames(pairLR), pairLR.use.name)
  pairLR <- pairLR[pairLR.name, ]
  prob <- net$prob
  pval <- net$pval
  # Zero out communications that are not statistically significant.
  prob[pval > thresh] <- 0
  if (length(pairLR.name) > 1) {
    pairLR.name.use <- pairLR.name[apply(prob[,,pairLR.name], 3, sum) != 0]
  } else {
    pairLR.name.use <- pairLR.name[sum(prob[,,pairLR.name]) != 0]
  }
  if (length(pairLR.name.use) == 0) {
    stop(paste0('There is no significant communication of ', signaling.name))
  } else {
    pairLR <- pairLR[pairLR.name.use,]
  }
  nRow <- length(pairLR.name.use)
  prob <- prob[,,pairLR.name.use]
  pval <- pval[,,pairLR.name.use]
  # Subsetting a single pair drops the third dimension; restore it so that
  # downstream `apply(prob, c(1,2), sum)` always sees a 3D array.
  if (length(dim(prob)) == 2) {
    prob <- replicate(1, prob, simplify="array")
    pval <- replicate(1, pval, simplify="array")
  }
  if (layout == "hierarchy") {
    # Aggregate the communication probabilities over all L-R pairs of the pathway.
    prob.sum <- apply(prob, c(1,2), sum)
    if (is.null(edge.weight.max)) {
      edge.weight.max = max(prob.sum)
    }
    par(mfrow=c(1,2), ps = pt.title)
    netVisual_hierarchy1(prob.sum, vertex.receiver = vertex.receiver, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max, edge.width.max=edge.width.max, title.name = NULL, vertex.label.cex = vertex.label.cex,...)
    netVisual_hierarchy2(prob.sum, vertex.receiver = setdiff(seq_len(nrow(prob.sum)),vertex.receiver), sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max, edge.width.max=edge.width.max, title.name = NULL, vertex.label.cex = vertex.label.cex,...)
    graphics::mtext(paste0(signaling.name, " signaling pathway network"), side = 3, outer = TRUE, cex = 1, line = -title.space)
    # Capture the base-graphics output as a replayable object.
    # https://www.andrewheiss.com/blog/2016/12/08/save-base-graphics-as-pseudo-objects-in-r/
    gg <- recordPlot()
  } else if (layout == "circle") {
    prob.sum <- apply(prob, c(1,2), sum)
    gg <- netVisual_circle(prob.sum, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max, edge.width.max=edge.width.max,title.name = paste0(signaling.name, " signaling pathway network"), vertex.label.cex = vertex.label.cex,...)
  } else if (layout == "chord") {
    prob.sum <- apply(prob, c(1,2), sum)
    gg <- netVisual_chord_cell_internal(prob.sum, color.use = color.use, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate,
                                        group = group, cell.order = cell.order,
                                        lab.cex = vertex.label.cex,small.gap = small.gap, big.gap = big.gap,
                                        scale = scale, reduce = reduce,
                                        title.name = paste0(signaling.name, " signaling pathway network"), show.legend = show.legend, legend.pos.x = legend.pos.x, legend.pos.y= legend.pos.y)
  }
  return(gg)
}
#' Visualize the inferred signaling network of individual L-R pairs
#'
#' @param object CellChat object
#' @param signaling a signaling pathway name
#' @param signaling.name alternative signaling pathway name to show on the plot
#' @param pairLR.use a char vector or a data frame consisting of one column named "interaction_name", defining the L-R pairs of interest
#' @param color.use the character vector defining the color of each cell group
#' @param vertex.receiver a numeric vector giving the index of the cell groups as targets in the first hierarchy plot
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param remove.isolate whether remove the isolate nodes in the communication network
#' @param top the fraction of interactions to show
#' @param weight.scale whether scale the edge weight
#' @param vertex.weight The weight of vertex: either a scale value or a vector
#' @param vertex.weight.max the maximum weight of vertex; default = max(vertex.weight)
#' @param vertex.size.max the maximum vertex size for visualization
#' @param vertex.label.cex The label size of vertex in the network
#' @param edge.weight.max the maximum weight of edge; default = max(net)
#' @param edge.width.max The maximum edge width for visualization
#' @param layout "hierarchy", "circle" or "chord"
#' @param height height of plot
#' @param thresh threshold of the p-value for determining significant interaction
# #' @param from,to,bidirection Deprecated. Use `sources.use`,`targets.use`
# #' @param vertex.size Deprecated. Use `vertex.weight`
#'
#' Parameters below are set for "chord" diagram. Please also check the function `netVisual_chord_cell` for more parameters.
#' @param group A named group labels for making multiple-group Chord diagrams. The sector names should be used as the names in the vector.
#' The order of group controls the sector orders and if group is set as a factor, the order of levels controls the order of groups.
#' @param cell.order a char vector defining the cell type orders (sector orders)
#' @param small.gap Small gap between sectors.
#' @param big.gap Gap between the different sets of sectors, which are defined in the `group` parameter
#' @param scale scale each sector to same width; default = FALSE; however, it is set to be TRUE when remove.isolate = TRUE
#' @param reduce if the ratio of the width of certain grid compared to the whole circle is less than this value, the grid is removed on the plot. Set it to value less than zero if you want to keep all tiny grid.
#' @param show.legend whether show the figure legend
#' @param legend.pos.x,legend.pos.y adjust the legend position
#' @param nCol number of columns when displaying the figures using "circle" or "chord"
#'
#' @param ... other parameters (e.g.,vertex.label.cex, vertex.label.color, alpha.edge, label.edge, edge.label.color, edge.label.cex, edge.curved)
#' passing to `netVisual_hierarchy1`,`netVisual_hierarchy2`,`netVisual_circle`. NB: some parameters might be not supported
#' @importFrom grDevices dev.off pdf
#'
#' @return an object of class "recordedplot"
#' @export
#'
#'
netVisual_individual <- function(object, signaling, signaling.name = NULL, pairLR.use = NULL, color.use = NULL, vertex.receiver = NULL, sources.use = NULL, targets.use = NULL, top = 1, remove.isolate = FALSE,
                                 vertex.weight = NULL, vertex.weight.max = NULL, vertex.size.max = 15, vertex.label.cex = 0.8,
                                 weight.scale = TRUE, edge.weight.max = NULL, edge.width.max=8,
                                 layout = c("hierarchy","circle","chord"), height = 5, thresh = 0.05, #from = NULL, to = NULL, bidirection = NULL,vertex.size = NULL,
                                 group = NULL,cell.order = NULL,small.gap = 1, big.gap = 10, scale = FALSE, reduce = -1, show.legend = FALSE, legend.pos.x = 20, legend.pos.y = 20, nCol = NULL,
                                 ...) {
  layout <- match.arg(layout)
  # NOTE(review): `height` is accepted for backward compatibility but is not
  # used in this function body — confirm whether it should size the device.
  # Default vertex weight: number of cells per cell group.
  if (is.null(vertex.weight)) {
    vertex.weight <- as.numeric(table(object@idents))
  }
  # `pair.only = FALSE` keeps the full annotation (including complex subunits),
  # unlike `netVisual_aggregate` which uses pair.only = TRUE.
  pairLR <- searchPair(signaling = signaling, pairLR.use = object@LR$LRsig, key = "pathway_name", matching.exact = TRUE, pair.only = FALSE)
  if (is.null(signaling.name)) {
    signaling.name <- signaling
  }
  net <- object@net
  pairLR.use.name <- dimnames(net$prob)[[3]]
  pairLR.name <- intersect(rownames(pairLR), pairLR.use.name)
  # Optionally restrict to user-supplied L-R pairs of interest.
  if (!is.null(pairLR.use)) {
    if (is.data.frame(pairLR.use)) {
      pairLR.name <- intersect(pairLR.name, as.character(pairLR.use$interaction_name))
    } else {
      pairLR.name <- intersect(pairLR.name, as.character(pairLR.use))
    }
    if (length(pairLR.name) == 0) {
      stop("There is no significant communication for the input L-R pairs!")
    }
  }
  pairLR <- pairLR[pairLR.name, ]
  prob <- net$prob
  pval <- net$pval
  # Zero out communications that are not statistically significant.
  prob[pval > thresh] <- 0
  if (length(pairLR.name) > 1) {
    pairLR.name.use <- pairLR.name[apply(prob[,,pairLR.name], 3, sum) != 0]
  } else {
    pairLR.name.use <- pairLR.name[sum(prob[,,pairLR.name]) != 0]
  }
  if (length(pairLR.name.use) == 0) {
    stop(paste0('There is no significant communication of ', signaling.name))
  } else {
    pairLR <- pairLR[pairLR.name.use,]
  }
  nRow <- length(pairLR.name.use)
  prob <- prob[,,pairLR.name.use]
  pval <- pval[,,pairLR.name.use]
  if (is.null(nCol)) {
    nCol <- min(length(pairLR.name.use), 2)
  }
  # Subsetting a single pair drops the third dimension; restore it.
  if (length(dim(prob)) == 2) {
    prob <- replicate(1, prob, simplify="array")
    pval <- replicate(1, pval, simplify="array")
  }
  # A shared edge-weight ceiling makes edge widths comparable across panels.
  if (is.null(edge.weight.max)) {
    edge.weight.max = max(prob)
  }
  if (layout == "hierarchy") {
    par(mfrow=c(nRow,2), mar = c(5, 4, 4, 2) +0.1)
    for (i in seq_along(pairLR.name.use)) {
      signalName_i <- pairLR$interaction_name_2[i]
      prob.i <- prob[,,i]
      netVisual_hierarchy1(prob.i, vertex.receiver = vertex.receiver, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max, edge.width.max=edge.width.max, title.name = signalName_i,...)
      netVisual_hierarchy2(prob.i, vertex.receiver = setdiff(1:nrow(prob.i),vertex.receiver), sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max, edge.width.max=edge.width.max, title.name = signalName_i,...)
    }
    # Capture the multi-panel base-graphics output as a replayable object.
    gg <- recordPlot()
  } else if (layout == "circle") {
    par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
    gg <- vector("list", length(pairLR.name.use))
    for (i in seq_along(pairLR.name.use)) {
      signalName_i <- pairLR$interaction_name_2[i]
      prob.i <- prob[,,i]
      gg[[i]] <- netVisual_circle(prob.i, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate, top = top, color.use = color.use, vertex.weight = vertex.weight, vertex.weight.max = vertex.weight.max, vertex.size.max = vertex.size.max, weight.scale = weight.scale, edge.weight.max = edge.weight.max, edge.width.max=edge.width.max, title.name = signalName_i,...)
    }
  } else if (layout == "chord") {
    par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
    gg <- vector("list", length(pairLR.name.use))
    for (i in seq_along(pairLR.name.use)) {
      title.name <- pairLR$interaction_name_2[i]
      # Use `prob.i` (not `net`) so the slice does not shadow `net <- object@net` above.
      prob.i <- prob[,,i]
      gg[[i]] <- netVisual_chord_cell_internal(prob.i, color.use = color.use, sources.use = sources.use, targets.use = targets.use, remove.isolate = remove.isolate,
                                               group = group, cell.order = cell.order,
                                               lab.cex = vertex.label.cex,small.gap = small.gap, big.gap = big.gap,
                                               scale = scale, reduce = reduce,
                                               title.name = title.name, show.legend = show.legend, legend.pos.x = legend.pos.x, legend.pos.y = legend.pos.y)
    }
  }
  return(gg)
}
#' Hierarchy plot of cell-cell communications sending to cell groups in vertex.receiver
#'
#' The width of edges represent the strength of the communication.
#'
#' @param net a weighted matrix defining the signaling network
#' @param vertex.receiver a numeric vector giving the index of the cell groups as targets in the first hierarchy plot
#' @param color.use the character vector defining the color of each cell group
#' @param title.name alternative signaling pathway name to show on the plot
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param remove.isolate whether remove the isolate nodes in the communication network
#' @param top the fraction of interactions to show
#' @param weight.scale whether rescale the edge weights
#' @param vertex.weight The weight of vertex: either a scale value or a vector
#' @param vertex.weight.max the maximum weight of vertex; default = max(vertex.weight)
#' @param vertex.size.max the maximum vertex size for visualization
#' @param edge.weight.max the maximum weight of edge; default = max(net)
#' @param edge.width.max The maximum edge width for visualization
#' @param label.dist the distance between labels and dot position
#' @param space.v the space between different columns in the plot
#' @param space.h the space between different rows in the plot
#' @param edge.curved Specifies whether to draw curved edges, or not.
#' This can be a logical or a numeric vector or scalar.
#' First the vector is replicated to have the same length as the number of
#' edges in the graph. Then it is interpreted for each edge separately.
#' A numeric value specifies the curvature of the edge; zero curvature means
#' straight edges, negative values means the edge bends clockwise, positive
#' values the opposite. TRUE means curvature 0.5, FALSE means curvature zero
#' @param shape The shape of the vertex, currently “circle”, “square”,
#' “csquare”, “rectangle”, “crectangle”, “vrectangle”, “pie” (see
#' vertex.shape.pie), ‘sphere’, and “none” are supported, and only by the
#' plot.igraph command. “none” does not draw the vertices at all, although
#' vertex label are plotted (if given). See shapes for details about vertex
#' shapes and vertex.shape.pie for using pie charts as vertices.
#' @param margin The amount of empty space below, over, at the left and right
#' of the plot, it is a numeric vector of length four. Usually values between
#' 0 and 0.5 are meaningful, but negative values are also possible, that will
#' make the plot zoom in to a part of the graph. If it is shorter than four
#' then it is recycled.
#' @param vertex.label.cex The label size of vertex
#' @param vertex.label.color The color of label for vertex
#' @param arrow.width The width of arrows
#' @param arrow.size the size of arrow
#' @param alpha.edge the transparency of edge
#' @param label.edge whether label edge
#' @param edge.label.color The color for single arrow
#' @param edge.label.cex The size of label for arrows
#' @param vertex.size Deprecated. Use `vertex.weight`
#' @importFrom igraph graph_from_adjacency_matrix ends E V layout_
#' @importFrom grDevices adjustcolor recordPlot
#' @importFrom shape Arrows
#' @return an object of class "recordedplot"
#' @export
netVisual_hierarchy1 <- function(net, vertex.receiver, color.use = NULL, title.name = NULL, sources.use = NULL, targets.use = NULL, remove.isolate = FALSE, top = 1,
                                 weight.scale = FALSE, vertex.weight=20, vertex.weight.max = NULL, vertex.size.max = 15,
                                 edge.weight.max = NULL, edge.width.max=8, alpha.edge = 0.6,
                                 label.dist = 2.8, space.v = 1.5, space.h = 1.6, shape= NULL, label.edge=FALSE,edge.curved=0, margin=0.2,
                                 vertex.label.cex=0.6,vertex.label.color= "black",arrow.width=1,arrow.size = 0.2,edge.label.color='black',edge.label.cex=0.5, vertex.size = NULL){
  if (!is.null(vertex.size)) {
    warning("'vertex.size' is deprecated. Use `vertex.weight`")
  }
  # Suppress warnings only for the duration of this function; the previous
  # `options(warn = -1)` silently disabled warnings for the caller's whole session.
  oldw <- options(warn = -1)
  on.exit(options(oldw), add = TRUE)
  # Keep only the strongest fraction `top` of interactions.
  thresh <- stats::quantile(net, probs = 1-top)
  net[net < thresh] <- 0
  cells.level <- rownames(net)
  if ((!is.null(sources.use)) | (!is.null(targets.use))) {
    df.net <- reshape2::melt(net, value.name = "value")
    colnames(df.net)[1:2] <- c("source","target")
    # keep the interactions associated with sources and targets of interest
    if (!is.null(sources.use)){
      if (is.numeric(sources.use)) {
        sources.use <- cells.level[sources.use]
      }
      df.net <- subset(df.net, source %in% sources.use)
    }
    if (!is.null(targets.use)){
      if (is.numeric(targets.use)) {
        targets.use <- cells.level[targets.use]
      }
      df.net <- subset(df.net, target %in% targets.use)
    }
    # Rebuild the full square matrix, zero-filling cell groups that were filtered out.
    df.net$source <- factor(df.net$source, levels = cells.level)
    df.net$target <- factor(df.net$target, levels = cells.level)
    df.net$value[is.na(df.net$value)] <- 0
    net <- tapply(df.net[["value"]], list(df.net[["source"]], df.net[["target"]]), sum)
  }
  net[is.na(net)] <- 0
  if (remove.isolate) {
    idx1 <- which(Matrix::rowSums(net) == 0)
    idx2 <- which(Matrix::colSums(net) == 0)
    idx <- intersect(idx1, idx2)
    # Only drop rows/columns when isolated groups exist: with `idx = integer(0)`,
    # `net[-idx, ]` selects ZERO rows and would empty the whole network.
    if (length(idx) > 0) {
      net <- net[-idx, ]
      net <- net[, -idx]
    }
  }
  if (is.null(color.use)) {
    color.use <- scPalette(nrow(net))
  }
  if (is.null(vertex.weight.max)) {
    vertex.weight.max <- max(vertex.weight)
  }
  # Rescale vertex weights to plotting sizes (+6 keeps a visible minimum size).
  vertex.weight <- vertex.weight/vertex.weight.max*vertex.size.max+6
  m <- length(vertex.receiver)
  net2 <- net
  # Put receiver groups first (left column), remaining groups second (right column).
  reorder.row <- c(vertex.receiver, setdiff(seq_len(nrow(net)),vertex.receiver))
  net2 <- net2[reorder.row,vertex.receiver]
  # Expand out to symmetric (M+N)x(M+N) matrix: sources on the left/right,
  # duplicated receiver targets (unlabeled white nodes) in the middle column.
  m1 <- nrow(net2); n1 <- ncol(net2)
  net3 <- rbind(cbind(matrix(0, m1, m1), net2), matrix(0, n1, m1+n1))
  row.names(net3) <- c(row.names(net)[vertex.receiver], row.names(net)[setdiff(seq_len(m1),vertex.receiver)], rep("",m))
  colnames(net3) <- row.names(net3)
  color.use3 <- c(color.use[vertex.receiver], color.use[setdiff(seq_len(m1),vertex.receiver)], rep("#FFFFFF",length(vertex.receiver)))
  color.use3.frame <- c(color.use[vertex.receiver], color.use[setdiff(seq_len(m1),vertex.receiver)], color.use[vertex.receiver])
  if (length(vertex.weight) != 1) {
    vertex.weight = c(vertex.weight[vertex.receiver], vertex.weight[setdiff(seq_len(m1),vertex.receiver)],vertex.weight[vertex.receiver])
  }
  if (is.null(shape)) {
    shape <- c(rep("circle",m), rep("circle", m1-m), rep("circle",m))
  }
  g <- graph_from_adjacency_matrix(net3, mode = "directed", weighted = T)
  edge.start <- ends(g, es=E(g), names=FALSE)
  # Manual layout: three vertical columns (left sources, right sources, middle targets).
  coords <- matrix(NA, nrow(net3), 2)
  coords[1:m,1] <- 0; coords[(m+1):m1,1] <- space.h; coords[(m1+1):nrow(net3),1] <- space.h/2;
  coords[1:m,2] <- seq(space.v, 0, by = -space.v/(m-1)); coords[(m+1):m1,2] <- seq(space.v, 0, by = -space.v/(m1-m-1));coords[(m1+1):nrow(net3),2] <- seq(space.v, 0, by = -space.v/(n1-1));
  coords_scale<-coords
  igraph::V(g)$size<-vertex.weight
  igraph::V(g)$color<-color.use3[igraph::V(g)]
  igraph::V(g)$frame.color <- color.use3.frame[igraph::V(g)]
  igraph::V(g)$label.color <- vertex.label.color
  igraph::V(g)$label.cex<-vertex.label.cex
  if(label.edge){
    E(g)$label<-E(g)$weight
    igraph::E(g)$label <- round(igraph::E(g)$label, digits = 1)
  }
  if (is.null(edge.weight.max)) {
    edge.weight.max <- max(igraph::E(g)$weight)
  }
  if (weight.scale == TRUE) {
    E(g)$width<- 0.3+E(g)$weight/edge.weight.max*edge.width.max
  }else{
    E(g)$width<-0.3+edge.width.max*E(g)$weight
  }
  E(g)$arrow.width<-arrow.width
  E(g)$arrow.size<-arrow.size
  E(g)$label.color<-edge.label.color
  E(g)$label.cex<-edge.label.cex
  # Color each edge by its source vertex, with transparency.
  E(g)$color<-adjustcolor(igraph::V(g)$color[edge.start[,1]],alpha.edge)
  label.dist <- c(rep(space.h*label.dist,m), rep(space.h*label.dist, m1-m),rep(0, nrow(net3)-m1))
  label.locs <- c(rep(-pi, m), rep(0, m1-m),rep(-pi, nrow(net3)-m1))
  text.pos <- cbind(c(-space.h/1.5, space.h/22, space.h/1.5), space.v-space.v/7)
  # `mycircle` (defined elsewhere in this file) draws circles with a custom frame width.
  igraph::add.vertex.shape("fcircle", clip=igraph::igraph.shape.noclip,plot=mycircle, parameters=list(vertex.frame.color=1, vertex.frame.width=1))
  plot(g,edge.curved=edge.curved,layout=coords_scale,margin=margin,rescale=T,vertex.shape="fcircle", vertex.frame.width = c(rep(1,m1), rep(2,nrow(net3)-m1)),
       vertex.label.degree=label.locs, vertex.label.dist=label.dist, vertex.label.family="Helvetica")
  text(text.pos, c("Source","Target","Source"), cex = 0.8, col = c("#c51b7d","#c51b7d","#2f6661"))
  # Column-header arrows pointing toward the middle (target) column.
  arrow.pos1 <- c(-space.h/1.5, space.v-space.v/4, space.h/100000, space.v-space.v/4)
  arrow.pos2 <- c(space.h/1.5, space.v-space.v/4, space.h/20, space.v-space.v/4)
  shape::Arrows(arrow.pos1[1], arrow.pos1[2], arrow.pos1[3], arrow.pos1[4], col = "#c51b7d",arr.lwd = 0.0001,arr.length = 0.2, lwd = 0.8,arr.type="triangle")
  shape::Arrows(arrow.pos2[1], arrow.pos2[2], arrow.pos2[3], arrow.pos2[4], col = "#2f6661",arr.lwd = 0.0001,arr.length = 0.2, lwd = 0.8,arr.type="triangle")
  if (!is.null(title.name)) {
    title.pos = c(space.h/8, space.v)
    text(title.pos[1],title.pos[2],paste0(title.name, " signaling network"), cex = 1)
  }
  # Capture the base-graphics output as a replayable object.
  # https://www.andrewheiss.com/blog/2016/12/08/save-base-graphics-as-pseudo-objects-in-r/
  gg <- recordPlot()
  return(gg)
}
#' Hierarchy plot of cell-cell communication sending to cell groups not in vertex.receiver
#'
#' This function loads the significant interactions as a weighted matrix, and colors
#' represent different types of cells as a structure. The width of edges represent the strength of the communication.
#'
#' @param net a weighted matrix defining the signaling network
#' @param vertex.receiver a numeric vector giving the index of the cell groups as targets in the first hierarchy plot
#' @param color.use the character vector defining the color of each cell group
#' @param title.name alternative signaling pathway name to show on the plot
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param remove.isolate whether remove the isolate nodes in the communication network
#' @param top the fraction of interactions to show
#' @param weight.scale whether rescale the edge weights
#' @param vertex.weight The weight of vertex: either a scale value or a vector
#' @param vertex.weight.max the maximum weight of vertex; default = max(vertex.weight)
#' @param vertex.size.max the maximum vertex size for visualization
#' @param edge.weight.max the maximum weight of edge; default = max(net)
#' @param edge.width.max The maximum edge width for visualization
#' @param label.dist the distance between labels and dot position
#' @param space.v the space between different columns in the plot
#' @param space.h the space between different rows in the plot
#' @param label.edge Whether or not shows the label of edges (number of connections between different cell types)
#' @param edge.curved Specifies whether to draw curved edges, or not.
#' This can be a logical or a numeric vector or scalar.
#' First the vector is replicated to have the same length as the number of
#' edges in the graph. Then it is interpreted for each edge separately.
#' A numeric value specifies the curvature of the edge; zero curvature means
#' straight edges, negative values means the edge bends clockwise, positive
#' values the opposite. TRUE means curvature 0.5, FALSE means curvature zero
#' @param shape The shape of the vertex, currently “circle”, “square”,
#' “csquare”, “rectangle”, “crectangle”, “vrectangle”, “pie” (see
#' vertex.shape.pie), ‘sphere’, and “none” are supported, and only by the
#' plot.igraph command. “none” does not draw the vertices at all, although
#' vertex label are plotted (if given). See shapes for details about vertex
#' shapes and vertex.shape.pie for using pie charts as vertices.
#' @param margin The amount of empty space below, over, at the left and right
#' of the plot, it is a numeric vector of length four. Usually values between
#' 0 and 0.5 are meaningful, but negative values are also possible, that will
#' make the plot zoom in to a part of the graph. If it is shorter than four
#' then it is recycled.
#' @param vertex.label.cex The label size of vertex
#' @param vertex.label.color The color of label for vertex
#' @param arrow.width The width of arrows
#' @param arrow.size the size of arrow
#' @param alpha.edge the transparency of edge
#' @param edge.label.color The color for single arrow
#' @param edge.label.cex The size of label for arrows
#' @param vertex.size Deprecated. Use `vertex.weight`
#' @importFrom igraph graph_from_adjacency_matrix ends E V layout_
#' @importFrom grDevices adjustcolor recordPlot
#' @importFrom shape Arrows
#' @return an object of class "recordedplot"
#' @export
netVisual_hierarchy2 <- function(net, vertex.receiver, color.use = NULL, title.name = NULL, sources.use = NULL, targets.use = NULL, remove.isolate = FALSE, top = 1,
                                 weight.scale = FALSE, vertex.weight = 20, vertex.weight.max = NULL, vertex.size.max = 15,
                                 edge.weight.max = NULL, edge.width.max = 8, alpha.edge = 0.6,
                                 label.dist = 2.8, space.v = 1.5, space.h = 1.6, shape = NULL, label.edge = FALSE, edge.curved = 0, margin = 0.2,
                                 vertex.label.cex = 0.6, vertex.label.color = "black", arrow.width = 1, arrow.size = 0.2, edge.label.color = 'black', edge.label.cex = 0.5, vertex.size = NULL){
  # Hierarchy plot: left column = remaining (non-receiver) groups, right column
  # = duplicated receiver groups, middle column = receiver groups as targets.
  # Returns the recorded base-graphics plot (class "recordedplot").
  if (!is.null(vertex.size)) {
    warning("'vertex.size' is deprecated. Use `vertex.weight`")
  }
  options(warn = -1)
  # Keep only the top fraction of interactions, zeroing the weakest weights.
  thresh <- stats::quantile(net, probs = 1-top)
  net[net < thresh] <- 0
  cells.level <- rownames(net)
  if ((!is.null(sources.use)) | (!is.null(targets.use))) {
    df.net <- reshape2::melt(net, value.name = "value")
    colnames(df.net)[1:2] <- c("source","target")
    # keep the interactions associated with sources and targets of interest
    if (!is.null(sources.use)){
      if (is.numeric(sources.use)) {
        sources.use <- cells.level[sources.use]
      }
      df.net <- subset(df.net, source %in% sources.use)
    }
    if (!is.null(targets.use)){
      if (is.numeric(targets.use)) {
        targets.use <- cells.level[targets.use]
      }
      df.net <- subset(df.net, target %in% targets.use)
    }
    # Rebuild the full square matrix so row/column order matches cells.level.
    df.net$source <- factor(df.net$source, levels = cells.level)
    df.net$target <- factor(df.net$target, levels = cells.level)
    df.net$value[is.na(df.net$value)] <- 0
    net <- tapply(df.net[["value"]], list(df.net[["source"]], df.net[["target"]]), sum)
  }
  net[is.na(net)] <- 0
  if (remove.isolate) {
    idx1 <- which(Matrix::rowSums(net) == 0)
    idx2 <- which(Matrix::colSums(net) == 0)
    idx <- intersect(idx1, idx2)
    # BUGFIX: when no group is isolated, `idx` is integer(0) and
    # `net[-integer(0), ]` selects ZERO rows (negative indexing with an empty
    # vector is the same as empty indexing), silently emptying the network.
    # Only subset when there is something to remove; `drop = FALSE` keeps the
    # result a matrix even if a single row/column remains.
    if (length(idx) > 0) {
      net <- net[-idx, -idx, drop = FALSE]
    }
  }
  if (is.null(color.use)) {
    color.use <- scPalette(nrow(net))
  }
  if (is.null(vertex.weight.max)) {
    vertex.weight.max <- max(vertex.weight)
  }
  # Rescale vertex weights to plotting sizes (+6 gives a minimum dot size).
  vertex.weight <- vertex.weight/vertex.weight.max*vertex.size.max+6
  m <- length(vertex.receiver)
  m0 <- nrow(net)-length(vertex.receiver)
  net2 <- net
  # Reorder rows: non-receiver groups first, then receiver groups; keep only
  # receiver columns (targets shown in this hierarchy).
  reorder.row <- c(setdiff(1:nrow(net),vertex.receiver), vertex.receiver)
  net2 <- net2[reorder.row,vertex.receiver]
  # Expand out to symmetric (M+N)x(M+N) matrix
  m1 <- nrow(net2); n1 <- ncol(net2)
  net3 <- rbind(cbind(matrix(0, m1, m1), net2), matrix(0, n1, m1+n1))
  row.names(net3) <- c(row.names(net)[setdiff(1:m1,vertex.receiver)],row.names(net)[vertex.receiver], rep("",m))
  colnames(net3) <- row.names(net3)
  # Duplicated receiver vertices are drawn white-filled with colored frames.
  color.use3 <- c(color.use[setdiff(1:m1,vertex.receiver)],color.use[vertex.receiver], rep("#FFFFFF",length(vertex.receiver)))
  color.use3.frame <- c(color.use[setdiff(1:m1,vertex.receiver)], color.use[vertex.receiver], color.use[vertex.receiver])
  if (length(vertex.weight) != 1) {
    vertex.weight = c(vertex.weight[setdiff(1:m1,vertex.receiver)], vertex.weight[vertex.receiver], vertex.weight[vertex.receiver])
  }
  if (is.null(shape)) {
    shape <- rep("circle",nrow(net3))
  }
  g <- graph_from_adjacency_matrix(net3, mode = "directed", weighted = T)
  edge.start <- ends(g, es=igraph::E(g), names=FALSE)
  # Manual three-column layout: x = 0 (sources), space.h (receivers' duplicates),
  # space.h/2 (targets); y spread evenly from space.v down to 0 in each column.
  coords <- matrix(NA, nrow(net3), 2)
  coords[1:m0,1] <- 0; coords[(m0+1):m1,1] <- space.h; coords[(m1+1):nrow(net3),1] <- space.h/2;
  coords[1:m0,2] <- seq(space.v, 0, by = -space.v/(m0-1)); coords[(m0+1):m1,2] <- seq(space.v, 0, by = -space.v/(m1-m0-1));coords[(m1+1):nrow(net3),2] <- seq(space.v, 0, by = -space.v/(n1-1));
  coords_scale<-coords
  igraph::V(g)$size<-vertex.weight
  igraph::V(g)$color<-color.use3[igraph::V(g)]
  igraph::V(g)$frame.color <- color.use3.frame[igraph::V(g)]
  igraph::V(g)$label.color <- vertex.label.color
  igraph::V(g)$label.cex<-vertex.label.cex
  if(label.edge){
    igraph::E(g)$label<-igraph::E(g)$weight
    igraph::E(g)$label <- round(igraph::E(g)$label, digits = 1)
  }
  if (is.null(edge.weight.max)) {
    edge.weight.max <- max(igraph::E(g)$weight)
  }
  if (weight.scale == TRUE) {
    # Edge width proportional to weight, rescaled to edge.width.max; 0.3 floor.
    igraph::E(g)$width<- 0.3+igraph::E(g)$weight/edge.weight.max*edge.width.max
  }else{
    igraph::E(g)$width<-0.3+edge.width.max*igraph::E(g)$weight
  }
  igraph::E(g)$arrow.width<-arrow.width
  igraph::E(g)$arrow.size<-arrow.size
  igraph::E(g)$label.color<-edge.label.color
  igraph::E(g)$label.cex<-edge.label.cex
  # Color each edge by its source vertex, with the requested transparency.
  igraph::E(g)$color<-adjustcolor(igraph::V(g)$color[edge.start[,1]],alpha.edge)
  label.dist <- c(rep(space.h*label.dist,m), rep(space.h*label.dist, m1-m),rep(0, nrow(net3)-m1))
  label.locs <- c(rep(-pi, m0), rep(0, m1-m0),rep(-pi, nrow(net3)-m1))
  text.pos <- cbind(c(-space.h/1.5, space.h/22, space.h/1.5), space.v-space.v/7)
  # Register the custom circle shape that supports per-vertex frame widths.
  igraph::add.vertex.shape("fcircle", clip=igraph::igraph.shape.noclip,plot=mycircle, parameters=list(vertex.frame.color=1, vertex.frame.width=1))
  plot(g,edge.curved=edge.curved,layout=coords_scale,margin=margin,rescale=T,vertex.shape="fcircle", vertex.frame.width = c(rep(1,m1), rep(2,nrow(net3)-m1)),
       vertex.label.degree=label.locs, vertex.label.dist=label.dist, vertex.label.family="Helvetica")
  # Column headers and direction arrows above the hierarchy.
  text(text.pos, c("Source","Target","Source"), cex = 0.8, col = c("#c51b7d","#2f6661","#2f6661"))
  arrow.pos1 <- c(-space.h/1.5, space.v-space.v/4, space.h/100000, space.v-space.v/4)
  arrow.pos2 <- c(space.h/1.5, space.v-space.v/4, space.h/20, space.v-space.v/4)
  shape::Arrows(arrow.pos1[1], arrow.pos1[2], arrow.pos1[3], arrow.pos1[4], col = "#c51b7d",arr.lwd = 0.0001,arr.length = 0.2, lwd = 0.8,arr.type="triangle")
  shape::Arrows(arrow.pos2[1], arrow.pos2[2], arrow.pos2[3], arrow.pos2[4], col = "#2f6661",arr.lwd = 0.0001,arr.length = 0.2, lwd = 0.8,arr.type="triangle")
  if (!is.null(title.name)) {
    title.pos = c(space.h/8, space.v)
    text(title.pos[1],title.pos[2],paste0(title.name, " signaling network"), cex = 1)
  }
  # https://www.andrewheiss.com/blog/2016/12/08/save-base-graphics-as-pseudo-objects-in-r/
  gg <- recordPlot()
  return(gg)
}
#' Circle plot of cell-cell communication network
#'
#' The width of edges represent the strength of the communication.
#'
#' @param net A weighted matrix representing the connections
#' @param color.use Colors represent different cell groups
#' @param title.name the name of the title
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param remove.isolate whether remove the isolate nodes in the communication network
#' @param top the fraction of interactions to show
#' @param weight.scale whether scale the weight
#' @param vertex.weight The weight of vertex: either a scale value or a vector
#' @param vertex.weight.max the maximum weight of vertex; default = max(vertex.weight)
#' @param vertex.size.max the maximum vertex size for visualization
#' @param vertex.label.cex The label size of vertex
#' @param vertex.label.color The color of label for vertex
#' @param edge.weight.max the maximum weight of edge; default = max(net)
#' @param edge.width.max The maximum edge width for visualization
#' @param label.edge Whether or not shows the label of edges
#' @param alpha.edge the transparency of edge
#' @param edge.label.color The color for single arrow
#' @param edge.label.cex The size of label for arrows
#' @param edge.curved Specifies whether to draw curved edges, or not.
#' This can be a logical or a numeric vector or scalar.
#' First the vector is replicated to have the same length as the number of
#' edges in the graph. Then it is interpreted for each edge separately.
#' A numeric value specifies the curvature of the edge; zero curvature means
#' straight edges, negative values means the edge bends clockwise, positive
#' values the opposite. TRUE means curvature 0.5, FALSE means curvature zero
#' @param shape The shape of the vertex, currently “circle”, “square”,
#' “csquare”, “rectangle”, “crectangle”, “vrectangle”, “pie” (see
#' vertex.shape.pie), ‘sphere’, and “none” are supported, and only by the
#' plot.igraph command. “none” does not draw the vertices at all, although
#' vertex label are plotted (if given). See shapes for details about vertex
#' shapes and vertex.shape.pie for using pie charts as vertices.
#' @param layout The layout specification. It must be a call to a layout
#' specification function.
#' @param margin The amount of empty space below, over, at the left and right
#' of the plot, it is a numeric vector of length four. Usually values between
#' 0 and 0.5 are meaningful, but negative values are also possible, that will
#' make the plot zoom in to a part of the graph. If it is shorter than four
#' then it is recycled.
#' @param arrow.width The width of arrows
#' @param arrow.size the size of arrow
# #' @param from,to,bidirection Deprecated. Use `sources.use`,`targets.use`
#' @param vertex.size Deprecated. Use `vertex.weight`
#' @importFrom igraph graph_from_adjacency_matrix ends E V layout_ in_circle
#' @importFrom grDevices recordPlot
#' @return an object of class "recordedplot"
#' @export
netVisual_circle <- function(net, color.use = NULL, title.name = NULL, sources.use = NULL, targets.use = NULL, remove.isolate = FALSE, top = 1,
                             weight.scale = FALSE, vertex.weight = 20, vertex.weight.max = NULL, vertex.size.max = 15, vertex.label.cex = 1, vertex.label.color = "black",
                             edge.weight.max = NULL, edge.width.max = 8, alpha.edge = 0.6, label.edge = FALSE, edge.label.color = 'black', edge.label.cex = 0.8,
                             edge.curved = 0.2, shape = 'circle', layout = in_circle(), margin = 0.2, vertex.size = NULL,
                             arrow.width = 1, arrow.size = 0.2){
  # Circle plot of the weighted directed communication network `net`; edge
  # widths encode interaction strength. Returns the recorded plot.
  if (!is.null(vertex.size)) {
    warning("'vertex.size' is deprecated. Use `vertex.weight`")
  }
  options(warn = -1)
  # Keep only the top fraction of interactions, zeroing the weakest weights.
  thresh <- stats::quantile(net, probs = 1-top)
  net[net < thresh] <- 0
  if ((!is.null(sources.use)) | (!is.null(targets.use))) {
    if (is.null(rownames(net))) {
      stop("The input weighted matrix should have rownames!")
    }
    cells.level <- rownames(net)
    df.net <- reshape2::melt(net, value.name = "value")
    colnames(df.net)[1:2] <- c("source","target")
    # keep the interactions associated with sources and targets of interest
    if (!is.null(sources.use)){
      if (is.numeric(sources.use)) {
        sources.use <- cells.level[sources.use]
      }
      df.net <- subset(df.net, source %in% sources.use)
    }
    if (!is.null(targets.use)){
      if (is.numeric(targets.use)) {
        targets.use <- cells.level[targets.use]
      }
      df.net <- subset(df.net, target %in% targets.use)
    }
    # Rebuild the full square matrix so row/column order matches cells.level.
    df.net$source <- factor(df.net$source, levels = cells.level)
    df.net$target <- factor(df.net$target, levels = cells.level)
    df.net$value[is.na(df.net$value)] <- 0
    net <- tapply(df.net[["value"]], list(df.net[["source"]], df.net[["target"]]), sum)
  }
  net[is.na(net)] <- 0
  if (remove.isolate) {
    idx1 <- which(Matrix::rowSums(net) == 0)
    idx2 <- which(Matrix::colSums(net) == 0)
    idx <- intersect(idx1, idx2)
    # BUGFIX: when no group is isolated, `idx` is integer(0) and
    # `net[-integer(0), ]` selects ZERO rows, silently emptying the network.
    # Only subset when there is something to remove; `drop = FALSE` keeps the
    # result a matrix even if a single row/column remains.
    if (length(idx) > 0) {
      net <- net[-idx, -idx, drop = FALSE]
    }
  }
  g <- graph_from_adjacency_matrix(net, mode = "directed", weighted = T)
  edge.start <- igraph::ends(g, es=igraph::E(g), names=FALSE)
  coords<-layout_(g,layout)
  if(nrow(coords)!=1){
    coords_scale=scale(coords)
  }else{
    coords_scale<-coords
  }
  if (is.null(color.use)) {
    color.use = scPalette(length(igraph::V(g)))
  }
  if (is.null(vertex.weight.max)) {
    vertex.weight.max <- max(vertex.weight)
  }
  # Rescale vertex weights to plotting sizes (+5 gives a minimum dot size).
  vertex.weight <- vertex.weight/vertex.weight.max*vertex.size.max+5
  # Angle at which a self-loop is drawn, pointing away from the circle center.
  loop.angle<-ifelse(coords_scale[igraph::V(g),1]>0,-atan(coords_scale[igraph::V(g),2]/coords_scale[igraph::V(g),1]),pi-atan(coords_scale[igraph::V(g),2]/coords_scale[igraph::V(g),1]))
  igraph::V(g)$size<-vertex.weight
  igraph::V(g)$color<-color.use[igraph::V(g)]
  igraph::V(g)$frame.color <- color.use[igraph::V(g)]
  igraph::V(g)$label.color <- vertex.label.color
  igraph::V(g)$label.cex<-vertex.label.cex
  if(label.edge){
    igraph::E(g)$label<-igraph::E(g)$weight
    igraph::E(g)$label <- round(igraph::E(g)$label, digits = 1)
  }
  if (is.null(edge.weight.max)) {
    edge.weight.max <- max(igraph::E(g)$weight)
  }
  if (weight.scale == TRUE) {
    # Edge width proportional to weight, rescaled to edge.width.max; 0.3 floor.
    igraph::E(g)$width<- 0.3+igraph::E(g)$weight/edge.weight.max*edge.width.max
  }else{
    igraph::E(g)$width<-0.3+edge.width.max*igraph::E(g)$weight
  }
  igraph::E(g)$arrow.width<-arrow.width
  igraph::E(g)$arrow.size<-arrow.size
  igraph::E(g)$label.color<-edge.label.color
  igraph::E(g)$label.cex<-edge.label.cex
  # Color each edge by its source vertex, with the requested transparency.
  igraph::E(g)$color<- grDevices::adjustcolor(igraph::V(g)$color[edge.start[,1]],alpha.edge)
  if(sum(edge.start[,2]==edge.start[,1])!=0){
    igraph::E(g)$loop.angle[which(edge.start[,2]==edge.start[,1])]<-loop.angle[edge.start[which(edge.start[,2]==edge.start[,1]),1]]
  }
  # Place vertex labels radially, just outside the circle layout.
  radian.rescale <- function(x, start=0, direction=1) {
    c.rotate <- function(x) (x + start) %% (2 * pi) * direction
    c.rotate(scales::rescale(x, c(0, 2 * pi), range(x)))
  }
  label.locs <- radian.rescale(x=1:length(igraph::V(g)), direction=-1, start=0)
  label.dist <- vertex.weight/max(vertex.weight)+2
  plot(g,edge.curved=edge.curved,vertex.shape=shape,layout=coords_scale,margin=margin, vertex.label.dist=label.dist,
       vertex.label.degree=label.locs, vertex.label.family="Helvetica", edge.label.family="Helvetica") # "sans"
  if (!is.null(title.name)) {
    text(0,1.5,title.name, cex = 1.1)
  }
  # https://www.andrewheiss.com/blog/2016/12/08/save-base-graphics-as-pseudo-objects-in-r/
  gg <- recordPlot()
  return(gg)
}
#' generate circle symbol
#'
#' @param coords coordinates of points
#' @param v vertex indices to draw; `NULL` means all vertices
#' @param params parameters
#' @importFrom graphics symbols
#' @return No return value; called for its side effect of drawing circle symbols on the current plot.
# Custom igraph vertex-shape plotting callback (registered via
# igraph::add.vertex.shape): draws each vertex as a filled circle whose fill,
# outline color, radius and outline width come from the igraph plot parameters.
mycircle <- function(coords, v=NULL, params) {
  # Fetch one vertex parameter, subsetting to the requested vertices when the
  # value is per-vertex (length > 1) and a vertex index was supplied.
  get_par <- function(name) {
    val <- params("vertex", name)
    if (length(val) != 1 && !is.null(v)) {
      val <- val[v]
    }
    val
  }
  fill <- get_par("color")
  # igraph vertex sizes are expressed in 1/200ths of the plot scale.
  radius <- 1/200 * get_par("size")
  outline <- get_par("frame.color")
  outline.width <- get_par("frame.width")
  mapply(coords[,1], coords[,2], fill, outline,
         radius, outline.width,
         FUN=function(x, y, bg, fg, size, lwd) {
           symbols(x=x, y=y, bg=bg, fg=fg, lwd=lwd,
                   circles=size, add=TRUE, inches=FALSE)
         })
}
#' Circle plot showing differential cell-cell communication network between two datasets
#'
#' The width of edges represent the relative number of interactions or interaction strength.
#' Red (or blue) colored edges represent increased (or decreased) signaling in the second dataset compared to the first one.
#'
#' @param object A merged CellChat objects
#' @param comparison a numerical vector giving the datasets for comparison in object.list; e.g., comparison = c(1,2)
#' @param measure "count" or "weight". "count": comparing the number of interactions; "weight": comparing the total interaction weights (strength)
#' @param color.use Colors represent different cell groups
#' @param title.name the name of the title
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param remove.isolate whether remove the isolate nodes in the communication network
#' @param top the fraction of interactions to show
#' @param weight.scale whether scale the weight
#' @param vertex.weight The weight of vertex: either a scale value or a vector
#' @param vertex.weight.max the maximum weight of vertex; default = max(vertex.weight)
#' @param vertex.size.max the maximum vertex size for visualization
#' @param vertex.label.cex The label size of vertex
#' @param vertex.label.color The color of label for vertex
#' @param edge.weight.max the maximum weight of edge; default = max(net)
#' @param edge.width.max The maximum edge width for visualization
#' @param label.edge Whether or not shows the label of edges
#' @param alpha.edge the transparency of edge
#' @param edge.label.color The color for single arrow
#' @param edge.label.cex The size of label for arrows
#' @param edge.curved Specifies whether to draw curved edges, or not.
#' This can be a logical or a numeric vector or scalar.
#' First the vector is replicated to have the same length as the number of
#' edges in the graph. Then it is interpreted for each edge separately.
#' A numeric value specifies the curvature of the edge; zero curvature means
#' straight edges, negative values means the edge bends clockwise, positive
#' values the opposite. TRUE means curvature 0.5, FALSE means curvature zero
#' @param shape The shape of the vertex, currently “circle”, “square”,
#' “csquare”, “rectangle”, “crectangle”, “vrectangle”, “pie” (see
#' vertex.shape.pie), ‘sphere’, and “none” are supported, and only by the
#' plot.igraph command. “none” does not draw the vertices at all, although
#' vertex label are plotted (if given). See shapes for details about vertex
#' shapes and vertex.shape.pie for using pie charts as vertices.
#' @param layout The layout specification. It must be a call to a layout
#' specification function.
#' @param margin The amount of empty space below, over, at the left and right
#' of the plot, it is a numeric vector of length four. Usually values between
#' 0 and 0.5 are meaningful, but negative values are also possible, that will
#' make the plot zoom in to a part of the graph. If it is shorter than four
#' then it is recycled.
#' @param arrow.width The width of arrows
#' @param arrow.size the size of arrow
# #' @param from,to,bidirection Deprecated. Use `sources.use`,`targets.use`
# #' @param vertex.size Deprecated. Use `vertex.weight`
#' @importFrom igraph graph_from_adjacency_matrix ends E V layout_ in_circle
#' @importFrom grDevices recordPlot
#' @return an object of class "recordedplot"
#' @export
netVisual_diffInteraction <- function(object, comparison = c(1,2), measure = c("count", "weight", "count.merged", "weight.merged"), color.use = NULL, title.name = NULL, sources.use = NULL, targets.use = NULL, remove.isolate = FALSE, top = 1,
                                      weight.scale = FALSE, vertex.weight = 20, vertex.weight.max = NULL, vertex.size.max = 15, vertex.label.cex = 1, vertex.label.color = "black",
                                      edge.weight.max = NULL, edge.width.max = 8, alpha.edge = 0.6, label.edge = FALSE, edge.label.color = 'black', edge.label.cex = 0.8,
                                      edge.curved = 0.2, shape = 'circle', layout = in_circle(), margin = 0.2,
                                      arrow.width = 1, arrow.size = 0.2){
  # Circle plot of the DIFFERENTIAL network between two datasets: edges are
  # colored red when signaling increases in the second dataset and blue when
  # it decreases; edge width encodes the magnitude of the change.
  options(warn = -1)
  measure <- match.arg(measure)
  # Difference matrix: second dataset minus first.
  obj1 <- object@net[[comparison[1]]][[measure]]
  obj2 <- object@net[[comparison[2]]][[measure]]
  net.diff <- obj2 - obj1
  if (measure %in% c("count", "count.merged")) {
    if (is.null(title.name)) {
      title.name = "Differential number of interactions"
    }
  } else if (measure %in% c("weight", "weight.merged")) {
    if (is.null(title.name)) {
      title.name = "Differential interaction strength"
    }
  }
  net <- net.diff
  if ((!is.null(sources.use)) | (!is.null(targets.use))) {
    df.net <- reshape2::melt(net, value.name = "value")
    colnames(df.net)[1:2] <- c("source","target")
    # keep the interactions associated with sources and targets of interest
    if (!is.null(sources.use)){
      if (is.numeric(sources.use)) {
        sources.use <- rownames(net.diff)[sources.use]
      }
      df.net <- subset(df.net, source %in% sources.use)
    }
    if (!is.null(targets.use)){
      if (is.numeric(targets.use)) {
        targets.use <- rownames(net.diff)[targets.use]
      }
      df.net <- subset(df.net, target %in% targets.use)
    }
    # Rebuild the full square matrix so row/column order matches cells.level.
    cells.level <- rownames(net.diff)
    df.net$source <- factor(df.net$source, levels = cells.level)
    df.net$target <- factor(df.net$target, levels = cells.level)
    df.net$value[is.na(df.net$value)] <- 0
    net <- tapply(df.net[["value"]], list(df.net[["source"]], df.net[["target"]]), sum)
  }
  if (remove.isolate) {
    idx1 <- which(Matrix::rowSums(net) == 0)
    idx2 <- which(Matrix::colSums(net) == 0)
    idx <- intersect(idx1, idx2)
    # BUGFIX: when no group is isolated, `idx` is integer(0) and
    # `net[-integer(0), ]` selects ZERO rows, silently emptying the network.
    # Only subset when there is something to remove; `drop = FALSE` keeps the
    # result a matrix even if a single row/column remains.
    if (length(idx) > 0) {
      net <- net[-idx, -idx, drop = FALSE]
    }
  }
  # Keep only the top fraction of differential interactions (by magnitude).
  net[abs(net) < stats::quantile(abs(net), probs = 1-top)] <- 0
  g <- graph_from_adjacency_matrix(net, mode = "directed", weighted = T)
  edge.start <- igraph::ends(g, es=igraph::E(g), names=FALSE)
  coords<-layout_(g,layout)
  if(nrow(coords)!=1){
    coords_scale=scale(coords)
  }else{
    coords_scale<-coords
  }
  if (is.null(color.use)) {
    color.use = scPalette(length(igraph::V(g)))
  }
  if (is.null(vertex.weight.max)) {
    vertex.weight.max <- max(vertex.weight)
  }
  # Rescale vertex weights to plotting sizes (+5 gives a minimum dot size).
  vertex.weight <- vertex.weight/vertex.weight.max*vertex.size.max+5
  # Angle at which a self-loop is drawn, pointing away from the circle center.
  loop.angle<-ifelse(coords_scale[igraph::V(g),1]>0,-atan(coords_scale[igraph::V(g),2]/coords_scale[igraph::V(g),1]),pi-atan(coords_scale[igraph::V(g),2]/coords_scale[igraph::V(g),1]))
  igraph::V(g)$size<-vertex.weight
  igraph::V(g)$color<-color.use[igraph::V(g)]
  igraph::V(g)$frame.color <- color.use[igraph::V(g)]
  igraph::V(g)$label.color <- vertex.label.color
  igraph::V(g)$label.cex<-vertex.label.cex
  if(label.edge){
    igraph::E(g)$label<-igraph::E(g)$weight
    igraph::E(g)$label <- round(igraph::E(g)$label, digits = 1)
  }
  igraph::E(g)$arrow.width<-arrow.width
  igraph::E(g)$arrow.size<-arrow.size
  igraph::E(g)$label.color<-edge.label.color
  igraph::E(g)$label.cex<-edge.label.cex
  # Red (#b2182b) = increased signaling in dataset 2; blue (#2166ac) = decreased.
  igraph::E(g)$color <- ifelse(igraph::E(g)$weight > 0,'#b2182b','#2166ac')
  igraph::E(g)$color <- grDevices::adjustcolor(igraph::E(g)$color, alpha.edge)
  # After coloring by sign, only the magnitude of change matters for widths.
  igraph::E(g)$weight <- abs(igraph::E(g)$weight)
  if (is.null(edge.weight.max)) {
    edge.weight.max <- max(igraph::E(g)$weight)
  }
  if (weight.scale == TRUE) {
    # Edge width proportional to weight, rescaled to edge.width.max; 0.3 floor.
    igraph::E(g)$width<- 0.3+igraph::E(g)$weight/edge.weight.max*edge.width.max
  }else{
    igraph::E(g)$width<-0.3+edge.width.max*igraph::E(g)$weight
  }
  if(sum(edge.start[,2]==edge.start[,1])!=0){
    igraph::E(g)$loop.angle[which(edge.start[,2]==edge.start[,1])]<-loop.angle[edge.start[which(edge.start[,2]==edge.start[,1]),1]]
  }
  # Place vertex labels radially, just outside the circle layout.
  radian.rescale <- function(x, start=0, direction=1) {
    c.rotate <- function(x) (x + start) %% (2 * pi) * direction
    c.rotate(scales::rescale(x, c(0, 2 * pi), range(x)))
  }
  label.locs <- radian.rescale(x=1:length(igraph::V(g)), direction=-1, start=0)
  label.dist <- vertex.weight/max(vertex.weight)+2
  plot(g,edge.curved=edge.curved,vertex.shape=shape,layout=coords_scale,margin=margin, vertex.label.dist=label.dist,
       vertex.label.degree=label.locs, vertex.label.family="Helvetica", edge.label.family="Helvetica") # "sans"
  if (!is.null(title.name)) {
    text(0,1.5,title.name, cex = 1.1)
  }
  # https://www.andrewheiss.com/blog/2016/12/08/save-base-graphics-as-pseudo-objects-in-r/
  gg <- recordPlot()
  return(gg)
}
#' Visualization of network using heatmap
#'
#' This heatmap can be used to show differential number of interactions or interaction strength in the cell-cell communication network between two datasets;
#' the number of interactions or interaction strength in a single dataset
#' the inferred cell-cell communication network in single dataset, defined by `signaling`
#'
#' When show differential number of interactions or interaction strength in the cell-cell communication network between two datasets, the width of edges represent the relative number of interactions or interaction strength.
#' Red (or blue) colored edges represent increased (or decreased) signaling in the second dataset compared to the first one.
#'
#' The top colored bar plot represents the sum of column of values displayed in the heatmap. The right colored bar plot represents the sum of row of values.
#'
#'
#' @param object A merged CellChat object or a single CellChat object
#' @param comparison a numerical vector giving the datasets for comparison in object.list; e.g., comparison = c(1,2)
#' @param measure "count" or "weight". "count": comparing the number of interactions; "weight": comparing the total interaction weights (strength)
#' @param signaling a character vector giving the name of signaling networks in a single CellChat object
#' @param slot.name the slot name of object. Set it to "netP" if the input signaling is a pathway name; set it to "net" if the input signaling is a ligand-receptor pair
#' @param color.use the character vector defining the color of each cell group
#' @param color.heatmap A vector of two colors corresponding to max/min values, or a color name in brewer.pal only when the data in the heatmap do not contain negative values
#' @param title.name the name of the title
#' @param width width of heatmap
#' @param height height of heatmap
#' @param font.size fontsize in heatmap
#' @param font.size.title font size of the title
#' @param cluster.rows whether cluster rows
#' @param cluster.cols whether cluster columns
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param remove.isolate whether remove the isolate nodes in the communication network
#' @param row.show,col.show a vector giving the index or the name of row or columns to show in the heatmap
#' @importFrom methods slot
#' @importFrom grDevices colorRampPalette
#' @importFrom RColorBrewer brewer.pal
#' @importFrom ComplexHeatmap Heatmap HeatmapAnnotation anno_barplot rowAnnotation
#' @return an object of ComplexHeatmap
#' @export
netVisual_heatmap <- function(object, comparison = c(1,2), measure = c("count", "weight"), signaling = NULL, slot.name = c("netP", "net"), color.use = NULL, color.heatmap = c("#2166ac","#b2182b"),
                              title.name = NULL, width = NULL, height = NULL, font.size = 8, font.size.title = 10, cluster.rows = FALSE, cluster.cols = FALSE,
                              sources.use = NULL, targets.use = NULL, remove.isolate = FALSE, row.show = NULL, col.show = NULL){
  # Heatmap of cell-cell communication. Works in two modes:
  #   * merged object  -> differential count/weight between two datasets (obj2 - obj1)
  #   * single object  -> count/weight matrix, or the probability matrix of one signaling network
  if (!is.null(measure)) {
    measure <- match.arg(measure)
  }
  slot.name <- match.arg(slot.name)
  if (is.list(object@net[[1]])) {
    # Merged object: object@net is a list of per-dataset nets.
    message("Do heatmap based on a merged object \n")
    obj1 <- object@net[[comparison[1]]][[measure]]
    obj2 <- object@net[[comparison[2]]][[measure]]
    # Differential matrix: positive = stronger in the second dataset.
    net.diff <- obj2 - obj1
    if (measure == "count") {
      if (is.null(title.name)) {
        title.name = "Differential number of interactions"
      }
    } else if (measure == "weight") {
      if (is.null(title.name)) {
        title.name = "Differential interaction strength"
      }
    }
    legend.name = "Relative values"
  } else {
    message("Do heatmap based on a single object \n")
    if (!is.null(signaling)) {
      # Probability matrix of one signaling network (senders x receivers).
      net.diff <- slot(object, slot.name)$prob[,,signaling]
      if (is.null(title.name)) {
        title.name = paste0(signaling, " signaling network")
      }
      legend.name <- "Communication Prob."
    } else if (!is.null(measure)) {
      net.diff <- object@net[[measure]]
      if (measure == "count") {
        if (is.null(title.name)) {
          title.name = "Number of interactions"
        }
      } else if (measure == "weight") {
        if (is.null(title.name)) {
          title.name = "Interaction strength"
        }
      }
      legend.name <- title.name
    }
  }
  net <- net.diff
  if ((!is.null(sources.use)) | (!is.null(targets.use))) {
    # Restrict to interactions involving the requested sources/targets,
    # then rebuild the (source x target) matrix with the full cell-level order.
    df.net <- reshape2::melt(net, value.name = "value")
    colnames(df.net)[1:2] <- c("source","target")
    if (!is.null(sources.use)){
      if (is.numeric(sources.use)) {
        sources.use <- rownames(net.diff)[sources.use]
      }
      df.net <- subset(df.net, source %in% sources.use)
    }
    if (!is.null(targets.use)){
      if (is.numeric(targets.use)) {
        targets.use <- rownames(net.diff)[targets.use]
      }
      df.net <- subset(df.net, target %in% targets.use)
    }
    cells.level <- rownames(net.diff)
    df.net$source <- factor(df.net$source, levels = cells.level)
    df.net$target <- factor(df.net$target, levels = cells.level)
    df.net$value[is.na(df.net$value)] <- 0
    net <- tapply(df.net[["value"]], list(df.net[["source"]], df.net[["target"]]), sum)
  }
  net[is.na(net)] <- 0
  if (remove.isolate) {
    # A group is isolated when it neither sends nor receives anything.
    idx1 <- which(Matrix::rowSums(net) == 0)
    idx2 <- which(Matrix::colSums(net) == 0)
    idx <- intersect(idx1, idx2)
    # BUGFIX: only subset when isolated groups exist; `net[-integer(0), ]`
    # would otherwise select zero rows and empty the matrix.
    # `drop = FALSE` keeps the result a matrix even with one remaining row/col.
    if (length(idx) > 0) {
      net <- net[-idx, , drop = FALSE]
      net <- net[, -idx, drop = FALSE]
    }
  }
  mat <- net
  if (is.null(color.use)) {
    color.use <- scPalette(ncol(mat))
  }
  names(color.use) <- colnames(mat)
  if (!is.null(row.show)) {
    mat <- mat[row.show, ]
  }
  if (!is.null(col.show)) {
    mat <- mat[ ,col.show]
    color.use <- color.use[col.show]
  }
  if (min(mat) < 0) {
    # Diverging palette centered at 0 for differential matrices.
    color.heatmap.use = colorRamp3(c(min(mat), 0, max(mat)), c(color.heatmap[1], "#f7f7f7", color.heatmap[2]))
    colorbar.break <- c(round(min(mat, na.rm = T), digits = nchar(sub(".*\\.(0*).*","\\1",min(mat, na.rm = T)))+1), 0, round(max(mat, na.rm = T), digits = nchar(sub(".*\\.(0*).*","\\1",max(mat, na.rm = T)))+1))
  } else {
    # Sequential palette for non-negative matrices; color.heatmap may be
    # 3 colors, 2 colors, or a single brewer.pal palette name.
    if (length(color.heatmap) == 3) {
      color.heatmap.use = colorRamp3(c(0, min(mat), max(mat)), color.heatmap)
    } else if (length(color.heatmap) == 2) {
      color.heatmap.use = colorRamp3(c(min(mat), max(mat)), color.heatmap)
    } else if (length(color.heatmap) == 1) {
      color.heatmap.use = grDevices::colorRampPalette((RColorBrewer::brewer.pal(n = 9, name = color.heatmap)))(100)
    }
    colorbar.break <- c(round(min(mat, na.rm = T), digits = nchar(sub(".*\\.(0*).*","\\1",min(mat, na.rm = T)))+1), round(max(mat, na.rm = T), digits = nchar(sub(".*\\.(0*).*","\\1",max(mat, na.rm = T)))+1))
  }
  # Colored group annotations on the bottom/left; bar plots of row/column sums
  # (total outgoing/incoming strength) on the right/top.
  df<- data.frame(group = colnames(mat)); rownames(df) <- colnames(mat)
  col_annotation <- HeatmapAnnotation(df = df, col = list(group = color.use),which = "column",
                                      show_legend = FALSE, show_annotation_name = FALSE,
                                      simple_anno_size = grid::unit(0.2, "cm"))
  row_annotation <- HeatmapAnnotation(df = df, col = list(group = color.use), which = "row",
                                      show_legend = FALSE, show_annotation_name = FALSE,
                                      simple_anno_size = grid::unit(0.2, "cm"))
  ha1 = rowAnnotation(Strength = anno_barplot(rowSums(abs(mat)), border = FALSE,gp = gpar(fill = color.use, col=color.use)), show_annotation_name = FALSE)
  ha2 = HeatmapAnnotation(Strength = anno_barplot(colSums(abs(mat)), border = FALSE,gp = gpar(fill = color.use, col=color.use)), show_annotation_name = FALSE)
  if (sum(abs(mat) > 0) == 1) {
    # A single non-zero cell cannot span a color ramp; prepend white instead
    # of converting zeros to NA.
    color.heatmap.use = c("white", color.heatmap.use)
  } else {
    # Show exact zeros as white (na_col) rather than the lowest ramp color.
    mat[mat == 0] <- NA
  }
  ht1 = Heatmap(mat, col = color.heatmap.use, na_col = "white", name = legend.name,
                bottom_annotation = col_annotation, left_annotation =row_annotation, top_annotation = ha2, right_annotation = ha1,
                # BUGFIX: column clustering now honors `cluster.cols`
                # (previously `cluster.rows` was passed twice).
                cluster_rows = cluster.rows,cluster_columns = cluster.cols,
                row_names_side = "left",row_names_rot = 0,row_names_gp = gpar(fontsize = font.size),column_names_gp = gpar(fontsize = font.size),
                # width = unit(width, "cm"), height = unit(height, "cm"),  # NOTE: width/height args currently unused
                column_title = title.name,column_title_gp = gpar(fontsize = font.size.title),column_names_rot = 90,
                row_title = "Sources (Sender)",row_title_gp = gpar(fontsize = font.size.title),row_title_rot = 90,
                heatmap_legend_param = list(title_gp = gpar(fontsize = 8, fontface = "plain"),title_position = "leftcenter-rot",
                                            border = NA, #at = colorbar.break,
                                            legend_height = unit(20, "mm"),labels_gp = gpar(fontsize = 8),grid_width = unit(2, "mm"))
  )
  return(ht1)
}
#' Show all the significant interactions (L-R pairs) from some cell groups to other cell groups
#'
#' The dot color and size represent the calculated communication probability and p-values.
#'
#' @param object CellChat object
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param signaling a character vector giving the name of signaling pathways of interest
#' @param pairLR.use a data frame consisting of one column named either "interaction_name" or "pathway_name", defining the interactions of interest
#' @param color.heatmap A character string or vector indicating the colormap option to use. It can be an available color palette in viridis_pal() or brewer.pal()
#' @param direction Sets the order of colors in the scale. If 1, the default colors are used. If -1, the order of colors is reversed.
#' @param n.colors number of basic colors to generate from color palette
#' @param thresh threshold of the p-value for determining significant interaction
#' @param comparison a numerical vector giving the datasets for comparison in the merged object; e.g., comparison = c(1,2)
#' @param group a numerical vector giving the group information of different datasets; e.g., group = c(1,2,2)
#' @param remove.isolate whether remove the entire empty column, i.e., communication between certain cell groups
#' @param max.dataset a scalar; keep the communications with the highest probability in max.dataset (i.e., a certain condition)
#' @param min.dataset a scalar; keep the communications with the lowest probability in min.dataset (i.e., a certain condition)
#' @param min.quantile,max.quantile minimum and maximum quantile cutoff values for the colorbar, may specify quantile in [0,1]
#' @param line.on whether add vertical line when doing comparison analysis for the merged object
#' @param line.size size of vertical line if added
#' @param color.text.use whether color the xtick labels according to the dataset origin when doing comparison analysis
#' @param color.text the colors for xtick labels according to the dataset origin when doing comparison analysis
#' @param title.name main title of the plot
#' @param font.size,font.size.title font size of all the text and the title name
#' @param show.legend whether show legend
#' @param grid.on,color.grid whether add grid
#' @param angle.x,vjust.x,hjust.x parameters for adjusting the rotation of xtick labels
#' @param return.data whether return the data.frame for replotting
#'
#' @return a ggplot object, or a list containing the plotting data frame and the ggplot object when return.data = TRUE
#' @export
#'
#' @examples
#'\dontrun{
#' # show all the significant interactions (L-R pairs) from some cell groups (defined by 'sources.use') to other cell groups (defined by 'targets.use')
#' netVisual_bubble(cellchat, sources.use = 4, targets.use = c(5:11), remove.isolate = FALSE)
#'
#' # show all the significant interactions (L-R pairs) associated with certain signaling pathways
#' netVisual_bubble(cellchat, sources.use = 4, targets.use = c(5:11), signaling = c("CCL","CXCL"))
#'
#' # show all the significant interactions (L-R pairs) based on user's input (defined by `pairLR.use`)
#' pairLR.use <- extractEnrichedLR(cellchat, signaling = c("CCL","CXCL","FGF"))
#' netVisual_bubble(cellchat, sources.use = c(3,4), targets.use = c(5:8), pairLR.use = pairLR.use, remove.isolate = TRUE)
#'
#'# show all the increased interactions in the second dataset compared to the first dataset
#' netVisual_bubble(cellchat, sources.use = 4, targets.use = c(5:8), remove.isolate = TRUE, max.dataset = 2)
#'
#'# show all the decreased interactions in the second dataset compared to the first dataset
#' netVisual_bubble(cellchat, sources.use = 4, targets.use = c(5:8), remove.isolate = TRUE, max.dataset = 1)
#'}
netVisual_bubble <- function(object, sources.use = NULL, targets.use = NULL, signaling = NULL, pairLR.use = NULL, color.heatmap = c("Spectral","viridis"), n.colors = 10, direction = -1, thresh = 0.05,
                             comparison = NULL, group = NULL, remove.isolate = FALSE, max.dataset = NULL, min.dataset = NULL,
                             min.quantile = 0, max.quantile = 1, line.on = TRUE, line.size = 0.2, color.text.use = TRUE, color.text = NULL,
                             title.name = NULL, font.size = 10, font.size.title = 10, show.legend = TRUE,
                             grid.on = TRUE, color.grid = "grey90", angle.x = 90, vjust.x = NULL, hjust.x = NULL,
                             return.data = FALSE){
  # Bubble plot of significant L-R interactions: dot color encodes the
  # communication probability, dot size encodes the binned p-value.
  color.heatmap <- match.arg(color.heatmap)
  if (is.list(object@net[[1]])) {
    message("Comparing communications on a merged object \n")
  } else {
    message("Comparing communications on a single object \n")
  }
  if (is.null(vjust.x) | is.null(hjust.x)) {
    # Pick sensible xtick-label justification for the supported angles.
    angle=c(0, 45, 90)
    hjust=c(0, 1, 1)
    vjust=c(0, 1, 0.5)
    vjust.x = vjust[angle == angle.x]
    hjust.x = hjust[angle == angle.x]
  }
  if (length(color.heatmap) == 1) {
    # brewer.pal errors on unknown palette names; fall back to viridis then.
    color.use <- tryCatch({
      RColorBrewer::brewer.pal(n = n.colors, name = color.heatmap)
    }, error = function(e) {
      scales::viridis_pal(option = color.heatmap, direction = -1)(n.colors)
    })
  } else {
    color.use <- color.heatmap
  }
  if (direction == -1) {
    color.use <- rev(color.use)
  }
  if (is.null(comparison)) {
    # ---- Single-object branch ----
    cells.level <- levels(object@idents)
    if (is.numeric(sources.use)) {
      sources.use <- cells.level[sources.use]
    }
    if (is.numeric(targets.use)) {
      targets.use <- cells.level[targets.use]
    }
    df.net <- subsetCommunication(object, slot.name = "net",
                                  sources.use = sources.use, targets.use = targets.use,
                                  signaling = signaling,
                                  pairLR.use = pairLR.use,
                                  thresh = thresh)
    df.net$source.target <- paste(df.net$source, df.net$target, sep = " -> ")
    # Add placeholder rows (pval = 1, prob = NA) for requested source->target
    # pairs with no detected communication, so the x-axis stays complete.
    source.target <- paste(rep(sources.use, each = length(targets.use)), targets.use, sep = " -> ")
    source.target.isolate <- setdiff(source.target, unique(df.net$source.target))
    if (length(source.target.isolate) > 0) {
      df.net.isolate <- as.data.frame(matrix(NA, nrow = length(source.target.isolate), ncol = ncol(df.net)))
      colnames(df.net.isolate) <- colnames(df.net)
      df.net.isolate$source.target <- source.target.isolate
      df.net.isolate$interaction_name_2 <- df.net$interaction_name_2[1]
      df.net.isolate$pval <- 1
      a <- stringr::str_split(df.net.isolate$source.target, " -> ", simplify = T)
      df.net.isolate$source <- as.character(a[, 1])
      df.net.isolate$target <- as.character(a[, 2])
      df.net <- rbind(df.net, df.net.isolate)
    }
    # Bin p-values into three dot sizes: 1 (>0.05), 2 (0.01-0.05), 3 (<=0.01).
    df.net$pval[df.net$pval > 0.05] = 1
    df.net$pval[df.net$pval > 0.01 & df.net$pval <= 0.05] = 2
    df.net$pval[df.net$pval <= 0.01] = 3
    df.net$prob[df.net$prob == 0] <- NA
    df.net$prob.original <- df.net$prob
    # -1/log(p) compresses the probability range for coloring; p close to 1
    # yields Inf or negative values, which are remapped below.
    df.net$prob <- -1/log(df.net$prob)
    idx1 <- which(is.infinite(df.net$prob) | df.net$prob < 0)
    if (length(idx1) > 0) {
      # Spread the degenerate values above the current max, preserving the
      # rank order of the original probabilities.
      values.assign <- seq(max(df.net$prob, na.rm = T)*1.1, max(df.net$prob, na.rm = T)*1.5, length.out = length(idx1))
      # BUGFIX: `prob.original` was referenced as a free variable; it is a
      # column of df.net (cf. the identical logic in the merged-object branch).
      position <- sort(df.net$prob.original[idx1], index.return = TRUE)$ix
      df.net$prob[idx1] <- values.assign[match(1:length(idx1), position)]
    }
    df.net$source <- factor(df.net$source, levels = cells.level[cells.level %in% unique(df.net$source)])
    df.net$target <- factor(df.net$target, levels = cells.level[cells.level %in% unique(df.net$target)])
    group.names <- paste(rep(levels(df.net$source), each = length(levels(df.net$target))), levels(df.net$target), sep = " -> ")
    df.net$interaction_name_2 <- as.character(df.net$interaction_name_2)
    df.net <- with(df.net, df.net[order(interaction_name_2),])
    df.net$interaction_name_2 <- factor(df.net$interaction_name_2, levels = unique(df.net$interaction_name_2))
    cells.order <- group.names
    df.net$source.target <- factor(df.net$source.target, levels = cells.order)
    df <- df.net
  } else {
    # ---- Merged-object branch: stack the datasets named in `comparison` ----
    dataset.name <- names(object@net)
    df.net.all <- subsetCommunication(object, slot.name = "net",
                                      sources.use = sources.use, targets.use = targets.use,
                                      signaling = signaling,
                                      pairLR.use = pairLR.use,
                                      thresh = thresh)
    df.all <- data.frame()
    for (ii in 1:length(comparison)) {
      cells.level <- levels(object@idents[[comparison[ii]]])
      if (is.numeric(sources.use)) {
        sources.use <- cells.level[sources.use]
      }
      if (is.numeric(targets.use)) {
        targets.use <- cells.level[targets.use]
      }
      df.net <- df.net.all[[comparison[ii]]]
      df.net$interaction_name_2 <- as.character(df.net$interaction_name_2)
      df.net$source.target <- paste(df.net$source, df.net$target, sep = " -> ")
      source.target <- paste(rep(sources.use, each = length(targets.use)), targets.use, sep = " -> ")
      source.target.isolate <- setdiff(source.target, unique(df.net$source.target))
      if (length(source.target.isolate) > 0) {
        # Placeholder rows keep empty source->target columns visible.
        df.net.isolate <- as.data.frame(matrix(NA, nrow = length(source.target.isolate), ncol = ncol(df.net)))
        colnames(df.net.isolate) <- colnames(df.net)
        df.net.isolate$source.target <- source.target.isolate
        df.net.isolate$interaction_name_2 <- df.net$interaction_name_2[1]
        df.net.isolate$pval <- 1
        a <- stringr::str_split(df.net.isolate$source.target, " -> ", simplify = T)
        df.net.isolate$source <- as.character(a[, 1])
        df.net.isolate$target <- as.character(a[, 2])
        df.net <- rbind(df.net, df.net.isolate)
      }
      df.net$source <- factor(df.net$source, levels = cells.level[cells.level %in% unique(df.net$source)])
      df.net$target <- factor(df.net$target, levels = cells.level[cells.level %in% unique(df.net$target)])
      group.names <- paste(rep(levels(df.net$source), each = length(levels(df.net$target))), levels(df.net$target), sep = " -> ")
      group.names0 <- group.names
      group.names <- paste0(group.names0, " (", dataset.name[comparison[ii]], ")")
      if (nrow(df.net) > 0) {
        # Same p-value binning / probability transform as the single-object branch.
        df.net$pval[df.net$pval > 0.05] = 1
        df.net$pval[df.net$pval > 0.01 & df.net$pval <= 0.05] = 2
        df.net$pval[df.net$pval <= 0.01] = 3
        df.net$prob[df.net$prob == 0] <- NA
        df.net$prob.original <- df.net$prob
        df.net$prob <- -1/log(df.net$prob)
      } else {
        df.net <- as.data.frame(matrix(NA, nrow = length(group.names), ncol = 5))
        colnames(df.net) <- c("interaction_name_2","source.target","prob","pval","prob.original")
        df.net$source.target <- group.names0
      }
      df.net$group.names <- as.character(df.net$source.target)
      # Suffix each column label with its dataset of origin.
      df.net$source.target <- paste0(df.net$source.target, " (", dataset.name[comparison[ii]], ")")
      df.net$dataset <- dataset.name[comparison[ii]]
      df.all <- rbind(df.all, df.net)
    }
    if (nrow(df.all) == 0) {
      stop("No interactions are detected. Please consider changing the cell groups for analysis. ")
    }
    idx1 <- which(is.infinite(df.all$prob) | df.all$prob < 0)
    if (length(idx1) > 0) {
      values.assign <- seq(max(df.all$prob, na.rm = T)*1.1, max(df.all$prob, na.rm = T)*1.5, length.out = length(idx1))
      position <- sort(df.all$prob.original[idx1], index.return = TRUE)$ix
      df.all$prob[idx1] <- values.assign[match(1:length(idx1), position)]
    }
    df.all$interaction_name_2[is.na(df.all$interaction_name_2)] <- df.all$interaction_name_2[!is.na(df.all$interaction_name_2)][1]
    df <- df.all
    df <- with(df, df[order(interaction_name_2),])
    df$interaction_name_2 <- factor(df$interaction_name_2, levels = unique(df$interaction_name_2))
    # Interleave datasets within each source->target pair on the x-axis.
    cells.order <- c()
    dataset.name.order <- c()
    for (i in 1:length(group.names0)) {
      for (j in 1:length(comparison)) {
        cells.order <- c(cells.order, paste0(group.names0[i], " (", dataset.name[comparison[j]], ")"))
        dataset.name.order <- c(dataset.name.order, dataset.name[comparison[j]])
      }
    }
    df$source.target <- factor(df$source.target, levels = cells.order)
  }
  # Clamp the colorbar to the requested quantile range.
  min.cutoff <- quantile(df$prob, min.quantile,na.rm= T)
  max.cutoff <- quantile(df$prob, max.quantile,na.rm= T)
  df$prob[df$prob < min.cutoff] <- min.cutoff
  df$prob[df$prob > max.cutoff] <- max.cutoff
  if (remove.isolate) {
    df <- df[!is.na(df$prob), ]
    line.on <- FALSE
  }
  if (!is.null(max.dataset)) {
    # Keep only interactions whose max (and optionally min) probability occurs
    # in the requested dataset(s); everything else is blanked out (NA).
    signaling <- as.character(unique(df$interaction_name_2))
    for (i in signaling) {
      df.i <- df[df$interaction_name_2 == i, ,drop = FALSE]
      cell <- as.character(unique(df.i$group.names))
      for (j in cell) {
        df.i.j <- df.i[df.i$group.names == j, , drop = FALSE]
        values <- df.i.j$prob
        idx.max <- which(values == max(values, na.rm = T))
        idx.min <- which(values == min(values, na.rm = T))
        dataset.na <- c(df.i.j$dataset[is.na(values)], setdiff(dataset.name[comparison], df.i.j$dataset))
        if (length(idx.max) > 0) {
          if (!(df.i.j$dataset[idx.max] %in% dataset.name[max.dataset])) {
            df.i.j$prob <- NA
          } else if ((idx.max != idx.min) & !is.null(min.dataset)) {
            if (!(df.i.j$dataset[idx.min] %in% dataset.name[min.dataset])) {
              df.i.j$prob <- NA
            } else if (length(dataset.na) > 0 & sum(!(dataset.name[min.dataset] %in% dataset.na)) > 0) {
              df.i.j$prob <- NA
            }
          }
        }
        df.i[df.i$group.names == j, "prob"] <- df.i.j$prob
      }
      df[df$interaction_name_2 == i, "prob"] <- df.i$prob
    }
  }
  if (remove.isolate) {
    # Re-applied after max.dataset filtering may have introduced new NAs.
    df <- df[!is.na(df$prob), ]
    line.on <- FALSE
  }
  if (nrow(df) == 0) {
    stop("No interactions are detected. Please consider changing the cell groups for analysis. ")
  }
  df$interaction_name_2 <- factor(df$interaction_name_2, levels = unique(df$interaction_name_2))
  df$source.target = droplevels(df$source.target, exclude = setdiff(levels(df$source.target),unique(df$source.target)))
  g <- ggplot(df, aes(x = source.target, y = interaction_name_2, color = prob, size = pval)) +
    geom_point(pch = 16) +
    theme_linedraw() + theme(panel.grid.major = element_blank()) +
    theme(axis.text.x = element_text(angle = angle.x, hjust= hjust.x, vjust = vjust.x),
          axis.title.x = element_blank(),
          axis.title.y = element_blank()) +
    scale_x_discrete(position = "bottom")
  values <- c(1,2,3); names(values) <- c("p > 0.05", "0.01 < p < 0.05","p < 0.01")
  g <- g + scale_radius(range = c(min(df$pval), max(df$pval)), breaks = sort(unique(df$pval)),labels = names(values)[values %in% sort(unique(df$pval))], name = "p-value")
  if (min(df$prob, na.rm = T) != max(df$prob, na.rm = T)) {
    g <- g + scale_colour_gradientn(colors = colorRampPalette(color.use)(99), na.value = "white", limits=c(quantile(df$prob, 0,na.rm= T), quantile(df$prob, 1,na.rm= T)),
                                    breaks = c(quantile(df$prob, 0,na.rm= T), quantile(df$prob, 1,na.rm= T)), labels = c("min","max")) +
      guides(color = guide_colourbar(barwidth = 0.5, title = "Commun. Prob."))
  } else {
    # A constant probability cannot define colorbar limits.
    g <- g + scale_colour_gradientn(colors = colorRampPalette(color.use)(99), na.value = "white") +
      guides(color = guide_colourbar(barwidth = 0.5, title = "Commun. Prob."))
  }
  g <- g + theme(text = element_text(size = font.size),plot.title = element_text(size=font.size.title)) +
    theme(legend.title = element_text(size = 8), legend.text = element_text(size = 6))
  if (grid.on) {
    if (length(unique(df$source.target)) > 1) {
      g <- g + geom_vline(xintercept=seq(1.5, length(unique(df$source.target))-0.5, 1),lwd=0.1,colour=color.grid)
    }
    if (length(unique(df$interaction_name_2)) > 1) {
      g <- g + geom_hline(yintercept=seq(1.5, length(unique(df$interaction_name_2))-0.5, 1),lwd=0.1,colour=color.grid)
    }
  }
  if (!is.null(title.name)) {
    g <- g + ggtitle(title.name) + theme(plot.title = element_text(hjust = 0.5))
  }
  if (!is.null(comparison)) {
    if (line.on) {
      # Dashed separators between source->target groups.
      xintercept = seq(0.5+length(dataset.name[comparison]), length(group.names0)*length(dataset.name[comparison]), by = length(dataset.name[comparison]))
      g <- g + geom_vline(xintercept = xintercept, linetype="dashed", color = "grey60", size = line.size)
    }
    if (color.text.use) {
      # Color xtick labels by their dataset of origin, parsed from the
      # trailing "(dataset)" suffix of each level.
      if (is.null(group)) {
        group <- 1:length(comparison)
        names(group) <- dataset.name[comparison]
      }
      if (is.null(color.text)) {
        color <- ggPalette(length(unique(group)))
      } else {
        color <- color.text
      }
      names(color) <- names(group[!duplicated(group)])
      color <- color[group]
      dataset.name.order <- levels(df$source.target)
      dataset.name.order <- stringr::str_match(dataset.name.order, "\\(.*\\)")
      dataset.name.order <- stringr::str_sub(dataset.name.order, 2, stringr::str_length(dataset.name.order)-1)
      xtick.color <- color[dataset.name.order]
      g <- g + theme(axis.text.x = element_text(colour = xtick.color))
    }
  }
  if (!show.legend) {
    g <- g + theme(legend.position = "none")
  }
  if (return.data) {
    return(list(communication = df, gg.obj = g))
  } else {
    return(g)
  }
}
#' Chord diagram for visualizing cell-cell communication for a signaling pathway
#'
#' Names of cell states will be displayed in this chord diagram
#'
#' @param object CellChat object
#' @param signaling a character vector giving the name of signaling networks
#' @param net a weighted matrix or a data frame with three columns defining the cell-cell communication network
#' @param slot.name the slot name of object: slot.name = "net" when visualizing cell-cell communication network per each ligand-receptor pair associated with a given signaling pathway;
#' slot.name = "netP" when visualizing cell-cell communication network at the level of signaling pathways
#' @param color.use colors for the cell groups
#' @param group A named group labels for making multiple-group Chord diagrams. The sector names should be used as the names in the vector.
#' The order of group controls the sector orders and if group is set as a factor, the order of levels controls the order of groups.
#' @param cell.order a char vector defining the cell type orders (sector orders)
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param lab.cex font size for the text
#' @param small.gap Small gap between sectors.
#' @param big.gap Gap between the different sets of sectors, which are defined in the `group` parameter
#' @param annotationTrackHeight annotationTrack Height
#' @param remove.isolate whether remove sectors without any links
#' @param link.visible whether to plot the link. The value is logical; if it is set to FALSE, the corresponding link will not be plotted, but the space is still occupied. The format is a matrix with names or a data frame with three columns
#' @param scale scale each sector to same width; default = FALSE; however, it is set to be TRUE when remove.isolate = TRUE
#' @param link.target.prop If the Chord diagram is directional, for each source sector, whether to draw bars that shows the proportion of target sectors.
#' @param reduce if the ratio of the width of certain grid compared to the whole circle is less than this value, the grid is removed on the plot. Set it to value less than zero if you want to keep all tiny grid.
#' @param directional Whether links have directions. 1 means the direction is from the first column in df to the second column, -1 is the reverse, 0 is no direction, and 2 for two directional.
#' @param transparency Transparency of link colors
#' @param link.border border for links, single scalar or a matrix with names or a data frame with three columns
#' @param title.name title name
#' @param show.legend whether show the figure legend
#' @param legend.pos.x,legend.pos.y adjust the legend position
#' @param nCol number of columns when displaying the figures
#' @param thresh threshold of the p-value for determining significant interaction when visualizing links at the level of ligands/receptors;
#' @param ... other parameters passing to chordDiagram
#' @return an object of class "recordedplot"
#' @export
netVisual_chord_cell <- function(object, signaling = NULL, net = NULL, slot.name = "netP",
                                 color.use = NULL,group = NULL,cell.order = NULL,
                                 sources.use = NULL, targets.use = NULL,
                                 lab.cex = 0.8,small.gap = 1, big.gap = 10, annotationTrackHeight = c(0.03),
                                 remove.isolate = FALSE, link.visible = TRUE, scale = FALSE, directional = 1,link.target.prop = TRUE, reduce = -1,
                                 transparency = 0.4, link.border = NA,
                                 title.name = NULL, show.legend = FALSE, legend.pos.x = 20, legend.pos.y = 20, nCol = NULL,
                                 thresh = 0.05,...){
  # Dispatch: either derive the network(s) from `signaling` (a pathway name),
  # or plot a user-supplied `net` matrix/data frame directly.
  if (!is.null(signaling)) {
    # Look up all ligand-receptor pairs annotated to the requested pathway.
    pairLR <- searchPair(signaling = signaling, pairLR.use = object@LR$LRsig, key = "pathway_name", matching.exact = T, pair.only = F)
    net <- object@net
    pairLR.use.name <- dimnames(net$prob)[[3]]
    # Keep only the pairs that are actually present in the inferred network.
    pairLR.name <- intersect(rownames(pairLR), pairLR.use.name)
    pairLR <- pairLR[pairLR.name, ]
    prob <- net$prob
    pval <- net$pval
    # Zero out communications that are not significant at `thresh`.
    prob[pval > thresh] <- 0
    if (length(pairLR.name) > 1) {
      # Drop L-R pairs whose probability slice is entirely zero.
      pairLR.name.use <- pairLR.name[apply(prob[,,pairLR.name], 3, sum) != 0]
    } else {
      # With a single pair, prob[,,pairLR.name] is a matrix, so sum directly.
      pairLR.name.use <- pairLR.name[sum(prob[,,pairLR.name]) != 0]
    }
    if (length(pairLR.name.use) == 0) {
      stop(paste0('There is no significant communication of ', signaling))
    } else {
      pairLR <- pairLR[pairLR.name.use,]
    }
    nRow <- length(pairLR.name.use)
    prob <- prob[,,pairLR.name.use]
    if (length(dim(prob)) == 2) {
      # Subsetting to one pair dropped the 3rd dimension; restore it so the
      # code below can uniformly index prob[,,i].
      prob <- replicate(1, prob, simplify="array")
    }
    if (slot.name == "netP") {
      message("Plot the aggregated cell-cell communication network at the signaling pathway level")
      # Aggregate across all L-R pairs into one cell-by-cell matrix.
      net <- apply(prob, c(1,2), sum)
      if (is.null(title.name)) {
        title.name <- paste0(signaling, " signaling pathway network")
      }
      gg <- netVisual_chord_cell_internal(net, color.use = color.use, group = group, cell.order = cell.order, sources.use = sources.use, targets.use = targets.use,
                                          lab.cex = lab.cex,small.gap = small.gap, annotationTrackHeight = annotationTrackHeight,
                                          remove.isolate = remove.isolate, link.visible = link.visible, scale = scale, directional = directional,link.target.prop = link.target.prop, reduce = reduce,
                                          transparency = transparency, link.border = link.border,
                                          title.name = title.name, show.legend = show.legend, legend.pos.x = legend.pos.x, legend.pos.y = legend.pos.y, ...)
    } else if (slot.name == "net") {
      message("Plot the cell-cell communication network per each ligand-receptor pair associated with a given signaling pathway")
      if (is.null(nCol)) {
        nCol <- min(length(pairLR.name.use), 2)
      }
      # One chord diagram per L-R pair, arranged on a multi-panel device.
      par(mfrow = c(ceiling(length(pairLR.name.use)/nCol), nCol), xpd=TRUE)
      gg <- vector("list", length(pairLR.name.use))
      for (i in 1:length(pairLR.name.use)) {
        title.name <- pairLR$interaction_name_2[i]
        net <- prob[,,i]
        gg[[i]] <- netVisual_chord_cell_internal(net, color.use = color.use, group = group,cell.order = cell.order,sources.use = sources.use, targets.use = targets.use,
                                                 lab.cex = lab.cex,small.gap = small.gap, annotationTrackHeight = annotationTrackHeight,
                                                 remove.isolate = remove.isolate, link.visible = link.visible, scale = scale, directional = directional,link.target.prop = link.target.prop, reduce = reduce,
                                                 transparency = transparency, link.border = link.border,
                                                 title.name = title.name, show.legend = show.legend, legend.pos.x = legend.pos.x, legend.pos.y = legend.pos.y, ...)
      }
    }
  } else if (!is.null(net)) {
    # User-supplied network: plot it as-is.
    gg <- netVisual_chord_cell_internal(net, color.use = color.use, group = group,cell.order = cell.order,sources.use = sources.use, targets.use = targets.use,
                                        lab.cex = lab.cex,small.gap = small.gap, annotationTrackHeight = annotationTrackHeight,
                                        remove.isolate = remove.isolate, link.visible = link.visible, scale = scale, directional = directional,link.target.prop = link.target.prop, reduce = reduce,
                                        transparency = transparency, link.border = link.border,
                                        title.name = title.name, show.legend = show.legend, legend.pos.x = legend.pos.x,legend.pos.y=legend.pos.y, ...)
  } else {
    stop("Please assign values to either `signaling` or `net`")
  }
  # A recorded plot (pathway level) or a list of recorded plots (per L-R pair).
  return(gg)
}
#' Chord diagram for visualizing cell-cell communication from a weighted adjacency matrix or a data frame
#'
#' Names of cell states/groups will be displayed in this chord diagram
#'
#' @param net a weighted matrix or a data frame with three columns defining the cell-cell communication network
#' @param color.use colors for the cell groups
#' @param group A named group labels for making multiple-group Chord diagrams. The sector names should be used as the names in the vector.
#' The order of group controls the sector orders and if group is set as a factor, the order of levels controls the order of groups.
#' @param cell.order a char vector defining the cell type orders (sector orders)
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param lab.cex font size for the text
#' @param small.gap Small gap between sectors.
#' @param big.gap Gap between the different sets of sectors, which are defined in the `group` parameter
#' @param annotationTrackHeight annotationTrack Height
#' @param remove.isolate whether remove sectors without any links
#' @param link.visible whether to plot the link. The value is logical; if it is set to FALSE, the corresponding link will not be plotted, but the space is still occupied. The format is a matrix with names or a data frame with three columns
#' @param scale scale each sector to same width; default = FALSE; however, it is set to be TRUE when remove.isolate = TRUE
#' @param link.target.prop If the Chord diagram is directional, for each source sector, whether to draw bars that shows the proportion of target sectors.
#' @param reduce if the ratio of the width of certain grid compared to the whole circle is less than this value, the grid is removed on the plot. Set it to value less than zero if you want to keep all tiny grid.
#' @param directional Whether links have directions. 1 means the direction is from the first column in df to the second column, -1 is the reverse, 0 is no direction, and 2 for two directional.
#' @param transparency Transparency of link colors
#' @param link.border border for links, single scalar or a matrix with names or a data frame with three columns
#' @param title.name title name of the plot
#' @param show.legend whether show the figure legend
#' @param legend.pos.x,legend.pos.y adjust the legend position
#' @param ... other parameters passing to chordDiagram
#' @importFrom circlize circos.clear chordDiagram circos.track circos.text get.cell.meta.data
#' @importFrom grDevices recordPlot
#' @importFrom BiocGenerics union
#' @return an object of class "recordedplot"
#' @export
netVisual_chord_cell_internal <- function(net, color.use = NULL, group = NULL, cell.order = NULL,
sources.use = NULL, targets.use = NULL,
lab.cex = 0.8,small.gap = 1, big.gap = 10, annotationTrackHeight = c(0.03),
remove.isolate = FALSE, link.visible = TRUE, scale = FALSE, directional = 1, link.target.prop = TRUE, reduce = -1,
transparency = 0.4, link.border = NA,
title.name = NULL, show.legend = FALSE, legend.pos.x = 20, legend.pos.y = 20,...){
# Normalize the input into a long-format data frame with columns source/target/prob,
# and collect the full set of cell labels (chord-diagram sectors) before any filtering.
if (inherits(x = net, what = c("matrix", "Matrix"))) {
cell.levels <- union(rownames(net), colnames(net))
net <- reshape2::melt(net, value.name = "prob")
colnames(net)[1:2] <- c("source","target")
} else if (is.data.frame(net)) {
if (all(c("source","target", "prob") %in% colnames(net)) == FALSE) {
stop("The input data frame must contain three columns named as source, target, prob")
}
cell.levels <- as.character(union(net$source,net$target))
}
# A user-supplied cell order overrides the order derived from the input.
if (!is.null(cell.order)) {
cell.levels <- cell.order
}
net$source <- as.character(net$source)
net$target <- as.character(net$target)
# keep the interactions associated with sources and targets of interest;
# numeric indices are resolved against the cell labels first
if (!is.null(sources.use)){
if (is.numeric(sources.use)) {
sources.use <- cell.levels[sources.use]
}
net <- subset(net, source %in% sources.use)
}
if (!is.null(targets.use)){
if (is.numeric(targets.use)) {
targets.use <- cell.levels[targets.use]
}
net <- subset(net, target %in% targets.use)
}
# remove the interactions with zero values
net <- subset(net, prob > 0)
# create a fake data if keeping the cell types (i.e., sectors) without any interactions:
# tiny self-links (prob ~ 1e-10) reserve a sector for each isolated cell group, and
# link.visible becomes a per-link mask so these placeholder links are hidden.
if (!remove.isolate) {
cells.removed <- setdiff(cell.levels, as.character(union(net$source,net$target)))
if (length(cells.removed) > 0) {
net.fake <- data.frame(cells.removed, cells.removed, 1e-10*sample(length(cells.removed), length(cells.removed)))
colnames(net.fake) <- colnames(net)
net <- rbind(net, net.fake)
link.visible <- net[, 1:2]
link.visible$plot <- FALSE
# only the original (non-placeholder) links are drawn
link.visible$plot[1:(nrow(net) - nrow(net.fake))] <- TRUE
# directional <- net[, 1:2]
# directional$plot <- 0
# directional$plot[1:(nrow(net) - nrow(net.fake))] <- 1
# link.arr.type = "big.arrow"
# message("Set scale = TRUE when remove.isolate = FALSE")
scale = TRUE
}
}
df <- net
cells.use <- union(df$source,df$target)
# define grid order: keep the requested cell-level order, restricted to sectors in use
order.sector <- cell.levels[cell.levels %in% cells.use]
# define grid color; an unnamed user palette is assumed to follow cell.levels order
if (is.null(color.use)){
color.use = scPalette(length(cell.levels))
names(color.use) <- cell.levels
} else if (is.null(names(color.use))) {
names(color.use) <- cell.levels
}
grid.col <- color.use[order.sector]
names(grid.col) <- order.sector
# set grouping information, dropping groups whose sectors are not drawn
if (!is.null(group)) {
group <- group[names(group) %in% order.sector]
}
# define edge color: each link is colored by its source cell group
edge.color <- color.use[as.character(df$source)]
if (directional == 0 | directional == 2) {
link.arr.type = "triangle"
} else {
link.arr.type = "big.arrow"
}
# reset any existing circos state before drawing
circos.clear()
chordDiagram(df,
order = order.sector,
col = edge.color,
grid.col = grid.col,
transparency = transparency,
link.border = link.border,
directional = directional,
direction.type = c("diffHeight","arrows"),
link.arr.type = link.arr.type, # link.border = "white",
annotationTrack = "grid",
annotationTrackHeight = annotationTrackHeight,
preAllocateTracks = list(track.height = max(strwidth(order.sector))),
small.gap = small.gap,
big.gap = big.gap,
link.visible = link.visible,
scale = scale,
group = group,
link.target.prop = link.target.prop,
reduce = reduce,
...)
# add sector labels on the pre-allocated outer track
circos.track(track.index = 1, panel.fun = function(x, y) {
xlim = get.cell.meta.data("xlim")
xplot = get.cell.meta.data("xplot")
ylim = get.cell.meta.data("ylim")
sector.name = get.cell.meta.data("sector.index")
circos.text(mean(xlim), ylim[1], sector.name, facing = "clockwise", niceFacing = TRUE, adj = c(0, 0.5),cex = lab.cex)
}, bg.border = NA)
# https://jokergoo.github.io/circlize_book/book/legends.html
if (show.legend) {
lgd <- ComplexHeatmap::Legend(at = names(grid.col), type = "grid", legend_gp = grid::gpar(fill = grid.col), title = "Cell State")
ComplexHeatmap::draw(lgd, x = unit(1, "npc")-unit(legend.pos.x, "mm"), y = unit(legend.pos.y, "mm"), just = c("right", "bottom"))
}
if(!is.null(title.name)){
# title(title.name, cex = 1)
text(-0, 1.02, title.name, cex=1)
}
circos.clear()
# capture the finished base-graphics plot so it can be returned and replayed
gg <- recordPlot()
return(gg)
}
#' Chord diagram for visualizing cell-cell communication for a set of ligands/receptors or signaling pathways
#'
#' Names of ligands/receptors or signaling pathways will be displayed in this chord diagram
#'
#' @param object CellChat object
#' @param slot.name the slot name of object: slot.name = "net" when visualizing links at the level of ligands/receptors; slot.name = "netP" when visualizing links at the level of signaling pathways
#' @param signaling a character vector giving the name of signaling networks
#' @param pairLR.use a data frame consisting of one column named either "interaction_name" or "pathway_name", defining the interactions of interest
#' @param net A data frame consisting of the interactions of interest.
#' net should have at least three columns: "source","target" and "interaction_name" when visualizing links at the level of ligands/receptors;
#' "source","target" and "pathway_name" when visualizing links at the level of signaling pathway; "interaction_name" and "pathway_name" must be the matched names in CellChatDB$interaction.
#' @param sources.use a vector giving the index or the name of source cell groups
#' @param targets.use a vector giving the index or the name of target cell groups.
#' @param color.use colors for the cell groups
#' @param lab.cex font size for the text
#' @param small.gap Small gap between sectors.
#' @param big.gap Gap between the different sets of sectors, which are defined in the `group` parameter
#' @param annotationTrackHeight annotationTrack Height
#' @param link.visible whether to plot the link. The value is logical; if it is set to FALSE, the corresponding link will not be plotted, but the space is still occupied. The format is a matrix with names or a data frame with three columns
#' @param scale scale each sector to same width; default = FALSE; however, it is set to be TRUE when remove.isolate = TRUE
#' @param link.target.prop If the Chord diagram is directional, for each source sector, whether to draw bars that shows the proportion of target sectors.
#' @param reduce if the ratio of the width of certain grid compared to the whole circle is less than this value, the grid is removed on the plot. Set it to value less than zero if you want to keep all tiny grid.
#' @param directional Whether links have directions. 1 means the direction is from the first column in df to the second column, -1 is the reverse, 0 is no direction, and 2 for two directional.
#' @param transparency Transparency of link colors
#' @param link.border border for links, single scalar or a matrix with names or a data frame with three columns
#' @param title.name title name of the plot
#' @param show.legend whether show the figure legend
#' @param legend.pos.x,legend.pos.y adjust the legend position
#' @param thresh threshold of the p-value for determining significant interaction when visualizing links at the level of ligands/receptors;
#' @param ... other parameters to chordDiagram
#' @importFrom circlize circos.clear chordDiagram circos.track circos.text get.cell.meta.data
#' @importFrom dplyr select %>% group_by summarize
#' @importFrom grDevices recordPlot
#' @importFrom stringr str_split
#' @return an object of class "recordedplot"
#' @export
netVisual_chord_gene <- function(object, slot.name = "net", color.use = NULL,
signaling = NULL, pairLR.use = NULL, net = NULL,
sources.use = NULL, targets.use = NULL,
lab.cex = 0.8,small.gap = 1, big.gap = 10, annotationTrackHeight = c(0.03),
link.visible = TRUE, scale = FALSE, directional = 1, link.target.prop = TRUE, reduce = -1,
transparency = 0.4, link.border = NA,
title.name = NULL, legend.pos.x = 20, legend.pos.y = 20, show.legend = TRUE,
thresh = 0.05,
...){
# Validate pairLR.use and decide the visualization level (LR pairs vs pathways).
if (!is.null(pairLR.use)) {
if (!is.data.frame(pairLR.use)) {
stop("pairLR.use should be a data frame with a single column named either 'interaction_name' or 'pathway_name' ")
} else if ("pathway_name" %in% colnames(pairLR.use)) {
message("slot.name is set to be 'netP' when pairLR.use contains signaling pathways")
slot.name = "netP"
}
}
if (!is.null(pairLR.use) & !is.null(signaling)) {
stop("Please do not assign values to 'signaling' when using 'pairLR.use'")
}
# Build the long-format network from the object when `net` is not supplied:
# zero out interactions failing the p-value threshold, melt the 3D prob array
# into rows, and annotate each interaction with its LR-database metadata.
if (is.null(net)) {
prob <- slot(object, "net")$prob
pval <- slot(object, "net")$pval
prob[pval > thresh] <- 0
net <- reshape2::melt(prob, value.name = "prob")
colnames(net)[1:3] <- c("source","target","interaction_name")
pairLR = dplyr::select(object@LR$LRsig, c("interaction_name_2", "pathway_name", "ligand", "receptor" ,"annotation","evidence"))
idx <- match(net$interaction_name, rownames(pairLR))
temp <- pairLR[idx,]
net <- cbind(net, temp)
}
# Collect the LR pairs belonging to the requested signaling pathways.
if (!is.null(signaling)) {
pairLR.use <- data.frame()
for (i in seq_along(signaling)) {
pairLR.use.i <- searchPair(signaling = signaling[i], pairLR.use = object@LR$LRsig, key = "pathway_name", matching.exact = T, pair.only = T)
pairLR.use <- rbind(pairLR.use, pairLR.use.i)
}
}
# Restrict the network to the interactions of interest.
if (!is.null(pairLR.use)){
if ("interaction_name" %in% colnames(pairLR.use)) {
net <- subset(net,interaction_name %in% pairLR.use$interaction_name)
} else if ("pathway_name" %in% colnames(pairLR.use)) {
net <- subset(net, pathway_name %in% as.character(pairLR.use$pathway_name))
}
}
# At the pathway level, aggregate probabilities per (source, target, pathway)
# and reuse the ligand/receptor columns to carry the pathway label into sectors.
if (slot.name == "netP") {
net <- dplyr::select(net, c("source","target","pathway_name","prob"))
net$source_target <- paste(net$source, net$target, sep = "sourceTotarget")
net <- net %>% dplyr::group_by(source_target, pathway_name) %>% dplyr::summarize(prob = sum(prob))
a <- stringr::str_split(net$source_target, "sourceTotarget", simplify = T)
net$source <- as.character(a[, 1])
net$target <- as.character(a[, 2])
net$ligand <- net$pathway_name
net$receptor <- " "
}
# keep the interactions associated with sources and targets of interest;
# numeric indices are resolved against the cell-group levels
if (!is.null(sources.use)){
if (is.numeric(sources.use)) {
sources.use <- levels(object@idents)[sources.use]
}
net <- subset(net, source %in% sources.use)
} else {
sources.use <- levels(object@idents)
}
if (!is.null(targets.use)){
if (is.numeric(targets.use)) {
targets.use <- levels(object@idents)[targets.use]
}
net <- subset(net, target %in% targets.use)
} else {
targets.use <- levels(object@idents)
}
# remove the interactions with zero values
df <- subset(net, prob > 0)
if (nrow(df) == 0) {
stop("No signaling links are inferred! ")
}
if (length(unique(net$ligand)) == 1) {
message("You may try the function `netVisual_chord_cell` for visualizing individual signaling pathway")
}
df$id <- seq_len(nrow(df))
# deal with duplicated sector names: the same ligand emitted by different source
# groups (or receptor used by different targets) must map to distinct sectors,
# so trailing spaces are appended to disambiguate while keeping labels readable
ligand.uni <- unique(df$ligand)
for (i in seq_along(ligand.uni)) {
df.i <- df[df$ligand == ligand.uni[i], ]
source.uni <- unique(df.i$source)
for (j in seq_along(source.uni)) {
df.i.j <- df.i[df.i$source == source.uni[j], ]
df.i.j$ligand <- paste0(df.i.j$ligand, paste(rep(' ',j-1),collapse = ''))
df$ligand[df$id %in% df.i.j$id] <- df.i.j$ligand
}
}
receptor.uni <- unique(df$receptor)
for (i in seq_along(receptor.uni)) {
df.i <- df[df$receptor == receptor.uni[i], ]
target.uni <- unique(df.i$target)
for (j in seq_along(target.uni)) {
df.i.j <- df.i[df.i$target == target.uni[j], ]
df.i.j$receptor <- paste0(df.i.j$receptor, paste(rep(' ',j-1),collapse = ''))
df$receptor[df$id %in% df.i.j$id] <- df.i.j$receptor
}
}
# Order sectors by the cell-group factor levels, then by decreasing probability.
cell.order.sources <- levels(object@idents)[levels(object@idents) %in% sources.use]
cell.order.targets <- levels(object@idents)[levels(object@idents) %in% targets.use]
df$source <- factor(df$source, levels = cell.order.sources)
df$target <- factor(df$target, levels = cell.order.targets)
# df.ordered.source <- df[with(df, order(source, target, -prob)), ]
# df.ordered.target <- df[with(df, order(target, source, -prob)), ]
df.ordered.source <- df[with(df, order(source, -prob)), ]
df.ordered.target <- df[with(df, order(target, -prob)), ]
order.source <- unique(df.ordered.source[ ,c('ligand','source')])
order.target <- unique(df.ordered.target[ ,c('receptor','target')])
# define sector order: ligand sectors first, then receptor sectors
order.sector <- c(order.source$ligand, order.target$receptor)
# define cell type color, restricted to the groups actually present
if (is.null(color.use)){
color.use = scPalette(nlevels(object@idents))
names(color.use) <- levels(object@idents)
color.use <- color.use[levels(object@idents) %in% as.character(union(df$source,df$target))]
} else if (is.null(names(color.use))) {
names(color.use) <- levels(object@idents)
color.use <- color.use[levels(object@idents) %in% as.character(union(df$source,df$target))]
}
# define edge color: links inherit the color of their source cell group
edge.color <- color.use[as.character(df.ordered.source$source)]
names(edge.color) <- as.character(df.ordered.source$source)
# define grid colors: ligand sectors colored by source group, receptor sectors by target group
grid.col.ligand <- color.use[as.character(order.source$source)]
names(grid.col.ligand) <- as.character(order.source$source)
grid.col.receptor <- color.use[as.character(order.target$target)]
names(grid.col.receptor) <- as.character(order.target$target)
grid.col <- c(as.character(grid.col.ligand), as.character(grid.col.receptor))
names(grid.col) <- order.sector
df.plot <- df.ordered.source[ ,c('ligand','receptor','prob')]
if (directional == 2) {
link.arr.type = "triangle"
} else {
link.arr.type = "big.arrow"
}
# reset any existing circos state before drawing
circos.clear()
chordDiagram(df.plot,
order = order.sector,
col = edge.color,
grid.col = grid.col,
transparency = transparency,
link.border = link.border,
directional = directional,
direction.type = c("diffHeight","arrows"),
link.arr.type = link.arr.type,
annotationTrack = "grid",
annotationTrackHeight = annotationTrackHeight,
preAllocateTracks = list(track.height = max(strwidth(order.sector))),
small.gap = small.gap,
big.gap = big.gap,
link.visible = link.visible,
scale = scale,
link.target.prop = link.target.prop,
reduce = reduce,
...)
# add sector labels on the pre-allocated outer track
circos.track(track.index = 1, panel.fun = function(x, y) {
xlim = get.cell.meta.data("xlim")
xplot = get.cell.meta.data("xplot")
ylim = get.cell.meta.data("ylim")
sector.name = get.cell.meta.data("sector.index")
circos.text(mean(xlim), ylim[1], sector.name, facing = "clockwise", niceFacing = TRUE, adj = c(0, 0.5),cex = lab.cex)
}, bg.border = NA)
# https://jokergoo.github.io/circlize_book/book/legends.html
if (show.legend) {
lgd <- ComplexHeatmap::Legend(at = names(color.use), type = "grid", legend_gp = grid::gpar(fill = color.use), title = "Cell State")
ComplexHeatmap::draw(lgd, x = unit(1, "npc")-unit(legend.pos.x, "mm"), y = unit(legend.pos.y, "mm"), just = c("right", "bottom"))
}
circos.clear()
if(!is.null(title.name)){
text(-0, 1.02, title.name, cex=1)
}
# capture the finished base-graphics plot so it can be returned and replayed
gg <- recordPlot()
return(gg)
}
#' River plot showing the associations of latent patterns with cell groups and ligand-receptor pairs or signaling pathways
#'
#' River (alluvial) plot shows the correspondence between the inferred latent patterns and cell groups as well as ligand-receptor pairs or signaling pathways.
#'
#' The thickness of the flow indicates the contribution of the cell group or signaling pathway to each latent pattern. The height of each pattern is proportional to the number of its associated cell groups or signaling pathways.
#'
#' Outgoing patterns reveal how the sender cells coordinate with each other as well as how they coordinate with certain signaling pathways to drive communication.
#'
#' Incoming patterns show how the target cells coordinate with each other as well as how they coordinate with certain signaling pathways to respond to incoming signaling.
#'
#' @param object CellChat object
#' @param slot.name the slot name of object that is used to compute centrality measures of signaling networks
#' @param pattern "outgoing" or "incoming"
#' @param cutoff the threshold for filtering out weak links
#' @param sources.use a vector giving the index or the name of source cell groups of interest
#' @param targets.use a vector giving the index or the name of target cell groups of interest
#' @param signaling a character vector giving the name of signaling pathways of interest
#' @param color.use the character vector defining the color of each cell group
#' @param color.use.pattern the character vector defining the color of each pattern
#' @param color.use.signaling the character vector defining the color of each signaling
#' @param do.order whether reorder the cell groups or signaling according to their similarity
#' @param main.title the title of plot
#' @param font.size font size of the text
#' @param font.size.title font size of the title
#' @importFrom methods slot
#' @importFrom stats cutree dist hclust
#' @importFrom grDevices colorRampPalette
#' @importFrom RColorBrewer brewer.pal
#' @import ggalluvial
# #' @importFrom ggalluvial geom_stratum geom_flow to_lodes_form
#' @importFrom ggplot2 geom_text scale_x_discrete scale_fill_manual theme ggtitle
#' @importFrom cowplot plot_grid ggdraw draw_label
#' @return a ggplot object
#' @export
#'
#' @examples
netAnalysis_river <- function(object, slot.name = "netP", pattern = c("outgoing","incoming"), cutoff = 0.5,
sources.use = NULL, targets.use = NULL, signaling = NULL,
color.use = NULL, color.use.pattern = NULL, color.use.signaling = "grey50",
do.order = FALSE, main.title = NULL,
font.size = 2.5, font.size.title = 12){
message("Please make sure you have load `library(ggalluvial)` when running this function")
requireNamespace("ggalluvial")
# suppressMessages(require(ggalluvial))
# Retrieve the latent-pattern results computed by identifyCommunicationPatterns:
# data1 links cell groups to patterns, data2 links patterns to signaling pathways.
res.pattern <- methods::slot(object, slot.name)$pattern[[pattern]]
data1 = res.pattern$pattern$cell
data2 = res.pattern$pattern$signaling
# Default pattern colors: outgoing uses odd palette entries, incoming uses even
# ones, so the two directions get distinct but related hues.
if (is.null(color.use.pattern)) {
nPatterns <- length(unique(data1$Pattern))
if (pattern == "outgoing") {
color.use.pattern = ggPalette(nPatterns*2)[seq(1,nPatterns*2, by = 2)]
} else if (pattern == "incoming") {
color.use.pattern = ggPalette(nPatterns*2)[seq(2,nPatterns*2, by = 2)]
}
}
if (is.null(main.title)) {
if (pattern == "outgoing") {
main.title = "Outgoing communication patterns of secreting cells"
} else if (pattern == "incoming") {
main.title = "Incoming communication patterns of target cells"
}
}
# Case 1: no signaling-level data -> a single alluvial plot (cell groups <-> patterns).
if (is.null(data2)) {
# links weaker than the cutoff are zeroed out (dropped from the flow)
data1$Contribution[data1$Contribution < cutoff] <- 0
plot.data <- data1
nPatterns<-length(unique(plot.data$Pattern))
nCellGroup<-length(unique(plot.data$CellGroup))
if (is.null(color.use)) {
color.use <- scPalette(nCellGroup)
}
if (is.null(color.use.pattern)){
color.use.pattern <- ggPalette(nPatterns)
}
# reshape to lodes form so each row is one stratum membership of a flow
plot.data.long <- to_lodes_form(plot.data, axes = 1:2, id = "connection")
if (do.order) {
# reorder cell groups by hierarchical clustering of their pattern contributions
mat = tapply(plot.data[["Contribution"]], list(plot.data[["CellGroup"]], plot.data[["Pattern"]]), sum)
d <- dist(as.matrix(mat))
hc <- hclust(d, "ave")
k <- length(unique(grep("Pattern", plot.data.long$stratum[plot.data.long$Contribution != 0], value = T)))
cluster <- hc %>% cutree(k)
order.name <- order(cluster)
plot.data.long$stratum <- factor(plot.data.long$stratum, levels = c(names(cluster)[order.name], colnames(mat)))
color.use <- color.use[order.name]
}
color.use.all <- c(color.use, color.use.pattern)
gg <- ggplot(plot.data.long,aes(x = factor(x, levels = c("CellGroup", "Pattern")),y=Contribution,
stratum = stratum, alluvium = connection,
fill = stratum, label = stratum)) +
geom_flow(width = 1/3,aes.flow = "backward") +
geom_stratum(width=1/3,size=0.1,color="black", alpha = 0.8, linetype = 1) +
geom_text(stat = "stratum", size = font.size) +
scale_x_discrete(limits = c(), labels=c("Cell groups", "Patterns")) +
scale_fill_manual(values = alpha(color.use.all, alpha = 0.8), drop = FALSE) +
theme_bw()+
theme(legend.position = "none",
axis.title = element_blank(),
axis.text.y= element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
axis.ticks = element_blank(),axis.text=element_text(size=10))+
ggtitle(main.title)
} else {
# Case 2: two side-by-side alluvial plots:
# gg1 = cell groups <-> patterns, gg2 = patterns <-> signaling pathways.
data1$Contribution[data1$Contribution < cutoff] <- 0
plot.data <- data1
nPatterns<-length(unique(plot.data$Pattern))
nCellGroup<-length(unique(plot.data$CellGroup))
cells.level = levels(object@idents)
if (is.null(color.use)) {
color.use <- scPalette(length(cells.level))[cells.level %in% unique(plot.data$CellGroup)]
}
if (is.null(color.use.pattern)){
color.use.pattern <- ggPalette(nPatterns)
}
# restrict to the cell groups of interest; numeric indices are resolved
# against the factor levels of object@idents
if (!is.null(sources.use)) {
if (is.numeric(sources.use)) {
sources.use <- cells.level[sources.use]
}
plot.data <- subset(plot.data, CellGroup %in% sources.use)
}
if (!is.null(targets.use)) {
if (is.numeric(targets.use)) {
targets.use <- cells.level[targets.use]
}
plot.data <- subset(plot.data, CellGroup %in% targets.use)
}
## connect cell groups with patterns
plot.data.long <- to_lodes_form(plot.data, axes = 1:2, id = "connection")
if (do.order) {
# reorder cell groups by hierarchical clustering of their pattern contributions
mat = tapply(plot.data[["Contribution"]], list(plot.data[["CellGroup"]], plot.data[["Pattern"]]), sum)
d <- dist(as.matrix(mat))
hc <- hclust(d, "ave")
k <- length(unique(grep("Pattern", plot.data.long$stratum[plot.data.long$Contribution != 0], value = T)))
cluster <- hc %>% cutree(k)
order.name <- order(cluster)
plot.data.long$stratum <- factor(plot.data.long$stratum, levels = c(names(cluster)[order.name], colnames(mat)))
color.use <- color.use[order.name]
}
color.use.all <- c(color.use, color.use.pattern)
StatStratum <- ggalluvial::StatStratum
gg1 <- ggplot(plot.data.long,aes(x = factor(x, levels = c("CellGroup", "Pattern")),y=Contribution,
stratum = stratum, alluvium = connection,
fill = stratum, label = stratum)) +
geom_flow(width = 1/3,aes.flow = "backward") +
geom_stratum(width=1/3,size=0.1,color="black", alpha = 0.8, linetype = 1) +
geom_text(stat = "stratum", size = font.size) +
scale_x_discrete(limits = c(), labels=c("Cell groups", "Patterns")) +
scale_fill_manual(values = alpha(color.use.all, alpha = 0.8), drop = FALSE) +
theme_bw()+
theme(legend.position = "none",
axis.title = element_blank(),
axis.text.y= element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
axis.ticks = element_blank(),axis.text=element_text(size=10)) +
theme(plot.margin = unit(c(0, 0, 0, 0), "cm"))
## connect patterns with signaling
data2$Contribution[data2$Contribution < cutoff] <- 0
plot.data <- data2
nPatterns<-length(unique(plot.data$Pattern))
nSignaling<-length(unique(plot.data$Signaling))
# a single signaling color is recycled across all pathways
if (length(color.use.signaling) == 1) {
color.use.all <- c(color.use.pattern, rep(color.use.signaling, nSignaling))
} else {
color.use.all <- c(color.use.pattern, color.use.signaling)
}
if (!is.null(signaling)) {
plot.data <- plot.data[plot.data$Signaling %in% signaling, ]
}
plot.data.long <- ggalluvial::to_lodes_form(plot.data, axes = 1:2, id = "connection")
if (do.order) {
# reorder signaling pathways by hierarchical clustering of their pattern contributions
mat = tapply(plot.data[["Contribution"]], list(plot.data[["Signaling"]], plot.data[["Pattern"]]), sum)
d <- dist(as.matrix(mat))
hc <- hclust(d, "ave")
k <- length(unique(grep("Pattern", plot.data.long$stratum[plot.data.long$Contribution != 0], value = T)))
cluster <- hc %>% cutree(k)
order.name <- order(cluster)
plot.data.long$stratum <- factor(plot.data.long$stratum, levels = c(colnames(mat),names(cluster)[order.name]))
}
gg2 <- ggplot(plot.data.long,aes(x = factor(x, levels = c("Pattern", "Signaling")),y= Contribution,
stratum = stratum, alluvium = connection,
fill = stratum, label = stratum)) +
geom_flow(width = 1/3,aes.flow = "forward") +
geom_stratum(width=1/3,size=0.1,color="black", alpha = 0.8, linetype = 1) +
geom_text(stat = "stratum", size = font.size) + # 2.5
scale_x_discrete(limits = c(), labels=c("Patterns", "Signaling")) +
scale_fill_manual(values = alpha(color.use.all, alpha = 0.8), drop = FALSE) +
theme_bw()+
theme(legend.position = "none",
axis.title = element_blank(),
axis.text.y= element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
axis.ticks = element_blank(),axis.text=element_text(size= 10))+
theme(plot.margin = unit(c(0, 0, 0, 0), "cm"))
## connect cell groups with signaling
# (commented-out experimental code: directly linking cell groups with signaling)
# data1 = data1[data1$Contribution > 0,]
# data2 = data2[data2$Contribution > 0,]
# data3 = merge(data1, data2, by.x="Pattern", by.y="Pattern")
# data3$Contribution <- data3$Contribution.x * data3$Contribution.y
# data3 <- data3[,colnames(data3) %in% c("CellGroup","Signaling","Contribution")]
# plot.data <- data3
# nSignaling<-length(unique(plot.data$Signaling))
# nCellGroup<-length(unique(plot.data$CellGroup))
#
# if (length(color.use.signaling) == 1) {
#   color.use.signaling <- rep(color.use.signaling, nSignaling)
# }
#
#
# ## connect cell groups with patterns
# plot.data.long <- to_lodes_form(plot.data, axes = 1:2, id = "connection")
# if (do.order) {
#   mat = tapply(plot.data[["Contribution"]], list(plot.data[["CellGroup"]], plot.data[["Signaling"]]), sum)
#   d <- dist(as.matrix(mat))
#   hc <- hclust(d, "ave")
#   k <- length(unique(grep("Signaling", plot.data.long$stratum[plot.data.long$Contribution != 0], value = T)))
#   cluster <- hc %>% cutree(k)
#   order.name <- order(cluster)
#   plot.data.long$stratum <- factor(plot.data.long$stratum, levels = c(names(cluster)[order.name], colnames(mat)))
#   color.use <- color.use[order.name]
# }
# color.use.all <- c(color.use, color.use.signaling)
# gg3 <- ggplot(plot.data.long, aes(x = factor(x, levels = c("CellGroup", "Signaling")),y=Contribution,
#   stratum = stratum, alluvium = connection,
#   fill = stratum, label = stratum)) +
#   geom_flow(width = 1/3,aes.flow = "forward") +
#   geom_stratum(width=1/3,size=0.1,color="black", alpha = 0.8, linetype = 1) +
#   geom_text(stat = "stratum", size = 2.5) +
#   scale_x_discrete(limits = c(), labels=c("Cell groups", "Signaling")) +
#   scale_fill_manual(values = alpha(color.use.all, alpha = 0.8), drop = FALSE) +
#   theme_bw()+
#   theme(legend.position = "none",
#   axis.title = element_blank(),
#   axis.text.y= element_blank(),
#   panel.grid.major = element_blank(),
#   panel.grid.minor = element_blank(),
#   panel.border = element_blank(),
#   axis.ticks = element_blank(),axis.text=element_text(size=10)) +
#   theme(plot.margin = unit(c(0, 0, 0, 0), "cm"))
# combine the two alluvial panels side by side and add the shared title on top
gg <- cowplot::plot_grid(gg1, gg2,align = "h", nrow = 1)
title <- cowplot::ggdraw() + cowplot::draw_label(main.title,size = font.size.title)
gg <- cowplot::plot_grid(title, gg, ncol=1, rel_heights=c(0.1, 1))
}
return(gg)
}
#' Dot plots showing the associations of latent patterns with cell groups and ligand-receptor pairs or signaling pathways
#'
#' Using a contribution score of each cell group to each signaling pathway (computed by multiplying W by H obtained from `identifyCommunicationPatterns`), this builds a dot plot in which the dot size is proportional to the contribution score, showing the association between each cell group and its enriched signaling pathways.
#'
#' @param object CellChat object
#' @param slot.name the slot name of object that is used to compute centrality measures of signaling networks
#' @param pattern "outgoing" or "incoming"
#' @param cutoff the threshold for filtering out weak links. Default is 1/R where R is the number of latent patterns. Elements in W and H are set to zero if they are less than `cutoff`.
#' @param color.use the character vector defining the color of each cell group
#' @param pathway.show the character vector defining the signaling to show
#' @param group.show the character vector defining the cell group to show
#' @param shape the shape of the symbol: 21 for circle and 22 for square
#' @param dot.size a range defining the size of the symbol
#' @param dot.alpha transparency
#' @param main.title the title of plot
#' @param font.size font size of the text
#' @param font.size.title font size of the title
#' @importFrom methods slot
#' @import ggplot2
#' @importFrom dplyr group_by top_n
#' @return a ggplot object
#' @export
#'
#' @examples
netAnalysis_dot <- function(object, slot.name = "netP", pattern = c("outgoing","incoming"), cutoff = NULL, color.use = NULL,
                            pathway.show = NULL, group.show = NULL,
                            shape = 21, dot.size = c(1, 3), dot.alpha = 1, main.title = NULL,
                            font.size = 10, font.size.title = 12){
  pattern <- match.arg(pattern)
  patternSignaling <- methods::slot(object, slot.name)$pattern[[pattern]]
  data1 = patternSignaling$pattern$cell       # cell-group x pattern contributions (W)
  data2 = patternSignaling$pattern$signaling  # signaling x pattern contributions (H)
  data = patternSignaling$data
  if (is.null(main.title)) {
    if (pattern == "outgoing") {
      main.title = "Outgoing communication patterns of secreting cells"
    } else if (pattern == "incoming") {
      main.title = "Incoming communication patterns of target cells"
    }
  }
  if (is.null(color.use)) {
    color.use <- scPalette(nlevels(data1$CellGroup))
  }
  if (is.null(cutoff)) {
    cutoff <- 1/length(unique(data1$Pattern))
  }
  # Silence warnings while building the plot, but restore the user's setting on
  # exit (the original set warn = -1 globally and never restored it).
  opts <- options(warn = -1)
  on.exit(options(opts), add = TRUE)
  # Zero-out weak contributions before combining the two factor matrices.
  data1$Contribution[data1$Contribution < cutoff] <- 0
  data2$Contribution[data2$Contribution < cutoff] <- 0
  # Contribution of each cell group to each signaling pathway: product of W and H
  # entries matched on the shared Pattern.
  data3 = merge(data1, data2, by.x="Pattern", by.y="Pattern")
  data3$Contribution <- data3$Contribution.x * data3$Contribution.y
  data3 <- data3[,colnames(data3) %in% c("CellGroup","Signaling","Contribution")]
  if (!is.null(pathway.show)) {
    data3 <- data3[data3$Signaling %in% pathway.show, ]
    # Pad requested-but-absent pathways with zero contributions so they still
    # appear on the x-axis.
    # BUG FIX: the original tested `length(pathway.add) > 1`, silently skipping
    # the padding when exactly one requested pathway was missing.
    pathway.add <- pathway.show[!(pathway.show %in% data3$Signaling)]
    if (length(pathway.add) > 0) {
      data.add <- expand.grid(CellGroup = levels(data1$CellGroup), Signaling = pathway.add)
      data.add$Contribution <- 0
      data3 <- rbind(data3, data.add)
    }
    data3$Signaling <- factor(data3$Signaling, levels = pathway.show)
  }
  if (!is.null(group.show)) {
    data3$CellGroup <- as.character(data3$CellGroup)
    data3 <- data3[data3$CellGroup %in% group.show, ]
    data3$CellGroup <- factor(data3$CellGroup, levels = group.show)
  }
  # Keep only (CellGroup, Signaling) combinations that are non-zero in the raw
  # pattern data; everything else is forced to zero (and later to NA -> not drawn).
  data <- as.data.frame(as.table(data));
  data <- data[data[,3] != 0, ]
  data12 <- paste0(data[,1],data[,2])
  data312 <- paste0(data3[,1],data3[,2])
  idx1 <- which(match(data312, data12, nomatch = 0) ==0)
  data3$Contribution[idx1] <- 0
  data3$id <- data312
  # For duplicated combinations keep the largest contribution.
  data3 <- data3 %>% group_by(id) %>% top_n(1, Contribution)
  data3$Contribution[which(data3$Contribution == 0)] <- NA
  df <- data3
  gg <- ggplot(data = df, aes(x = Signaling, y = CellGroup)) +
    geom_point(aes(size = Contribution, fill = CellGroup, colour = CellGroup), shape = shape) +
    scale_size_continuous(range = dot.size) +
    theme_linedraw() +
    scale_x_discrete(position = "bottom") +
    ggtitle(main.title) +
    theme(plot.title = element_text(hjust = 0.5)) +
    theme(text = element_text(size = font.size),plot.title = element_text(size=font.size.title, face="plain"),
          axis.text.x = element_text(angle = 45, hjust=1),
          axis.text.y = element_text(angle = 0, hjust=1),
          axis.title.x = element_blank(),
          axis.title.y = element_blank()) +
    theme(axis.line.x = element_line(size = 0.25), axis.line.y = element_line(size = 0.25)) +
    theme(panel.grid.major = element_line(colour="grey90", size = (0.1)))
  gg <- gg + scale_y_discrete(limits = rev(levels(data3$CellGroup)))
  # NA contributions are drawn white (i.e., effectively hidden).
  gg <- gg + scale_fill_manual(values = ggplot2::alpha(color.use, alpha = dot.alpha), drop = FALSE, na.value = "white")
  gg <- gg + scale_colour_manual(values = color.use, drop = FALSE, na.value = "white")
  gg <- gg + guides(colour=FALSE) + guides(fill=FALSE)
  gg <- gg + theme(legend.title = element_text(size = 10), legend.text = element_text(size = 8))
  return(gg)
}
#' 2D visualization of the learned manifold of signaling networks
#'
#' @param object CellChat object
#' @param slot.name the slot name of object that is used to compute centrality measures of signaling networks
#' @param type "functional","structural"
#' @param pathway.remove a character vector defining the signaling to remove
#' @param pathway.remove.show whether show the removed signaling names
#' @param color.use defining the color for each cell group
#' @param dot.size a range defining the size of the symbol
#' @param dot.alpha transparency
#' @param xlabel label of x-axis
#' @param ylabel label of y-axis
#' @param title main title of the plot
#' @param font.size font size of the text
#' @param font.size.title font size of the title
#' @param label.size font size of the text
#' @param do.label label each point
#' @param show.legend whether show the legend
#' @param show.axes whether show the axes
#' @import ggplot2
#' @importFrom ggrepel geom_text_repel
#' @importFrom methods slot
#' @return a ggplot object
#' @export
#'
#' @examples
netVisual_embedding <- function(object, slot.name = "netP", type = c("functional","structural"), color.use = NULL, pathway.remove = NULL, pathway.remove.show = TRUE, dot.size = c(2, 6), label.size = 2, dot.alpha = 0.5,
                                xlabel = "Dim 1", ylabel = "Dim 2", title = NULL,
                                font.size = 10, font.size.title = 12, do.label = T, show.legend = T, show.axes = T) {
  type <- match.arg(type)
  comparison <- "single"
  comparison.name <- paste(comparison, collapse = "-")
  # 2D embedding coordinates and cluster assignments computed upstream.
  Y <- methods::slot(object, slot.name)$similarity[[type]]$dr[[comparison.name]]
  Groups <- methods::slot(object, slot.name)$similarity[[type]]$group[[comparison.name]]
  prob <- methods::slot(object, slot.name)$prob
  if (is.null(pathway.remove)) {
    similarity <- methods::slot(object, slot.name)$similarity[[type]]$matrix[[comparison.name]]
    # Isolated pathways: only similar to themselves (column sum of similarity == 1).
    pathway.remove <- rownames(similarity)[which(colSums(similarity) == 1)]
  }
  if (length(pathway.remove) > 0) {
    pathway.remove.idx <- which(dimnames(prob)[[3]] %in% pathway.remove)
    # BUG FIX: guard against an empty index — `prob[ , , -integer(0)]` selects
    # ZERO slices, silently discarding every pathway. The sibling function
    # netVisual_embeddingPairwise already has this guard. `drop = FALSE` keeps
    # the 3D structure even when only one pathway remains.
    if (length(pathway.remove.idx) > 0) {
      prob <- prob[ , , -pathway.remove.idx, drop = FALSE]
    }
  }
  # Total communication probability per pathway, normalized to [0, 1] for dot size.
  prob_sum <- apply(prob, 3, sum)
  df <- data.frame(x = Y[,1], y = Y[, 2], Commun.Prob. = prob_sum/max(prob_sum), labels = as.character(unlist(dimnames(prob)[3])), Groups = as.factor(Groups))
  if (is.null(color.use)) {
    color.use <- ggPalette(length(unique(Groups)))
  }
  gg <- ggplot(data = df, aes(x, y)) +
    geom_point(aes(size = Commun.Prob.,fill = Groups, colour = Groups), shape = 21) +
    CellChat_theme_opts() +
    theme(text = element_text(size = font.size), legend.key.height = grid::unit(0.15, "in"))+
    guides(colour = guide_legend(override.aes = list(size = 3)))+
    labs(title = title, x = xlabel, y = ylabel) + theme(plot.title = element_text(size= font.size.title, face="plain"))+
    scale_size_continuous(limits = c(0,1), range = dot.size, breaks = c(0.1,0.5,0.9)) +
    theme(axis.text.x = element_blank(),axis.text.y = element_blank(),axis.ticks = element_blank()) +
    theme(axis.line.x = element_line(size = 0.25), axis.line.y = element_line(size = 0.25))
  gg <- gg + scale_fill_manual(values = ggplot2::alpha(color.use, alpha = dot.alpha), drop = FALSE)
  gg <- gg + scale_colour_manual(values = color.use, drop = FALSE)
  if (do.label) {
    gg <- gg + ggrepel::geom_text_repel(mapping = aes(label = labels, colour = Groups), size = label.size, show.legend = F,segment.size = 0.2, segment.alpha = 0.5)
  }
  if (length(pathway.remove) > 0 & pathway.remove.show) {
    # Annotate the removed (isolated) pathways in the top-left corner.
    gg <- gg + annotate(geom = 'text', label = paste("Isolate pathways: ", paste(pathway.remove, collapse = ', ')), x = -Inf, y = Inf, hjust = 0, vjust = 1, size = label.size,fontface="italic")
  }
  if (!show.legend) {
    gg <- gg + theme(legend.position = "none")
  }
  if (!show.axes) {
    gg <- gg + theme_void()
  }
  gg
}
#' Zoom into the 2D visualization of the learned manifold learning of the signaling networks
#'
#' @param object CellChat object
#' @param slot.name the slot name of object that is used to compute centrality measures of signaling networks
#' @param type "functional","structural"
#' @param pathway.remove a character vector defining the signaling to remove
#' @param color.use defining the color for each cell group
#' @param nCol the number of columns of the plot
#' @param dot.size a range defining the size of the symbol
#' @param dot.alpha transparency
#' @param xlabel label of x-axis
#' @param ylabel label of y-axis
#' @param label.size font size of the text
#' @param do.label label each point
#' @param show.legend whether show the legend
#' @param show.axes whether show the axes
#' @import ggplot2
#' @importFrom ggrepel geom_text_repel
#' @importFrom cowplot plot_grid
#' @importFrom methods slot
#' @return a combined ggplot object (one panel per cluster)
#' @export
#'
#' @examples
netVisual_embeddingZoomIn <- function(object, slot.name = "netP", type = c("functional","structural"), color.use = NULL, pathway.remove = NULL, nCol = 1, dot.size = c(2, 6), label.size = 2.8, dot.alpha = 0.5,
                                      xlabel = NULL, ylabel = NULL, do.label = T, show.legend = F, show.axes = T) {
  # BUG FIX: the original never called match.arg, so the default
  # c("functional","structural") was used verbatim as a recursive `[[` index.
  type <- match.arg(type)
  comparison <- "single"
  comparison.name <- paste(comparison, collapse = "-")
  Y <- methods::slot(object, slot.name)$similarity[[type]]$dr[[comparison.name]]
  clusters <- methods::slot(object, slot.name)$similarity[[type]]$group[[comparison.name]]
  prob <- methods::slot(object, slot.name)$prob
  if (is.null(pathway.remove)) {
    similarity <- methods::slot(object, slot.name)$similarity[[type]]$matrix[[comparison.name]]
    # Isolated pathways: only similar to themselves (column sum of similarity == 1).
    pathway.remove <- rownames(similarity)[which(colSums(similarity) == 1)]
  }
  if (length(pathway.remove) > 0) {
    pathway.remove.idx <- which(dimnames(prob)[[3]] %in% pathway.remove)
    # BUG FIX: guard against an empty index — `prob[ , , -integer(0)]` would
    # discard every pathway. `drop = FALSE` keeps the 3D structure.
    if (length(pathway.remove.idx) > 0) {
      prob <- prob[ , , -pathway.remove.idx, drop = FALSE]
    }
  }
  prob_sum <- apply(prob, 3, sum)
  df <- data.frame(x = Y[,1], y = Y[, 2], Commun.Prob. = prob_sum/max(prob_sum), labels = as.character(unlist(dimnames(prob)[3])), clusters = as.factor(clusters))
  if (is.null(color.use)) {
    color.use <- ggPalette(length(unique(clusters)))
  }
  # Zoom into each cluster and label the points in that cluster only.
  ggAll <- vector("list", length(unique(clusters)))
  for (i in 1:length(unique(clusters))) {
    clusterID = i
    title <- paste0("Group ", clusterID)
    df2 <- df[df$clusters %in% clusterID,]
    gg <- ggplot(data = df2, aes(x, y)) +
      geom_point(aes(size = Commun.Prob.), shape = 21, colour = alpha(color.use[clusterID], alpha = 1), fill = alpha(color.use[clusterID], alpha = dot.alpha)) +
      CellChat_theme_opts() +
      theme(text = element_text(size = 10), legend.key.height = grid::unit(0.15, "in"))+
      labs(title = title, x = xlabel, y = ylabel) + theme(plot.title = element_text(size=12))+
      scale_size_continuous(limits = c(0,1), range = dot.size, breaks = c(0.1,0.5,0.9)) +
      theme(axis.text.x = element_blank(),axis.text.y = element_blank(),axis.ticks = element_blank()) +
      theme(axis.line.x = element_line(size = 0.25), axis.line.y = element_line(size = 0.25))
    if (do.label) {
      gg <- gg + ggrepel::geom_text_repel(mapping = aes(label = labels), colour = color.use[clusterID], size = label.size, segment.size = 0.2, segment.alpha = 0.5)
    }
    if (!show.legend) {
      gg <- gg + theme(legend.position = "none")
    }
    if (!show.axes) {
      gg <- gg + theme_void()
    }
    ggAll[[i]] <- gg
  }
  gg.combined <- cowplot::plot_grid(plotlist = ggAll, ncol = nCol)
  gg.combined
}
#' 2D visualization of the joint manifold learning of signaling networks from two datasets
#'
#' @param object CellChat object
#' @param slot.name the slot name of object that is used to compute centrality measures of signaling networks
#' @param type "functional","structural"
#' @param comparison a numerical vector giving the datasets for comparison. Default are all datasets when object is a merged object
#' @param pathway.remove a character vector defining the signaling to remove
#' @param pathway.remove.show whether show the removed signaling names
#' @param color.use defining the color for each cell group
#' @param point.shape a numeric vector giving the point shapes. By default point.shape <- c(21, 0, 24, 23, 25, 10, 12), see available shapes at http://www.sthda.com/english/wiki/r-plot-pch-symbols-the-different-point-shapes-available-in-r
#' @param dot.size a range defining the size of the symbol
#' @param dot.alpha transparency
#' @param xlabel label of x-axis
#' @param ylabel label of y-axis
#' @param title main title of the plot
#' @param label.size font size of the text
#' @param do.label label each point
#' @param show.legend whether show the legend
#' @param show.axes whether show the axes
#' @import ggplot2
#' @importFrom ggrepel geom_text_repel
#' @importFrom methods slot
#' @return a ggplot object
#' @export
#'
#' @examples
netVisual_embeddingPairwise <- function(object, slot.name = "netP", type = c("functional","structural"), comparison = NULL, color.use = NULL, point.shape = NULL, pathway.remove = NULL, pathway.remove.show = TRUE, dot.size = c(2, 6), label.size = 2.5, dot.alpha = 0.5,
                                        xlabel = "Dim 1", ylabel = "Dim 2", title = NULL,do.label = T, show.legend = T, show.axes = T) {
  type <- match.arg(type)
  # Default: compare all datasets stored in the merged object.
  if (is.null(comparison)) {
    comparison <- 1:length(unique(object@meta$datasets))
  }
  cat("2D visualization of signaling networks from datasets", as.character(comparison), '\n')
  comparison.name <- paste(comparison, collapse = "-")
  # Joint 2D embedding and cluster labels computed upstream for this comparison.
  Y <- methods::slot(object, slot.name)$similarity[[type]]$dr[[comparison.name]]
  clusters <- methods::slot(object, slot.name)$similarity[[type]]$group[[comparison.name]]
  # Dataset names (every entry of the slot except the "similarity" bookkeeping).
  object.names <- setdiff(names(methods::slot(object, slot.name)), "similarity")[comparison]
  # Communication probability array (cell x cell x pathway) for each dataset.
  prob <- list()
  for (i in 1:length(comparison)) {
    object.net <- methods::slot(object, slot.name)[[comparison[i]]]
    prob[[i]] = object.net$prob
  }
  if (is.null(point.shape)) {
    point.shape <- c(21, 0, 24, 23, 25, 10, 12)
  }
  if (is.null(pathway.remove)) {
    similarity <- methods::slot(object, slot.name)$similarity[[type]]$matrix[[comparison.name]]
    # Isolated pathways: only similar to themselves (column sum equals 1).
    pathway.remove <- rownames(similarity)[which(colSums(similarity) == 1)]
    # Rownames are "<pathway>--<dataset>"; strip the dataset suffix.
    pathway.remove <- sub("--.*", "", pathway.remove)
  }
  # Drop the isolated pathways from every dataset's probability array.
  if (length(pathway.remove) > 0) {
    for (i in 1:length(prob)) {
      probi <- prob[[i]]
      pathway.remove.idx <- which(dimnames(probi)[[3]] %in% pathway.remove)
      if (length(pathway.remove.idx) > 0) {
        probi <- probi[ , , -pathway.remove.idx]
      }
      prob[[i]] <- probi
    }
  }
  # Total communication probability per pathway, tagged "<pathway>--<dataset>".
  prob_sum.each <- list()
  signalingAll <- c()
  for (i in 1:length(prob)) {
    probi <- prob[[i]]
    prob_sum.each[[i]] <- apply(probi, 3, sum)
    signalingAll <- c(signalingAll, paste0(names(prob_sum.each[[i]]),"--",object.names[i]))
  }
  prob_sum <- unlist(prob_sum.each)
  names(prob_sum) <- signalingAll
  # Split the "<pathway>--<dataset>" tags back into dataset (group) and pathway (label).
  group <- sub(".*--", "", names(prob_sum))
  labels = sub("--.*", "", names(prob_sum))
  df <- data.frame(x = Y[,1], y = Y[, 2], Commun.Prob. = prob_sum/max(prob_sum),
                   labels = as.character(labels), clusters = as.factor(clusters), group = factor(group, levels = unique(group)))
  # Dots: size = normalized total probability, color = cluster, shape = dataset.
  if (is.null(color.use)) {
    color.use <- ggPalette(length(unique(clusters)))
  }
  gg <- ggplot(data = df, aes(x, y)) +
    geom_point(aes(size = Commun.Prob.,fill = clusters, colour = clusters, shape = group)) +
    CellChat_theme_opts() +
    theme(text = element_text(size = 10), legend.key.height = grid::unit(0.15, "in"))+
    guides(colour = guide_legend(override.aes = list(size = 3)))+
    labs(title = title, x = xlabel, y = ylabel) +
    scale_size_continuous(limits = c(0,1), range = dot.size, breaks = c(0.1,0.5,0.9)) +
    theme(axis.text.x = element_blank(),axis.text.y = element_blank(),axis.ticks = element_blank()) +
    theme(axis.line.x = element_line(size = 0.25), axis.line.y = element_line(size = 0.25))
  gg <- gg + scale_fill_manual(values = ggplot2::alpha(color.use, alpha = dot.alpha), drop = FALSE) #+ scale_alpha(group, range = c(0.1, 1))
  gg <- gg + scale_colour_manual(values = color.use, drop = FALSE)
  gg <- gg + scale_shape_manual(values = point.shape[1:length(prob)])
  if (do.label) {
    # Label text fades by dataset via the discrete alpha scale.
    gg <- gg + ggrepel::geom_text_repel(mapping = aes(label = labels, colour = clusters, alpha=group), size = label.size, show.legend = F,segment.size = 0.2, segment.alpha = 0.5) + scale_alpha_discrete(range = c(1, 0.6))
  }
  if (length(pathway.remove) > 0 & pathway.remove.show) {
    # Annotate the removed (isolated) pathways in the top-left corner.
    gg <- gg + annotate(geom = 'text', label = paste("Isolate pathways: ", paste(pathway.remove, collapse = ', ')), x = -Inf, y = Inf, hjust = 0, vjust = 1, size = label.size,fontface="italic")
  }
  if (!show.legend) {
    gg <- gg + theme(legend.position = "none")
  }
  if (!show.axes) {
    gg <- gg + theme_void()
  }
  gg
}
#' Zoom into the 2D visualization of the joint manifold learning of signaling networks from two datasets
#'
#' @param object CellChat object
#' @param slot.name the slot name of object that is used to compute centrality measures of signaling networks
#' @param type "functional","structural"
#' @param comparison a numerical vector giving the datasets for comparison. Default are all datasets when object is a merged object
#' @param pathway.remove a character vector defining the signaling to remove
#' @param color.use defining the color for each cell group
#' @param nCol number of columns in the plot
#' @param point.shape a numeric vector giving the point shapes. By default point.shape <- c(21, 0, 24, 23, 25, 10, 12), see available shapes at http://www.sthda.com/english/wiki/r-plot-pch-symbols-the-different-point-shapes-available-in-r
#' @param dot.size a range defining the size of the symbol
#' @param dot.alpha transparency
#' @param xlabel label of x-axis
#' @param ylabel label of y-axis
#' @param label.size font size of the text
#' @param do.label label each point
#' @param show.legend whether show the legend
#' @param show.axes whether show the axes
#' @import ggplot2
#' @importFrom ggrepel geom_text_repel
#' @importFrom methods slot
#' @return a combined ggplot object (one panel per cluster)
#' @export
#'
#' @examples
netVisual_embeddingPairwiseZoomIn <- function(object, slot.name = "netP", type = c("functional","structural"), comparison = NULL, color.use = NULL, nCol = 1, point.shape = NULL, pathway.remove = NULL, dot.size = c(2, 6), label.size = 2.8, dot.alpha = 0.5,
                                              xlabel = NULL, ylabel = NULL, do.label = T, show.legend = F, show.axes = T) {
  type <- match.arg(type)
  if (is.null(comparison)) {
    comparison <- 1:length(unique(object@meta$datasets))
  }
  cat("2D visualization of signaling networks from datasets", as.character(comparison), '\n')
  comparison.name <- paste(comparison, collapse = "-")
  Y <- methods::slot(object, slot.name)$similarity[[type]]$dr[[comparison.name]]
  clusters <- methods::slot(object, slot.name)$similarity[[type]]$group[[comparison.name]]
  object.names <- setdiff(names(methods::slot(object, slot.name)), "similarity")[comparison]
  prob <- list()
  for (i in 1:length(comparison)) {
    object.net <- methods::slot(object, slot.name)[[comparison[i]]]
    prob[[i]] = object.net$prob
  }
  if (is.null(point.shape)) {
    point.shape <- c(21, 0, 24, 23, 25, 10, 12)
  }
  if (is.null(pathway.remove)) {
    similarity <- methods::slot(object, slot.name)$similarity[[type]]$matrix[[comparison.name]]
    pathway.remove <- rownames(similarity)[which(colSums(similarity) == 1)]
    # CONSISTENCY FIX: as in netVisual_embeddingPairwise, similarity rownames are
    # "<pathway>--<dataset>"; strip the dataset suffix so the names match
    # dimnames(prob)[[3]] below (the original omitted this step).
    pathway.remove <- sub("--.*", "", pathway.remove)
  }
  if (length(pathway.remove) > 0) {
    for (i in 1:length(prob)) {
      probi <- prob[[i]]
      pathway.remove.idx <- which(dimnames(probi)[[3]] %in% pathway.remove)
      if (length(pathway.remove.idx) > 0) {
        probi <- probi[ , , -pathway.remove.idx]
      }
      prob[[i]] <- probi
    }
  }
  prob_sum.each <- list()
  signalingAll <- c()
  for (i in 1:length(prob)) {
    probi <- prob[[i]]
    prob_sum.each[[i]] <- apply(probi, 3, sum)
    # BUG FIX: use the "--" separator (as netVisual_embeddingPairwise does).
    # With the original single "-", pathway names containing a hyphen
    # (e.g. "MHC-I") were truncated by the greedy sub() patterns below.
    signalingAll <- c(signalingAll, paste0(names(prob_sum.each[[i]]),"--",object.names[i]))
  }
  prob_sum <- unlist(prob_sum.each)
  names(prob_sum) <- signalingAll
  # Split "<pathway>--<dataset>" into dataset (group) and pathway (label).
  group <- sub(".*--", "", names(prob_sum))
  labels = sub("--.*", "", names(prob_sum))
  df <- data.frame(x = Y[,1], y = Y[, 2], Commun.Prob. = prob_sum/max(prob_sum),
                   labels = as.character(labels), clusters = as.factor(clusters), group = factor(group, levels = unique(group)))
  if (is.null(color.use)) {
    color.use <- ggPalette(length(unique(clusters)))
  }
  # Zoom into each cluster and label the points in that cluster only.
  ggAll <- vector("list", length(unique(clusters)))
  for (i in 1:length(unique(clusters))) {
    clusterID = i
    title <- paste0("Cluster ", clusterID)
    df2 <- df[df$clusters %in% clusterID,]
    gg <- ggplot(data = df2, aes(x, y)) +
      geom_point(aes(size = Commun.Prob., shape = group),fill = alpha(color.use[clusterID], alpha = dot.alpha), colour = alpha(color.use[clusterID], alpha = 1)) +
      CellChat_theme_opts() +
      theme(text = element_text(size = 10), legend.key.height = grid::unit(0.15, "in"))+
      guides(colour = guide_legend(override.aes = list(size = 3)))+
      labs(title = title, x = xlabel, y = ylabel) +
      scale_size_continuous(limits = c(0,1), range = dot.size, breaks = c(0.1,0.5,0.9)) +
      theme(axis.text.x = element_blank(),axis.text.y = element_blank(),axis.ticks = element_blank()) +
      theme(axis.line.x = element_line(size = 0.25), axis.line.y = element_line(size = 0.25))
    # Keep the dataset -> shape mapping consistent across panels.
    idx <- match(unique(df2$group), levels(df$group), nomatch = 0)
    gg <- gg + scale_shape_manual(values= point.shape[idx])
    if (do.label) {
      gg <- gg + ggrepel::geom_text_repel(mapping = aes(label = labels), colour = color.use[clusterID], size = label.size, show.legend = F,segment.size = 0.2, segment.alpha = 0.5) + scale_alpha_discrete(range = c(1, 0.6))
    }
    if (!show.legend) {
      gg <- gg + theme(legend.position = "none")
    }
    if (!show.axes) {
      gg <- gg + theme_void()
    }
    ggAll[[i]] <- gg
  }
  gg.combined <- cowplot::plot_grid(plotlist = ggAll, ncol = nCol)
  gg.combined
}
#' Show the composition of the CellChatDB database
#'
#' Builds three pie charts summarising CellChatDB: (1) interactions per
#' annotation category, (2) heterodimeric complexes vs. simple ligand/receptor
#' pairs, and (3) KEGG-supported vs. literature-supported evidence.
#'
#' @param CellChatDB CellChatDB database
#' @param nrow the number of rows in the combined plot
#' @importFrom dplyr group_by summarise n %>%
#'
#' @return a combined ggplot object with three pie charts
#' @export
#'
showDatabaseCategory <- function(CellChatDB, nrow = 1) {
  lr <- CellChatDB$interaction
  gene.info <- CellChatDB$geneInfo
  # Pie 1: number of interactions in each annotation category.
  df.annot <- lr %>% group_by(annotation) %>% summarise(value = n())
  df.annot$group <- factor(df.annot$annotation, levels = c("Secreted Signaling","ECM-Receptor","Cell-Cell Contact"))
  pie.annot <- pieChart(df.annot)
  # Pie 2: both ligand and receptor are single gene symbols -> "Others";
  # anything else involves a multi-subunit complex -> "Heterodimers".
  is.simple <- (lr$ligand %in% gene.info$Symbol) & (lr$receptor %in% gene.info$Symbol)
  df.dimer <- data.frame(group = rep("Heterodimers", dim(lr)[1]), stringsAsFactors = FALSE)
  df.dimer$group[is.simple] <- "Others"
  df.dimer <- df.dimer %>% group_by(group) %>% summarise(value = n())
  df.dimer$group <- factor(df.dimer$group, levels = c("Heterodimers","Others"))
  pie.dimer <- pieChart(df.dimer)
  # Pie 3: interactions whose evidence field mentions KEGG vs. the rest.
  from.kegg <- grepl("KEGG", lr$evidence)
  df.evidence <- data.frame(group = rep("Literature", dim(lr)[1]), stringsAsFactors = FALSE)
  df.evidence$group[from.kegg] <- "KEGG"
  df.evidence <- df.evidence %>% group_by(group) %>% summarise(value = n())
  df.evidence$group <- factor(df.evidence$group, levels = c("KEGG","Literature"))
  pie.evidence <- pieChart(df.evidence)
  cowplot::plot_grid(pie.annot, pie.dimer, pie.evidence, nrow = nrow, align = "h", rel_widths = c(1, 1, 1))
}
#' Plot pie chart
#'
#' @param df a data frame with a `group` column (slice labels) and a `value` column (slice sizes)
#' @param label.size font size of the percentage labels
#' @param color.use a vector of colors, one per group, used to fill the slices
#' @param title legend title
#' @import ggplot2
#' @importFrom scales percent
#' @importFrom dplyr arrange desc mutate
#' @importFrom ggrepel geom_text_repel
#' @return a ggplot object
#' @export
#'
pieChart <- function(df, label.size = 2.5, color.use = NULL, title = "") {
  # Order slices by decreasing value and compute percentage labels.
  df %>% arrange(dplyr::desc(value)) %>%
    mutate(prop = scales::percent(value/sum(value))) -> df
  gg <- ggplot(df, aes(x="", y=value, fill=forcats::fct_inorder(group))) +
    geom_bar(stat="identity", width=1) +
    coord_polar("y", start=0)+theme_void() +
    ggrepel::geom_text_repel(aes(label = prop), size= label.size, show.legend = F, nudge_x = 0)
  gg <- gg + theme(legend.position="bottom", legend.direction = "vertical")
  if(!is.null(color.use)) {
    # BUG FIX: the slices are mapped to `fill`, so custom colors must go through
    # scale_fill_manual(values = ...). The original called
    # scale_color_manual(color.use), which targets the wrong aesthetic and
    # passes the palette positionally into `...` (leaving `values` missing).
    gg <- gg + scale_fill_manual(values = color.use)
  }
  if (!is.null(title)) {
    gg <- gg + guides(fill = guide_legend(title = title))
  }
  gg
}
#' A Seurat wrapper function for plotting gene expression using violin plot or dot plot
#'
#' This function creates a Seurat object from an input CellChat object, and then plots gene expression distribution using a modified violin plot or dot plot based on Seurat's functions.
#' Please check \code{\link{StackedVlnPlot}} and \code{\link{dotPlot}} for detailed description of the arguments.
#'
#' USER can extract the signaling genes related to the inferred L-R pairs or signaling pathway using \code{\link{extractEnrichedLR}}, and then plot gene expression using Seurat package.
#'
#' @param object CellChat object
#' @param features Features to plot gene expression
#' @param signaling a char vector containing signaling pathway names for searching
#' @param enriched.only whether only return the identified enriched signaling genes in the database. Default = TRUE, returning the significantly enriched signaling interactions
#' @param type violin plot or dot plot
#' @param color.use defining the color for each cell group
#' @param group.by Name of one metadata column to group (color) cells. Default is the defined cell groups in CellChat object
#' @param ... other arguments passing to either VlnPlot or DotPlot from Seurat package
#' @return a ggplot object
#' @export
#'
#' @examples
plotGeneExpression <- function(object, features = NULL, signaling = NULL, enriched.only = TRUE, type = c("violin", "dot"), color.use = NULL, group.by = NULL, ...) {
  type <- match.arg(type)
  meta <- object@meta
  # Merged objects store per-dataset identities in a list; use the joint labels.
  if (is.list(object@idents)) {
    meta$group.cellchat <- object@idents$joint
  } else {
    meta$group.cellchat <- object@idents
  }
  w10x <- Seurat::CreateSeuratObject(counts = object@data.signaling, meta.data = meta)
  if (is.null(group.by)) {
    group.by <- "group.cellchat"
  }
  Seurat::Idents(w10x) <- group.by
  # Scalar condition: use short-circuit && rather than the elementwise &.
  if (!is.null(features) && !is.null(signaling)) {
    warning("`features` will be used when inputing both `features` and `signaling`!")
  }
  if (!is.null(features)) {
    feature.use <- features
  } else if (!is.null(signaling)) {
    res <- extractEnrichedLR(object, signaling = signaling, geneLR.return = TRUE, enriched.only = enriched.only)
    feature.use <- res$geneLR
  } else {
    # BUG FIX: the original fell through with `feature.use` undefined,
    # producing an obscure "object not found" error.
    stop("Either `features` or `signaling` must be provided.", call. = FALSE)
  }
  if (type == "violin") {
    gg <- StackedVlnPlot(w10x, features = feature.use, color.use = color.use, ...)
  } else if (type == "dot") {
    gg <- dotPlot(w10x, features = feature.use, ...)
  }
  return(gg)
}
#' Dot plot
#'
#' The dot size encodes the percentage of cells within a class expressing a
#' feature, and the color encodes the scaled average expression across all
#' cells within that class. This is a thin cosmetic wrapper around
#' Seurat::DotPlot.
#'
#' @param object seurat object
#' @param features Features to plot (gene expression, metrics)
#' @param rotation whether rotate the plot (flip the coordinates)
#' @param colormap RColorBrewer palette to use (check available palettes via RColorBrewer::display.brewer.all()); NULL keeps Seurat's blue/red gradient
#' @param color.direction Sets the order of colours in the scale. If 1, the default, colours are as output by RColorBrewer::brewer.pal(). If -1, the order of colours is reversed.
#' @param idents Which classes to include in the plot (default is all)
#' @param group.by Name of one or more metadata columns to group (color) cells by
#' (for example, orig.ident); pass 'ident' to group by identity class
#' @param split.by Name of a metadata column to split plot by
#' @param legend.width legend width
#' @param scale whether to scale the average expression
#' @param col.min Minimum scaled average expression threshold (everything smaller will be set to this)
#' @param col.max Maximum scaled average expression threshold (everything larger will be set to this)
#' @param dot.scale Scale the size of the points, similar to cex
#' @param assay Name of assay to use, defaults to the active assay
#' @param angle.x angle for x-axis text rotation
#' @param hjust.x adjust x axis text
#' @param angle.y angle for y-axis text rotation
#' @param hjust.y adjust y axis text
#' @param show.legend whether show the legend
#' @param ... Extra parameters passed to DotPlot from Seurat package
#' @return ggplot2 object
#' @export
#'
#' @examples
#' @import ggplot2
dotPlot <- function(object, features, rotation = TRUE, colormap = "OrRd", color.direction = 1, scale = TRUE, col.min = -2.5, col.max = 2.5, dot.scale = 6, assay = "RNA",
                    idents = NULL, group.by = NULL, split.by = NULL, legend.width = 0.5,
                    angle.x = 45, hjust.x = 1, angle.y = 0, hjust.y = 0.5, show.legend = TRUE, ...) {
  # Delegate the heavy lifting to Seurat, then restyle the result.
  p <- Seurat::DotPlot(object, features = features, assay = assay, cols = c("blue", "red"),
                       scale = scale, col.min = col.min, col.max = col.max, dot.scale = dot.scale,
                       idents = idents, group.by = group.by, split.by = split.by, ...)
  # Cosmetics, applied in the same order as before (later theme() calls merge
  # element-wise into earlier ones).
  p <- p +
    theme(axis.title.x = element_blank(), axis.title.y = element_blank()) +
    theme(axis.text.x = element_text(size = 10), axis.text.y = element_text(size = 10), axis.line = element_line(colour = 'black')) +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5)) +
    theme(axis.text.x = element_text(angle = angle.x, hjust = hjust.x), axis.text.y = element_text(angle = angle.y, hjust = hjust.y)) +
    theme(legend.title = element_text(size = 10), legend.text = element_text(size = 8))
  if (is.null(split.by)) {
    p <- p + guides(color = guide_colorbar(barwidth = legend.width, title = "Scaled expression"),
                    size = guide_legend(title = 'Percent expressed'))
  }
  if (rotation) {
    p <- p + coord_flip()
  }
  # Replace Seurat's blue/red gradient with a brewer palette (split plots keep
  # Seurat's own coloring).
  if (!is.null(colormap) && is.null(split.by)) {
    p <- p + scale_color_distiller(palette = colormap, direction = color.direction,
                                   guide = guide_colorbar(title = "Scaled Expression", ticks = T, label = T, barwidth = legend.width),
                                   na.value = "lightgrey")
  }
  if (!show.legend) {
    p <- p + theme(legend.position = "none")
  }
  return(p)
}
#' Stacked Violin plot
#'
#' Builds one violin panel per feature (via \code{modify_vlnplot}) and stacks
#' them vertically with patchwork; only the bottom panel keeps x-axis labels.
#'
#' @param object seurat object
#' @param features Features to plot (gene expression, metrics)
#' @param color.use defining the color for each cell group
#' @param colors.ggplot whether use ggplot color scheme; default: colors.ggplot = FALSE
#' @param split.by Name of a metadata column to split plot by
#' @param idents Which classes to include in the plot (default is all)
#' @param show.text.y whether show y-axis text
#' @param line.size line width in the violin plot
#' @param pt.size size of the dots
#' @param plot.margin adjust the white space between each plot
#' @param angle.x angle for x-axis text rotation; expected to be one of 0, 45 or 90
#' @param vjust.x adjust x axis text
#' @param hjust.x adjust x axis text
#' @param ... Extra parameters passed to VlnPlot from Seurat package
#' @return ggplot2 object
#' @export
#'
#' @examples
#' @import ggplot2
#' @importFrom patchwork wrap_plots
StackedVlnPlot<- function(object, features, idents = NULL, split.by = NULL,
                          color.use = NULL, colors.ggplot = FALSE,
                          angle.x = 90, vjust.x = NULL, hjust.x = NULL, show.text.y = TRUE, line.size = NULL,
                          pt.size = 0,
                          plot.margin = margin(0, 0, 0, 0, "cm"),
                          ...) {
  # Silence warnings while plotting, but restore the user's setting on exit
  # (the original set warn = -1 globally and never restored it).
  opts <- options(warn = -1)
  on.exit(options(opts), add = TRUE)
  if (is.null(color.use)) {
    numCluster <- length(levels(Seurat::Idents(object)))
    if (colors.ggplot) {
      color.use <- NULL
    } else {
      color.use <- scPalette(numCluster)
    }
  }
  # Scalar condition: use short-circuit || rather than the elementwise |.
  if (is.null(vjust.x) || is.null(hjust.x)) {
    # Lookup table of sensible justifications for the supported angles.
    # NOTE(review): if angle.x is not 0, 45 or 90, the lookup yields length-0
    # values — confirm whether other angles should be supported.
    angle=c(0, 45, 90)
    hjust=c(0, 1, 1)
    vjust=c(0, 1, 0.5)
    vjust.x = vjust[angle == angle.x]
    hjust.x = hjust[angle == angle.x]
  }
  plot_list<- purrr::map(features, function(x) modify_vlnplot(object = object, features = x, idents = idents, split.by = split.by, cols = color.use, pt.size = pt.size,
                                                              show.text.y = show.text.y, line.size = line.size, ...))
  # Add back x-axis text/ticks to the bottom panel only.
  plot_list[[length(plot_list)]]<- plot_list[[length(plot_list)]] +
    theme(axis.text.x=element_text(), axis.ticks.x = element_line()) +
    theme(axis.text.x = element_text(angle = angle.x, hjust = hjust.x, vjust = vjust.x)) +
    theme(axis.text.x = element_text(size = 10))
  # Show a single y-axis tick at the maximum value of each panel.
  ymaxs<- purrr::map_dbl(plot_list, extract_max)
  plot_list<- purrr::map2(plot_list, ymaxs, function(x,y) x +
                            scale_y_continuous(breaks = c(y)) +
                            expand_limits(y = y))
  p<- patchwork::wrap_plots(plotlist = plot_list, ncol = 1)
  return(p)
}
#' Modified violin plot
#'
#' Wraps Seurat::VlnPlot and strips titles, x-axis decorations and the legend
#' so that panels can be stacked by \code{StackedVlnPlot}; the feature name is
#' shown as the y-axis title.
#'
#' @param object Seurat object
#' @param features Features to plot (gene expression, metrics)
#' @param split.by Name of a metadata column to split plot by
#' @param idents Which classes to include in the plot (default is all)
#' @param cols defining the color for each cell group
#' @param show.text.y whether show y-axis text
#' @param line.size line width in the violin plot
#' @param pt.size size of the dots
#' @param plot.margin adjust the white space between each plot
#' @param ... pass any arguments to VlnPlot in Seurat
#' @return a ggplot object
#' @import ggplot2
#'
modify_vlnplot<- function(object,
                          features,
                          idents = NULL,
                          split.by = NULL,
                          cols = NULL,
                          show.text.y = TRUE,
                          line.size = NULL,
                          pt.size = 0,
                          plot.margin = margin(0, 0, 0, 0, "cm"),
                          ...) {
  # Silence warnings while plotting, but restore the user's setting on exit
  # (the original set warn = -1 globally and never restored it).
  opts <- options(warn = -1)
  on.exit(options(opts), add = TRUE)
  p<- Seurat::VlnPlot(object, features = features, cols = cols, pt.size = pt.size, idents = idents, split.by = split.by, ... ) +
    xlab("") + ylab(features) + ggtitle("")
  p <- p + theme(text = element_text(size = 10)) + theme(axis.line = element_line(size=line.size)) +
    theme(axis.text.x = element_text(size = 10), axis.text.y = element_text(size = 8), axis.line.x = element_line(colour = 'black', size=line.size),axis.line.y = element_line(colour = 'black', size= line.size))
  # theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5))
  # Strip everything that would repeat between stacked panels.
  p <- p + theme(legend.position = "none",
                 plot.title= element_blank(),
                 axis.title.x = element_blank(),
                 axis.text.x = element_blank(),
                 axis.ticks.x = element_blank(),
                 axis.title.y = element_text(size = rel(1), angle = 0),
                 axis.text.y = element_text(size = rel(1)),
                 plot.margin = plot.margin ) +
    theme(axis.text.y = element_text(size = 8))
  # NOTE(review): theme() expects *named* elements; this unnamed element_line()
  # appears to be a no-op carried over from the original — confirm before removing.
  p <- p + theme(element_line(size=line.size))
  if (!show.text.y) {
    p <- p + theme(axis.ticks.y=element_blank(), axis.text.y=element_blank())
  }
  return(p)
}
#' extract the max value of the y axis
#'
#' Reads the computed y-scale range from a built ggplot and returns its
#' upper bound rounded up to the next integer.
#'
#' @param p ggplot object
#' @importFrom ggplot2 ggplot_build
extract_max <- function(p) {
  y_range <- ggplot_build(p)$layout$panel_scales_y[[1]]$range$range
  ceiling(max(y_range))
}
|
709f13158a162877fa520c93b34bc7d2cc33eed0 | f7d9d31d70f17ed4e8eb0d17c1394667e8e52d11 | /turmas/2020_turma1/R/graficos.R | a8a67d2bb08b9a72e3586f34e5f923e6e36733c5 | [] | no_license | seade-R/programacao-r | ea57032585127a5199f53bfb5c637c5d371562a7 | 3b2599bebb0c975613d4b070875766e9dabd40ec | refs/heads/master | 2023-03-10T10:07:22.794751 | 2023-03-07T00:55:25 | 2023-03-07T00:55:25 | 246,390,979 | 15 | 5 | null | null | null | null | UTF-8 | R | false | false | 4,725 | r | graficos.R | library(tidyverse)
library(readxl)
library(janitor)
# Download the 2018 death-records spreadsheet (Registro Civil / SEADE) and
# load it into a data frame.
obitos_2018_url <- 'http://www.seade.gov.br/produtos/midia/2020/02/DO2018.xlsx'
download.file(obitos_2018_url, 'obitos_2018.xlsx')
obitos_2018 <- read_excel('obitos_2018.xlsx')
obitos_2018 %>%
  glimpse()
# Bar chart of deaths by age in years (idadeanos).
obitos_2018 %>%
  ggplot() +
  geom_bar(aes(x = idadeanos))
# Bar chart of deaths by race/color factor.
obitos_2018 %>%
  ggplot() +
  geom_bar(aes(racacor_f))
# Histogram of age at death, default binning.
obitos_2018 %>%
  ggplot() +
  geom_histogram(aes(x = idadeanos))
# Same histogram with 5-year bins.
obitos_2018 %>%
  ggplot() +
  geom_histogram(aes(x = idadeanos), binwidth = 5)
# Same histogram, demonstrating outline (color) vs fill.
obitos_2018 %>%
  ggplot() +
  geom_histogram(aes(x = idadeanos),
                 binwidth = 5,
                 color = 'orange',
                 fill = 'green')
# Kernel density of age at death, with progressively more styling.
obitos_2018 %>%
  ggplot() +
  geom_density(aes(x = idadeanos))
obitos_2018 %>%
  ggplot() +
  geom_density(aes(x = idadeanos),
               color = 'darkblue')
obitos_2018 %>%
  ggplot() +
  geom_density(aes(x = idadeanos),
               color = 'darkblue',
               fill = 'darkblue')
# alpha controls the transparency of the fill.
obitos_2018 %>%
  ggplot() +
  geom_density(aes(x = idadeanos),
               color = 'blue',
               fill = 'blue',
               alpha = 0.2)
# NOTE(review): this geom_vline() is a stand-alone expression -- it is not
# chained with "+" to the plot above, so it draws nothing (leftover line).
geom_vline(aes(xintercept = 75))
# Density with a vertical reference line at age 82.
obitos_2018 %>%
  ggplot() +
  geom_density(aes(x = idadeanos),
               color = 'blue',
               fill = 'blue',
               alpha = 0.2) +
  geom_vline(aes(xintercept = 82))
obitos_2018 %>%
  ggplot() +
  geom_density(aes(x = idadeanos),
               color = 'blue',
               fill = 'blue',
               alpha = 0.2) +
  geom_vline(aes(xintercept = 82),
             linetype="dashed",
             color="red")
# Age histograms split by sex: stacked, then side-by-side (dodge).
obitos_2018 %>%
  ggplot() +
  geom_histogram(aes(x = idadeanos,
                     fill = sexo),
                 binwidth = 5)
obitos_2018 %>%
  ggplot() +
  geom_histogram(aes(x = idadeanos,
                     fill = sexo),
                 binwidth = 5,
                 position = "dodge")
# Densities by sex; the filtered versions drop the 'Ignorado' category.
obitos_2018 %>%
  ggplot() +
  geom_density(aes(x = idadeanos,
                   color = sexo))
obitos_2018 %>%
  filter(sexo_f != 'Ignorado') %>%
  ggplot() +
  geom_density(aes(x = idadeanos,
                   color = sexo))
obitos_2018 %>%
  filter(sexo_f != 'Ignorado') %>%
  ggplot() +
  geom_density(aes(x = idadeanos,
                   color = sexo,
                   fill = sexo))
obitos_2018 %>%
  filter(sexo_f != 'Ignorado') %>%
  ggplot() +
  geom_density(aes(x = idadeanos,
                   color = sexo,
                   fill = sexo),
               alpha = 0.5)
# Densities by race/color.
obitos_2018 %>%
  ggplot() +
  geom_density(aes(x = idadeanos,
                   fill = racacor_f,
                   color = racacor_f),
               alpha = 0.5)
# Age distribution by race/color: boxplots, then violins.
obitos_2018 %>%
  ggplot() +
  geom_boxplot(aes(x = racacor_f,
                   y = idadeanos,
                   fill = racacor_f))
obitos_2018 %>%
  ggplot() +
  geom_violin(aes(x = racacor_f,
                  y = idadeanos,
                  fill = racacor_f))
# Reorder the race/color categories by their median age at death.
obitos_2018 %>%
  mutate(racacor_f = fct_reorder(racacor_f, idadeanos, median)) %>%
  ggplot() +
  geom_boxplot(aes(x = racacor_f,
                   y = idadeanos,
                   fill = racacor_f))
# Same plot faceted by sex, dropping 'Ignorado'/'Ignorada' categories.
obitos_2018 %>%
  filter(sexo_f != 'Ignorado') %>%
  filter(racacor_f != 'Ignorada') %>%
  mutate(racacor_f = fct_reorder(racacor_f, idadeanos, median)) %>%
  ggplot() +
  geom_boxplot(aes(x = racacor_f,
                   y = idadeanos,
                   fill = racacor_f)) +
  facet_wrap(.~sexo_f)
# Publication-style version with Portuguese title/caption labels.
obitos_2018 %>%
  filter(sexo_f != 'Ignorado') %>%
  filter(racacor_f != 'Ignorada') %>%
  mutate(racacor_f = fct_reorder(racacor_f, idadeanos, median)) %>%
  ggplot() +
  geom_boxplot(aes(x = racacor_f,
                   y = idadeanos,
                   fill = racacor_f)) +
  facet_wrap(.~sexo_f) +
  labs(
    title = 'Distribuição de óbitos por idade, sexo e raça/cor',
    subtitle = 'Registro Civil 2018',
    caption = 'Fonte: SEADE',
    y = 'Idade (anos)',
    x = '') +
  theme(legend.position = 'none')
# Same figure with the roles of sex and race/color swapped.
obitos_2018 %>%
  filter(sexo_f != 'Ignorado') %>%
  filter(racacor_f != 'Ignorada') %>%
  mutate(racacor_f = fct_reorder(racacor_f, idadeanos, median)) %>%
  ggplot() +
  geom_boxplot(aes(x = sexo_f,
                   y = idadeanos,
                   fill = sexo_f)) +
  facet_wrap(.~racacor_f) +
  labs(
    title = 'Distribuição de óbitos por idade, sexo e raça/cor',
    subtitle = 'Registro Civil 2018',
    caption = 'Fonte: SEADE',
    y = 'Idade (anos)',
    x = '') +
  theme(legend.position = 'none')
# Download and load the 2017 live-births spreadsheet.
nv_2017_url <- 'http://www.seade.gov.br/produtos/midia/2020/02/DN2017.xlsx'
download.file(nv_2017_url, 'nv_2017.xlsx')
nv_2017 <- read_excel('nv_2017.xlsx')
|
3042856021c6e935e5189c1dd9ff07439dd8939d | 2099a2b0f63f250e09f7cd7350ca45d212e2d364 | /DUC-Dataset/Summary_p100_R/D073.SJMN91-06164210.html.R | abaf6a47ce0c1e1e08f09c22b85697c3f95ed77f | [] | no_license | Angela7126/SLNSumEval | 3548301645264f9656b67dc807aec93b636778ef | b9e7157a735555861d2baf6c182e807e732a9dd6 | refs/heads/master | 2023-04-20T06:41:01.728968 | 2021-05-12T03:40:11 | 2021-05-12T03:40:11 | 366,429,744 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 784 | r | D073.SJMN91-06164210.html.R | <html>
<head>
<meta name="TextLength" content="SENT_NUM:4, WORD_NUM:99">
</head>
<body bgcolor="white">
<a href="#0" id="0">It was followed by a second explosion a few minutes later and a third, smaller blast at 11:49 a.m.; The huge plume could be seen in Manila, 60 miles to the south, and reporters at the scene said it blocked out the sun.</a>
<a href="#1" id="1">They told rescuers that they would not leave their livestock and if the animals die, "we will die with them.</a>
<a href="#2" id="2">He said strong tremors preceded the nighttime eruptions and continued afterward.</a>
<a href="#3" id="3">"; Scientists warned of more, possibly larger eruptions from the 4,795-foot volcano, which was dormant for six centuries until it began spewing steam in April.</a>
</body>
</html> |
a9b9687a92fa4cafda129f5ee9556e045da4345e | 3647a0d6e8869fbc1595d90527aafd34231304f1 | /Quant-Gen/additive_alleles_app.R | 2049288470a938552bf46d988a4a23540d3781fe | [
"MIT"
] | permissive | mweissman97/shiny_popgen | dce6d1a28a1b7574a5638a7265fa15d0002ea75b | dcb492f66501d22f4c4eb81d6c22c8064cb3b8c1 | refs/heads/master | 2023-07-12T02:51:13.543470 | 2021-07-14T02:25:28 | 2021-07-14T02:25:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,884 | r | additive_alleles_app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
pheno.dist <- function(n.loci, allele.types) {
n.alleles <- 2 * n.loci #2 alleles for each locus
class.counts <- choose(n.alleles,
0:n.alleles)
# turn counts into proportions
class.freqs <- class.counts/sum(class.counts)
#class.freqs <- class.counts
class.types <- NULL #storage
for (i in 0:n.alleles) {
type <- i * allele.types[1] +
(n.alleles - i) * allele.types[2]
class.types <- c(class.types,
type) #add to storage
}
return(list(class.types = class.types,
class.freqs = class.freqs))
}
#for_sim(n = 10, gen = 20, show_coal = F)
# UI definition: a sidebar with three sliders (number of loci and the
# phenotypic contribution of each allele) and a main panel holding the
# single distribution plot rendered by the server as output$viz.
ui <- fluidPage(pageWithSidebar(
  headerPanel = headerPanel("Additive alleles"),
  sidebarPanel(
    sliderInput(inputId = "n", label = "Number of independent loci", value = 1,
                min = 1, max = 100, step = 1),
    sliderInput(inputId = "A", label = "Phenotypic contribution of A alleles", value = 10,
                min = 0, max = 100, step = 1),
    sliderInput(inputId = "a", label = "Phenotypic contribution of a alleles", value = 0,
                min = 0, max = 100, step = 1)
  ),
  mainPanel = mainPanel(
    plotOutput(outputId = 'viz')
  )
))
#back end code and response to user input
# Server: recompute the phenotype distribution whenever an input changes and
# draw it as a spike ("h") plot of class frequency against phenotype value.
server <- function(input, output){
  output$viz <- renderPlot({
    out <- pheno.dist(input$n, c(input$A, input$a))
    plot(out$class.types, out$class.freqs,
         type = "h", lwd = 3, ylab = "Frequency of phenotypic class",
         # FALSE spelled out (the original used the reassignable shorthand F);
         # 5% headroom above the tallest spike.
         xlab = "Phenotypic classes", ylim = c(0, max(out$class.freqs, na.rm = FALSE)*1.05))
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
4122bbc782b3162a2a5978486b6615cd213b60e8 | 92d0ec74ce59f8d98e858d02598cced7b0d30c54 | /data analysis/HPC_analysis.R | 5863a591cf73ad4c4abe64d4623a6e1522f21b38 | [] | no_license | Miaoyanwang/ordinal_tensor | 1a8bd71f227294c16e12c164af12edaa134f3067 | ba90e6df7ac5865c755373c84370042d4f1d528d | refs/heads/master | 2023-04-22T07:24:17.635342 | 2021-05-10T02:35:09 | 2021-05-10T02:35:09 | 209,680,209 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,675 | r | HPC_analysis.R | ### Cross validation ######
source("functions.R")
load("../data/dti_brain.RData")
### 5 fold ################ To Chanwoo: Please changes the sampling per instruction ####
# `index' is for 1:5
# I used following lines when I put codes in the server in sh file
# args index ==$SLURM_ARRAY_TASK_ID" filename.R
# i: repetition j: j-th testset
# Enumerate all (repetition, fold) pairs; the SLURM array id `index`
# (defined externally) picks one row, so each cluster job processes one
# fold of one repetition.
indset =matrix(nrow =50,ncol=2)
s = 0
for(i in 1:10){
  for(j in 1:5){
    # for(k in 1:2){
    s = s+1
    indset[s,] = c(i,j)
    # }
  }
}
i = indset[index,][1]
j = indset[index,][2]
# k = indset[index,][3]
# Stratified 5-fold split: indices of each ordinal level (1, 2, 3) of the
# tensor are shuffled separately (seeded by the repetition number) and
# dealt into five folds, then the per-level folds are recombined.
set.seed(i)
l1 = split(sample(which(tensor==1),length(which(tensor==1))),as.factor(1:5))
l2 = split(sample(which(tensor==2),length(which(tensor==2))),as.factor(1:5))
l3 = split(sample(which(tensor==3),length(which(tensor==3))),as.factor(1:5))
cindex = list()
for (k in 1:5) {
  cindex[[k]] = c(l1[[k]],l2[[k]],l3[[k]])
}
# Fold j is held out; its entries are masked with NA in the training tensor.
test_index = cindex[[j]]
train_index = setdiff(1:length(tensor),test_index)
train_tensor = tensor
train_tensor[test_index] = NA
#####################################################################################
################# Continuous Tucker decomposition CV ################################
# Random orthonormal factor starts with Tucker rank (23, 23, 8); modes 1 and
# 2 share the same factor matrix (A_2 = A_1), presumably because the first
# two tensor modes are interchangeable -- confirm against the data.  Fit the
# continuous model on the masked tensor and save this fold's result.
d = dim(tensor)
r = c(23,23,8)
A_1 = randortho(d[1])[,1:r[1]]
A_2 = A_1
A_3 = randortho(d[3])[,1:r[3]]
C = rand_tensor(modes = r)
result = fit_continuous(train_tensor,C,A_1,A_2,A_3)
# NOTE(review): paste0() has no `sep` argument -- sep = "" here is pasted as
# an (empty) extra string, which happens to be harmless.
save(result,file = paste0("CV_conti_",i,"_",j,".RData",sep = ""))
########## Analysis after getting the above output files ################
# Score the continuous model: for each (repetition, fold), rebuild the test
# index with exactly the same seed/splitting code used during fitting, load
# the saved fit and compute the two error metrics.
CV = as.data.frame(matrix(nrow = 50, ncol = 2))
names(CV) = c("MAE","MCR")
s = 0
for(i in 1:10){
  for (j in 1:5) {
    s = s+1
    set.seed(i)
    l1 = split(sample(which(tensor==1),length(which(tensor==1))),as.factor(1:5))
    l2 = split(sample(which(tensor==2),length(which(tensor==2))),as.factor(1:5))
    l3 = split(sample(which(tensor==3),length(which(tensor==3))),as.factor(1:5))
    cindex = list()
    for (k in 1:5) {
      cindex[[k]] = c(l1[[k]],l2[[k]],l3[[k]])
    }
    test_index = cindex[[j]]
    # NOTE(review): duplicated assignment of test_index (harmless).
    test_index = cindex[[j]]
    load(paste("CV_conti_",i,"_",j,".RData",sep = ""))
    theta = result$theta
    # Mean absolute error, and misclassification rate of rounded predictions.
    CV[s,1] = mean(abs(theta[test_index]-tensor[test_index]))
    CV[s,2] = error_rate = mean(round(theta)[test_index]!=tensor[test_index])
  }
}
##################### ordinal glm tucker decomposition CV#################
# Same random initialisation as the continuous case, but fit the ordinal
# model instead and save this fold's result under the CV_ordinal_ prefix.
d = dim(tensor)
r = c(23,23,8)
A_1 = randortho(d[1])[,1:r[1]]
A_2 = A_1
A_3 = randortho(d[3])[,1:r[3]]
C = rand_tensor(modes = r)
result = fit_ordinal(train_tensor,C,A_1,A_2,A_3)
save(result,file = paste("CV_ordinal_",i,"_",j,".RData",sep = ""))
########## Analysis after getting the above output files ################
### with median estimation
# Score the ordinal model using the median estimator: same fold
# reconstruction as above, then convert the fitted (theta, omega) pair to
# ordinal predictions with estimation(type = "median").
OCVmedian = as.data.frame(matrix(nrow = 50, ncol = 2))
names(OCVmedian) = c("MAE","MCR")
s = 0
for(i in 1:10){
  for (j in 1:5) {
    s = s+1
    set.seed(i)
    l1 = split(sample(which(tensor==1),length(which(tensor==1))),as.factor(1:5))
    l2 = split(sample(which(tensor==2),length(which(tensor==2))),as.factor(1:5))
    l3 = split(sample(which(tensor==3),length(which(tensor==3))),as.factor(1:5))
    cindex = list()
    for (k in 1:5) {
      cindex[[k]] = c(l1[[k]],l2[[k]],l3[[k]])
    }
    test_index = cindex[[j]]
    # NOTE(review): duplicated assignment of test_index (harmless).
    test_index = cindex[[j]]
    load(paste("CV_ordinal_",i,"_",j,".RData",sep = ""))
    theta = result$theta
    out = estimation(theta,result$omega,type="median")@data # we can also use other estimator via type="mode","mean", or "median"
    OCVmedian[s,1] = mean(abs(out[test_index]-tensor[test_index]))
    OCVmedian[s,2] = error_rate = mean(out[test_index]!=tensor[test_index])
  }
}
### with mode estimaiton
# Same scoring loop as the median case, but with the mode estimator.
OCVmode = as.data.frame(matrix(nrow = 50, ncol = 2))
names(OCVmode) = c("MAE","MCR")
s = 0
for(i in 1:10){
  for (j in 1:5) {
    s = s+1
    set.seed(i)
    l1 = split(sample(which(tensor==1),length(which(tensor==1))),as.factor(1:5))
    l2 = split(sample(which(tensor==2),length(which(tensor==2))),as.factor(1:5))
    l3 = split(sample(which(tensor==3),length(which(tensor==3))),as.factor(1:5))
    cindex = list()
    for (k in 1:5) {
      cindex[[k]] = c(l1[[k]],l2[[k]],l3[[k]])
    }
    test_index = cindex[[j]]
    # NOTE(review): duplicated assignment of test_index (harmless).
    test_index = cindex[[j]]
    load(paste("CV_ordinal_",i,"_",j,".RData",sep = ""))
    theta = result$theta
    out = estimation(theta,result$omega,type="mode")@data # we can also use other estimator via type="mode","mean", or "median"
    OCVmode[s,1] = mean(abs(out[test_index]-tensor[test_index]))
    OCVmode[s,2] = error_rate = mean(out[test_index]!=tensor[test_index])
  }
}
###################### Getting a data summary##############################
# The three summary sections below were identical copy-pasted loops; they
# are factored into two small helpers.  The *_rep and *_summary variables
# produced are the same as before (the rep matrices additionally carry
# the metric names as dimnames).
# Mean each metric over the n_fold folds of every repetition -> n_rep x 2.
fold_means <- function(cv, n_rep = 10, n_fold = 5) {
  t(vapply(seq_len(n_rep),
           function(k) apply(cv[((k - 1) * n_fold + 1):(k * n_fold), ], 2, mean),
           numeric(ncol(cv))))
}
# Mean and sd of the repetition-level means, labelled like the originals.
summarise_reps <- function(rep_means) {
  out <- rbind(apply(rep_means, 2, mean), apply(rep_means, 2, sd))
  colnames(out) <- c("MAD", "MCR")
  rownames(out) <- c("mean", "sd")
  out
}
################### continuous decomposition ###############
cv_rep <- fold_means(CV)
cv_summary <- summarise_reps(cv_rep)
################### ordinal decomposition with mode ########
ocvmode_rep <- fold_means(OCVmode)
ocvmode_summary <- summarise_reps(ocvmode_rep)
################### ordinal decomposition with median ######
ocvmedian_rep <- fold_means(OCVmedian)
ocvmedian_summary <- summarise_reps(ocvmedian_rep)
|
a7b85121251b6159b5a82966f0ba501b59d0a46e | 486e148c5bbe8977a20e616500a80036d0edaba9 | /Y3-S2/TS/proiect-lab/lab.R | a1ebb555f1d2cc0be2987b1ac76e6168e5091f9c | [] | no_license | cluntraru/FMI | 12dc3a3da72ab90cc1bf69100cc1c09855983162 | d6ac083be495779604d83d1fe7f20f0197b589ca | refs/heads/master | 2022-09-19T04:42:03.597746 | 2020-05-28T11:33:49 | 2020-05-28T11:33:49 | 175,603,011 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 814 | r | lab.R | # Functia de intensitate din enunt
# Intensity function from the problem statement: piecewise rate lambda(t)
# over a 24-hour day; returns -1 for arguments outside [0, 24].
intensity <- function(t) {
  if (t < 0 || t > 24) {
    return(-1)                       # out of range
  }
  if (t <= 8) {
    return(15)                       # constant until 08:00
  }
  if (t < 14) {
    return(2 * tan(0.1 * t) + 14)    # rising part, 08:00-14:00
  }
  9                                  # constant from 14:00 onward
}
# Accept-reject ("thinning") sampler: returns the time of the next event of
# a non-homogeneous Poisson process after time s, given a majorising rate
# lambda and the intensity function lambda(t).
genPoisson <- function(s, lambda, intensity) {
  t <- s
  repeat {
    # U[1] drives the candidate inter-arrival time, U[2] the accept test.
    U <- runif(2)
    t <- t - 1 / lambda * log(U[1])
    # Accept the candidate when U2 <= intensity(t)/lambda; otherwise thin
    # this candidate and draw again from the updated time.
    if (U[2] <= intensity(t) / lambda) {
      return(t)
    }
  }
}
8a42c6e12a79f80fefec22a61419fc7aca195ff4 | 19f0e3c6b29e88fa3797ab07b46870beafd6d3d1 | /gps-analysis/analysis.R | 31e921620b434e489f354ecce4c2088725190ab5 | [] | no_license | gretac/ura | fe7e0432c52dcc607eb0f06abc289132b515cf9e | e5fc3ae78625b672734d0744b708919959f92112 | refs/heads/master | 2021-01-22T09:20:50.163127 | 2016-02-05T20:41:23 | 2016-02-05T20:41:23 | 35,527,070 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 14,396 | r | analysis.R |
options(mc.cores = 4)
#setwd("/home/sfischme/Documents/acerta/pilots/ROS")
setwd("/Users/gretacutulenco/Documents/greta_dev/pilots/GPS-driving-reporting/")
## load Acerta
Sys.setenv(R_TRACES_DIR="../traces")
q <- getwd()
setwd("../../../tstat/site/Rserver")
setwd("/home/sfischme/Documents/acerta/tstat/site/Rserver")
source("server.R")
setwd(q)
library(microbenchmark)
library(bit64)
################################################################################
################################################################################
################################################################################
library(ggmap)
files <- list.files("augusto-bike-rides/",pattern="^ride*")
#files <- "ride032.csv" ## problem, this doesn't work
# uphill/downhill
slope <- dat$slope
total <- length(slope)
uphill <- length(which(slope > 0)) / total * 100
downhill <- length(which(slope < 0)) / total * 100
level <- length(which(slope == 0)) / total * 100
slope.max <- max(abs(slope), na.rm=T)
slope.min <- min(abs(slope), na.rm=T)
up.max <- max(slope, na.rm=T)
up.min <- min(slope[slope > 0], na.rm=T)
down.max <- min(slope, na.rm=T)
down.min <- max(slope[slope < 0], na.rm=T)
foreach (f=files) %do% {
prefix <- gsub("\\.csv$","",f)
message(f)
dat <- fread(paste0("augusto-bike-rides/",f))[,c("time", "latitude", "longitude", "altitude", "speed", "pace", "course", "slope", "distance", "distance_interval"),with=F] %>%
prepareGPSData(3) %>%
detectDrivingDirection(3) %>%
detectDrivingScenarios() %>%
detectTurns(7.5,2)
#dat <- dat[speed<70]
#max(dat$speed)
xbound <- max(abs(min(dat$latitude.delta, na.rm=T)), max(dat$latitude.delta, na.rm=T))
ybound <- max(abs(min(dat$longitude.delta, na.rm=T)),max(dat$longitude.delta, na.rm=T))
bbox <- make_bbox(dat$longitude,dat$latitude)
map_loc <- get_map(bbox, source="osm")## maptype = 'satellite')
## map_loc <- get_map(c(long=mean(bbox[1],bbox[3])+0.003,lat=mean(bbox[2],bbox[4])+0.003),zoom=16, source="google")## ma
## map_loc <- get_map(c(long=mean(bbox[1],bbox[3]),lat=mean(bbox[2],bbox[4])),zoom=8, source="google")## ma
map <- ggmap(map_loc, extent = 'device')
map + geom_point(data=dat,aes(x=longitude,y=latitude),color="royalblue",size=2) +
ggtitle("Map")
ggsave(last_plot(),file=paste0(prefix,"-drive-map.pdf"))
## turns analysis
map + geom_path(data=dat,aes(x=longitude,y=latitude),color="black",size=1) +
geom_point(data=dat[turn!=0],aes(x=longitude,y=latitude, color=factor(turn)), size=3) +
scale_color_brewer("Turns",labels=c("-1"="Right","1"="Left"),palette="Set1") +
ggtitle("Map")
ggsave(last_plot(),file=paste0(prefix,"-drive-map-with-turns.pdf"))
ggplot(dat[turn!=0]) + geom_bar(aes(x=factor(turn))) +
scale_x_discrete("Turns",label=list("-1"="Right","1"="Left")) + ylab("Count") + ggtitle("Turns")
ggsave(last_plot(),file=paste0(prefix,"-drive-bar-turns.pdf"))
## directions exposure
map + geom_point(data=dat,aes(x=longitude,y=latitude,color=phi.ccat),size=2) +
scale_colour_brewer(name="Driving\nDirection",palette="Set1") +
theme(legend.position=c(1,0),legend.justification=c(1,0)) +
ggtitle("Map Directions Exposure")
ggsave(last_plot(),file=paste0(prefix,"-drive-map-directions-exposure.pdf"))
ggplot(dat) + geom_bar(aes(x=phi.ccat,weight=distance_interval),binwith=5) +
xlab("Direction") + ylab("Distance [m]") +
ggtitle("Exposure")
ggsave(last_plot(),file=paste0(prefix,"-drive-bar-exposure.pdf"))
ggplot(dat,aes(x=course,weight=distance_interval)) +
ggtitle("Exposure Vectorgram") +
xlab("") + ylab("") +
geom_bar(binwidth=360/16) +
scale_x_continuous() +
coord_polar() +
theme(legend.position="none")
ggsave(last_plot(),file=paste0(prefix,"-drive-vectorgram-exposure.pdf"))
ggplot(dat,aes(x=0, y=0, color=phi.ccat)) +
ggtitle("Driving Vectorgram") +
xlab("") + ylab("") +
scale_colour_brewer(name="Driving\nDirection",palette="Set1") +
coord_cartesian(xlim= c(-xbound,xbound), ylim=c(-ybound,ybound)) +
theme(legend.position=c(1,0),legend.justification=c(1,0)) +
geom_segment(aes(xend=latitude.delta,yend=longitude.delta), arrow = arrow(length=unit(0.1,"cm") ))
ggsave(last_plot(),file=paste0(prefix,"-drive-vectorgram-directions.pdf"))
## speed
map + geom_point(data=dat,aes(x=longitude,y=latitude,color=speed),size=2) +
scale_colour_gradient(name="Speed (km/h)",low="blue",high="red") +
theme(legend.position=c(1,0),legend.justification=c(1,0)) +
ggtitle("Map Speed")
ggsave(last_plot(),file=paste0(prefix,"-drive-map-velocity.pdf"))
ggplot(dat) + geom_bar(aes(x=speed,weight=distance_interval),binwith=5) +
xlab("Speed [km/h]") + ylab("Distance [m]") +
ggtitle("Speed Profile")
ggsave(last_plot(),file=paste0(prefix,"-drive-bar-speed.pdf"))
## acceleration
ggplot(dat,aes(x=t, y=0)) +
ggtitle("Acceleration Vectorgram") +
xlab("Time [s]") + ylab("Acceleration") +
geom_segment(aes(xend=t,yend=acceleration), arrow = arrow(length=unit(0.1,"cm") ))
ggsave(last_plot(),file=paste0(prefix,"-drive-vectorgram-acceleration.pdf"))
ggplot(dat) + geom_bar(aes(x=acceleration),binwidth=1) +
xlab("Acceleration [km/h^2]") + ylab("Count") +
ggtitle("Acceleration Profile")
ggsave(last_plot(),file=paste0(prefix,"-drive-bar-acceleration.pdf"))
map + geom_point(data=dat,aes(x=longitude,y=latitude,color=acceleration),size=2) +
scale_colour_gradient(name="Velocity",low="blue",high="red") +
theme(legend.position="none") +
ggtitle("Map Acceleration")
ggsave(last_plot(),file=paste0(prefix,"-drive-map-acceleration.pdf"))
### Altitude
ggplot(dat) + geom_point(aes(x=t,y=altitude),color="black",size=1.5) +
ggtitle("Profile") + xlab("Time") + ylab("Level [m]") +
theme(legend.position="none")
ggsave(last_plot(),file=paste0(prefix,"-drive-profile-altitude.pdf"))
map + geom_point(data=dat,aes(x=longitude,y=latitude,color=altitude),size=2) +
scale_colour_gradient(name="Altitude",low="blue",high="red") +
theme(legend.position=c(1,0),legend.justification=c(1,0)) +
ggtitle("Map Altitude Profile")
ggsave(last_plot(),file=paste0(prefix,"-drive-map-altitude.pdf"))
ggplot(dat,aes(x=t, y=0)) +
ggtitle("Altitude Vectorgram") +
xlab("Time [s]") + ylab("Altitude change [m]") +
geom_segment(aes(xend=t,yend=altitude.delta/t.delta), arrow = arrow(length=unit(0.1,"cm") ))
ggsave(last_plot(),file=paste0(prefix,"-drive-vectorgram-altitude.pdf"))
## lighting exposures
map + geom_point(data=dat,aes(x=longitude,y=latitude,color=insun),size=2) +
scale_colour_brewer("Driving",labels=c("FALSE"="Ignored", "TRUE"="Into sun"),palette="Set1") +
theme(legend.position=c(1,0),legend.justification=c(1,0)) +
ggtitle("Map Scenario: Driving into the Sun")
ggsave(last_plot(),file=paste0(prefix,"-drive-map-into-sun.pdf"))
ggplot(dat) + geom_bar(aes(x=factor(dusk),weight=distance_interval/1000),binwith=5) +
scale_x_discrete("",labels=c("FALSE"="No dusk", "TRUE"="Dusk")) +
ylab("Distance [km]") +
ggtitle("Explosure: Dusk")
ggsave(last_plot(),file=paste0(prefix,"-drive-bar-dusk.pdf"))
ggplot(dat) + geom_bar(aes(x=factor(dawn),weight=distance_interval/1000),binwith=5) +
scale_x_discrete("",labels=c("FALSE"="No dawn", "TRUE"="Dawn")) +
ylab("Distance [km]") +
ggtitle("Explosure: Dawn")
ggsave(last_plot(),file=paste0(prefix,"-drive-bar-dawn.pdf"))
ggplot(dat) + geom_bar(aes(x=day,weight=distance_interval/1000),binwith=5) +
scale_x_discrete("",labels=c("FALSE"="Night time", "TRUE"="Day time")) +
xlab("Direction") + ylab("Distance [km]") +
ggtitle("Explosure: Day/Night")
ggsave(last_plot(),file=paste0(prefix,"-drive-bar-day-night.pdf"))
ggplot(dat) + geom_bar(aes(x=insun,weight=distance_interval/1000),binwith=5) +
scale_x_discrete("",labels=c("FALSE"="Sun irrelevant", "TRUE"="Into sun")) +
xlab("Direction") + ylab("Distance [km]") +
ggtitle("Explosure: Dusk")
ggsave(last_plot(),file=paste0(prefix,"-drive-bar-in-sun.pdf"))
NULL
}
#### Summary statistics
files
# Load every ride, run the full processing pipeline on each, and stack the
# results into one data.table for cross-ride statistics.
dat <- lapply(files,function(fin) fread(paste0("augusto-bike-rides/",fin))[,c("time", "latitude", "longitude", "altitude", "speed", "pace", "course", "slope", "distance", "distance_interval"),with=F] %>%
                prepareGPSData(3) %>%
                detectDrivingDirection(3) %>%
                detectDrivingScenarios() %>%
                detectTurns(7.5,2) )
dat <- rbind.fill(dat) %>% data.table()
# Smooth the raw GPS track with a SMOOTH_SECONDS-wide moving average and
# derive per-sample quantities: coordinate/altitude deltas, heading-change
# angle, timestamps and acceleration.  `dat` is a data.table; most columns
# are added by reference and the (row-filtered) table is also returned.
prepareGPSData <- function(dat,SMOOTH_SECONDS) {
  dat[,cnt:=1:nrow(dat)]
  # Signed angle (degrees) between two consecutive movement vectors;
  # values beyond the atan2 wrap-around limit are discarded as NA.
  angle2 <- function(x1,y1,x2,y2){
    ## right turns are be positive values
    ang <- (atan2(y2,x2) - atan2(y1,x1))/pi*180
    ang[ abs(ang) > 280 ] <- NA ## limit of atan; after 270 degree, you're still moving forward
    return(ang)
  }
  # Moving-average filter kernel of width SMOOTH_SECONDS.
  f <- rep(1/SMOOTH_SECONDS,SMOOTH_SECONDS)
  dat[,latitude := stats::filter(latitude,f) %>% as.numeric()]
  dat[,longitude := stats::filter(longitude,f) %>% as.numeric()]
  dat[,altitude := stats::filter(altitude,f) %>% as.numeric()]
  # Per-sample differences (components of the movement vector).
  dat[,latitude.delta := c(diff(latitude),NA)]
  dat[,longitude.delta := c(diff(longitude),NA)]
  dat[,altitude.delta := c(diff(altitude),NA)]
  # NOTE(review): this uses the absolute coordinates, not the deltas, so it
  # is the point's distance from (0,0) rather than a speed -- confirm
  # whether latitude.delta/longitude.delta were intended here.
  dat[, velocity:=sqrt(latitude*latitude+longitude*longitude)]
  dat[, velocity.delta:=c(diff(velocity),NA)]
  dat <- dat[complete.cases(dat)]
  dat[, angle:=angle2(latitude.delta, longitude.delta, shift(latitude.delta), shift(longitude.delta))]
  # Parse timestamps; the 21600 s (6 h) shift is presumably a fixed
  # timezone correction -- confirm against the recording device.
  dat[,t:=data.frame(strptime(time,format="%Y-%m-%d %H:%M:%S")-21600)] ## TODO: no clue why a dataframe is needed ## TODO: smarter time conversion
  dat[,t.delta:=c(0,diff(t))]
  dat[,acceleration:=c(0,diff(speed))/t.delta]
  return(dat)
}
# Flag time-of-day / lighting scenarios for every sample.  The hour-of-day
# windows are hard-coded heuristics on the parsed timestamp `t`
# (e.g. dusk = 17:30-20:00); `insun` combines the sunrise/sunset windows
# with the eastward/westward direction categories from
# detectDrivingDirection().
detectDrivingScenarios <- function(dat) {
  ## find interesting scenarios
  dat[,dusk:= (findInterval(as.numeric(format(t, "%H")) + as.numeric(format(t, "%M"))/60, c(17.5,20)) == 1)]
  dat[,dawn:= findInterval(as.numeric(format(t, "%H")) + as.numeric(format(t, "%M"))/60, c(6.5,8)) == 1]
  dat[,sunrise:= findInterval(as.numeric(format(t, "%H")) + as.numeric(format(t, "%M"))/60, c(7.5,10)) == 1]
  dat[,sunset:= findInterval(as.numeric(format(t, "%H")) + as.numeric(format(t, "%M"))/60, c(15.5,18)) == 1]
  # Riding into the sun: heading east around sunrise or west around sunset.
  dat[,insun:= (sunrise & phi.ccat=="E") | (sunset & phi.ccat=="W")]
  dat[,day:= findInterval(as.numeric(format(t, "%H")) + as.numeric(format(t, "%M"))/60, c(6.5,18.5)) == 1]
  return(dat)
}
# Categorise each sample into one of 8 compass driving directions and clean
# out short transient runs.  Adds `phi` (absolute direction angle), `phi.cat`
# (raw category) and `phi.ccat` (cleaned, labelled factor); runs shorter
# than THRESH_MIN_DIRECTION_DRIVE_SAMPLES are dropped iteratively.
detectDrivingDirection <- function(dat, THRESH_MIN_DIRECTION_DRIVE_SAMPLES) {
  ## compute the absolute direction angle of each driving vector
  dat[,phi:=(atan2(longitude.delta,latitude.delta)*180/pi)]
  ## categorize into 8 driving directions
  DRIVING_DIRECTIONS <- 8
  dat[,phi.cat:=(floor(((dat$phi+45/2) / (360/DRIVING_DIRECTIONS))))] ## shift by 45 degree to have proper N NE E SE S SW W NW categories
  #dat[, phi.cat:= ((dat$course) / (360/DRIVING_DIRECTIONS)) ]
  ##dat[phi.cat==-4,phi.cat:=4]
  ## clean sparse trains by iteratively deleting short trains, so transient faults do not cut large trains
  # Repeatedly deletes the shortest run of identical categories until every
  # remaining run is at least THRESH_SPARSE_TRAIN samples long.
  mergeSparseCategories <- function(data, THRESH_SPARSE_TRAIN) {
    repeat {
      data[, rle1:=cumsum(c(0,diff(phi.cat))!=0) ]
      ## plot(data$rle1)
      data[, rle2:=.N,by=rle1]
      ## plot(data$rle2)
      if (min (data$rle2) >= THRESH_SPARSE_TRAIN) break;
      data <- data[rle2 != min(data$rle2)]
    }
    return(data[,c("cnt","phi.cat"),with=F])
  }
  dat.tmp <- mergeSparseCategories(dat, THRESH_MIN_DIRECTION_DRIVE_SAMPLES)
  setnames(dat.tmp,"phi.cat","phi.ccat")
  # Join cleaned categories back by sample counter; samples removed during
  # cleaning end up NA and are dropped below.
  dat <- merge(x=dat, y=dat.tmp, by="cnt", all.x=TRUE)
  dat$phi.ccat <- factor(dat$phi.ccat)
  # levels(dat$phi.ccat) <- list("SW"=-3,"S"=-2,"SE"=-1,"E"=0,"NE"=1,"N"=2,"NW"=3,"W"=4)
  levels(dat$phi.ccat) <- list("SW"=-3,"W" =-2,"NW"=-1,"N"=0,"NE"=1,"E"=2,"SE"=3,"S" =4) ## Augusto's GPS
  dat <- dat[!is.na(phi.ccat)]
  dat[,cnt:=1:.N]
  return(dat)
}
# Classify each sample as a left turn (1), right turn (-1) or straight (0)
# from the heading-change `angle`, then remove turn runs shorter than
# THRES_MIN_CURVE_SAMPLES with the same iterative run-cleaning scheme as
# detectDrivingDirection().
detectTurns <- function(dat, THRESH_CURVE_ANGLE, THRES_MIN_CURVE_SAMPLES) {
  ## clean up angles
  dat[is.na(angle),angle:=0]
  dat[,turn:=ifelse(angle>THRESH_CURVE_ANGLE,1,0)]
  dat[angle < -THRESH_CURVE_ANGLE, turn:=-1]
  # Repeatedly deletes the shortest run of identical turn values until
  # every remaining run is at least THRES_SPARSE_TRAIN samples long.
  mergeSparseCategories <- function(data, THRES_SPARSE_TRAIN) {
    repeat {
      data[, rle1:=cumsum(c(0,diff(turn))!=0) ]
      ## plot(data$rle1)
      data[, rle2:=.N,by=rle1]
      ## plot(data$rle2)
      if (min (data$rle2) >= THRES_SPARSE_TRAIN) break;
      data <- data[rle2 != min(data$rle2)]
    }
    return(data[,c("cnt","turn"),with=F])
  }
  dat.tmp <- mergeSparseCategories(dat, THRES_MIN_CURVE_SAMPLES)
  # Replace the raw turn column with the cleaned one, joined by counter.
  dat[,turn:=NULL]
  dat <- merge(x=dat, y=dat.tmp, by="cnt", all.x=TRUE)
  return (dat)
}
################################################################################
################################################################################
################################################################################
# Per-file track plots of another dataset.
# NOTE(review): `dat.cp` is never defined in this file -- this loop relies
# on an object left in the workspace from elsewhere.
for(i in 1:length(files)) {
  dat <- copy(dat.cp[dat.cp$file==files[i]])
  # Coordinates carry a trailing hemisphere character; strip it and parse.
  dat <- data.table(latitude=substr(dat$gps_latitude, 1, nchar(dat$gps_latitude)-1) %>% as.numeric(),
                    longitude=substr(dat$gps_longitude, 1, nchar(dat$gps_longitude)-1) %>% as.numeric(),
                    altitude=dat$gps_altitude)
  ## clean the data
  dat <- dat[complete.cases(dat)]
  dat <- dat[dat$latitude > 10]
  dat <- dat[dat$longitude > 10]
  p1 <- ggplot(dat,aes(x=longitude, y=latitude, color = altitude)) + geom_path() + geom_point() + ggtitle(files[i])
  try(ggsave(p1, file=paste0("plot-file-",i,".pdf")))
}
dat
# Interactive sliding-window walk along the track (press Enter to step).
# NOTE(review): ggtitle(files[i]) indexes `files` with the window counter i,
# which quickly runs past the end of `files` -- probably unintended.
for(i in seq(500,nrow(dat),60) ) {
  p1 <- ggplot(dat[(i-500):i] ,aes(x=-longitude, y=latitude, color = altitude)) + geom_path() + geom_point() + ggtitle(files[i])
  print(p1)
  readline()
}
1e39f99926e2ea6f2622638a832ce5a680263646 | 4fe1bb1ce1bc5d082585db2e47399f4e73434a46 | /R/isSNV.R | 71d19610d4b99af45340e70f96474e67bb7759fd | [
"MIT"
] | permissive | seandavi/MutationTools | 41da16852703a78411c10b3e06fb8c7de2ae6515 | c130cb9e80872acb45f88e4085af110847d204cf | refs/heads/master | 2016-09-10T20:03:54.992224 | 2014-03-28T14:23:32 | 2014-03-28T14:23:32 | 17,545,165 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,198 | r | isSNV.R | #' Determine if variants in a VCF object are SNPs
#'
#' Returns TRUE for variants that are SNVs and FALSE otherwise.
#' For variants with multiple ALT alleles, only the FIRST is used.
#'
#' @param variants an object inheriting from the \code{\link{VCF}} or \code{\link{VRanges}} classes
#' @return logical vector with the same length as \code{variants}
#' @keywords character
#' @seealso \code{\link{VCF-class}}
#' @export
#' @examples
#' library(VariantAnnotation)
#' fl <- system.file("extdata", "chr22.vcf.gz", package="VariantAnnotation")
#' param <- ScanVcfParam(fixed="ALT", geno=c("GT", "GL"), info=c("LDAF"))
#' vcf = readVcf(fl,"hg19",param=param)
#' table(isSNV(vcf))
isSNV <- function(variants) {
    # Extract REF and the (first) ALT allele as character vectors; the two
    # supported classes only differ in how ALT is pulled out, so the final
    # length test is shared instead of duplicated per branch.
    if(inherits(variants,'VCF')) {
        refall = as.character(ref(variants))
        # alt() is one list element per variant; keep each variant's first ALT.
        altall = as.character(unlist(alt(variants))[start(PartitioningByEnd(alt(variants)))])
    } else if(inherits(variants,'VRanges')) {
        refall = as.character(ref(variants))
        altall = as.character(alt(variants))
    } else {
        stop('parameter variants must be a VRanges or VCF object')
    }
    # An SNV substitutes exactly one base for exactly one base.
    return((nchar(refall)==1) & (nchar(altall)==1))
}
|
2f285e330565f42d6ec84f381835c1b971e75f36 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/2314_0/rinput.R | bbf0ab62a090e48644dbf11148aa5b010b3b9289 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree, remove its root, and write the unrooted tree out.
testtree <- read.tree("2314_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2314_0_unrooted.txt")
0bc1defa6b75fdd8b7a87d89eb11d6eb4d063c52 | e0abbbf66e9e8f22b0237265762db65cad3dd40b | /test_retrievals.R | 84fb2d8aeb96d530dbc69c81f3796b63120a139b | [] | no_license | jylhaisi/post-processing-mos-point-analysis | 84f4860810d47c74e185f12bc7788778ec458b63 | f949615fa82ee516b3724257ac3b5c83b221352a | refs/heads/master | 2021-06-07T20:41:41.489122 | 2020-12-16T12:02:34 | 2020-12-16T12:02:34 | 100,267,033 | 0 | 0 | null | 2018-04-06T07:16:16 | 2017-08-14T12:49:54 | R | UTF-8 | R | false | false | 4,164 | r | test_retrievals.R | # This script retrieves foreign station data from CLDB and prints out max precipitation values in the woule observation time period
# Batch script: retrieves QC'd station observations (TA) from CLDB for a set
# of stations and prints/plots the maximum temperature found per station.
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable for a
# standalone batch run, but remove it if this file is ever source()'d.
rm(list=ls())
# Reading in the required packages, mapping lists and functions
# (define_time_series, choose_variables, retrieve_data_all, all_station_lists,
#  subs, ... all come from this sourced file -- not defined here)
source("load_libraries_tables_and_open_connections.R")
# User-defined variables
# NOTE(review): this 3-hourly timestamps_series is overwritten below (L ~20
# lines down) with a 1-hourly series before any retrieval takes place.
timestamps_series <- define_time_series(begin_date=as.POSIXct("2011-12-01 00:00:00 GMT",tz="GMT"),end_date=with_tz(round.POSIXt(Sys.time(),"hours"),tz="GMT"),interval_in_hours=3,interval_in_seconds=NA,even_hours=TRUE) # define_time_series(begin_date=as.POSIXct("2011-12-01 00:00:00 GMT",tz="GMT"),end_date=with_tz(round.POSIXt(Sys.time()+864000,"hours"),tz="GMT"),interval_in_hours=3,interval_in_seconds=NA,even_hours=TRUE)
modelobspairs_minimum_sample_size <- 100 # Arbitrary number here, could in principle also depend on the number of predictor variables
date_string <- format(Sys.time(), "%d%m%y")
mos_label <- paste0("MOS_ECMWF_020320") #paste0("MOS_ECMWF_",date_string) #
predictor_set <- "only_bestvars3" #"only_bestvars2_no_climatology_ensmean" #"NA" #"allmodelvars_1prec_noBAD_RH2"
derived_variables <- NA # c("Z_ORO","Z_850") #c("RH_SURF","Z_850","GH_850") # NA # c("DECLINATION")
station_list <- "mos_stations_homogeneous_Europe" # Possible pre-defined station lists are those names in all_station_lists. If you want to use an arbitrary station list, assign the station numbers manually to variable station_numbers
station_numbers <- eval(subs(all_station_lists[[station_list]])) # c(1406,2978) # Retrievals are generated and data is returned based on station wmon-numbers. If using a station list outside mos station list, define the wmon-numbers here.
obs_interpolation_method <- "spline_interp" # options repeat_previous (na.locf),linear_interp (na.approx),spline_interp (na.spline),no_interp (leave NA values to timeseries as they are). Continuous observations are interpolated, those which not are sublists in all_variable_lists
max_interpolate_gap <- 6 # This indicates the maximum time in hours to which observation interpolation is applied
verif_stationtype <- "normal" # In verif db, several stationgroups exist. "normal" assumes stations (2700 <= wmon <= 3000) belonging to stationgroup=1, and all other to stationgroup=9 (other stationgroups outside stationgroup=3 only have a small number of stations to them). Road weather station support needs to be coded later (this needs a road weather station list), currently this can be done manually by putting the stationgroup of interest here manually (e.g. ==3)
output_dir <- paste0("/data/statcal/results/MOS_coefficients/in_progress/",mos_label,"/") # output_dir check is done in the beginning of the function MOS_training
max_variables <- 10
fitting_algorithm <- "GlmnR1"
fitting_method <- "purrr" # only purrr method is maintained
# Replaces the earlier 3-hourly series with a 1-hourly one (this is the series
# actually used by the retrievals below).
timestamps_series <- define_time_series(begin_date=as.POSIXct("2011-12-01 00:00:00 GMT",tz="GMT"),end_date=with_tz(round.POSIXt(Sys.time(),"hours"),tz="GMT"),interval_in_hours=1,interval_in_seconds=NA,even_hours=TRUE) # define_time_series(begin_date=as.POSIXct("2011-12-01 00:00:00 GMT",tz="GMT"),end_date=with_tz(round.POSIXt(Sys.time()+864000,"hours"),tz="GMT"),interval_in_hours=3,interval_in_seconds=NA,even_hours=TRUE)
variable_list_retrieved <- rbind(choose_variables(c("TA"),"weather_data_qc","CLDB")) # rbind(choose_variables(c("PR_6H"),"weather_data_qc","CLDB"))
# station_numbers <- station_numbers[station_numbers >= 7000 & station_numbers <= 10000]
# temp2 accumulates the per-station maxima (starts with a leading NA).
temp2 <- NA
# Restrict to the wmon-number range of interest (presumably a regional subset
# -- TODO confirm which region this range corresponds to).
station_numbers <- station_numbers[(station_numbers>60350 & station_numbers<60800)]
# Retrieve and report each station separately.
for (station_list_retrieved in station_numbers) {
function_arguments <- list(variable_list_retrieved,station_list_retrieved,timestamps_series)
retrieved_data <- do.call(retrieve_data_all,function_arguments)
temp1 <- retrieved_data$CLDB$weather_data_qc
# Precedence note: !length(temp1)==FALSE parses as (!length(temp1)) == FALSE,
# i.e. TRUE exactly when temp1 is non-empty (length > 0).
if (!length(temp1)==FALSE) {
# Only report stations with a reasonably long record (> 1000 rows).
if (dim(temp1)[1]>1000) {
temp1 <- temp1[(temp1$parameter == "TA"),]
print(paste0("max TA value in time series at station wmon",station_list_retrieved," is ",max(temp1$value,na.rm=TRUE)," Kelvins!"))
temp2 <- c(temp2,max(temp1$value,na.rm=TRUE))
plot(temp1$value)
}
}
rm(temp1)
}
|
b81ad9b0b2e701f66b7df457384ee2ead7c32b71 | 9f1f507043d4bd08deb5c0c4f887b38fe22cfa5f | /dataGenerationScripts/generateBaseDataCenter.R | 69151e8a62059952d126b702bfbdda3f25e9d3bb | [] | no_license | maximustann/PhDProject | d3371199ae53a9b0dff2e5eeea624e56a03c8e41 | cf09e31021780d505e79831d2806f0f6e4c368e2 | refs/heads/master | 2023-08-30T15:14:51.673989 | 2021-11-12T07:01:41 | 2021-11-12T07:01:41 | 298,183,027 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,403 | r | generateBaseDataCenter.R | cat('Usage: generateBaseDataCenter(testCaseSize, OSNum, testCase)\n')
# Generate one "base data centre" test case: a random set of PMs, the VMs
# packed onto each PM, an OS assignment per VM, and the containers packed
# into each VM (sampled from a workload trace CSV). Results are written to
# ./InitEnv/container<testCaseSize>/testCase<testCase>/ as pm/vm/os/container
# CSV files.
#   testCaseSize - target container-count bucket (80 / 200 / 500 / other=1000)
#   OSNum        - number of distinct operating systems to draw from (1..5)
#   testCase     - test-case index; combined with testCaseSize as RNG seed
generateBaseDataCenter <- function(testCaseSize, OSNum, testCase){
set.seed(testCaseSize + testCase)
# NOTE(review): generateArtificialData is defined but never called inside
# this function, and it references writeTask, whichDataSet and filename,
# none of which are defined in this file -- confirm before relying on it.
generateArtificialData <- function(datasetName, whichVMsize, vmCpuLimit, vmMemLimit, size, testCase){
# Draw `size` values (with replacement) from the non-negative entries of the
# trace file.
selectFromData <- function(datasetName, size){
data <- unlist(read.table(datasetName, header = F))
data <- data[!data < 0]
myData <- sample(unlist(data), size, replace = T)
myData
}
taskM <- ceiling(rexp(size, 0.001))
taskTh <- unlist(selectFromData(datasetName, size))
taskA <- ceiling(rexp(size, 0.001))
# repair the generated dataset
# (re-draw any task whose throughput*amount would exceed the VM CPU/mem caps)
for(i in seq_along(taskA)){
if(taskTh[i] * taskA[i] > vmCpuLimit){
repeat {
taskA[i] <- ceiling(rexp(1, 0.001))
if(taskTh[i] * taskA[i] <= vmCpuLimit)
break;
}
}
}
for(i in seq_along(taskM)){
if(taskTh[i] * taskM[i] > vmMemLimit){
repeat {
taskM[i] <- ceiling(rexp(1, 0.001))
if(taskTh[i] * taskM[i] <= vmMemLimit)
break;
}
}
}
#testCaseData <- cbind(taskA, taskM, osTypes)
testCaseData <- cbind(taskA, taskM)
writeTask(whichDataSet, testCaseData, whichVMsize, testCase, filename)
print('generate articial data')
} # End of generateArtificialData
# Randomly pack VM types onto one PM. Each iteration has a 50% chance of
# stopping (once at least one VM is placed); otherwise it samples up to three
# candidate VM types and keeps the first one that still fits the remaining
# PM capacity. Returns a vector of VM type indices.
generateVMtype <- function(vmCPU, vmMEM, pmCPU, pmMem, OSNum){
myPMCpu <- pmCPU
myPMMem <- pmMem
vm <- vector()
# if there is only one vm, just assign the largest
if(OSNum == 1){
vm <- 5
return(vm)
}
repeat {
type <- 0
# generate a random number
# if it is smaller than 0.5 and the VM list is not empty, break from the generation
if(runif(1) < 0.5 && length(vm) != 0){
break
}
# try three times to find a suitable vm
for(i in seq(1, 3)){
type <- sample(length(vmCPU), 1)
if(myPMCpu - vmCPU[type] >= 0 && myPMMem - vmMEM[type] >= 0){
# update myPM resources and break out
myPMCpu <- myPMCpu - vmCPU[type]
myPMMem <- myPMMem - vmMEM[type]
vm <- c(vm, type)
break
}
}
# if it tries three time still not working, break out
if(type == 0) break
}
#cat("finished one pm", '\n')
#cat('\n')
# return the vector of vm
vm
}
# Assign an OS type to every VM in `vm`. OSProb holds the *cumulative*
# probability breakpoints for OSNum operating systems (length OSNum - 1),
# so the branch is selected on length(OSProb), not on OSNum directly.
generateVMOS <- function(vm, OSProb){
size <- length(vm)
#cat("size = ", size, '\n')
osType <- vector()
# Three OSs
if(length(OSProb) == 2){
for(i in seq(1, size)){
type <- 0
r <- runif(1)
if(r < OSProb[1]) {type <- 1}
else if(r < OSProb[2] && r >= OSProb[1]) {type <- 2}
else { type <- 3}
osType <- c(osType, type)
}
# Five OSs
} else if(length(OSProb) == 4){
for(i in seq(1, size)){
type <- 0
r <- runif(1)
if(r < OSProb[1]) { type <- 1 }
else if (r < OSProb[2] && r >= OSProb[1]) {type <- 2}
else if (r < OSProb[3] && r >= OSProb[2]) {type <- 3}
else if (r < OSProb[4] && r >= OSProb[3]) {type <- 4}
else {type <- 5}
osType <- c(osType, type)
}
# Two OSs
} else if(length(OSProb) == 1){
for(i in seq(1, size)){
type <- 0
r <- runif(1)
if(r < OSProb[1]) type <- 1
else type <- 2
osType <- c(osType, type)
}
# Four OSs
} else if(length(OSProb) == 3){
for(i in seq(1, size)){
type <- 0
r <- runif(1)
if(r < OSProb[1]) { type <- 1 }
else if (r < OSProb[2] && r >= OSProb[1]) {type <- 2}
else if (r < OSProb[3] && r >= OSProb[2]) {type <- 3}
else {type <- 4}
osType <- c(osType, type)
}
# One Os
} else {
for(i in seq(1, size)){
osType <- c(osType, 1)
}
}
osType
}
# Randomly pack containers (rows sampled from the trace `data`: col 1 = CPU,
# col 2 = memory) into one VM, after subtracting a fixed VM overhead
# (10% CPU, 200 memory units). Same stop-or-try-three-times scheme as
# generateVMtype. Returns a 2-column matrix (containerCpu, containerMem).
generateContainer <- function(data, vmCPU, vmMEM){
myCpu <- vmCPU
myMEM <- vmMEM
vmCPUOverHeadRate <- 0.1
vmMemOverhead <- 200
# subtract the overheads
myCpu <- myCpu - myCpu * vmCPUOverHeadRate
myMEM <- myMEM - vmMemOverhead
containerCpu <- vector()
containerMem <- vector()
#data <- read.csv(datasetPath, header = F, sep = ',')
repeat {
type <- 0
# generate a random number
# if it is smaller than 0.5 and the container list is not empty, then break from the generation
if(runif(1) < 0.5 && length(containerCpu) >= 1){
break
}
# try three times to find a suitable container
for(i in seq(1, 3)){
type <- 1
choose <- sample(1:nrow(data), 1)
choosedCPU <- data[choose, 1]
choosedMem <- data[choose, 2]
if(choosedCPU <= myCpu && choosedMem <= myMEM){
myCpu <- myCpu - choosedCPU
myMEM <- myMEM - choosedMem
containerCpu <- c(containerCpu, choosedCPU)
containerMem <- c(containerMem, choosedMem)
break
}
}
# this type is only used for checking
if(type == 0) break
}
# return a matrix
cbind(containerCpu, containerMem)
}
# Persist the generated topology: one CSV row per PM (its VM type indices),
# one per VM (its container indices), plus flat os.csv / container.csv files.
writeFile <- function(pmVM, vmContainers, vmOS, containerMatrix, testCaseSize, testCase){
base <- paste("./InitEnv/container", testCaseSize, "/testCase", testCase, "/", sep = '')
if(!file.exists(base)){
dir.create(base)
}
pmFile <- paste(base, "pm.csv", sep='')
vmFile <- paste(base, "vm.csv", sep='')
vmOSFile <- paste(base, "/os.csv", sep='')
containerFile <- paste(base, "container.csv", sep='')
#cat("vmContainers = ", vmContainers, '\n')
lapply(X = pmVM, FUN= function(x){
write(x, append = T, file = pmFile, ncolumns = length(x), sep=',')
})
lapply(X = vmContainers, FUN= function(x){
write(x, append = T, file = vmFile, ncolumns = length(x), sep=',')
})
write.table(vmOS, vmOSFile, row.names = F, col.names = F, sep = ',')
write.table(containerMatrix, containerFile, row.names = F, col.names = F, sep = ',')
print("Finish")
}
# Program starts from here
# we only use small settings for both PM and VM
PMCPU <- 13200
PMMEM <- 16000
#PMCPU <- 3300
#PMMEM <- 4000
#PMCPU <- 6600
#PMMEM <- 8000
#datasetPath
datasetPath <- './auvergrid.csv'
#datasetPath <- './bitbrains.csv'
# Read vm configuration from file
# NOTE(review): absolute, machine-specific path -- will only work on the
# original author's machine; consider making it a parameter.
vmConfig <- read.csv("/home/tanboxi/PH.D_project/data/baseConfig/VMConfig/LVMnePM/VMConfig_twenty.csv", header = F, sep=',')
vmCPU <- vmConfig[, 1]
vmMEM <- vmConfig[, 2]
# Read container configuration from file
dataset <- read.csv(datasetPath, header=F, sep=',')
# Cumulative probability breakpoints for 2..5 operating systems.
OS2Prob <- 0.95
OS3Prob <- c(0.5, 0.8)
OS4Prob <- c(0.625, 0.8, 0.95)
OS5Prob <- c(0.179, 0.633, 0.869, 0.974)
if(OSNum == 1) OSProb <- 1
else if(OSNum == 2) OSProb <- OS2Prob
else if(OSNum == 3) OSProb <- OS3Prob
else if(OSNum == 4) OSProb <- OS4Prob
else OSProb <- OS5Prob
pmSize <- vector()
# First we generate a number of PMs
if(testCaseSize == 80){
# select a random number from [1, 2]
pmSize <- sample(seq(1, 3), 1)
} else if(testCaseSize == 200){
# select a random number from [4, 8]
pmSize <- sample(seq(4, 8), 1)
} else if(testCaseSize == 500){
# select a random number from [8, 16]
pmSize <- sample(seq(8, 16), 1)
} else {
# 1000 containers
pmSize <- sample(seq(16, 32), 1)
}
pmVM <- list()
globalContaienrCount <- 1
globalVMCount <- 1
vmContainers <- list()
vmOS <- vector()
containerMatrix <- vector()
#cat('pmSize = ', pmSize, '\n')
# for each pm, generate its vm list
for(eachPM in seq(1, pmSize)){
#cat('eachPM = ', eachPM, '\n')
vms <- generateVMtype(vmCPU, vmMEM, PMCPU, PMMEM, OSNum)
#cat('vms = ', vms, '\n')
os <- generateVMOS(vms, OSProb)
#cat('os = ', os, '\n')
#print('generated VMs and OSs')
# for each vm, generate its container list
for(eachVM in seq(1, length(vms))){
#('prepare to generate containers')
containers <- generateContainer(dataset, vmCPU[vms[eachVM]], vmMEM[vms[eachVM]])
#print('generated containers')
# calculate how many container does this VM hold
num <- nrow(containers)
containerList <- seq(globalContaienrCount, num + globalContaienrCount - 1)
#cat("containerList = ", containerList, '\n')
globalContaienrCount <- globalContaienrCount + num
#print('collect the container pointers/indexes')
vmContainers[[globalVMCount]] <- containerList
#print('collect the container resources')
# NOTE(review): containerMatrix grows by rbind() inside a loop; fine at
# these sizes but O(n^2) in general.
containerMatrix <- rbind(containerMatrix, containers)
globalVMCount <- globalVMCount + 1
} # for ends
#print('prepare to collect data')
# collect the VM points
pmVM[[eachPM]] <- vms
vmOS <- c(vmOS, os)
#cat("pmVM = ", vms, "\n")
#print('collected the data')
}
#print("Ready to save to file")
# save to file
writeFile(pmVM, vmContainers, vmOS, containerMatrix, testCaseSize, testCase)
#print("Saved to files")
}
|
78dcd2c71ededf1a2cbd52340955f7699134f2e3 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /easyVerification/R/Ens2AFC.R | 3261f76d3b15ff3fa1998f12b0bdd0e680d92114 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,649 | r | Ens2AFC.R | # Ens2AFC.R Generalized Discrimination Score
#
# Copyright (C) 2016 MeteoSwiss
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#' @name Ens2AFC
#'
#' @title Generalized Discrimination Score
#'
#' @description Computes the generalized discrimination score for ensemble
#' forecasts after (Weigel and Mason, 2011).
#'
#' @param ens n x m matrix of n forecasts for m ensemble members
#' @param obs vector of n verifying observations
#' @param ... additional arguments not used in function (for compatibility)
#'
#' @details This function computes the generalized discrimination score for
#' ensemble forecasts with continuous observations as described in Weigel and
#' Mason (2011).
#'
#' @references Weigel, A.P., and S.J. Mason (2011). The Generalized
#' Discrimination Score for Ensemble Forecasts. Monthly Weather Review, 139(9),
#' 3069-3074. doi:10.1175/MWR-D-10-05069.1
#'
#' @examples
#' tm <- toymodel()
#' Ens2AFC(tm$fcst, tm$obs)
#'
#' @seealso \code{\link{veriApply}}
#'
#' @export
Ens2AFC <- function(ens, obs, ...) {
  # Rank the ensemble forecasts (pairwise comparison, C++ helper), then map
  # Kendall's tau with the observations onto [0, 1]: 2AFC = (tau + 1) / 2.
  ens_ranks <- rankEnsCpp(ens)
  tau <- cor(ens_ranks, obs, method = "kendall", use = "p")
  0.5 * (1 + tau)
}
#' @rdname Ens2AFC
#'
#' Rank ensemble forecasts by pairwise 2AFC comparison: for every pair of
#' forecasts, the one whose members tend to be larger (p.afc > 0.5) gains a
#' full rank point; exact ties (p.afc == 0.5) award half a point to each.
#' Returns a numeric vector of length nrow(ens) of (possibly tied) ranks.
rank.ensembles <- function(ens) {
  nens <- ncol(ens)
  n <- nrow(ens)
  ranks <- rep(1, n)
  # Guard: with fewer than two forecasts 2:n would be c(2, 1) and index out
  # of range; a single forecast trivially has rank 1.
  if (n < 2) {
    return(ranks)
  }
  for (i in 2:n) {
    for (j in seq_len(i - 1)) {
      # Rank the pooled members of the pair; rank.1 holds the ranks of
      # forecast i's members within the pooled sample.
      rank.1 <- rank(c(ens[i, ], ens[j, ]))[seq_len(nens)]
      # p.afc is the probability that a member of forecast i exceeds a
      # member of forecast j (Mann-Whitney statistic scaled to [0, 1]).
      p.afc <- (sum(rank.1) - nens * (nens + 1) / 2) / nens^2
      if (p.afc > 0.5) {
        ranks[i] <- ranks[i] + 1
      } else if (p.afc < 0.5) {
        ranks[j] <- ranks[j] + 1
      } else {
        ranks[i] <- ranks[i] + 0.5
        ranks[j] <- ranks[j] + 0.5
      }
    }
  }
  ranks
}
}
# rank.ens <- function(ens){
# nens <- ncol(ens)
# n <- nrow(ens)
# ens.list <- apply(ens, 1, list)
# U <- array(0.5, rep(n,2))
# U[,] <- (apply(cbind(ens[rep(1:n, n),], ens[rep(1:n, each=n),]), 1, function(x) sum(rank(x)[1:nens])) - nens*(nens + 1)/2)/nens**2
# ranks <- 0.5 + apply(sign(U - 0.5)/2 + 0.5, 2, sum)
# return(ranks)
# }
|
86bb3fe1e3b459c2c318b5b7271f5c3dec80d23b | 46bc9c9270977f361b52fe28a5041b34b962c248 | /app.R | 900cd33b60cfedb2029ed82b86418615eeae82fa | [] | no_license | margitaii/cs_shiny | d32b0ced1837487122dae732c2cf14f1c18fd1cc | f446c27ea356c968d957e5dfcb13023f8d34bc59 | refs/heads/master | 2022-04-14T07:54:09.212991 | 2019-01-22T07:52:33 | 2019-01-22T07:52:33 | 255,046,267 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,127 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# The app visualizes transaction data from
# Erste Personal Accounts API
#
# API info: https://www.ersteapihub.com/docs/apis/bank.csas/v3%2Fnetbanking
#
library(shiny)
library(httr)
library(data.table)
library(lubridate)
library(ggplot2)
library(scales)
library(DT)
library(xml2)
# Credentials and access points for the Erste API sandbox.
# Replace API_key with your own WEB-API key; the client id/secret below are
# the public sandbox values from the API documentation.
API_key <- "your-key"
cli_key <- "sandboxClientId"
cli_secret <- "sandboxClientSecret"
# OAuth token endpoint (sandbox identity provider).
authorize_url <- "https://webapi.ersteapihub.com/api/csas/sandbox/v1/sandbox-idp/token"
# Netbanking (AISP) API root used by all data requests.
access_url <- "https://webapi.ersteapihub.com/api/csas/sandbox/v3/netbanking"
# User defined function for extracting transaction list
# Fetch the transaction list of one account from the Erste AISP API.
#
# Args:
#   id        - account identifier (cz-iban) used in the URL path.
#   datestart - first booking date of the requested period.
#   dateend   - last booking date of the requested period.
#   headers   - named character vector of HTTP headers (WEB-API-key and
#               Authorization bearer token).
#
# Returns a data.table with one row per transaction:
#   ref (transaction id), amt (amount value), ccy (currency),
#   c_d (transactionType, credit/debit) and dat (bookingDate as POSIXct).
GET_trans <- function(id, datestart, dateend, headers) {
  # Build the request URL once (paste0 instead of paste(..., sep = "")).
  url <- paste0(access_url, "/my/accounts/", id,
                "/transactions?dateStart=", datestart,
                "&dateEnd=", dateend)
  resp <- GET(url, add_headers(.headers = headers))
  trans <- content(resp)$transactions
  data.table(
    ref = as.character(sapply(trans, function(x) x$id)),
    amt = sapply(trans, function(x) x$amount$value),
    ccy = sapply(trans, function(x) x$amount$currency),
    c_d = sapply(trans, function(x) x$transactionType),
    dat = ymd_hms(sapply(trans, function(x) x$bookingDate))
  )
}
# Lookup table mapping an aggregation level to the date-label format used on
# the plot's x axis (year/month/week/day share the granularity of the label).
datescale <- data.frame(
  scale   = c("year", "month", "week", "day"),
  dformat = c("%Y", "%Y / %m", "%Y / %m / %d", "%Y / %m / %d"),
  stringsAsFactors = FALSE
)
# Define UI
# UI definition: a sidebar with the login button and filter controls, and a
# main panel with the cash-flow bar plot and the transaction table.
ui <- fluidPage(
# Application title
titlePanel("Personal account API CS"),
# Sidebar: login action, account selector, date-range filter and the
# aggregation-level selector used by the bar plot.
sidebarLayout(
sidebarPanel(
p('This Shiny application demonstrates the Erste AISP API. It uses the Sandbox environment. For further details and API documentation please refer to '),
a(href='https://www.ersteapihub.com', 'https://www.ersteapihub.com'),
HTML('<br/><br/>'),
# Triggers the observeEvent in server() that authenticates via the API.
actionButton("login", label='Log in via API', icon=icon('refresh')),
HTML('<br/><br/>'),
# Populated with real account names after a successful login.
selectInput("choice", "Select an account:", list('')),
p('Here you can filter transacionts by date.'),
dateRangeInput("dateRange", "Select a period",
start = Sys.Date()-180, end = Sys.Date()),
p('You can aggregate your transaction cash-flows by years, months, weeks or days. It will be displayed on the figure panel.'),
selectInput("scale", "Select the level of aggregation",
list('year', 'month', 'week', 'day'), selected='month')
),
mainPanel(
plotOutput(outputId = 'barPlot'),
dataTableOutput(outputId = 'tbl')
)
)
)
# Define server logic required to visualize transaction data
# Server logic: on login, obtain an OAuth token, list the user's accounts,
# and wire up the reactive transaction retrieval, plot and table outputs.
server <- function(input, output, session) {
# Everything is set up inside the login handler so that the token, headers
# and account list exist before any output is rendered.
observeEvent(input$login, {
# Authorization >> get access token (sandbox uses a fixed "test-code").
auth <- POST(authorize_url,
body=list(
grant_type="authorization_code",
code="test-code",
client_id=cli_key,
client_secret=cli_secret
),
encode='form',
config=list(add_headers("Content-Type" = "application/x-www-form-urlencoded"))
)
token <- content(auth)$access_token
token <- paste('Bearer ',token,sep='')
# Credentials: headers sent with every subsequent API request.
headers <- c(API_key, token)
names(headers) <- c('WEB-API-key','Authorization')
# Get account list
acc <- GET(paste(access_url, '/my/accounts', sep=''),
add_headers(.headers = headers))
acc <- content(acc)$accounts
acc_list <- data.table(
id=sapply(acc, function(x){x$accountno$'cz-iban'}),
product=sapply(acc, function(x){x$productI18N}),
type=sapply(acc, function(x){x$type}),
subtype=sapply(acc, function(x){x$subtype})
)
# Human-readable label shown in the account selector.
acc_list$name <- paste(acc_list$type, acc_list$subtype, sep=' - ')
updateSelectInput(session=session, inputId = "choice", choices = acc_list$name)
# Reactive transaction list: re-fetched whenever the selected account or
# the date range changes.
trans <- reactive({
GET_trans(acc_list[acc_list$name==input$choice]$id,
input$dateRange[1],
input$dateRange[2],
headers)
})
# Bar plot of credit/debit cash flows aggregated to the chosen time scale.
output$barPlot <- renderPlot(if(input$choice != ""){
t_plot <- trans()
# Floor each booking date to the selected aggregation unit, then sum
# amounts per (credit/debit, period) bucket.
t_plot$dat_floor <- floor_date(t_plot$dat, input$scale)
trans_aggr <- t_plot[, .(amt=sum(amt)), by=.(c_d, dat_floor)]
gp <- ggplot(data=trans_aggr, aes(x=as.Date(dat_floor), y=amt, fill=c_d)) + geom_col(position='dodge', width = 10)
# Axis labels/breaks come from the datescale lookup table defined above.
gp <- gp + scale_x_date(labels = date_format(datescale[datescale$scale==input$scale,]$dformat),
breaks=date_breaks(input$scale),
limits= input$dateRange)
gp <- gp + theme(axis.text.x = element_text(angle=90))
gp <- gp + xlab('') + ylab('CZK')
gp
})
# Raw transaction table with user-friendly column names.
output$tbl <- DT::renderDataTable(if(input$choice != ""){
t <- trans()
names(t) <- c('Transaction ID','Amount','CCY','Credit/debit','Booking date')
t
})
})
}
# Run the application (blocks the R session while the app is live)
shinyApp(ui = ui, server = server)
|
a4f86da5427a92ef7195b7d49da96f82f540ebaf | f9795034a885336779ae2d9a0eece9224d2f7cb1 | /query/rQuery.R | 31a065eaf42a6a73ee6d13247e797c26ad463955 | [] | no_license | anssonliang/R-EmailingExcelReport | a7e972a5815c638f172a1a0538df390538099fe7 | 19882c050b161cd6cc5bd8e2aa590dab39380236 | refs/heads/master | 2021-01-20T01:12:24.744245 | 2017-04-24T12:25:24 | 2017-04-24T12:25:24 | 89,236,276 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,675 | r | rQuery.R | # replace patterns in SQL script
#substituteFun <- function(sql, st_date = Sys.Date() - 1, end_date = Sys.Date() - 0){ # change date heres
# Fill in the placeholders of a SQL template: the start/end dates are
# converted to seconds since `unixTime` and substituted via replace_date(),
# then the aggregation-level placeholders are filled via replace_level().
# The finished SQL is echoed to the console and returned.
substituteFun <- function(sql, st_date_input, end_date_input) {
  start_secs <- as.numeric(difftime(st_date_input, unixTime, units = "sec"))
  end_secs <- as.numeric(difftime(end_date_input, unixTime, units = "sec"))
  sql <- replace_date(sql, start_secs, end_secs)
  # NOTE(review): months()/days() are called with no arguments, so these are
  # presumably project helpers, not base R's months(x)/days -- confirm.
  sql <- replace_level(sql, months = months(), days = days())
  print(sql)
  sql
}
# run SQL queries and restructure query results
queryKQIs <- function(){
# nested query functions for each sql element
queryResult <- lapply(sql, function(sql) {getIqData(substituteFun(sql(), Sys.Date() - dateBack_Start, Sys.Date() - dateBack_End ))}) # date changed
# restructure the query result to fit excel template
df <- list()
df$KQI <- data.frame(matrix(NA, ncol = 1, nrow = 14))
## KQIs
# WEB
a <- colSums(queryResult$WEB)
df$KQI[1, 1] <- as.numeric( (a['FST_PAGE_ACK_NUM'] / a['FST_PAGE_REQ_NUM']*100))
df$KQI[2, 1] <- as.numeric( a['FST_PAGE_ACK_NUM'])
df$KQI[3, 1] <- as.numeric( a['FST_PAGE_REQ_NUM'])
df$KQI[4, 1] <- as.numeric( a['PAGE_SR_DELAY_MSEL'] / a['FST_PAGE_ACK_NUM'] )
df$KQI[5, 1] <- as.numeric( (a['PAGE_SUCCEED_TIMES'] / a['FST_PAGE_REQ_NUM']*100))
df$KQI[6, 1] <- as.numeric( a['PAGE_SUCCEED_TIMES'])
df$KQI[7, 1] <- as.numeric( a['FST_PAGE_REQ_NUM'])
df$KQI[8, 1] <- as.numeric( a['PAGE_DELAY_MSEL'] / a['FST_PAGE_ACK_NUM'])
# FACEBOOK
df$KQI[9, 1] <- as.numeric(queryResult$FACEBOOK[1, 1])
# INSTAGRAM
df$KQI[10, 1] <- as.numeric(queryResult$INSTAGRAM[1, 1])
# SNAPCHAT
df$KQI[11, 1] <- as.numeric(queryResult$SNAPCHAT[1, 1])
# YOUTUBE
df$KQI[12, 1] <- as.numeric(queryResult$YOUTUBE[1, 1])
# NETFLIX
df$KQI[13, 1] <- as.numeric(queryResult$NETFLIX[1, 1])
# YOUSEE
df$KQI[14, 1] <- as.numeric(queryResult$YOUSEE[1, 1])
# HBO
df$KQI[15, 1] <- as.numeric(queryResult$HBO[1, 1])
# VIAPLAY
df$KQI[16, 1] <- as.numeric(queryResult$VIAPLAY[1, 1])
# TV2
df$KQI[17, 1] <- as.numeric(queryResult$TV2[1, 1])
# DR
df$KQI[18, 1] <- as.numeric(queryResult$DR[1, 1])
# APPSTORE_FILEACCESS
df$KQI[19, 1] <- as.numeric(queryResult$APPSTORE_FILEACCESS[1, 1])
# GOOGLEPLAY_FILEACCESS
df$KQI[20, 1] <- as.numeric(queryResult$GOOGLEPLAY_FILEACCESS[1, 1])
# AMAZONS3_FILEACCESS
df$KQI[21, 1] <- as.numeric(queryResult$AMAZONS3_FILEACCESS[1, 1])
# WINDOWS_FILEACCESS
df$KQI[22, 1] <- as.numeric(queryResult$WINDOWS_FILEACCESS[1, 1])
# APPLEICLOUD_FILEACCESS
df$KQI[23, 1] <- as.numeric(queryResult$APPLEICLOUD_FILEACCESS[1, 1])
# Voice all
a <- colSums(queryResult$VOICE)
df$KQI[24, 1] <- as.numeric((a['MOALERTCOUNT'] / a['MOCCHIREDCOUNT']) * 100)
df$KQI[25, 1] <- as.numeric(a['MOALERTCOUNT'])
df$KQI[26, 1] <- as.numeric(a['MOCCHIREDCOUNT'])
df$KQI[27, 1] <- as.numeric((a['MTALERTCOUNT'] / a['MTSETUPCOUNT']) * 100)
df$KQI[28, 1] <- as.numeric(a['MTALERTCOUNT'])
df$KQI[29, 1] <- as.numeric(a['MTSETUPCOUNT'])
df$KQI[30, 1] <- as.numeric((a['CONACKRADIODROPCOUNT'] / a['CONACKCOUNT']) * 100)
df$KQI[31, 1] <- as.numeric(a['CONACKRADIODROPCOUNT'])
df$KQI[32, 1] <- as.numeric(a['CONACKCOUNT'])
df$KQI[33, 1] <- as.numeric((a['E2EALERTDELAY'] / a['CALLPROCEEDCOUNT'] ) /1000 )
# Retain 3G
df$KQI[34, 1] <- as.numeric(queryResult$Retain3G[1, 1])
# TRAFFIC 2G
df$KQI[35, 1] <- as.numeric(queryResult$TRAFFIC2G[1, 1])
# TRAFFIC 3G
df$KQI[36, 1] <- as.numeric(queryResult$TRAFFIC3G[1, 1])
# DNS
a <- colSums(queryResult$DNS)
df$KQI[37, 1] <- as.numeric( (a['MS_DNS_SUCCEED_TIMES'] / a['MS_DNS_REQ_TIMES']*100))
df$KQI[38, 1] <- as.numeric( a['MS_DNS_SUCCEED_TIMES'])
df$KQI[39, 1] <- as.numeric( a['MS_DNS_REQ_TIMES'])
# TCP
a <- colSums(queryResult$TCP)
df$KQI[40, 1] <- as.numeric( (a['TCPCONNSUCCCOUNT'] / a['TCPCONNCOUNT']*100))
df$KQI[41, 1] <- as.numeric( a['TCPCONNSUCCCOUNT'])
df$KQI[42, 1] <- as.numeric( a['TCPCONNCOUNT'])
# FAILURES
b <- queryResult$FAILURES
df$FAILURE <- data.frame(matrix(NA, ncol = 7, nrow = nrow(b)))
for(i in 1:nrow(b)) {
df$FAILURE[i, 1] <- as.character( b[i,'DATE_TIME'], format = "%Y-%m-%d")
df$FAILURE[i, 2] <- as.character( b[i,'HOST'])
df$FAILURE[i, 3] <- as.character( b[i,'CAUSE_CATEGORY'])
df$FAILURE[i, 4] <- as.character( b[i,'SCENARIO'])
df$FAILURE[i, 5] <- as.character( b[i,'PROTOCOL'])
df$FAILURE[i, 6] <- as.character( b[i,'FAILURE_CAUSE'])
df$FAILURE[i, 7] <- as.numeric( b[i,'FAILURE_TIMES'])
}
# TCP apps
c <- queryResult$TCP_APP
df$TCP_APP <- data.frame(matrix(NA, ncol = 15, nrow = 12))
df$TCP_APP_TRANS<- data.frame(matrix(NA, ncol = 1, nrow = 15*12))
for(i in 1:12) {
df$TCP_APP[i, 1] <- as.numeric( (c[i,'COUNTER_11'] / c[i,'COUNTER_10']*100))
df$TCP_APP[i, 2] <- as.numeric( (c[i,'COUNTER_7'] + c[i,'COUNTER_5'])/1024/1024/1024)
df$TCP_APP[i, 3] <- as.numeric( (c[i,'COUNTER_19'] / c[i,'COUNTER_18']*100))
df$TCP_APP[i, 4] <- as.numeric( (c[i,'COUNTER_14'] / c[i,'COUNTER_13']*100))
df$TCP_APP[i, 5] <- as.numeric( (c[i,'COUNTER_17'] / c[i,'COUNTER_6']*100))
df$TCP_APP[i, 6] <- as.numeric( (c[i,'COUNTER_12'] / c[i,'COUNTER_4']*100))
df$TCP_APP[i, 7] <- as.numeric( c[i,'COUNTER_15'] / c[i,'COUNTER_11'])
df$TCP_APP[i, 8] <- as.numeric( c[i,'COUNTER_2'] / c[i,'COUNTER_20'])
df$TCP_APP[i, 9] <- as.numeric( c[i,'COUNTER_1'] / c[i,'COUNTER_3'])
df$TCP_APP[i, 10] <- as.numeric( (c[i,'COUNTER_9'] / c[i,'COUNTER_18']*100))
df$TCP_APP[i, 11] <- as.numeric( (c[i,'COUNTER_8'] / c[i,'COUNTER_13']*100))
df$TCP_APP[i, 12] <- as.numeric( (c[i,'COUNTER_22'] / c[i,'COUNTER_18']*100))
df$TCP_APP[i, 13] <- as.numeric( (c[i,'COUNTER_21'] / c[i,'COUNTER_13']*100))
df$TCP_APP[i, 14] <- as.numeric( (c[i,'COUNTER_16'] / c[i,'COUNTER_11']*100))
df$TCP_APP[i, 15] <- as.numeric( (c[i,'COUNTER_15'] - c[i,'COUNTER_16']) / c[i,'COUNTER_11'])
}
# Transpose data frame of TCP apps
for(j in 1:15) {
df$TCP_APP_TRANS[1+(j-1)*12,1] <- df$TCP_APP[1,j]
df$TCP_APP_TRANS[2+(j-1)*12,1] <- df$TCP_APP[2,j]
df$TCP_APP_TRANS[3+(j-1)*12,1] <- df$TCP_APP[3,j]
df$TCP_APP_TRANS[4+(j-1)*12,1] <- df$TCP_APP[4,j]
df$TCP_APP_TRANS[5+(j-1)*12,1] <- df$TCP_APP[5,j]
df$TCP_APP_TRANS[6+(j-1)*12,1] <- df$TCP_APP[6,j]
df$TCP_APP_TRANS[7+(j-1)*12,1] <- df$TCP_APP[7,j]
df$TCP_APP_TRANS[8+(j-1)*12,1] <- df$TCP_APP[8,j]
df$TCP_APP_TRANS[9+(j-1)*12,1] <- df$TCP_APP[9,j]
df$TCP_APP_TRANS[10+(j-1)*12,1] <- df$TCP_APP[10,j]
df$TCP_APP_TRANS[11+(j-1)*12,1] <- df$TCP_APP[11,j]
df$TCP_APP_TRANS[12+(j-1)*12,1] <- df$TCP_APP[12,j]
}
## TCP demarcation
# TCP RAT interface
d <- queryResult$TCP_RAT
df$TCP_RAT <- data.frame(matrix(NA, ncol = 15, nrow = 3))
df$TCP_RAT_TRANS<- data.frame(matrix(NA, ncol = 1, nrow = 15*3))
for(i in 1:3) {
df$TCP_RAT[i, 1] <- as.numeric( (d[i,'COUNTER_11'] / d[i,'COUNTER_10']*100))
df$TCP_RAT[i, 2] <- as.numeric( (d[i,'COUNTER_7'] + d[i,'COUNTER_5'])/1024/1024/1024)
df$TCP_RAT[i, 3] <- as.numeric( (d[i,'COUNTER_19'] / d[i,'COUNTER_18']*100))
df$TCP_RAT[i, 4] <- as.numeric( (d[i,'COUNTER_14'] / d[i,'COUNTER_13']*100))
df$TCP_RAT[i, 5] <- as.numeric( (d[i,'COUNTER_17'] / d[i,'COUNTER_6']*100))
df$TCP_RAT[i, 6] <- as.numeric( (d[i,'COUNTER_12'] / d[i,'COUNTER_4']*100))
df$TCP_RAT[i, 7] <- as.numeric( d[i,'COUNTER_15'] / d[i,'COUNTER_11'])
df$TCP_RAT[i, 8] <- as.numeric( d[i,'COUNTER_2'] / d[i,'COUNTER_20'])
df$TCP_RAT[i, 9] <- as.numeric( d[i,'COUNTER_1'] / d[i,'COUNTER_3'])
df$TCP_RAT[i, 10] <- as.numeric( (d[i,'COUNTER_9'] / d[i,'COUNTER_18']*100))
df$TCP_RAT[i, 11] <- as.numeric( (d[i,'COUNTER_8'] / d[i,'COUNTER_13']*100))
df$TCP_RAT[i, 12] <- as.numeric( (d[i,'COUNTER_22'] / d[i,'COUNTER_18']*100))
df$TCP_RAT[i, 13] <- as.numeric( (d[i,'COUNTER_21'] / d[i,'COUNTER_13']*100))
df$TCP_RAT[i, 14] <- as.numeric( (d[i,'COUNTER_16'] / d[i,'COUNTER_11']*100))
df$TCP_RAT[i, 15] <- as.numeric( (d[i,'COUNTER_15'] - d[i,'COUNTER_16']) / d[i,'COUNTER_11'])
}
# Transpose data frame of TCP RAT
for(j in 1:15) {
df$TCP_RAT_TRANS[1+(j-1)*3,1] <- df$TCP_RAT[1,j]
df$TCP_RAT_TRANS[2+(j-1)*3,1] <- df$TCP_RAT[2,j]
df$TCP_RAT_TRANS[3+(j-1)*3,1] <- df$TCP_RAT[3,j]
}
# TCP BSC interface
e <- queryResult$TCP_BSC
df$TCP_BSC <- data.frame(matrix(NA, ncol = 15, nrow = 7))
df$TCP_BSC_TRANS<- data.frame(matrix(NA, ncol = 1, nrow = 15*7))
for(i in 1:7) {
df$TCP_BSC[i, 1] <- as.numeric( (e[i,'COUNTER_11'] / e[i,'COUNTER_10']*100))
df$TCP_BSC[i, 2] <- as.numeric( (e[i,'COUNTER_7'] + e[i,'COUNTER_5'])/1024/1024/1024)
df$TCP_BSC[i, 3] <- as.numeric( (e[i,'COUNTER_19'] / e[i,'COUNTER_18']*100))
df$TCP_BSC[i, 4] <- as.numeric( (e[i,'COUNTER_14'] / e[i,'COUNTER_13']*100))
df$TCP_BSC[i, 5] <- as.numeric( (e[i,'COUNTER_17'] / e[i,'COUNTER_6']*100))
df$TCP_BSC[i, 6] <- as.numeric( (e[i,'COUNTER_12'] / e[i,'COUNTER_4']*100))
df$TCP_BSC[i, 7] <- as.numeric( e[i,'COUNTER_15'] / e[i,'COUNTER_11'])
df$TCP_BSC[i, 8] <- as.numeric( e[i,'COUNTER_2'] / e[i,'COUNTER_20'])
df$TCP_BSC[i, 9] <- as.numeric( e[i,'COUNTER_1'] / e[i,'COUNTER_3'])
df$TCP_BSC[i, 10] <- as.numeric( (e[i,'COUNTER_9'] / e[i,'COUNTER_18']*100))
df$TCP_BSC[i, 11] <- as.numeric( (e[i,'COUNTER_8'] / e[i,'COUNTER_13']*100))
df$TCP_BSC[i, 12] <- as.numeric( (e[i,'COUNTER_22'] / e[i,'COUNTER_18']*100))
df$TCP_BSC[i, 13] <- as.numeric( (e[i,'COUNTER_21'] / e[i,'COUNTER_13']*100))
df$TCP_BSC[i, 14] <- as.numeric( (e[i,'COUNTER_16'] / e[i,'COUNTER_11']*100))
df$TCP_BSC[i, 15] <- as.numeric( (e[i,'COUNTER_15'] - e[i,'COUNTER_16']) / e[i,'COUNTER_11'])
}
# Transpose data frame of TCP BSC
for(j in 1:15) {
df$TCP_BSC_TRANS[1+(j-1)*7,1] <- df$TCP_BSC[1,j]
df$TCP_BSC_TRANS[2+(j-1)*7,1] <- df$TCP_BSC[2,j]
df$TCP_BSC_TRANS[3+(j-1)*7,1] <- df$TCP_BSC[3,j]
df$TCP_BSC_TRANS[4+(j-1)*7,1] <- df$TCP_BSC[4,j]
df$TCP_BSC_TRANS[5+(j-1)*7,1] <- df$TCP_BSC[5,j]
df$TCP_BSC_TRANS[6+(j-1)*7,1] <- df$TCP_BSC[6,j]
df$TCP_BSC_TRANS[7+(j-1)*7,1] <- df$TCP_BSC[7,j]
}
# TCP RNC interface
f <- queryResult$TCP_RNC
df$TCP_RNC <- data.frame(matrix(NA, ncol = 15, nrow = 7))
df$TCP_RNC_TRANS<- data.frame(matrix(NA, ncol = 1, nrow = 15*7))
for(i in 1:7) {
df$TCP_RNC[i, 1] <- as.numeric( (f[i,'COUNTER_11'] / f[i,'COUNTER_10']*100))
df$TCP_RNC[i, 2] <- as.numeric( (f[i,'COUNTER_7'] + f[i,'COUNTER_5'])/1024/1024/1024)
df$TCP_RNC[i, 3] <- as.numeric( (f[i,'COUNTER_19'] / f[i,'COUNTER_18']*100))
df$TCP_RNC[i, 4] <- as.numeric( (f[i,'COUNTER_14'] / f[i,'COUNTER_13']*100))
df$TCP_RNC[i, 5] <- as.numeric( (f[i,'COUNTER_17'] / f[i,'COUNTER_6']*100))
df$TCP_RNC[i, 6] <- as.numeric( (f[i,'COUNTER_12'] / f[i,'COUNTER_4']*100))
df$TCP_RNC[i, 7] <- as.numeric( f[i,'COUNTER_15'] / f[i,'COUNTER_11'])
df$TCP_RNC[i, 8] <- as.numeric( f[i,'COUNTER_2'] / f[i,'COUNTER_20'])
df$TCP_RNC[i, 9] <- as.numeric( f[i,'COUNTER_1'] / f[i,'COUNTER_3'])
df$TCP_RNC[i, 10] <- as.numeric( (f[i,'COUNTER_9'] / f[i,'COUNTER_18']*100))
df$TCP_RNC[i, 11] <- as.numeric( (f[i,'COUNTER_8'] / f[i,'COUNTER_13']*100))
df$TCP_RNC[i, 12] <- as.numeric( (f[i,'COUNTER_22'] / f[i,'COUNTER_18']*100))
df$TCP_RNC[i, 13] <- as.numeric( (f[i,'COUNTER_21'] / f[i,'COUNTER_13']*100))
df$TCP_RNC[i, 14] <- as.numeric( (f[i,'COUNTER_16'] / f[i,'COUNTER_11']*100))
df$TCP_RNC[i, 15] <- as.numeric( (f[i,'COUNTER_15'] - f[i,'COUNTER_16']) / f[i,'COUNTER_11'])
}
# Transpose data frame of TCP RNC
for(j in 1:15) {
df$TCP_RNC_TRANS[1+(j-1)*7,1] <- df$TCP_RNC[1,j]
df$TCP_RNC_TRANS[2+(j-1)*7,1] <- df$TCP_RNC[2,j]
df$TCP_RNC_TRANS[3+(j-1)*7,1] <- df$TCP_RNC[3,j]
df$TCP_RNC_TRANS[4+(j-1)*7,1] <- df$TCP_RNC[4,j]
df$TCP_RNC_TRANS[5+(j-1)*7,1] <- df$TCP_RNC[5,j]
df$TCP_RNC_TRANS[6+(j-1)*7,1] <- df$TCP_RNC[6,j]
df$TCP_RNC_TRANS[7+(j-1)*7,1] <- df$TCP_RNC[7,j]
}
# TCP SGSN interface: derive 15 KPI columns for each of the 2 SGSN rows.
g <- queryResult$TCP_SGSN
df$TCP_SGSN <- data.frame(matrix(NA, ncol = 15, nrow = 2))
df$TCP_SGSN_TRANS <- data.frame(matrix(NA, ncol = 1, nrow = 15 * 2))
for (r in 1:2) {
  # Raw counter value for the current row of the SGSN result set.
  counter_val <- function(name) g[r, name]
  df$TCP_SGSN[r, 1]  <- as.numeric(counter_val('COUNTER_11') / counter_val('COUNTER_10') * 100)
  # Scaled by 1024^3 (presumably bytes -> GB; confirm against counter docs).
  df$TCP_SGSN[r, 2]  <- as.numeric((counter_val('COUNTER_7') + counter_val('COUNTER_5')) / 1024 / 1024 / 1024)
  df$TCP_SGSN[r, 3]  <- as.numeric(counter_val('COUNTER_19') / counter_val('COUNTER_18') * 100)
  df$TCP_SGSN[r, 4]  <- as.numeric(counter_val('COUNTER_14') / counter_val('COUNTER_13') * 100)
  df$TCP_SGSN[r, 5]  <- as.numeric(counter_val('COUNTER_17') / counter_val('COUNTER_6') * 100)
  df$TCP_SGSN[r, 6]  <- as.numeric(counter_val('COUNTER_12') / counter_val('COUNTER_4') * 100)
  # Columns 7-9 are plain ratios (no percentage scaling).
  df$TCP_SGSN[r, 7]  <- as.numeric(counter_val('COUNTER_15') / counter_val('COUNTER_11'))
  df$TCP_SGSN[r, 8]  <- as.numeric(counter_val('COUNTER_2') / counter_val('COUNTER_20'))
  df$TCP_SGSN[r, 9]  <- as.numeric(counter_val('COUNTER_1') / counter_val('COUNTER_3'))
  df$TCP_SGSN[r, 10] <- as.numeric(counter_val('COUNTER_9') / counter_val('COUNTER_18') * 100)
  df$TCP_SGSN[r, 11] <- as.numeric(counter_val('COUNTER_8') / counter_val('COUNTER_13') * 100)
  df$TCP_SGSN[r, 12] <- as.numeric(counter_val('COUNTER_22') / counter_val('COUNTER_18') * 100)
  df$TCP_SGSN[r, 13] <- as.numeric(counter_val('COUNTER_21') / counter_val('COUNTER_13') * 100)
  df$TCP_SGSN[r, 14] <- as.numeric(counter_val('COUNTER_16') / counter_val('COUNTER_11') * 100)
  df$TCP_SGSN[r, 15] <- as.numeric((counter_val('COUNTER_15') - counter_val('COUNTER_16')) / counter_val('COUNTER_11'))
}
# Transpose data frame of TCP SGSN: flatten the 2 x 15 KPI table into a
# single 30-row column, column-major.
for (j in 1:15) {
  for (r in 1:2) {
    df$TCP_SGSN_TRANS[r + (j - 1) * 2, 1] <- df$TCP_SGSN[r, j]
  }
}
# TCP GGSN interface: derive 15 KPI columns for each of the 5 GGSN rows.
h <- queryResult$TCP_GGSN
df$TCP_GGSN <- data.frame(matrix(NA, ncol = 15, nrow = 5))
df$TCP_GGSN_TRANS <- data.frame(matrix(NA, ncol = 1, nrow = 15 * 5))
for (r in 1:5) {
  # Raw counter value for the current row of the GGSN result set.
  counter_val <- function(name) h[r, name]
  df$TCP_GGSN[r, 1]  <- as.numeric(counter_val('COUNTER_11') / counter_val('COUNTER_10') * 100)
  # Scaled by 1024^3 (presumably bytes -> GB; confirm against counter docs).
  df$TCP_GGSN[r, 2]  <- as.numeric((counter_val('COUNTER_7') + counter_val('COUNTER_5')) / 1024 / 1024 / 1024)
  df$TCP_GGSN[r, 3]  <- as.numeric(counter_val('COUNTER_19') / counter_val('COUNTER_18') * 100)
  df$TCP_GGSN[r, 4]  <- as.numeric(counter_val('COUNTER_14') / counter_val('COUNTER_13') * 100)
  df$TCP_GGSN[r, 5]  <- as.numeric(counter_val('COUNTER_17') / counter_val('COUNTER_6') * 100)
  df$TCP_GGSN[r, 6]  <- as.numeric(counter_val('COUNTER_12') / counter_val('COUNTER_4') * 100)
  # Columns 7-9 are plain ratios (no percentage scaling).
  df$TCP_GGSN[r, 7]  <- as.numeric(counter_val('COUNTER_15') / counter_val('COUNTER_11'))
  df$TCP_GGSN[r, 8]  <- as.numeric(counter_val('COUNTER_2') / counter_val('COUNTER_20'))
  df$TCP_GGSN[r, 9]  <- as.numeric(counter_val('COUNTER_1') / counter_val('COUNTER_3'))
  df$TCP_GGSN[r, 10] <- as.numeric(counter_val('COUNTER_9') / counter_val('COUNTER_18') * 100)
  df$TCP_GGSN[r, 11] <- as.numeric(counter_val('COUNTER_8') / counter_val('COUNTER_13') * 100)
  df$TCP_GGSN[r, 12] <- as.numeric(counter_val('COUNTER_22') / counter_val('COUNTER_18') * 100)
  df$TCP_GGSN[r, 13] <- as.numeric(counter_val('COUNTER_21') / counter_val('COUNTER_13') * 100)
  df$TCP_GGSN[r, 14] <- as.numeric(counter_val('COUNTER_16') / counter_val('COUNTER_11') * 100)
  df$TCP_GGSN[r, 15] <- as.numeric((counter_val('COUNTER_15') - counter_val('COUNTER_16')) / counter_val('COUNTER_11'))
}
# Transpose data frame of TCP GGSN: flatten the 5 x 15 KPI table into a
# single 75-row column, column-major.
for (j in 1:15) {
  for (r in 1:5) {
    df$TCP_GGSN_TRANS[r + (j - 1) * 5, 1] <- df$TCP_GGSN[r, j]
  }
}
# TCP SGW interface: derive 15 KPI columns for each of the 5 SGW rows.
k <- queryResult$TCP_SGW
df$TCP_SGW <- data.frame(matrix(NA, ncol = 15, nrow = 5))
df$TCP_SGW_TRANS <- data.frame(matrix(NA, ncol = 1, nrow = 15 * 5))
for (r in 1:5) {
  # Raw counter value for the current row of the SGW result set.
  counter_val <- function(name) k[r, name]
  df$TCP_SGW[r, 1]  <- as.numeric(counter_val('COUNTER_11') / counter_val('COUNTER_10') * 100)
  # Scaled by 1024^3 (presumably bytes -> GB; confirm against counter docs).
  df$TCP_SGW[r, 2]  <- as.numeric((counter_val('COUNTER_7') + counter_val('COUNTER_5')) / 1024 / 1024 / 1024)
  df$TCP_SGW[r, 3]  <- as.numeric(counter_val('COUNTER_19') / counter_val('COUNTER_18') * 100)
  df$TCP_SGW[r, 4]  <- as.numeric(counter_val('COUNTER_14') / counter_val('COUNTER_13') * 100)
  df$TCP_SGW[r, 5]  <- as.numeric(counter_val('COUNTER_17') / counter_val('COUNTER_6') * 100)
  df$TCP_SGW[r, 6]  <- as.numeric(counter_val('COUNTER_12') / counter_val('COUNTER_4') * 100)
  # Columns 7-9 are plain ratios (no percentage scaling).
  df$TCP_SGW[r, 7]  <- as.numeric(counter_val('COUNTER_15') / counter_val('COUNTER_11'))
  df$TCP_SGW[r, 8]  <- as.numeric(counter_val('COUNTER_2') / counter_val('COUNTER_20'))
  df$TCP_SGW[r, 9]  <- as.numeric(counter_val('COUNTER_1') / counter_val('COUNTER_3'))
  df$TCP_SGW[r, 10] <- as.numeric(counter_val('COUNTER_9') / counter_val('COUNTER_18') * 100)
  df$TCP_SGW[r, 11] <- as.numeric(counter_val('COUNTER_8') / counter_val('COUNTER_13') * 100)
  df$TCP_SGW[r, 12] <- as.numeric(counter_val('COUNTER_22') / counter_val('COUNTER_18') * 100)
  df$TCP_SGW[r, 13] <- as.numeric(counter_val('COUNTER_21') / counter_val('COUNTER_13') * 100)
  df$TCP_SGW[r, 14] <- as.numeric(counter_val('COUNTER_16') / counter_val('COUNTER_11') * 100)
  df$TCP_SGW[r, 15] <- as.numeric((counter_val('COUNTER_15') - counter_val('COUNTER_16')) / counter_val('COUNTER_11'))
}
# Transpose data frame of TCP SGW: flatten the 5 x 15 KPI table into a
# single 75-row column, column-major.
for (j in 1:15) {
  for (r in 1:5) {
    df$TCP_SGW_TRANS[r + (j - 1) * 5, 1] <- df$TCP_SGW[r, j]
  }
}
# TCP PGW interface: derive 15 KPI columns for each of the 5 PGW rows.
l <- queryResult$TCP_PGW
df$TCP_PGW <- data.frame(matrix(NA, ncol = 15, nrow = 5))
df$TCP_PGW_TRANS <- data.frame(matrix(NA, ncol = 1, nrow = 15 * 5))
for (r in 1:5) {
  # Raw counter value for the current row of the PGW result set.
  counter_val <- function(name) l[r, name]
  df$TCP_PGW[r, 1]  <- as.numeric(counter_val('COUNTER_11') / counter_val('COUNTER_10') * 100)
  # Scaled by 1024^3 (presumably bytes -> GB; confirm against counter docs).
  df$TCP_PGW[r, 2]  <- as.numeric((counter_val('COUNTER_7') + counter_val('COUNTER_5')) / 1024 / 1024 / 1024)
  df$TCP_PGW[r, 3]  <- as.numeric(counter_val('COUNTER_19') / counter_val('COUNTER_18') * 100)
  df$TCP_PGW[r, 4]  <- as.numeric(counter_val('COUNTER_14') / counter_val('COUNTER_13') * 100)
  df$TCP_PGW[r, 5]  <- as.numeric(counter_val('COUNTER_17') / counter_val('COUNTER_6') * 100)
  df$TCP_PGW[r, 6]  <- as.numeric(counter_val('COUNTER_12') / counter_val('COUNTER_4') * 100)
  # Columns 7-9 are plain ratios (no percentage scaling).
  df$TCP_PGW[r, 7]  <- as.numeric(counter_val('COUNTER_15') / counter_val('COUNTER_11'))
  df$TCP_PGW[r, 8]  <- as.numeric(counter_val('COUNTER_2') / counter_val('COUNTER_20'))
  df$TCP_PGW[r, 9]  <- as.numeric(counter_val('COUNTER_1') / counter_val('COUNTER_3'))
  df$TCP_PGW[r, 10] <- as.numeric(counter_val('COUNTER_9') / counter_val('COUNTER_18') * 100)
  df$TCP_PGW[r, 11] <- as.numeric(counter_val('COUNTER_8') / counter_val('COUNTER_13') * 100)
  df$TCP_PGW[r, 12] <- as.numeric(counter_val('COUNTER_22') / counter_val('COUNTER_18') * 100)
  df$TCP_PGW[r, 13] <- as.numeric(counter_val('COUNTER_21') / counter_val('COUNTER_13') * 100)
  df$TCP_PGW[r, 14] <- as.numeric(counter_val('COUNTER_16') / counter_val('COUNTER_11') * 100)
  df$TCP_PGW[r, 15] <- as.numeric((counter_val('COUNTER_15') - counter_val('COUNTER_16')) / counter_val('COUNTER_11'))
}
# Transpose data frame of TCP PGW: flatten the 5 x 15 KPI table into a
# single 75-row column, column-major.
for (j in 1:15) {
  for (r in 1:5) {
    df$TCP_PGW_TRANS[r + (j - 1) * 5, 1] <- df$TCP_PGW[r, j]
  }
}
# Emit the assembled list of KPI tables for inspection, then return it
# (the bare `df` on the last line is the function's return value).
print(df)
df
}
6d9de35ad7b2fcaa7fe5ac66a6f75d36273f041e | d79928950b55a5fd9f291bc9429e1e5b3a12a199 | /New Zealand early projects/Neural Nets and decision trees/distplots.r | f0a1f36e4fa0ba9faae286b7a6f5b23222e59c94 | [] | no_license | Przemek-Win/privateProjects | 0b09456b499e9d1fb2ad5f23861bbed8e5761f19 | b3eac22dfe075ba5a6d6cc76a839ace5a880de53 | refs/heads/master | 2022-12-13T14:43:26.707582 | 2018-08-15T21:28:07 | 2018-08-15T21:28:07 | 144,473,576 | 0 | 0 | null | 2022-12-08T02:22:20 | 2018-08-12T14:28:35 | Python | UTF-8 | R | false | false | 3,412 | r | distplots.r | ##############################################################################################
# Example to produce plot of:
# Relative distance in Feature Space (x) versus Relative Distance in Response space (y)
##############################################################################################
#
# Assume we are given a table with a single response variable
# Want to make a table with the (x) and (y) values as 2 columns
#
# IN: d - the dataframe or matrix
# response.var - the number of the column used as the response variable. Defaults to last column
# OP: Calculates the normalised distance between each pair of data items in explanatory space
# and the distance between their response variables.
# OUT: Data frame with 2 columns, the distance in feature space (x), and distance in response space (y)
########################################################################################################
dist.table <- function(d, response.var = ncol(d),...)
{
d <- scale(d) # scale data
d.dist <- dist(d[,-response.var]) # distance all X values
d.resp <- dist(d[,response.var])
d.dist <- (d.dist-min(d.dist))/(max(d.dist)-min(d.dist))
d.resp <- (d.resp-min(d.resp))/(max(d.resp)-min(d.resp))
data.frame(cbind(d.dist,d.resp))
}
#
# Example with a purely random response: Y is drawn from runif(),
# independent of X1 and X2, so the response-space distances should show
# no relationship to the feature-space distances.
# (The previous comment claimed a "simple linear response and no noise",
# which did not match the code below.)
#
X1 <- runif(100)
X2 <- runif(100)
Y <- runif(100)
#
ex1 <- data.frame(cbind(X1,X2,Y))
# Column 3 (Y) is the response; columns 1-2 are the features.
d <- dist.table(ex1, response.var = 3)
plot(x=d$d.dist, y = d$d.resp,xlab="Normalised Distance in Feature Space",
ylab="Normalised Distance in Response Space",cex=0.5)
# Part 3: distance plots for the Boston housing and bioavailability data sets.
# NOTE(review): setwd() with an absolute user path makes the script
# non-portable; kept because the read.table() call below depends on it.
setwd("C:/Users/Przemek/Documents/INFO324/assignment2")
library(MASS)
data(Boston)
# Inspect the Boston data. (These three calls previously referenced
# `Nijak`, which is not defined until further below, so they errored.)
head(Boston)
summary(Boston)
sapply(Boston, class)
# medv (column 14) is the response variable.
d <- dist.table(Boston, response.var = 14)
plot(x=d$d.dist, y = d$d.resp,xlab="Normalised Distance in Feature Space",
ylab="Normalised Distance in Response Space",cex=0.5, main="Boston distance plot")
abline(0,1, col=2, lwd=3)
# Bioavailability data, read from the working directory set above;
# the last column is treated as the response.
Nijak <- as.data.frame(read.table("bioavailability.txt"), row.names = NULL)
summary(Nijak)
# Renamed from `c`, which masked base::c().
bio_d <- dist.table(Nijak, response.var = length(names(Nijak)))
plot(x = bio_d$d.dist, y = bio_d$d.resp, xlab = "Normalised Distance in Feature Space",
     ylab = "Normalised Distance in Response Space", cex = 0.5, main = "Bioavailability distance plot")
abline(0, 1, col = 2, lwd = 3)
# Question 4.1
# (The line above was a bare, unquoted `Question 4.1`, which is a parse
# error when sourcing this file; it is now a comment.)
library(neuralnet)
# NOTE(review): y, x1 and x2 are not defined anywhere in this file, so this
# cbind() fails unless they exist in the calling environment -- confirm.
ydata <- as.data.frame(cbind(y,x1,x2))
colnames(ydata) <- c("y","x1","x2")
ydata.copy <- ydata # Keep a copy before we scale
# Standardise Boston and keep the scaling attributes so predictions can be
# mapped back to the original units later.
Boston.copy <- scale(Boston)
scale.scale <- attr(Boston.copy,"scaled:scale")
scale.center <- attr(Boston.copy,"scaled:center")
ydata <- data.frame(ydata) # So we can use the column names
ydata <- ydata[order(ydata$y),]
head(Boston.copy)
# Single-hidden-layer (2 units) network for medv on all 13 predictors.
net.y <- neuralnet(medv ~ lstat + black+ptratio+tax+rad+dis+age+rm+nox+chas+indus+zn+crim,data = Boston.copy,hidden=2)
net.y
net.pred <- compute(net.y, ydata[,2:3])
net.pred2 <- compute(net.y, Boston.copy)
# BUT THIS IS WITH THE SCALED DATA -- we need to convert back if we want to compare
# with the original values.
#
# Here we call the un.scale.data function with the result values and the two scaling
# properties we saved earlier. Note that it has 3 values since there were 3 variables.
# Because of this, the result has 3 columns, but it is just the y column that we want.
#
# Plot the original data in order
#
# NOTE(review): net.pred.unscaled is never created in this file (the
# un.scale.data step described above is missing), so this lines() call fails.
lines(net.pred.unscaled[,1],col='red')
#
# |
fa4c2d22531423f7382a0be0b34a27240feca7c6 | ce8bb19d9ff723dcff105626d5a0eda60b2dd55b | /model_simple.R | af1a427451e4e2180ff75e82f46f54b6393eb495 | [] | no_license | hdshea/R4DS_Exercises | 0ddb96f0e1c845034d0e6a40c8435af845610022 | ae850f3086cd47f90dda092f07194d1bbcf2aa66 | refs/heads/main | 2023-04-09T09:23:42.174419 | 2021-04-01T13:14:01 | 2021-04-01T13:14:01 | 330,763,572 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,903 | r | model_simple.R | #' ---
#' title: "R4DS Model Section: Model Basics Chapter"
#' author: "H. David Shea"
#' date: "28 January 2021"
#' output: github_document
#' ---
#'
#+ r setup, include = FALSE
library(tidyverse)
library(modelr)
options(na.action = na.warn)
#+
#' ## 23.2 a simple model
# Scatter of the modelr::sim1 simulated data set.
ggplot(sim1, aes(x, y)) +
geom_point()
# 250 random candidate linear models: a1 = intercept, a2 = slope.
models <- tibble(
a1 = runif(250, -20, 40),
a2 = runif(250, -5, 5)
)
# Overlay every candidate line on the data (most fits are terrible).
ggplot(sim1, aes(x, y)) +
geom_abline(aes(intercept = a1, slope = a2), data = models, alpha = 1/4) +
geom_point()
# Predict y from x using a two-parameter linear model.
#
# a:    numeric vector c(intercept, slope)
# data: data frame (or list) with an `x` column
# Returns the vector of predicted y values.
model1 <- function(a, data) {
  intercept <- a[1]
  slope <- a[2]
  intercept + slope * data$x
}
# Example: predictions for intercept 7, slope 1.5 on the sim1 data.
model1(c(7, 1.5), sim1)
# Root-mean-squared deviation between the observed y values in `data`
# and the predictions of model1() under candidate parameters `mod`.
measure_distance <- function(mod, data) {
  residual <- data$y - model1(mod, data)
  sqrt(mean(residual^2))
}
# Example: RMSE of the (7, 1.5) candidate model against sim1.
measure_distance(c(7, 1.5), sim1)
# Adapter: accept the intercept and slope as two scalars so that
# purrr::map2_dbl() / optim-style callers can score a candidate
# against the sim1 data set.
sim1_dist <- function(a1, a2) {
  params <- c(a1, a2)
  measure_distance(params, sim1)
}
# Score every random candidate model against sim1.
models <- models %>%
mutate(dist = purrr::map2_dbl(a1, a2, sim1_dist))
models
# Show the 10 best candidates (smallest distance) over the data.
ggplot(sim1, aes(x, y)) +
geom_point(size = 2, color = "grey30") +
geom_abline(
aes(intercept = a1, slope = a2, color = -dist),
data = filter(models, rank(dist) <= 10)
)
filter(models, rank(dist) <= 10)
# Same 10 best candidates, viewed in parameter (a1, a2) space.
ggplot(models, aes(a1, a2)) +
geom_point(data = filter(models, rank(dist) <= 10), size = 4, color = "red") +
geom_point(aes(color = -dist))
# Replace random search with a regular 25 x 25 grid over parameter space.
grid <- expand.grid(
a1 = seq(-5, 20, length = 25),
a2 = seq(1, 3, length = 25)
) %>%
mutate(dist = purrr::map2_dbl(a1, a2, sim1_dist))
grid %>%
ggplot(aes(a1, a2)) +
geom_point(data = filter(grid, rank(dist) <= 10), size = 4, color = "red") +
geom_point(aes(color = -dist))
ggplot(sim1, aes(x, y)) +
geom_point(size = 2, color = "grey30") +
geom_abline(
aes(intercept = a1, slope = a2, color = -dist),
data = filter(grid, rank(dist) <= 10)
)
# Numerical optimisation of the same distance function, starting at (0, 0).
best <- optim(c(0, 0), measure_distance, data = sim1)
best$par
ggplot(sim1, aes(x, y)) +
geom_point(size = 2, color = "grey30") +
geom_abline(intercept = best$par[1], slope = best$par[2])
# The closed-form least-squares fit should agree with optim's answer.
sim1_mod <- lm(y ~ x, data = sim1)
coef(sim1_mod)
#' ### 23.2 Exercises
#'
#' One downside of the linear model is that it is sensitive to unusual values because
#' the distance incorporates a squared term. Fit a linear model to the simulated data
#' below, and visualise the results. Rerun a few times to generate different simulated
#' datasets. What do you notice about the model?
#'
# Heavy-tailed noise (t distribution, df = 2) produces occasional outliers.
sim1a <- tibble(
x = rep(1:10, each = 3),
y = x * 1.5 + 6 + rt(length(x), df = 2)
)
sim1a_mod <- lm(y ~ x, data = sim1a)
coef(sim1a_mod)
# The least-squares line can be pulled noticeably by a single outlier.
ggplot(sim1a, aes(x, y)) +
geom_point(size = 2, color = "grey30") +
geom_abline(intercept = coef(sim1a_mod)[1], slope = coef(sim1a_mod)[2])
#' One way to make linear models more robust is to use a different distance
#' measure. For example, instead of root-mean-squared distance, you could use
#' mean-absolute distance:
#'
# Mean-absolute deviation variant of measure_distance(); less sensitive
# to outliers than the root-mean-squared version.
measure_distance2 <- function(mod, data) {
  mean(abs(data$y - model1(mod, data)))
}
# Fresh heavy-tailed sample for the comparison below.
sim1a <- tibble(
x = rep(1:10, each = 3),
y = x * 1.5 + 6 + rt(length(x), df = 2)
)
sim1a_mod <- lm(y ~ x, data = sim1a)
coef(sim1a_mod)
# Fit by minimising mean-absolute distance instead of least squares.
best <- optim(c(0, 0), measure_distance2, data = sim1a)
best$par
# Compare the least-squares line with the robust (mean-absolute) line.
ggplot(sim1a, aes(x, y)) +
geom_point(size = 2, color = "grey30") +
geom_abline(intercept = coef(sim1a_mod)[1], slope = coef(sim1a_mod)[2]) +
geom_abline(intercept = best$par[1], slope = best$par[2])
#' ## 23.3 visualizing models
#'
#' It’s also useful to see what the model doesn’t capture, the so-called residuals which
#' are left after subtracting the predictions from the data. Residuals are powerful
#' because they allow us to use models to remove striking patterns so we can study the
#' subtler trends that remain.
#'
# Evenly spaced grid of x values to predict over.
grid <- sim1 %>%
data_grid(x)
grid
sim1_mod <- lm(y ~ x, data = sim1)
# Attach model predictions to the grid (adds a `pred` column).
grid <- grid %>%
add_predictions(sim1_mod)
grid
ggplot(sim1, aes(x)) +
geom_point(aes(y = y)) +
geom_line(aes(y = pred), data = grid, color = "red", size = 1)
# Attach residuals to the original data (adds a `resid` column).
sim1 <- sim1 %>%
add_residuals(sim1_mod)
sim1
#' freqpoly of residuals from lm(y ~ x)
ggplot(sim1, aes(resid)) +
geom_freqpoly(binwidth = 0.5)
#' plot of residuals from lm(y ~x)
ggplot(sim1, aes(x, resid)) +
geom_ref_line(h = 0) +
geom_point()
#' ### 23.3 Exercises
#'
#' Instead of using lm() to fit a straight line, you can use loess() to fit a smooth
#' curve. Repeat the process of model fitting, grid generation, predictions, and
#' visualisation on sim1 using loess() instead of lm(). How does the result compare
#' to geom_smooth()?
#'
grid_ls <- sim1 %>%
data_grid(x)
grid_ls
# Local regression (loess) fit instead of a straight line.
sim1_mod_ls <- loess(y ~ x, data = sim1)
grid_ls <- grid_ls %>%
add_predictions(sim1_mod_ls)
grid_ls
# Compare geom_smooth (blue), the lm fit (red, from `grid` above)
# and the loess fit (green).
ggplot(sim1, aes(x)) +
geom_point(aes(y = y)) +
geom_smooth(aes(y = y), size = 3) +
geom_line(aes(y = pred), data = grid, color = "red", size = 1) +
geom_line(aes(y = pred), data = grid_ls, color = "green", size = 1)
#'Why might you want to look at a frequency polygon of absolute residuals?
#'What are the pros and cons compared to looking at the raw residuals?
#'
#' freqpoly of residuals from lm(y ~ x)
ggplot(sim1) +
geom_freqpoly(aes(resid), binwidth = 0.5) +
geom_freqpoly(aes(abs(resid)), binwidth = 0.5, color = "red")
#' ## 23.4 formulas and model families
#'
#' categorical variables
#'
ggplot(sim2) +
geom_point(aes(x, y))
# Linear model with a single categorical predictor.
mod2 <- lm(y ~ x, data = sim2)
grid <- sim2 %>%
data_grid(x) %>%
add_predictions(mod2)
grid
#' Effectively, a model with a categorical x will predict the mean value for each
#' category. (Why? Because the mean minimises the root-mean-squared distance.)
ggplot(sim2, aes(x)) +
geom_point(aes(y = y)) +
geom_point(data = grid, aes(y = pred), color = "red", size = 4)
#' interactions (continuous and categorical)
#'
ggplot(sim3, aes(x1, y)) +
geom_point(aes(color = x2))
#' note '+' in mod1 and '*' in mod2
# mod1: additive effects only; mod2: includes the x1:x2 interaction.
mod1 <- lm(y ~ x1 + x2, data = sim3)
mod2 <- lm(y ~ x1 * x2, data = sim3)
# gather_predictions() stacks predictions from both models with a
# `model` column, so they can be faceted side by side.
grid <- sim3 %>%
data_grid(x1, x2) %>%
gather_predictions(mod1, mod2)
grid
ggplot(sim3, aes(x1, y, color = x2)) +
geom_point() +
geom_line(data = grid, aes(y = pred)) +
facet_wrap(~ model)
sim3 <- sim3 %>%
gather_residuals(mod1, mod2)
ggplot(sim3, aes(x1, resid, color = x2)) +
geom_point() +
facet_grid(model ~ x2)
#' There is little obvious pattern in the residuals for mod2. The residuals for mod1 show
#' that the model has clearly missed some pattern in b, and less so, but still present is
#' pattern in c, and d.
#' interactions (two continuous)
#'
# mod1: additive; mod2: includes the x1:x2 interaction.
mod1 <- lm(y ~ x1 + x2, data = sim4)
mod2 <- lm(y ~ x1 * x2, data = sim4)
# 5 x 5 grid over both predictors, with predictions from both models.
grid <- sim4 %>%
data_grid(
x1 = seq_range(x1, 5),
x2 = seq_range(x2, 5)
) %>%
gather_predictions(mod1, mod2)
grid
#' Next let’s try and visualise that model. We have two continuous predictors, so you
#' can imagine the model like a 3d surface. We could display that using geom_tile():
ggplot(grid, aes(x1, x2)) +
geom_tile(aes(fill = pred)) +
facet_wrap(~ model)
#' That doesn’t suggest that the models are very different! But that’s partly an illusion:
#' our eyes and brains are not very good at accurately comparing shades of color.
#' Instead of looking at the surface from the top, we could look at it from either side,
#' showing multiple slices:
ggplot(grid, aes(x1, pred, color = x2, group = x2)) +
geom_line() +
facet_wrap(~ model)
ggplot(grid, aes(x2, pred, color = x1, group = x1)) +
geom_line() +
facet_wrap(~ model)
#' residuals
sim4 <- sim4 %>%
gather_residuals(mod1, mod2)
ggplot(sim4, aes(x1, resid, color = x2)) +
geom_point() +
facet_grid(model ~ x2)
#' transformations
#'
# Tiny example data set to inspect the spline design matrix.
df <- tribble(
~y, ~x,
1, 1,
2, 2,
3, 3
)
library(splines)
model_matrix(df, y ~ ns(x, 2)) # ns is natural spline function, second arg is degrees of freedom
# Noisy sine wave: a straight line cannot fit this.
sim5 <- tibble(
x = seq(0, 3.5 * pi, length = 50),
y = 4 * sin(x) + rnorm(length(x))
)
ggplot(sim5, aes(x, y)) +
geom_point()
# Natural-spline fits with increasing flexibility (1 to 5 df).
mod1 <- lm(y ~ ns(x, 1), data = sim5)
mod2 <- lm(y ~ ns(x, 2), data = sim5)
mod3 <- lm(y ~ ns(x, 3), data = sim5)
mod4 <- lm(y ~ ns(x, 4), data = sim5)
mod5 <- lm(y ~ ns(x, 5), data = sim5)
# expand = 0.1 extends the grid beyond the data to show extrapolation.
grid <- sim5 %>%
data_grid(x = seq_range(x, n = 50, expand = 0.1)) %>%
gather_predictions(mod1, mod2, mod3, mod4, mod5, .pred = "y")
ggplot(sim5, aes(x, y)) +
geom_point() +
geom_line(data = grid, color = "red") +
facet_wrap(~ model)
#' Notice that the extrapolation outside the range of the data is clearly bad. This is the
#' downside to approximating a function with a polynomial. But this is a very real
#' problem with every model: the model can never tell you if the behaviour is true when
#' you start extrapolating outside the range of the data that you have seen. You must
#' rely on _theory_ and **science**. (Emphasis added by _president_ Joe Biden.)
#'
#'
#' What happens if you repeat the analysis of sim2 using a model without an
#' intercept. What happens to the model equation? What happens to the
#' predictions?
#'
ggplot(sim2) +
geom_point(aes(x, y))
mod2 <- lm(y ~ x, data = sim2)
mod2_no_int <- lm(y ~ x - 1, data = sim2)
grid <- sim2 %>%
data_grid(x) %>%
gather_predictions(mod2,mod2_no_int)
grid
#' exactly the same
#'
#' Use model_matrix() to explore the equations generated for the models I fit to
#' sim3 and sim4. Why is * a good shorthand for interaction?
#'
model_matrix(y ~ x1 * x2, data = sim3)
model_matrix(y ~ x1 * x2, data = sim4)
#' For sim4, which of mod1 and mod2 is better? I think mod2 does a slightly better
#' job at removing patterns, but it’s pretty subtle. Can you come up with a plot
#' to support my claim?
#'
mod1 <- lm(y ~ x1 + x2, data = sim4)
mod2 <- lm(y ~ x1 * x2, data = sim4)
#' predictions
grid <- sim4 %>%
data_grid(
x1 = seq_range(x1, 5),
x2 = seq_range(x2, 5)
) %>%
gather_predictions(mod1, mod2)
grid
#' residuals
sim4 <- sim4 %>%
gather_residuals(mod1, mod2)
sim4
#' nothing really jumps out in residual plots
#'
#' raw
ggplot(sim4, aes(x1, resid, color = x2)) +
geom_ref_line(h = 0) +
geom_point() +
facet_grid(model ~ x2)
#' absolute
ggplot(sim4, aes(x1, abs(resid), color = x2)) +
geom_ref_line(h = 0) +
geom_point() +
facet_grid(model ~ x2)
#' nothing really jumps out in frequency residual plots
#'
#' raw
ggplot(sim4, aes(resid, color = model)) +
geom_freqpoly(binwidth = 0.5)
#' absolute
ggplot(sim4, aes(abs(resid), color = model)) +
geom_freqpoly(binwidth = 0.5)
#' very slight variation in residual standard deviations - a _tad_ more in the mod1 tails???
# Summarise residual spread per model to quantify the visual comparison.
sim4 %>%
group_by(model) %>%
summarise(
resid_mn = round(mean(resid),3),
resid_sd = round(sd(resid),3)
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.