blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
df28609119f243dd0a96cbee3e3fc0a4232142a5 | 0aaecd6991a7f16759a1f8d2b3be6093f8a183af | /inst/snippet/ticket01.R | 3bd5076703683885fd7a0a009103872a2cede276 | [] | no_license | cran/fastR | 3f0e3959dad4e5d361c341eb6bea670eab6bfdcc | 572a5dc31e5aa85af4126662f95268329179c87b | refs/heads/master | 2021-01-21T04:55:14.927487 | 2017-07-27T19:52:06 | 2017-07-27T19:52:06 | 17,695,981 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | ticket01.R | d <- cbind(c(9,13),c(14,9)); d
fisher.test(d)
phyper(9,23,22,22)
|
5532ac81d83bd587e2553b001a1303e1b8ba3565 | e97f66fdbac0e27d8ce5107b903079c666eb7247 | /man/doTrainPredict.Rd | 1930674c48645935bfbbd52516b73f279128d2df | [] | no_license | philippstats/mlr | b9f0c0042148d3305bce4a017a97cb48d3d2e308 | eb4188d8c7f8157dd93a00a9be6b3b0063dfb973 | refs/heads/master | 2021-01-12T13:54:27.124915 | 2016-06-01T10:01:59 | 2016-06-01T10:01:59 | 54,952,701 | 0 | 0 | null | 2016-03-29T06:49:24 | 2016-03-29T06:49:23 | null | UTF-8 | R | false | true | 346 | rd | doTrainPredict.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StackedLearner_helpers.R
\name{doTrainPredict}
\alias{doTrainPredict}
\title{Training and prediction in one function (used for parallelMap)}
\usage{
doTrainPredict(bls, task, show.info)
}
\description{
Training and prediction in one function (used for parallelMap)
}
|
74a8c6978adc63f9ca3683404e167f92c2770aa3 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /timma/man/drugRank.Rd | 097fc24f7c4dccc2ffe85b05bf1d1f6c1c9615cc | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,916 | rd | drugRank.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/drugRank.R
\name{drugRank}
\alias{drugRank}
\title{Generate the list of ranked drug combinations}
\usage{
drugRank(profile_select, predicted_matrix, sens)
}
\arguments{
\item{profile_select}{the selected drug-target interaction data}
\item{predicted_matrix}{the predicted efficacy matrix}
\item{sens}{the drug sensitivity vector.}
}
\value{
a matrix contains the information about the list of drug combinations ranked by their synergy scores.
}
\description{
A function to provide a list of drug combinations ranked by their synergy scores
}
\examples{
\dontrun{
data(tyner_interaction_binary)
data(tyner_sensitivity)
float<-sffsBinary(tyner_interaction_binary, tyner_sensitivity[, 1], max_k = 8)
k_select<-float$k_sel
x<-data.frame(tyner_interaction_binary)
kinase_names <- dimnames(x)[[2]]
select_kinase_names <- findSameSet(x, k_select, kinase_names)
gc_timma <- graycode3(length(k_select))
gc_names <- graycodeNames(length(k_select), select_kinase_names, gc_timma$gc_row, gc_timma$gc_col)
nr <- gc_names$nr
nc <- t(gc_names$nc)
timma_row <- nrow(nr) + nrow(nc)
timma_col <- ncol(nr) + ncol(nc)
timma <- array("", dim = c(timma_row, timma_col))
timma[(nrow(nc) + 1):timma_row, 1:ncol(nr)] <- nr
timma[1:nrow(nc), (ncol(nr) + 1):timma_col] <- nc
timma[(nrow(nc) + 1):timma_row, (ncol(nr) + 1):timma_col] <- float$timma$dummy
profile_select<-data.frame(tyner_interaction_binary)[, k_select]
drug_combo_rank<-drugRank(profile_select, timma, tyner_sensitivity[, 1])
}
}
\author{
Jing Tang \email{jing.tang@helsinki.fi}
}
\references{
Tang J, Karhinen L, Xu T, Szwajda A, Yadav B, Wennerberg K, Aittokallio T.
Target inhibition networks: predicting selective combinations of druggable targets to block cancer
survival pathways. PLOS Computational Biology 2013; 9: e1003226.
}
|
9bc4c54f9c57c4547fadc7afecf2613f2b6e1be8 | 16913780a9dd6bcf426c739a04888aed93f1f00f | /figure/Plot4.R | daec084727968ea982592fd11e04c4415a4d48e9 | [] | no_license | maulik5781/ExData_Plotting1 | edb4a4e4e55b96e96a66a6c6851cb06f6bcc2769 | 8a39b43d8c9ec5abe362fe23e49cfffb9f328b75 | refs/heads/master | 2021-01-15T22:33:25.096720 | 2015-02-08T20:49:06 | 2015-02-08T20:49:06 | 30,473,408 | 0 | 0 | null | 2015-02-07T22:57:17 | 2015-02-07T22:57:17 | null | UTF-8 | R | false | false | 1,196 | r | Plot4.R | # read the table with ? as na string so we can filter later
df <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# remove ? (NA)
df <-na.omit(df)
# filter on interested dates Note the format is D/M/Y
df <- df[df$Date %in% c("1/2/2007", "2/2/2007"),]
# combine Date and Time so you can use in plot later as X axis
df$Date <- strptime(paste(df$Date,df$Time), "%d/%m/%Y %H:%M:%S")
par(mfrow = c(2,2))
with(df, plot(Date, Global_active_power, type="l",xlab="datetime", ylab="Global Power (killowatts)"))
plot(df$Date, df$Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
plot(df$Date, df$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "datetime")
lines(df$Date, df$Sub_metering_2, type = "l", col = "red")
lines(df$Date, df$Sub_metering_3, type = "l", col = "blue")
legend("topright", lwd = .75, cex = .75, col = c("black", "blue", "red"),legend = c("sub_metering_1", "sub_metering_2", "sub_metering_3"))
plot(df$Date, df$Global_reactive_power, xlab = "datetime", ylab = "Global Reactive Power", type = "l")
dev.copy(png, file = "Plot4.png") ## Copy my plot to a PNG file
dev.off() ## Don't forget to close the PNG device! |
f3706fc76313fd7c730d6e46a1f51b0d07ba7758 | a0d46cf44c8c6d054896f559400487c6aba85739 | /data-raw/table_data.R | 6d365f57e21af6cb2cd555e6b969844358b6112b | [
"MIT"
] | permissive | RichardMeyer-Eppler/studentenstatistikNRW | 20aec2ebe6c309d7744d067f19256c87584cb4a6 | cd9a08533148493bbf25522ff7eebf7d2eb70be1 | refs/heads/main | 2023-06-17T08:06:08.645864 | 2021-07-18T19:18:51 | 2021-07-18T19:18:51 | 384,389,222 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,395 | r | table_data.R | ################################################################################
# The code here is used to download the data for all tables in `tables`
# Retrieval of raw data needs to proceed in this order:
# 1. tables.R
# 2. metadata.R
# 3. value_labels.R
# 4. table_data.R
################################################################################
# list_df <- purrr::map(
# studentenstatistikNRW::tables[["tablename"]],
# wiesbaden::retrieve_data,
# genesis = c(
# db = "nrw"
# ),
# .id = "tablename"
# )
# save(
# list_df,
# file = here::here("list_df.rda"),
# version = 3
# )
devtools::load_all()
load(
file = "D:\\Git\\list_df.rda"
)
list_df_wrangled <- list_df %>%
# The table codes in the data source start with numbers, so the prefix "df_" is added for convenience when working in R
setNames(
nm = paste0(
# "df_",
studentenstatistikNRW::tables[["tablename"]]
)
) %>%
# Cleans column names, removes unwanted columns and turns data frame into a tibble.
purrr::map(
studentenstatistikNRW::clean_df
) %>%
# Sorts table by all character columns
purrr::map(
dplyr::arrange,
dplyr::across(
where(
is.character
)
)
) %>%
purrr::map2(
studentenstatistikNRW::tables[["tablename"]],
studentenstatistikNRW::label_variables
) %>%
purrr::map2(
studentenstatistikNRW::tables[["tablename"]],
studentenstatistikNRW::label_values
) %>%
setNames(
nm = paste0(
"df_",
studentenstatistikNRW::tables[["tablename"]]
)
)
# head(list_df_wrangled[[1]])
# dplyr::glimpse(list_df_wrangled[[1]])
# labelled::var_label(list_df_wrangled[[1]])
# labelled::generate_dictionary(list_df_wrangled[[1]])
# list_df_wrangled <- list_df %>%
# # The table codes in the data source start with numbers, so the prefix "df_" is added for convenience when working in R
# setNames(
# nm = paste0(
# "df_",
# studentenstatistikNRW::tables[["tablename"]]
# )
# ) %>%
# # Cleans column names, removes unwanted columns and turns data frame into a tibble.
# purrr::map(
# studentenstatistikNRW::clean_df
# ) %>%
# # Turns character vector into a factor. The labels for the levels are retrieved by joining `value_labels`
# purrr::map(
# studentenstatistikNRW::create_factors
# ) %>%
# # Sorts table by all factors.
# purrr::map(
# dplyr::arrange,
# dplyr::across(
# where(
# is.factor
# )
# )
# )
# Create rda files for each data frame
# See https://stackoverflow.com/questions/21809055/save-elements-of-a-list-to-rda-file-inside-a-function for as.environment
purrr::pwalk(
list(
list = names(
list_df_wrangled
),
file = here::here(
"data",
paste0(
# "df_",
names(
list_df_wrangled
),
".rda"
)
)
),
save,
version = 3,
envir = as.environment(
list_df_wrangled
)
)
# Create table documentation for every data frame
table_documentation <- purrr::map(
paste0(
"df_",
studentenstatistikNRW::tables[["tablename"]]
),
studentenstatistikNRW::document_table
)
# Write table documentation to R files
purrr::walk2(
table_documentation,
here::here(
"R",
paste0(
"df_",
studentenstatistikNRW::tables[["tablename"]],
".R"
)
),
writeLines,
useBytes = TRUE
)
|
7757ce71465419902e0b1ccd5f02dc56a385fe12 | c60126c5e91b1c6dced262d8026d9da4af9e18f6 | /man/get_path.Rd | 154f753be7c7a80a1522a877575dafdbd42d9a1b | [
"MIT"
] | permissive | davidallen02/pamngr | 3a36dd78903121c426b216fc96ed3e699ea4ac74 | cf89491132ca73c84d8b33ae6ac309303254b5af | refs/heads/master | 2023-07-15T06:14:23.118482 | 2021-09-01T21:34:02 | 2021-09-01T21:34:02 | 237,082,991 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 325 | rd | get_path.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-path.R
\name{get_path}
\alias{get_path}
\title{Get path to PAM Research Dropbox path}
\usage{
get_path()
}
\value{
A character string to the PAM Research Dropbox path specific to machine
}
\description{
Get path to PAM Research Dropbox path
}
|
9b3f7c4ef745178293370123deafc65a144ba962 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BaPreStoPro/examples/predict-est.hiddenDiffusion-method.Rd.R | 25d3c5975dfad9da2d290f2f1d5317095cf68143 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 801 | r | predict-est.hiddenDiffusion-method.Rd.R | library(BaPreStoPro)
### Name: predict,est.hiddenDiffusion-method
### Title: Prediction for a hidden diffusion process
### Aliases: predict,est.hiddenDiffusion-method
### ** Examples
## Not run:
##D model <- set.to.class("hiddenDiffusion", parameter = list(phi = 5, gamma2 = 1, sigma2 = 0.1))
##D t <- seq(0, 1, by = 0.01)
##D data <- simulate(model, t = t)
##D est_hiddiff <- estimate(model, t, data$Z, 100) # nMCMC should be much larger!
##D plot(est_hiddiff)
##D
##D pred_hiddiff <- predict(est_hiddiff, t = seq(0, 1, by = 0.1))
##D pred_hiddiff2 <- predict(est_hiddiff, which.series = "current")
##D
##D pred_hiddiff <- predict(est_hiddiff, pred.alg = "simpleTrajectory", sample.length = 100)
##D pred_hiddiff <- predict(est_hiddiff, pred.alg = "simpleBayesTrajectory")
## End(Not run)
|
9f7819933cf385ab6fd02b8c8db15ca019d7a558 | 3791bce46d95025c6b487c4c9ce41a5dab83d321 | /Scripts/Plot5.R | 3d1bd0888e5acdcadd04125d1b1f5be5f061697a | [] | no_license | scontador/Exploratory_Data_Analysis_Project | 67861c04a245e5ecf82c1278d483b84281d313d0 | eddb1dcbcbfbe3a26c02f85b6587c42e844d12a6 | refs/heads/master | 2020-03-23T05:55:56.834150 | 2017-03-03T21:13:06 | 2017-03-03T21:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,321 | r | Plot5.R |
###############################################################################
# Author: Sergio Contador
# Date: March 2017
# Title: Exploraty Data Analisys Course: Project 2
# Description:
## make a plot to answer next question: How have emissions from motor
## vehicle sources changed from 1999 to 2008 in Baltimore City?
###############################################################################
# Plot 5
# Load Data
dir.principal <- paste(getwd(), "/R/Programas/Data_Scientist/EDA/Project_2", sep = "")
# NEI Data
dir <- paste(dir.principal, "/Data/summarySCC_PM25.rds", sep = "")
NEI <- readRDS(dir)
# SCC Data
dir <- paste(dir.principal, "/Data/Source_Classification_Code.rds", sep = "")
SCC <- readRDS(dir)
# Viewing Data
names(NEI)
summary(NEI)
# View(NEI)
names(SCC)
summary(SCC)
# View(SCC)
# Subset Data
NEI2 <- NEI[NEI$fips == "24510" & NEI$type == "ON-ROAD", ]
NEI2 <- aggregate(Emissions ~ year, NEI2, sum)
# Plot5
dir <- paste(dir.principal, "/Plots/Plot5.png", sep = "")
png(filename = dir, width = 480, height = 480)
g <- ggplot(NEI2, aes(factor(year), Emissions))
g + geom_bar(stat = "identity") +
xlab("Year") + ylab(expression(paste("Log of PM"[2.5], " Emissions"))) +
ggtitle("Total Emissions of Motor Vehicle Sources in Baltimore City, Maryland")
dev.off()
|
b9e752e256dbaa1205856746ac1212eba120ac37 | 317620df37b13ac3b23aebe051c81d0edc9689d1 | /src/predict_different_mouse_models.R | d0bd1fb312e3002c4f29c323be44486bec822f72 | [] | no_license | andreaskapou/ames-mouse | 121b8f94e502311018d32411b3817f396c140c05 | 1eabe6a9a093b16d090ca1c058e4fce3d8791b9d | refs/heads/master | 2020-04-07T07:23:15.964213 | 2016-07-01T12:20:17 | 2016-07-01T12:20:17 | 54,209,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,236 | r | predict_different_mouse_models.R | # ------------------------------------------
# Set working directory and load libraries
# ------------------------------------------
if (interactive()){
cur.dir <- dirname(parent.frame(2)$ofile)
setwd(cur.dir)
}
library(mpgex)
library(processHTS)
library(earth)
library(e1071)
library(randomForest)
R.utils::sourceDirectory("lib", modifiedOnly = FALSE)
#
#
# ##----------- Parameters for filtering data --------
# gene_expr_thresh <- FALSE
# gene_outl_thresh <- TRUE
# gene_log2_transf <- TRUE
# max_outl <- 600
#
#
#
# ##----------------DF OLD mice results --------------
# load("../files/corr_df_old_SunFeb211527.RData")
# df_old_basis_prof <- basis_prof
# df_old_basis_mean <- basis_mean
# df_old_HTS_data <- HTS_data
# df_old_out_mean <- out_mean
# df_old_out_prof <- out_prof
#
# proc_data <- preprocess_data(HTS_data = df_old_HTS_data,
# max_outl = max_outl,
# gene_expr_thresh = gene_expr_thresh,
# gene_outl_thresh = gene_outl_thresh,
# gene_log2_transf = gene_log2_transf)
# df_old_obs <- proc_data$obs
# df_old_Y <- proc_data$Y
#
#
#
# ##-----------------DF YOUNG mice results ------------
# load("../files/corr_df_young_SunFeb211525.RData")
# df_young_basis_prof <- basis_prof
# df_young_basis_mean <- basis_mean
# df_young_HTS_data <- HTS_data
# df_young_out_mean <- out_mean
# df_young_out_prof <- out_prof
#
# proc_data <- preprocess_data(HTS_data = df_young_HTS_data,
# max_outl = max_outl,
# gene_expr_thresh = gene_expr_thresh,
# gene_outl_thresh = gene_outl_thresh,
# gene_log2_transf = gene_log2_transf)
# df_young_obs <- proc_data$obs
# df_young_Y <- proc_data$Y
#
#
#
# ##------------- Normal OLD mice results ------------
# load("../files/corr_N_old_MonFeb221046.RData")
# N_old_basis_prof <- basis_prof
# N_old_basis_mean <- basis_mean
# N_old_HTS_data <- HTS_data
# N_old_out_mean <- out_mean
# N_old_out_prof <- out_prof
#
# proc_data <- preprocess_data(HTS_data = N_old_HTS_data,
# max_outl = max_outl,
# gene_expr_thresh = gene_expr_thresh,
# gene_outl_thresh = gene_outl_thresh,
# gene_log2_transf = gene_log2_transf)
# N_old_obs <- proc_data$obs
# N_old_Y <- proc_data$Y
#
#
#
# ##------------ Normal YOUNG mice results ----------
# load("../files/corr_N_young_MonFeb221047.RData")
# N_young_basis_prof <- basis_prof
# N_young_basis_mean <- basis_mean
# N_young_HTS_data <- HTS_data
# N_young_out_mean <- out_mean
# N_young_out_prof <- out_prof
#
# proc_data <- preprocess_data(HTS_data = N_young_HTS_data,
# max_outl = max_outl,
# gene_expr_thresh = gene_expr_thresh,
# gene_outl_thresh = gene_outl_thresh,
# gene_log2_transf = gene_log2_transf)
# N_young_obs <- proc_data$obs
# N_young_Y <- proc_data$Y
# DF Old parameters
df_old_W <- data.frame(x = df_old_out_prof$W_opt,
y = df_old_Y)
# DF Young parameters
df_young_W <- data.frame(x = df_young_out_prof$W_opt,
y = df_young_Y)
# N Old parameters
N_old_W <- data.frame(x = N_old_out_prof$W_opt,
y = N_old_Y)
# N Young parameters
N_young_W <- data.frame(x = N_young_out_prof$W_opt,
y = N_young_Y)
# From DF OLD
message("Predicting from DF OLD")
DO_predict_DY <- predict_model_gex(model = df_old_out_prof$gex_model,
test = df_young_W,
is_summary = FALSE)
DO_predict_NO <- predict_model_gex(model = df_old_out_prof$gex_model,
test = N_old_W,
is_summary = FALSE)
DO_predict_NY <- predict_model_gex(model = df_old_out_prof$gex_model,
test = N_young_W,
is_summary = FALSE)
# From DF Young
message("Predicting from DF YOUNG")
DY_predict_DO <- predict_model_gex(model = df_young_out_prof$gex_model,
test = df_old_W,
is_summary = FALSE)
DY_predict_NO <- predict_model_gex(model = df_young_out_prof$gex_model,
test = N_old_W,
is_summary = FALSE)
DY_predict_NY <- predict_model_gex(model = df_young_out_prof$gex_model,
test = N_young_W,
is_summary = FALSE)
# From N OLD
message("Predicting from N OLD")
NO_predict_NY <- predict_model_gex(model = N_old_out_prof$gex_model,
test = N_young_W,
is_summary = FALSE)
NO_predict_DO <- predict_model_gex(model = N_old_out_prof$gex_model,
test = df_old_W,
is_summary = FALSE)
NO_predict_DY <- predict_model_gex(model = N_old_out_prof$gex_model,
test = df_young_W,
is_summary = FALSE)
# From N Young
message("Predicting from N YOUNG")
NY_predict_NO <- predict_model_gex(model = N_young_out_prof$gex_model,
test = N_old_W,
is_summary = FALSE)
NY_predict_DO <- predict_model_gex(model = N_young_out_prof$gex_model,
test = df_old_W,
is_summary = FALSE)
NY_predict_DY <- predict_model_gex(model = N_young_out_prof$gex_model,
test = df_young_W,
is_summary = FALSE)
#--------------- Create final data for plotting ---------------------
# DO to all other mouse models
out_DO_to_DY <- list(test_pred = DO_predict_DY$test_pred,
test = list(y = df_young_Y))
out_DO_to_NO <- list(test_pred = DO_predict_NO$test_pred,
test = list(y = N_old_Y))
out_DO_to_NY <- list(test_pred = DO_predict_NY$test_pred,
test = list(y = N_young_Y))
# DY to all other mouse models
out_DY_to_DO <- list(test_pred = DY_predict_DO$test_pred,
test = list(y = df_old_Y))
out_DY_to_NO <- list(test_pred = DY_predict_NO$test_pred,
test = list(y = N_old_Y))
out_DY_to_NY <- list(test_pred = DY_predict_NY$test_pred,
test = list(y = N_young_Y))
# NO to all other mouse models
out_NO_to_DY <- list(test_pred = NO_predict_DY$test_pred,
test = list(y = df_young_Y))
out_NO_to_DO <- list(test_pred = NO_predict_DO$test_pred,
test = list(y = df_old_Y))
out_NO_to_NY <- list(test_pred = NO_predict_NY$test_pred,
test = list(y = N_young_Y))
# NY to all other mouse models
out_NY_to_DY <- list(test_pred = NY_predict_DY$test_pred,
test = list(y = df_young_Y))
out_NY_to_DO <- list(test_pred = NY_predict_DO$test_pred,
test = list(y = df_old_Y))
out_NY_to_NO <- list(test_pred = NY_predict_NO$test_pred,
test = list(y = N_old_Y))
round(cor(out_DO_to_DY$test_pred, out_DO_to_DY$test$y),3)
round(cor(out_DO_to_NO$test_pred, out_DO_to_NO$test$y),3)
round(cor(out_DO_to_NY$test_pred, out_DO_to_NY$test$y),3)
round(cor(out_DY_to_DO$test_pred, out_DY_to_DO$test$y),3)
round(cor(out_DY_to_NO$test_pred, out_DY_to_NO$test$y),3)
round(cor(out_DY_to_NY$test_pred, out_DY_to_NY$test$y),3)
round(cor(out_NO_to_DY$test_pred, out_NO_to_DY$test$y),3)
round(cor(out_NO_to_DO$test_pred, out_NO_to_DO$test$y),3)
round(cor(out_NO_to_NY$test_pred, out_NO_to_NY$test$y),3)
round(cor(out_NY_to_DY$test_pred, out_NY_to_DY$test$y),3)
round(cor(out_NY_to_DO$test_pred, out_NY_to_DO$test$y),3)
round(cor(out_NY_to_NO$test_pred, out_NY_to_NO$test$y),3) |
20c23c74f0ea2204497ec306cc8be967866e1c27 | 56573eef148e0deba684c010dc35b16c6ba89b85 | /scriptPrueba.R | 92b04bce46461bc4555c615ee2b396413bc076a7 | [] | no_license | arubir01/Repo-practicas | f866a64470e5816a958c009984cbd3c822d60437 | 4ed03013ed241afb4eb9153641ee8f043df728e1 | refs/heads/master | 2020-05-16T17:55:12.175416 | 2019-05-07T10:08:13 | 2019-05-07T10:08:13 | 183,209,482 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,661 | r | scriptPrueba.R | #guia de comandos
?dbinom
#instalar un paquete
install.packages("ggplot2")
#cargar un paquete
library(gplot2)
#asignacion de valores
x <- 2
x^2-3
y <- x^2-3
#imprimir por pantalla
printf(y)
y
#estructura y manipulacion de hojas de datos
#cargar una hoja de datos
data(mtcars)
#imprimir por pantalla mtcars -> salida en consola
mtcars
#ver mtcars como hoja de calculo o matriz -> salida en scripts
View(mtcars)
# peso (sexta variable)
# del Hornet Sportabout (quinto vehículo)
mtcars[5,6]
#Todas las carc. del Hornet Sportabout
mtcars[5,]
#peso de todos los vehículos
mtcars[,6]
#extraer variable
mtcars$wt
#almacenar valores en un vector
pesos <- mtcars$wt
#media
mean(pesos)
#desviacion tipica
sd(pesos)
#importar datos
#importar archivo temperaturas.csv
temps <- read.csv("temperaturas.csv")
#leer temperaturas.csv
View(temps)
#vector temperaturas maximas
maximas <- temps$Tmax
#media de las maximas
mean(maximas)
#temp maxima de las maximas
max(maximas)
#instalar librerias
library(ggplot2)
library(scales)
#convertir fechas
temps$Fecha <- as.Date(temps$Fecha, format = "%d/%m/%Y")
View(temps)
#graficos
ggplot(data = temps, aes(x = Fecha)) +
geom_line(aes(y = Tmax), colour="red") +
geom_line(aes(y = Tmin), colour="blue") +
scale_x_date(
expand=c(0,0),
breaks = date_breaks("1 day"),
labels = date_format("%d")
) +
scale_y_continuous(breaks = seq(-5,35,5)) +
ggtitle ("Temperaturas máximas y mínimas en abril del 2018") +
xlab("Día") +
ylab ("Temperatura ( oC )")
|
4365eba01f6572112b436b1a185412a2feb0240a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/EpiReport/examples/getMap.Rd.R | 2a99e55462df76a7d380e2f199ab5bd94ded0b45 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 351 | r | getMap.Rd.R | library(EpiReport)
### Name: getMap
### Title: Get disease-specific map: distribution of cases by Member State
### Aliases: getMap
### ** Examples
# --- Preview of the PNG map using the default Salmonellosis dataset
getMap()
# --- Plot using external PNG image
# --- Please see examples in the vignette
browseVignettes(package = "EpiReport")
|
b924b9b8b8a91c59fbd810573151662619e69dda | 10eb64804c46ded22eddca725f2393380c728a7c | /lib/external_scripts/spam_detect.R | 0c06ae756177af9c224db3e14ab98cc74eb70631 | [] | no_license | maikhel/learn-r | aab3a53602b33212715828ac04d1babdc23ff0f2 | bdd9bf9fcda175bdfe2d409bc53b9d20c633a304 | refs/heads/master | 2020-03-27T08:46:52.370928 | 2018-08-28T22:32:04 | 2018-08-28T22:32:04 | 146,286,670 | 0 | 0 | null | 2018-08-28T22:32:05 | 2018-08-27T11:13:21 | Ruby | UTF-8 | R | false | false | 2,996 | r | spam_detect.R | #Load the required library silently
suppressPackageStartupMessages(require(jsonlite))
suppressPackageStartupMessages(require(optparse))
# training set
spam <- list(c("buy", "drugs", "online", "from", "our", "pharma"),
c("buy", "insurance", "at", "low", "prices"),
c("amazing", "stuff", "limited", "edition"),
c("bargain", "only", "today"),
c("click", "to", "buy", "free", "drugs"),
c("earn", "million", "dollars", "in", "two", "weeks"),
c("double", "your", "income", "in", "three", "days"))
legitimate <- list(c("newsletter", "from", "your", "favorite", "website"),
c("i", "was", "writing", "for", "ruby", "advice"),
c("new", "ruby", "library"),
c("service", "objects", "in", "rails"),
c("why", "ruby", "is", "better", "than", "go"),
c("rspec", "good", "practices"),
c("good", "article", "on", "rails"))
# training
categories = 2
priors <- c()
total <- length(spam) + length(legitimate)
priors[1] <- length(spam) / total
priors[2] <- length(legitimate) / total
training <-list(spam, legitimate)
features <- list();
zeroOccurrences = list()
for (category in 1:categories) {
categoryFeatures <- list();
singleOccurrence = 1 / length(training[[category]])
zeroOccurrences[[category]] = singleOccurrence
for (sampleMail in training[[category]]) {
for (word in sampleMail) {
if (word %in% names(categoryFeatures)) {
categoryFeatures[[word]] = categoryFeatures[[word]] + singleOccurrence
} else {
categoryFeatures[[word]] = zeroOccurrences[[category]] + singleOccurrence
}
}
}
features[[category]] <- categoryFeatures
}
score <- function (test_mail, category) {
score <- priors[category]
categoryFeatures = features[[category]]
for (word in test_mail) {
if (word %in% names(categoryFeatures)) {
score <- score * categoryFeatures[[word]]
} else {
score <- score * zeroOccurrences[[category]]
}
}
return(score)
}
# classifier
classify <- function(test_mail) {
scores = c()
for (i in 1:categories) {
scores[i] = score(test_mail, i)
}
# print(scores)
result <- which(scores==max(scores))
list(scores=scores,result=result)
}
# Set up the script option parsing
option_list = list(
make_option(c("-p", "--params"), action="store", default=NA, type='character',
help="a valid JSON")
)
opt_parser = OptionParser(option_list=option_list)
opt = parse_args(opt_parser)
# Validate the Option parameters
if (is.null(opt$params) | !validate(opt$params)){
print_help(opt_parser)
stop("At least one argument must be supplied or the JSON must be valid.", call.=FALSE)
}
params <- fromJSON(opt$params)
words <- params$words
out <- classify(words)
# Return the JSON
toJSON(out,auto_unbox=TRUE)
|
e779e74c1d9a6360ba4125976d60c3b7d535eaaf | 11de9e10b0b9fa137bbf8607bfd321e9d5fede6e | /mPowerEI/man/determineOrder.Rd | 790f10456762ae568f4b8d9c688bd5b05c3a319b | [
"MIT"
] | permissive | MonteShaffer/mPowerEI | dbc906db491716f15295899f7e10efac6601531b | d587c4fda58a377b9bbfb3327411249a11049aa2 | refs/heads/master | 2021-05-16T08:48:51.456692 | 2017-10-05T11:03:30 | 2017-10-05T11:03:30 | 104,287,224 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 393 | rd | determineOrder.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseData.R
\name{determineOrder}
\alias{determineOrder}
\title{Determine Activity Order}
\usage{
determineOrder(mlist)
}
\arguments{
\item{mlist}{}
}
\value{
dataframe sorted by order with start/stop timestamps as values
}
\description{
[rest, outbound, return]
[outbound, rest, return]
[outbound, return, rest]
}
|
6e4e8caa1a52dc3f86a13fe6746a4769f334e703 | 4db69756bfec375e75a5d8199063afeb52c62cd7 | /tests/testthat/cache_test_data.R | aa044a95d08bc40aba44f10f6ca4b2f9957bb8c0 | [] | no_license | hadley/supernova | 2588745b256f5bcba00c4abc8fda6d0e52c5e278 | f5d3b374b0b1c0a3f387c7698271cd65594db023 | refs/heads/main | 2023-05-05T05:06:15.471021 | 2021-05-28T19:59:51 | 2021-05-28T19:59:51 | 388,244,493 | 2 | 1 | null | 2021-07-21T21:00:38 | 2021-07-21T21:00:38 | null | UTF-8 | R | false | false | 2,909 | r | cache_test_data.R | # Cache the car::Anova data and other data used to test the package so that we
# don't have to include those packages just for testing
library(magrittr)
cache_dir <- "./tests/testthat/cache"
data_dir <- "./tests/testthat/data"
if (!dir.exists(cache_dir)) dir.create(cache_dir)
if (!dir.exists(data_dir)) dir.create(data_dir)
# Independent designs -----------------------------------------------------
as.character.call <- function(model) {
Reduce(paste, deparse(model$call))
}
df_missing <- mtcars
df_missing[1, ]$hp <- NA_real_
df_missing[2:3, ]$disp <- NA_real_
models <- list(
lm(Thumb ~ Weight, supernova::Fingers),
lm(Thumb ~ RaceEthnic, supernova::Fingers),
lm(Thumb ~ Weight + Height, supernova::Fingers),
lm(Thumb ~ RaceEthnic + Weight, supernova::Fingers),
lm(Thumb ~ RaceEthnic + Sex, supernova::Fingers),
lm(Thumb ~ RaceEthnic + Weight + Sex, supernova::Fingers),
lm(Thumb ~ Weight * Height, supernova::Fingers),
lm(Thumb ~ RaceEthnic * Weight, supernova::Fingers),
lm(Thumb ~ RaceEthnic * Sex, supernova::Fingers),
lm(Thumb ~ RaceEthnic + Weight * Sex, supernova::Fingers),
lm(Thumb ~ RaceEthnic * Weight * Sex, supernova::Fingers),
lm(mpg ~ hp, df_missing),
lm(mpg ~ hp * disp, df_missing),
lm(uptake ~ Treatment, data = CO2[1:80, ]),
lm(uptake ~ Treatment * Type, data = CO2[1:80, ])
) %>%
purrr::set_names(purrr::map(., ~ as.character.call(.x)))
models %>%
purrr::map(anova) %>%
readr::write_rds(file.path(cache_dir, "model_cache_type_1.Rds"))
models %>%
purrr::map(car::Anova, type = 2) %>%
readr::write_rds(file.path(cache_dir, "model_cache_type_2.Rds"))
models %>%
purrr::map(car::Anova, type = 3) %>%
readr::write_rds(file.path(cache_dir, "model_cache_type_3.Rds"))
# Simple nested designs ---------------------------------------------------
JMRData::ex11.1 %>%
tidyr::gather(id, value, dplyr::starts_with("score")) %>%
dplyr::mutate(dplyr::across(c(group, instructions, id), as.factor)) %>%
readr::write_rds(file.path(data_dir, "jmr_ex11.1.Rds"))
# Crossed designs ---------------------------------------------------------
JMRData::ex11.9 %>%
tidyr::gather(condition, puzzles_completed, -subject) %>%
dplyr::mutate(dplyr::across(c(subject, condition), as.factor)) %>%
readr::write_rds(file.path(data_dir, "jmr_ex11.9.Rds"))
JMRData::ex11.17 %>%
purrr::set_names(tolower(names(.))) %>%
tidyr::gather(condition, recall, -subject) %>%
tidyr::separate(condition, c("type", "time"), -1) %>%
dplyr::mutate(dplyr::across(c(subject, type, time), as.factor)) %>%
readr::write_rds(file.path(data_dir, "jmr_ex11.17.Rds"))
# Mixed designs -----------------------------------------------------------
JMRData::ex11.22 %>%
tidyr::gather(sex, rating, Male, Female) %>%
dplyr::mutate(dplyr::across(c(couple, children, sex, yearsmarried), as.factor)) %>%
readr::write_rds(file.path(data_dir, "jmr_ex11.22.Rds"))
|
4236f9ee816e473fc875cfb72be56cb2930a0b00 | 89bd40fc5d18a2ddd2cc481169dec9c489bfebda | /DevMid.R | c356d401ba74edc34dd9286b50048500ac8b4896 | [] | no_license | miguelmariagp/MTerm-Pereira | a8baffe3d214462cf66e4dc29d9c83ccc0ab0687 | 35df982eb9e82d475514d85aa1798226ef8cb33c | refs/heads/master | 2021-01-10T06:31:09.088922 | 2016-03-24T00:57:36 | 2016-03-24T00:57:36 | 54,565,367 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,147 | r | DevMid.R | #Miguel Maria Pereira
#Midterm
# Demo script: rebuilds the integrateIt package with devtools/roxygen2, then
# exercises its Trapezoid and Simpson classes and their methods.
library(devtools)
library(roxygen2)
setwd("C:/Users/ststest/Dropbox/Spr16/Programming/MTerm-Pereira") #This will need to be changed to match your directory
#create("integrateIt")
current.code <- as.package("integrateIt")
load_all(current.code)
document(current.code)
#Example data: a parabola and a cosine sampled on [0, 5]
x1<-seq(0,5,by=.5)
y1<-x1^2
x2<-seq(0,5,by=.5)
y2<-cos(x2)
##########
#Trapezoid
##########
#Creating a Trapezoid object from scratch
tr<-new("Trapezoid",x1,y1,0,2)
#Using the integrateIt method
tr<-integrateIt(X=x1,Y=y1,a=0,b=1,Rule="Trap")
#Using the show method
show(tr)
#Using the print method
print(tr)
#Using the plot method for Trapezoid objects
plot(tr)
tr2<-new("Trapezoid",x2,y2,0,4)
plot(tr2)
####
#Simpson
####
#Creating a Simpson object from scratch
sp<-new("Simpson",x1,y1,0,2)
#Using the integrateIt method
sp<-integrateIt(X=x1,Y=y1,a=0,b=2,Rule="Simp")
#Using the show method
show(sp)
#Using the print method
print(sp)
#Using the plot method for Simpson objects
plot(sp)
sp2<-new("Simpson",x2,y2,0,4)
plot(sp2)
#Testing method tolTest (f is the integrand passed by function, not samples)
f<-function(x) x^2
tolTest(f,0,2,Rule="Trap")
tolTest(f,0,2,Rule="Simp")
f4907c6bad053ac00fc46e4a16a47cdca1990013 | 3c883c8e8f1aad9cfbaeff60998ec9b0df2b7ba0 | /man/colorPalette.Rd | 3cb2cc3cba67292bec6ecd4e8ca9328d02d9fec0 | [] | no_license | genomelab/esFunctions | ec14979d03247120d54972f9b2b9213a5cbcc3cc | e721a3859ce29abdd50c930b213eebe503e6ad26 | refs/heads/master | 2023-05-13T11:23:07.544888 | 2023-05-01T04:51:05 | 2023-05-01T04:51:05 | 17,329,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 354 | rd | colorPalette.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colorPalette.R
\name{colorPalette}
\alias{colorPalette}
\title{Look up a color palette by name}
\usage{
colorPalette(n)
}
\arguments{
\item{n}{The name of the color palette to retrieve.}
}
\description{
uses color palettes of interest
}
\author{
Shahab Asgharzadeh
}
|
6de9cc71ce34d42caa1aa767d6a7fa29c35a6efd | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/ldat/R/sort_ldat.R | 64a5ca3ce0f14ca0cde5afdade1127dd6bffb3ae | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 512 | r | sort_ldat.R |
#' Sort an ldat
#'
#' @param x \code{\link{ldat}} to sort
#' @param decreasing unused (a value not identical to \code{FALSE} will generate
#'   an error).
#' @param ... unused.
#'
#' @return
#' Sorts \code{x} and returns a sorted copy of \code{x}.
#'
#' @examples
#' x <- as_ldat(iris)
#' sort(x)
#'
#' @export
sort.ldat <- function(x, decreasing = FALSE, ...) {
  # Use identical() rather than `!=` so that NA or non-logical values for
  # `decreasing` produce this informative error instead of if()'s
  # "missing value where TRUE/FALSE needed".
  if (!identical(decreasing, FALSE)) stop("decreasing is not supported yet.")
  if (!is_ldat(x)) stop("x should be of type ldat.")
  # Compute the sorting permutation and return a reordered copy; the input
  # object itself is not modified.
  o <- order(x)
  x <- x[o, ]
  x
}
|
2522a123223878597f39e074cb2bac445d5f7f54 | 13895420920703501ab66c28a3927089a2de042e | /man/cluster.loadings.Rd | e96118dae0c46e948ef54dd40dd9cade821943b5 | [] | no_license | cran/psych | 3349b3d562221bb8284c45a3cdd239f54c0348a7 | ee72f0cc2aa7c85a844e3ef63c8629096f22c35d | refs/heads/master | 2023-07-06T08:33:13.414758 | 2023-06-21T15:50:02 | 2023-06-21T15:50:02 | 17,698,795 | 43 | 42 | null | 2023-06-29T05:31:57 | 2014-03-13T05:54:20 | R | UTF-8 | R | false | false | 3,407 | rd | cluster.loadings.Rd | \name{cluster.loadings}
\alias{cluster.loadings}
\title{ Find item by cluster correlations, corrected for overlap and reliability }
\description{
Given a n x n correlation matrix and a n x c matrix of -1,0,1 cluster weights for those n items on c clusters, find the correlation of each item with each cluster. If the item is part of the cluster, correct for item overlap. Part of the \code{\link{ICLUST}} set of functions, but useful for many item analysis problems.
}
\usage{
cluster.loadings(keys, r.mat, correct = TRUE,SMC=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{keys}{ Cluster keys: a matrix of -1,0,1 cluster weights}
\item{r.mat}{ A correlation matrix }
\item{correct}{Correct for reliability}
\item{SMC}{Use the squared multiple correlation as a communality estimate, otherwise use the greatest correlation for each variable}
}
\details{Given a set of items to be scored as (perhaps overlapping) clusters and the intercorrelation matrix of the items, find the clusters and then the correlations of each item with each cluster. Correct for item overlap by replacing the item variance with its average within cluster inter-item correlation.
Although part of ICLUST, this may be used in any SAPA (\url{https://www.sapa-project.org/})
application where we are interested in item-whole correlations of items and composite scales.
For information about SAPA see Revelle et al, 2010, 2016. For information about SAPA based measures of ability, see \url{https://icar-project.org}.
These loadings are particularly interpretable when sorted by absolute magnitude for each cluster (see \code{\link{ICLUST.sort}}).
}
\value{
\item{loadings }{A matrix of item-cluster correlations (loadings)}
\item{cor }{Correlation matrix of the clusters}
\item{corrected }{Correlation matrix of the clusters, raw correlations below the diagonal, alpha on diagonal, corrected for reliability above the diagonal}
\item{sd }{Cluster standard deviations}
\item{alpha }{alpha reliabilities of the clusters}
\item{G6}{G6* Modified estimated of Guttman Lambda 6}
\item{count}{Number of items in the cluster}
}
\references{ ICLUST: \url{https://personality-project.org/r/r.ICLUST.html}
Revelle, W., Wilt, J., and Rosenthal, A. (2010) Individual Differences in Cognition: New Methods for examining the Personality-Cognition Link In Gruszka, A. and Matthews, G. and Szymura, B. (Eds.) Handbook of Individual Differences in Cognition: Attention, Memory and Executive Control, Springer.
Revelle, W, Condon, D.M., Wilt, J., French, J.A., Brown, A., and Elleman, L.G. (2016) Web and phone based data collection using planned missing designs. In Fielding, N.G., Lee, R.M. and Blank, G. (Eds). SAGE Handbook of Online Research Methods (2nd Ed), Sage Publcations.
}
\author{Maintainer: William Revelle \email{revelle@northwestern.edu}
}
\note{ Although part of ICLUST, this may be used in any SAPA application where we are interested in item- whole correlations of items and composite scales.}
\seealso{ \code{\link{ICLUST}}, \code{\link{factor2cluster}}, \code{\link{cluster.cor}} }
\examples{
r.mat<- Harman74.cor$cov
clusters <- matrix(c(1,1,1,rep(0,24),1,1,1,1,rep(0,17)),ncol=2)
cluster.loadings(clusters,r.mat)
}
\keyword{multivariate }% at least one, from doc/KEYWORDS
\keyword{ cluster }% __ONLY ONE__ keyword per line
|
e91cc15618677feb97b87e89732e886871a96ba1 | f1a363867ef9b2edf8fa5729cbebdcc1969a219c | /run_analysis.R | 75f71c3fb90bd2783ed7685f467e5352cc1c015f | [] | no_license | cyberis/TidyDataProject | 97cea3aa5f143e4f332eb89de5e9e9ace7c526f9 | 0e262cbce054486fc06bcc709ab5e1c095ac667b | refs/heads/master | 2016-09-10T14:30:29.429069 | 2014-06-30T15:11:03 | 2014-06-30T15:11:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,886 | r | run_analysis.R | # run_analysis.R - Create a tidy data set for smartphone usage
# Created and Submitted by Christopher Bortz
# For Getting and Cleaning Data, Dr. J, Section 004, June 2 - 30, 2014
# Course Peer-Assessed Project
# Load Required Libraries
library(data.table) # Used for faster data mutation
library(reshape2) # Used to reshape the data into a set of means by subject and activity
# Setup some global configuration variables
resetData <- FALSE # If this is set to true, remove the data directory and start fresh
debugScript <- FALSE # Set this to true to retain intermediate data tables otherwise they are removed
# Setup the Feature Columns that we want and user friendly (sort of) names for them
featureCols <- c(1:6, 41:46, 81:86, 121:126, 161:166, 201:202,
214:215, 227:228, 240:241, 253:254, 266:271, 345:350,
424:429, 503:504, 516:517, 529:530, 542:543)
featureNames <- c("Mean.BodyAccelerationTime.X", # Col 1 <- Col 1: tBodyAcc-mean()-X
"Mean.BodyAccelerationTime.Y", # Col 2 <- Col 2: tBodyAcc-mean()-Y
"Mean.BodyAccelerationTime.Z", # Col 3 <- Col 3: tBodyAcc-mean()-Z
"STD.BodyAccelerationTime.X", # Col 4 <- Col 4: tBodyAcc-std()-X
"STD.BodyAccelerationTime.Y", # Col 5 <- Col 5: tBodyAcc-std()-Y
"STD.BodyAccelerationTime.Z", # Col 6 <- Col 6: tBodyAcc-std()-Z
"Mean.GravityAccelerationTime.X", # Col 7 <- Col 41: tGravityAcc-mean()-X
"Mean.GravityAccelerationTime.Y", # Col 8 <- Col 42: tGravityAcc-mean()-Y
"Mean.GravityAccelerationTime.Z", # Col 9 <- Col 43: tGravityAcc-mean()-Z
"STD.GravityAccelerationTime.X", # Col 10 <- Col 44: tGravityAcc-std()-X
"STD.GravityAccelerationTime.Y", # Col 11 <- Col 45: tGravityAcc-std()-Y
"STD.GravityAccelerationTime.Z", # Col 12 <- col 46: tGravityAcc-std()-Z
"Mean.BodyAccelerationJerkTime.X", # Col 13 <- Col 81: tBodyAccJerk-mean()-X
"Mean.BodyAccelerationJerkTime.Y", # Col 14 <- Col 82: tBodyAccJerk-mean()-Y
"Mean.BodyAccelerationJerkTime.Z", # Col 15 <- Col 83: tBodyAccJerk-mean()-Z
"STD.BodyAccelerationJerkTime.X", # Col 16 <- Col 84: tBodyAccJerk-std()-X
"STD.BodyAccelerationJerkTime.Y", # Col 17 <- Col 85: tBodyAccJerk-std()-Y
"STD.BodyAccelerationJerkTime.Z", # Col 18 <- Col 86: tBodyAccJerk-std()-Z
"Mean.BodyGyroscopeTime.X", # Col 19 <- Col 121: tBodyGyro-mean()-X
"Mean.BodyGyroscopeTime.Y", # Col 20 <- Col 122: tBodyGyro-mean()-Y
"Mean.BodyGyroscopeTime.Z", # Col 21 <- Col 123: tBodyGyro-mean()-Z
"STD.BodyGyroscopeTime.X", # Col 22 <- Col 124: tBodyGyro-std()-X
"STD.BodyGyroscopeTime.Y", # Col 23 <- Col 125: tBodyGyro-std()-Y
"STD.BodyGyroscopeTime.Z", # Col 24 <- Col 126: tBodyGyro-std()-Z
"Mean.BodyGyroscopeJerkTime.X", # Col 25 <- Col 161: tBodyGyroJerk-mean()-X
"Mean.BodyGyroscopeJerkTime.Y", # Col 26 <- Col 162: tBodyGyroJerk-mean()-Y
"Mean.BodyGyroscopeJerkTime.Z", # Col 27 <- Col 163: tBodyGyroJerk-mean()-Z
"STD.BodyGyroscopeJerkTime.X", # Col 28 <- Col 164: tBodyGyroJerk-std()-X
"STD.BodyGyroscopeJerkTime.Y", # Col 29 <- Col 165: tBodyGyroJerk-std()-Y
"STD.BodyGyroscopeJerkTime.Z", # Col 30 <- Col 166: tBodyGyroJerk-std()-Z
"Mean.BodyAccelerationMagnitudeTime", # Col 31 <- Col 201: tBodyAccMag-mean()
"STD.BodyAccelerationMagnitudeTime", # Col 32 <- Col 202: tBodyAccMag-std()
"Mean.GravityAccelerationMagnitudeTime", # Col 33 <- Col 214: tGravityAccMag-mean()
"STD.GravityAccelerationMagnitudeTime", # Col 34 <- Col 215: tGravityAccMag-std()
"Mean.BodyAccelerationJerkMagnitudeTime", # Col 35 <- Col 227: tBodyAccJerkMag-mean()
"STD.BodyAccelerationJerkMagnitudeTime", # Col 36 <- Col 228: tBodyAccJerkMag-std()
"Mean.BodyGyroscopeMagnitudeTime", # Col 37 <- Col 240: tBodyGyroMag-mean()
"STD.BodyGyroscopeMagnitudeTime", # Col 38 <- Col 241: tBodyGyroMag-std()
"Mean.BodyGyroscopeJerkMagnitudeTime", # Col 39 <- Col 253: tBodyGyroJerkMag-mean()
"STD.BodyGyroscopeJerkMagnitudeTime", # Col 40 <- Col 254: tBodyGyroJerkMag-std()
"Mean.BodyAccelerationFreq.X", # Col 41 <- Col 266: fBodyAcc-mean()-X
"Mean.BodyAccelerationFreq.Y", # Col 42 <- Col 267: fBodyAcc-mean()-Y
"Mean.BodyAccelerationFreq.Z", # Col 43 <- Col 268: fBodyAcc-mean()-Z
"STD.BodyAccelerationFreq.X", # Col 44 <- Col 269: fBodyAcc-std()-X
"STD.BodyAccelerationFreq.Y", # Col 45 <- Col 270: fBodyAcc-std()-Y
"STD.BodyAccelerationFreq.Z", # Col 46 <- Col 271: fBodyAcc-std()-Z
"Mean.BodyAccelerationJerkFreq.X", # Col 47 <- Col 345: fBodyAccJerk-mean()-X
"Mean.BodyAccelerationJerkFreq.Y", # Col 48 <- Col 346: fBodyAccJerk-mean()-Y
"Mean.BodyAccelerationJerkFreq.Z", # Col 49 <- Col 347: fBodyAccJerk-mean()-Z
"STD.BodyAccelerationJerkFreq.X", # Col 50 <- Col 348: fBodyAccJerk-std()-X
"STD.BodyAccelerationJerkFreq.Y", # Col 51 <- Col 349: fBodyAccJerk-std()-Y
"STD.BodyAccelerationJerkFreq.Z", # Col 52 <- Col 350: fBodyAccJerk-std()-Z
"Mean.BodyGyroscopeFreq.X", # Col 53 <- Col 424: fBodyGyro-mean()-X
"Mean.BodyGyroscopeFreq.Y", # Col 54 <- Col 425: fBodyGyro-mean()-Y
"Mean.BodyGyroscopeFreq.Z", # Col 55 <- Col 426: fBodyGyro-mean()-Z
"STD.BodyGyroscopeFreq.X", # Col 56 <- Col 427: fBodyGyro-std()-X
"STD.BodyGyroscopeFreq.Y", # Col 57 <- Col 428: fBodyGyro-std()-Y
"STD.BodyGyroscopeFreq.Z", # Col 58 <- Col 429: fBodyGyro-std()-Z
"Mean.BodyAccelerationMagnitudeFreq", # Col 59 <- Col 503: fBodyAccMag-mean()
"STD.BodyAccelerationMagnitudeFreq", # Col 60 <- Col 504: fBodyAccMag-std()
"Mean.BodyAccelerationJerkMagnitudeFreq", # Col 61 <- Col 516: fBodyBodyAccJerkMag-mean()
"STD.BodyAccelerationJerkMagnitudeFreq", # Col 62 <- Col 517: fBodyBodyAccJerkMag-std()
"Mean.BodyGyroscopeMagnitudeFreq", # Col 63 <- Col 529: fBodyBodyGyroMag-mean()
"STD.BodyGyroscopeMagnitudeFreq", # Col 64 <- Col 530: fBodyBodyGyroMag-std()
"Mean.BodyGyroscopeJerkMagnitudeFreq", # Col 65 <- Col 542: fBodyBodyGyroJerkMag-mean()
"STD.BodyGyroscopeJerkMagnitudeFreq" # Col 66 <- Col 543: fBodyBodyGyroJerkMag-std()
)
# Setup Friendly Activity Names (index i corresponds to activity ID i in y_*.txt)
activityNames <- c("Walking", "Walking Upstairs", "Walking Downstairs", "Sitting", "Standing", "Laying")
# Step 0: If resetting data, delete the existing data directory so the
# pipeline starts from a clean download.
if(resetData) {
    if(file.exists("./data")){
        unlink("./data", recursive = TRUE, force = TRUE)
    }
    cat("Data erased.\n")
}
# Step 1: Download the data zip file (both checks are skipped when the
# directory / archive already exist, so re-running the script is cheap).
if(!file.exists("./data")) {
    dir.create("./data")
    cat("Data directory(./data) created.\n")
}
if(!file.exists("./data/Dataset.zip")) {
    download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", "./data/Dataset.zip", method = "curl")
    dateDownloaded <- date()
    cat("Files downloaded at:", dateDownloaded, "\n")
    # Record the download timestamp alongside the data for provenance.
    cat(dateDownloaded, file = "./data/data_as_of.txt")
    cat("Files in ./data:", list.files("./data"), "\n")
}
# Step 2: Unzip the Dataset and Normalize the Feature Data
if(file.exists("./data") && !file.exists("./data/UCI HAR Dataset")) {
    cat("Unzipping files...\n")
    unzip("./data/Dataset.zip", exdir = "./data")
    cat("Directories in ./data/UCI HAR Dataset:\n")
    # BUGFIX: the path previously read "UCI Har Dataset" (wrong case), so on
    # a case-sensitive filesystem list.dirs() silently printed character(0).
    print(list.dirs("./data/UCI HAR Dataset"))
    # NOTE: This is a total hack because fread barfs on leading blanks in a record.
    # Round-tripping through read.table/write.table strips the leading blanks.
    cat("Fixing up feature data sets for use with fread()...\n")
    write.table(read.table("./data/UCI HAR Dataset/test/X_test.txt"),
                "./data/UCI HAR Dataset/test/feature_test.txt",
                row.names = FALSE, col.names = FALSE)
    write.table(read.table("./data/UCI HAR Dataset/train/X_train.txt"),
                "./data/UCI HAR Dataset/train/feature_train.txt",
                row.names = FALSE, col.names = FALSE)
    cat("feature_test.txt and feature_train.txt created.\n")
}
# Step 3: Read in our data files into data tables (fast!)
# Only the mean/std feature columns listed in featureCols are loaded.
cat("Loading data into data.tables...\n")
dtFeatureTst <- fread("./data/UCI HAR Dataset/test/feature_test.txt",
                      select = featureCols)
dtSubjectTst <- fread("./data/UCI HAR Dataset/test/subject_test.txt")
dtActivityTst <- fread("./data/UCI HAR Dataset/test/y_test.txt")
dtFeatureTrn <- fread("./data/UCI HAR Dataset/train/feature_train.txt",
                      select = featureCols)
dtSubjectTrn <- fread("./data/UCI HAR Dataset/train/subject_train.txt")
dtActivityTrn <- fread("./data/UCI HAR Dataset/train/y_train.txt")
# Step 4: Add Column Names to data table columns (setnames is fast with no copy of data table)
cat("Labelling data...\n")
setnames(dtFeatureTst, featureNames)
setnames(dtSubjectTst, "Subject.ID")
setnames(dtActivityTst, "Activity.ID")
setnames(dtFeatureTrn, featureNames)
setnames(dtSubjectTrn, "Subject.ID")
setnames(dtActivityTrn, "Activity.ID")
# Step 5: Add a factor with friendly names to the Activity data table.
# levels = 1:6 is given explicitly so the ID -> label mapping stays correct
# even if some of the six activities are absent from a file; with labels but
# no levels, factor() would mis-map or error on missing levels.
dtActivityTst[,Activity:=factor(Activity.ID, levels = 1:6, labels = activityNames)]
dtActivityTrn[,Activity:=factor(Activity.ID, levels = 1:6, labels = activityNames)]
# Step 6: Join the Test data tables together and the Training tables together
# (column-wise: subject, activity, then the selected features).
cat("Merging data...\n")
dtTest <- cbind(dtSubjectTst, dtActivityTst, dtFeatureTst)
dtTrain <- cbind(dtSubjectTrn, dtActivityTrn, dtFeatureTrn)
# Step 7: Combine the Test Data and the Training Data (row-wise)
dtData <- rbindlist(list(dtTrain,dtTest))
# Step 8: Set Key for Optimal Sorting and subsetting
setkey(dtData, Subject.ID, Activity)
dtData[ , Activity.ID := NULL] # We no longer need this column
# Step 9: Create our Tidy Data Set from our big data table:
# melt to long form, then average each feature per (subject, activity).
cat("Summarizing data...\n")
dtMelt <- melt(dtData, id = c("Subject.ID", "Activity"))
tidyData <- dcast(dtMelt, Subject.ID + Activity ~ variable, mean)
# Step 10: Write out Our Tidy Data Set
cat("Writing Tidy Data set to ./data/UCITidyData.txt...\n")
write.table(tidyData, "./data/UCITidyData.txt", row.names = FALSE)
# Step 11: Clean up - Remove Intermediate data tables
# (kept when debugScript is TRUE to allow inspection)
if(!debugScript) {
    rm(dtActivityTrn, dtSubjectTrn, dtFeatureTrn,
       dtActivityTst, dtSubjectTst, dtFeatureTst,
       dtTest, dtTrain, dtMelt)
    cat("Intermediate results culled.\n")
}
cat("Done.\n")
|
b53c0b34650dc97878b2d998b6da694064d38236 | f7aefdf8ec31201e25b79e09dd46c84017632611 | /man/getHansard.Rd | d360ed13e847ed820f61bae7e4dfc9ea5b4d1e37 | [] | no_license | conjugateprior/twfy | a5ef6542e5ea2adc8127ae02097940ce6da471f6 | 8855b20584d9b757b6822b81d7409caa7d2d579a | refs/heads/master | 2021-03-19T12:31:09.531181 | 2018-10-20T11:07:20 | 2018-10-20T11:07:20 | 92,121,927 | 9 | 3 | null | 2018-10-20T10:59:31 | 2017-05-23T02:51:18 | R | UTF-8 | R | false | true | 593 | rd | getHansard.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/twfy.R
\name{getHansard}
\alias{getHansard}
\title{Search Hansard}
\usage{
getHansard(search = NULL, person = NULL, order = c("d", "r"),
page = NULL, num = NULL)
}
\arguments{
\item{search}{A search string}
\item{person}{A person identifier}
\item{order}{Whether to order results by date or relevance. Defaults to date.}
\item{page}{Which page of results to provide. Defaults to the first page.}
\item{num}{Number of results to return.}
}
\value{
Search results
}
\description{
This needs much more documentation.
}
|
10f54fbff37be2d5b85c9da0da92a95a3dadf388 | f005715862d8df805df2103f946d295b50ce4375 | /ui.R | 92084b6bbb8b4d04d5fa35ee80954a5c4dcb9ff5 | [] | no_license | raulmarquezgil/dev_data_products_w4 | 0833dd8feaa8cdeffd70f0d30c65e7751ad17c37 | 20d0c768d7f9034678a6eb86b3c9abcbab88e384 | refs/heads/master | 2020-04-13T17:06:42.030536 | 2018-12-27T21:52:21 | 2018-12-27T21:52:21 | 163,338,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,887 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(plotly)
# Define UI for application that draws a histogram
# UI definition: the user enters a location as degrees + minutes of latitude
# and longitude, chooses how many nearby cities to list and the distance
# units; the server side renders the coordinate text and a plotly chart.
shinyUI(fluidPage(
  # Application title
  titlePanel("Find the nearest cities to your location"),
  sidebarLayout(
    sidebarPanel(width = 4,
      # Latitude, split into whole degrees and minutes.
      # (Label typo "Degress" corrected to "Degrees".)
      sliderInput("lat_deg",
                  "Latitude Degrees:",
                  min = -90,
                  max = 90,
                  value = 0,
                  step = 1),
      sliderInput("lat_min",
                  "Latitude Minutes:",
                  min = 0,
                  max = 59,
                  value = 0,
                  step = 1),
      br(), br(),
      # Longitude, same degrees + minutes scheme.
      sliderInput("lon_deg",
                  "Longitude Degrees:",
                  min = -180,
                  max = 180,
                  value = 0,
                  step = 1),
      sliderInput("lon_min",
                  "Longitude Minutes:",
                  min = 0,
                  max = 59,
                  value = 0,
                  step = 1),
      br(), br(),
      # Display options.
      numericInput(inputId = "numcities",
                   label = "Number of cities to show:",
                   value = 10,
                   min = 1,
                   max = 20,
                   step = 1),
      selectInput(inputId = "units",
                  label = "Choose units:",
                  choices = c("kms", "miles"))
    ),
    # Main panel: textual echo of the chosen coordinates plus the chart.
    mainPanel(width = 8,
      textOutput("coords"),
      br(),
      plotlyOutput("cityplot", height = "650px")
    )
  )
))
|
f016a8365580d5d3f6014abf1e6de54ff97895fa | 5ef7227e8bb481381fd72a90d6fd15a1a9254002 | /man/scoreKmers.Rd | 8f01646a64472bc474672510b49a485baadbaf17 | [] | no_license | BussemakerLab/SelexGLM | 7a7b6110ba692966b32feba5d3cb726d753a9e25 | 9254ee2d1bb5b7ea7c1cefe125dc9b5bcc5dfdc9 | refs/heads/master | 2021-08-26T05:30:35.882556 | 2017-11-21T18:11:45 | 2017-11-21T18:11:45 | 111,583,849 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 624 | rd | scoreKmers.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scoreKmers.R
\name{scoreKmers}
\alias{scoreKmers}
\title{Score K-mer Sequences}
\usage{
scoreKmers(data, model, l.index, PSAMcol = "PredictedAffinity",
seqCol = "Kmer")
}
\arguments{
\item{data}{Table of k-mers.}
\item{model}{Object of class \linkS4class{model}.}
\item{l.index}{Left-most position of PSAM to be used for scoring,}
\item{PSAMcol}{Name for PSAM affinity column.}
\item{seqCol}{Name for k-mer variable to be scored.}
}
\description{
Scores Kmers using model beta values for all feature parameters estimated with regression.
}
|
42a3cc7c8b1d86942a75df140af5e903d25c3a80 | 966411ec3358509e5a66d6c01dd957f784c1d767 | /r/team-year.r | 6d38ad373f3cdab72429bc952dfe63149ed6593c | [] | no_license | marciobarros/sbse-ant-unirio | 529197f0dd1a69938c71e494e8aae3de4d3fd970 | 0ff6a65a9c19692a881b378ef84f2f5a2f892159 | refs/heads/master | 2020-05-19T19:13:48.506018 | 2014-02-27T17:40:32 | 2014-02-27T17:40:32 | 11,187,077 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 915 | r | team-year.r | data <- read.table("/Users/Marcio/Documents/GitHub/Pesquisa/SBSE/sbse-ant-unirio/log_years.data", header=TRUE);
# Per-year team statistics from the commit log loaded into `data` above:
# team size, developers who joined (inTeam) / left (outTeam) relative to the
# previously processed year, number of revisions, and the standard deviation
# of commits per author (NAR).
years <- rev(unique(data$year));
columns <- c("team", "inTeam", "outTeam", "rev", "NAR");
result <- matrix(nrow=length(years), ncol=length(columns), dimnames=list(years, columns));
oldTeam <- c();  # empty on the first iteration, so inTeam = whole team then
for (year_ in years)
{
	vdata <- subset(data, year == year_);
	developers <- split(vdata, vdata$author);
	team <- unique(vdata$author);
	inTeam <- setdiff(team, oldTeam);
	outTeam <- setdiff(oldTeam, team);
	oldTeam <- team;
	commits <- unlist(lapply(developers, nrow));
	commits <- subset(commits, commits > 0);
	print(commits);  # debug output of per-author commit counts
	# BUGFIX: index the result row by its dimname instead of the positional
	# expression `year_ - 2000 + 1`. The rows of `result` are named after
	# `years` (a reversed sequence), so positional indexing wrote each
	# year's statistics into a row labelled with a different year.
	row <- as.character(year_);
	result[row, "team"] <- length(team);
	result[row, "inTeam"] <- length(inTeam);
	result[row, "outTeam"] <- length(outTeam);
	result[row, "rev"] <- nrow(vdata);
	result[row, "NAR"] <- sd(commits);
}
result
|
6adca4984b313fdda6fd74a34e92bb66f404b02e | 1e42b9829b85bc37d112ec5b8efa1682264297b2 | /man/idle_time.Rd | 3404e51ac60c9a210207bd95dbea7dba4a36becb | [] | no_license | strategist922/edeaR | ca83bf91f58e685bc9333f4db3bfea3d8c019343 | ad96118cccfdc90a7bed94f5aef2ee0cfab3aac8 | refs/heads/master | 2021-07-05T04:30:35.286640 | 2017-09-27T12:25:04 | 2017-09-27T12:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 797 | rd | idle_time.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/idle_time.R
\name{idle_time}
\alias{idle_time}
\title{Metric: Idle Time}
\usage{
idle_time(eventlog, level_of_analysis = c("log", "case", "trace", "resource"),
units = c("hours", "days", "weeks", "mins"))
}
\arguments{
\item{eventlog}{The event log to be used. An object of class
\code{eventlog}.}
\item{level_of_analysis}{At which level the analysis of activity type frequency should be performed: log, trace, case, resource.}
\item{units}{Time units to be used}
}
\description{
Calculates the amount of time that no activity occurs for a case or for a resource. At log level it gives summary statistics of all cases in the log. At trace level it provides summary statistics of all cases related to this trace.
}
|
7695b434ba18158533d05653ad576bc6a3eef472 | f251702f2fac57ee8f0513cbf17512cbaac58ec2 | /code/quickpred_custom.R | 74558d132fb0c20bb7306290a7ddda441be08df5 | [
"MIT"
] | permissive | kylelang/burn-data-synthesis | f04fae55584c4dca861ed1335a38fcdaa94aedc0 | 78875a39fcc29a2ab14edd7376659b36a8e8013e | refs/heads/main | 2023-09-03T13:29:21.222459 | 2021-11-10T12:35:34 | 2021-11-10T12:35:34 | 419,653,017 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,526 | r | quickpred_custom.R | ### Title: Custom Quickpred
### Author: Gerko Vink (with modifications by Kyle M. Lang)
### Created: 2015-06
### Modified: 2021-10-25
### Purpose: Updated mice::quickpred() to accomodate maximum number of predictors as a selection criterion
# quickpredCustom: like mice::quickpred(), builds a 0/1 predictor matrix for
# imputation, but additionally supports `maxnumber`, a cap on the number of
# predictors kept per variable (the `maxnumber` largest correlations win;
# `mincor` is ignored when `maxnumber` is supplied).
#
# Arguments:
#   data      matrix or data frame with at least two columns.
#   maxnumber optional maximum number of predictors per variable.
#   mincor    minimum absolute correlation for inclusion (used only when
#             maxnumber is NULL).
#   minpuc    minimum proportion of usable cases; predictors below it are
#             dropped (requires mice::md.pairs when > 0).
#   include   variable names always used as predictors ("" = none).
#   exclude   variable names never used as predictors ("" = none).
#   method    correlation method passed to cor().
#
# Value: square 0/1 matrix (rows = imputation targets, cols = predictors);
# rows for completely observed variables are all zero.
quickpredCustom <- function(data,
                            maxnumber = NULL,
                            mincor = 0.1,
                            minpuc = 0,
                            include = "",
                            exclude = "",
                            method = "pearson")
{
  ## Argument checking:
  if (!(is.matrix(data) || is.data.frame(data)))
    stop("Data should be a matrix or data frame")
  if ((nvar <- ncol(data)) < 2)
    stop("Data should contain at least two columns")
  if (!is.null(maxnumber)) {
    if (maxnumber > (ncol(data) - 1)) # Added GV 7 Dec 2014
      stop("The maximum number of predictors per variable exceeds the number
           of available predictors. Solution: decrease `maxnumber`")
  }
  ## Initialize
  predictorMatrix <- matrix(0,
                            nrow = nvar,
                            ncol = nvar,
                            dimnames = list(names(data), names(data))
                            )
  x <- data.matrix(data)
  r <- !is.na(x)  # response (observed-value) indicator matrix
  ## Calculate correlations among data (warnings from constant/all-NA
  ## columns are suppressed; the resulting NAs are zeroed below):
  suppressWarnings(
    v <- abs(cor(x, use = "pairwise.complete.obs", method = method))
  )
  v[is.na(v)] <- 0
  ## Calculate correlations between data and response indicators:
  suppressWarnings(
    u <- abs(cor(y = x, x = r, use = "pairwise.complete.obs", method = method))
  )
  u[is.na(u)] <- 0
  ## Choose the stronger of the two correlations from above:
  maxc <- pmax(v, u)
  ## Either keep the `maxnumber` highest-ranked predictors per variable,
  ## or threshold on mincor:
  if (!is.null(maxnumber)) {
    diag(maxc) <- 0  # a variable never predicts itself
    # ties.method = "first" (spelled out; previously partial-matched via
    # `ties=`) breaks ties deterministically by column order.
    varRanks <- t(apply(maxc, 1, function(row) rank(row, ties.method = "first")))
    predictorMatrix[varRanks > (nvar - maxnumber)] <- 1
  } else {
    predictorMatrix[maxc > mincor] <- 1
  }
  ## Exclude predictors with a percentage usable cases below minpuc:
  if (minpuc > 0) {
    p <- md.pairs(data)  # from the mice package
    puc <- p$mr/(p$mr + p$mm)
    predictorMatrix[puc < minpuc] <- 0
  }
  ## Exclude predictors listed in the exclude argument.
  ## pmatch("") yields NA, and an NA index with a length-1 replacement is a
  ## silent no-op, so the defaults leave the matrix untouched.
  yz <- pmatch(exclude, names(data))
  predictorMatrix[, yz] <- 0
  ## Include predictors listed in the include argument
  yz <- pmatch(include, names(data))
  predictorMatrix[, yz] <- 1
  ## Some final processing
  diag(predictorMatrix) <- 0
  predictorMatrix[colSums(!r) == 0, ] <- 0  # complete variables need no predictors
  predictorMatrix
}
|
f0d5b05bc9d4b0ec2d517328e88754be0e94ec64 | b95a42680a1f135ec63e6d2cfb4767bc09cb3e2b | /sowbugs_plotmaybe.R | 78dbc677ab9ce83272e63fbb42e3138dfab46b46 | [] | no_license | enifmada/Soup-Troop-makes-reproducible-code | e6e1784cf0d52667dcb1c951ea5bb684c6c85131 | db0be6480214c6cc2655801005b3290dc0b2ef9e | refs/heads/master | 2020-07-25T02:55:45.706447 | 2019-09-14T20:41:42 | 2019-09-14T20:41:42 | 208,142,733 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,398 | r | sowbugs_plotmaybe.R | #Plot # of sowbugs on X axis
library(tidyverse)
library(RMKdiscrete)
library(ggplot2)
cole_arthropod_data_1946 <- read_csv('cole_arthropod_data_1946.csv')
# Build a ggplot of observed sowbug counts overlaid with the theoretical
# Poisson (dark green '#006400') and LGP (orchid) expected counts that the
# script below attaches as columns of the data frame.
#
# NOTE(review): the initial ggplot() maps x=sowbug_counts and
# y=arthropod_count_index, while every subsequent layer maps
# x=arthropod_count_index -- the axes disagree; confirm the intended
# orientation before relying on this plot.
# NOTE(review): the bare `g_sowbugs` expressions inside the body are no-ops
# (ggplot objects only auto-print at top level), and the final orchid point
# layer plots count_sowbugs_p_theoretical rather than
# count_sowbugs_L_theoretical -- presumably copy/paste leftovers; verify.
plot_sowbug_counts <- function(sowbug_data) {
  # Base layer: observed counts.
  g_sowbugs <- ggplot(sowbug_data, aes(x=sowbug_counts, y=arthropod_count_index)) +
    geom_point(size = 3) + xlab("# of sowbugs") + ylab("count")
  g_sowbugs
  # Dotted green line over the observed data plus large open squares at the
  # Poisson expected counts.
  g_sowbugs <- g_sowbugs + geom_line(data=sowbug_data, aes(x=arthropod_count_index, y=sowbug_counts, colour='#006400'),
                        linetype='dotted', colour='#006400') +
    geom_point(data=sowbug_data, aes(x=arthropod_count_index, y=count_sowbugs_p_theoretical),
               colour='#006400', shape=0, size = 5)
  # Dashed green line and smaller open squares, again at the Poisson
  # expected counts.
  g_sowbugs <- g_sowbugs + geom_line(data=sowbug_data, aes(x=arthropod_count_index, y=count_sowbugs_p_theoretical),
                                     linetype='dashed', colour='#006400') +
    geom_point(data=sowbug_data, aes(x=arthropod_count_index, y=count_sowbugs_p_theoretical),
               colour='#006400', shape=0, size = 3)
  g_sowbugs
  # Dashed orchid line at the LGP expected counts; the points reuse the
  # Poisson column (see NOTE above).
  g_sowbugs <- g_sowbugs + geom_line(data=sowbug_data, aes(x=arthropod_count_index, y=count_sowbugs_L_theoretical),
                                     linetype='dashed', colour='orchid') +
    geom_point(data=sowbug_data, aes(x=arthropod_count_index, y=count_sowbugs_p_theoretical),
               colour='orchid', shape=0, size = 3)
  g_sowbugs
  return(g_sowbugs)
}
# Total number of sowbugs observed (count index weighted by frequency).
total_number_sowbugs <- sum(cole_arthropod_data_1946$arthropod_count_index*cole_arthropod_data_1946$sowbug_counts)
# Total number of board observations.
total_board_observations <- sum(cole_arthropod_data_1946$sowbug_counts)
# Average sowbugs per observation (the Poisson lambda estimate).
avg_sowbugs_per_obs <- total_number_sowbugs/total_board_observations
# Theoretical Poisson probabilities: dpois(x, lambda, log = FALSE).
p_theoretical_sowbugs <- dpois(cole_arthropod_data_1946$arthropod_count_index, avg_sowbugs_per_obs)
# NOTE(review): identical to the line above -- duplicated computation.
p_theoretical_sowbugs <- dpois(cole_arthropod_data_1946$arthropod_count_index, avg_sowbugs_per_obs)
# Poisson theoretical number of times you would observe k sowbugs (new column).
cole_arthropod_data_1946$count_sowbugs_p_theoretical <- total_board_observations*p_theoretical_sowbugs
# Theoretical probabilities with the Lagrangian generalized Poisson:
# dLGP(x, theta, lambda, nc = NULL, log = FALSE).
lambda2 <- 0.53214
lambda1 <- avg_sowbugs_per_obs*(1-lambda2)
L_theoretical_sowbugs <- dLGP(cole_arthropod_data_1946$arthropod_count_index,lambda1,lambda2)
# LGP theoretical number of times you would observe k sowbugs (new column).
cole_arthropod_data_1946$count_sowbugs_L_theoretical <- total_board_observations*L_theoretical_sowbugs
# Theta (or lambda2 in the workbook) is 0, as stated in the workbook. When
# lambda2 = 0 the LGP reduces to the ordinary Poisson distribution.
# NOTE(review): this overwrites the fitted LGP probabilities above with the
# lambda = 0 (pure Poisson) version, but the result is never written back to
# the data frame (the assignment below is commented out) -- confirm which
# parameterization the plot is meant to show.
L_theoretical_sowbugs <- dLGP(cole_arthropod_data_1946$arthropod_count_index, theta = avg_sowbugs_per_obs, lambda = 0)
# LGP theoretical count column (left disabled):
#cole_arthropod_data_1946$count_sowbugs_L_theoretical <- total_board_observations*L_theoretical_sowbugs
cole_arthropod_data_1946$count_sowbugs_p_theoretical <- total_board_observations*p_theoretical_sowbugs
plot_sowbug_counts(cole_arthropod_data_1946)
6098fd6943e9e3fa47c27b5de5efc836019f5986 | 9dafb02b339a2adedfa854215e60d2a88d2fdcb0 | /class05/class05.R | d11dbe4c6f2b21a57a47245e44b8255efb27c396 | [] | no_license | rohkan/bimm143 | d2d8ba370a3585425af4836a49da6dfc2818143b | af1bacb013d3b08259697e99cc7626b908e92385 | refs/heads/master | 2020-04-04T06:36:22.188857 | 2018-12-04T18:13:18 | 2018-12-04T18:13:18 | 155,750,352 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,935 | r | class05.R | #Baby weight data input
# Baby weight vs. age: line-and-point plot.
weight <- read.table("bimm143_05_rstats/weight_chart.txt", header=TRUE)
#make a custom plot
plot(weight,type="o", main="Baby Weight with Age",pch=15, cex=1.5, lwd=2, ylim=c(2,10), xlab="Age (months)", ylab="Weight(kg)")
#1b bar plot of mouse genome feature counts (wide left margin for labels)
feature_count <- read.table("bimm143_05_rstats/feature_counts.txt", sep="\t", header=TRUE)
par(mar=c(3.1,11.1,4.1,2))
barplot(feature_count$Count, horiz= TRUE, ylab="", main="Mouse gene features", las=1, names.arg = feature_count$Feature, xlim=c(0,80000))
#2c histogram of two overlapping normal samples (offset by 4)
hist(c(rnorm(10000), rnorm(10000)+4), breaks=80)
# TODO(review): original note said "look up cbind for boxplot binds things by"
# -- intent unclear; presumably about combining columns for boxplot().
#section 3: per-sample counts, first with a rainbow palette
m_f_counts<- read.table("bimm143_05_rstats/male_female_counts.txt", header=TRUE, sep="\t")
col=rainbow(nrow(m_f_counts))
par(mar=c(4,4,4,4))
barplot(m_f_counts$Count,col=col, names.arg=m_f_counts$Sample, las=2, ylab="counts", main="Male and Female Counts")
#version where male and female samples are colored differently
#(the two colors are recycled across the alternating bars)
col_sep=c("blue", "red")
barplot(m_f_counts$Count,col=col_sep, names.arg=m_f_counts$Sample, las=2, ylab="counts", main="Male and Female Counts")
# Gene expression scatter plot colored by up/down/unchanging State.
genes <- read.table("bimm143_05_rstats/up_down_expression.txt", header=TRUE, sep="\t")
#used palette() and levels(genes$State) to see the factor order and then match up desired colors
palette(c("blue", "gray", "red"))
plot(genes$Condition1, genes$Condition2, col=genes$State)
#color by point density (methylation vs expression)
meth <- read.table("bimm143_05_rstats/expression_methylation.txt", header=TRUE, sep="\t")
dcols=densCols(meth$gene.meth, meth$expression)
plot(meth$gene.meth, meth$expression, col=dcols, pch=20)
# Restrict to genes with positive expression and re-color by density.
inds <- meth$expression>0
dcols=densCols(meth$gene.meth[inds], meth$expression[inds])
plot(meth$gene.meth[inds], meth$expression[inds], col=dcols, pch=20)
# change colramp to a custom blue->yellow density ramp
dcols=densCols(meth$gene.meth[inds], meth$expression[inds], colramp = colorRampPalette(c("blue", "green", "red", "yellow")))
plot(meth$gene.meth[inds], meth$expression[inds], col=dcols, pch=20 )
|
9e9b279bb90009c48ae6c408cbca7b0b792bd979 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/relsurv/examples/rsmul.Rd.R | f9776f94cb41034f518a03c671b4d3c56c02da5b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 543 | r | rsmul.Rd.R | library(relsurv)
### Name: rsmul
### Title: Fit Andersen et al Multiplicative Regression Model for Relative
### Survival
### Aliases: rsmul
### Keywords: survival
### ** Examples
# Load the Slovenian population rate table and the example survival data.
data(slopop)
data(rdata)
#fit a multiplicative model
#note that the variable year is given in days since 01.01.1960 and that
#age must be multiplied by 365.241 in order to be expressed in days.
fit <- rsmul(Surv(time,cens)~sex+as.factor(agegr),rmap=list(age=age*365.241),
ratetable=slopop,data=rdata)
#check the goodness of fit (Brownian-bridge residual plots)
rs.br(fit)
|
d0e56bda2cbfcbf8193e56fa1c7853e5250318be | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/reportr/examples/reportr.Rd.R | 6c40220e08277c7a8aec862f4829893fb14182e9 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 643 | r | reportr.Rd.R | library(reportr)
### Name: reportr
### Title: The reportr message reporting system
### Aliases: reportr OL setOutputLevel getOutputLevel withReportrHandlers
###   ask report flag reportFlags clearFlags assert
### ** Examples
# Messages below the current output level are suppressed
setOutputLevel(OL$Warning)
report(Info, "Test message") # no output
# Lowering the threshold to Info lets the same message through
setOutputLevel(OL$Info)
report(Info, "Test message") # prints the message
# flag() queues warnings instead of emitting them immediately
flag(Warning, "Test warning") # no output
flag(Warning, "Test warning") # repeated warning
reportFlags() # consolidates the warnings and prints the message
# Interactive example (requires a console), therefore not run
## Not run:
##D name <- ask("What is your name?")
##D report(OL$Info, "Hello, #{name}")
## End(Not run)
|
459efdad6825fd3405f2df1a12c64cd6c8d0ce2f | 205e1e0a2e23f362b7987804ebe8e17a23ac6010 | /tests/testthat/test-apexcharter.R | c5aeef683b2b204919ca477bc97fd120e17f3c8c | [
"MIT"
] | permissive | dreamRs/apexcharter | 97f93ec61d2ad96f8bf2446fe50e2cb22f4824df | 11d244e9922a9abe41aee90124224d8f5cababa9 | refs/heads/master | 2023-06-22T11:11:57.709837 | 2023-06-14T12:05:06 | 2023-06-14T12:05:06 | 142,926,526 | 135 | 15 | NOASSERTION | 2023-03-22T15:30:53 | 2018-07-30T20:47:09 | R | UTF-8 | R | false | false | 328 | r | test-apexcharter.R | context("apexcharter")
# Smoke test: apexchart() on an empty option list must still return an
# htmlwidget of class "apexcharter"
test_that("apexchart works", {
  ax <- apexchart(list())
  expect_is(ax, "apexcharter")
})
# add_locale_apex() should attach a list of locale definitions to the chart
# options when chart$defaultLocale is set (called bare in the pipe: magrittr
# accepts a function name without parentheses)
test_that("add_locale_apex works", {
  ax <- apexchart(list(chart = list(defaultLocale = "fr"))) %>%
    add_locale_apex
  expect_is(ax, "apexcharter")
  expect_is(ax$x$ax_opts$chart$locales, "list")
})
|
5ba0134bd7d2e04880e6a45a8ee6f84214e63d55 | f00760356174e55eca3939bc18b4ee471cb5c7d1 | /code/03-descriptive-statistics.r | 0a7e8e44f3e032695565aba26817bc1a6d65cf83 | [
"MIT"
] | permissive | NetDem-USC/homepage-experiment | 14b93d7ebfec5253225c2e2a9cf1e51350bc4f7b | e5d205f28a315262cb3754433df3eb483643f8ad | refs/heads/main | 2023-04-02T22:17:20.514801 | 2021-03-29T20:26:04 | 2021-03-29T20:26:04 | 351,467,945 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,127 | r | 03-descriptive-statistics.r | #===============================================================================
# File: 03-descriptive-statistics.R
# Date: Feb 3, 2021
# Purpose: replicate appendix analyses: panel attrition, issue scaling, agenda
# setting, and compiling descriptive statistics
# Data In:
# ./data/survey_data.csv
# ./data/daily_pulse_data.csv
#===============================================================================
# PACKAGES
#===============================================================================
library(readr)
library(tidyverse)
library(estimatr)
library(ggplot2)
library(psych)
library(haven)
# devtools::install_github("hofnerb/papeR")
library(papeR)
source('code/functions.r')
# DATA
#===============================================================================
# Read the daily browsing ("pulse") data and the panel survey data
pulse <- read_csv("data/daily_pulse_data.csv")
svy <- read_csv("data/survey_data.csv")
# FIGURE 2: PANEL ATTRITION
#===============================================================================
# completes: cmp[[w]] is a logical completion indicator for wave w, one entry
# per panelist. 212 extra entries are appended to every wave (TRUE for wave 1,
# FALSE otherwise) -- presumably respondents present in wave 1 only;
# NOTE(review): the magic number 212 should be derived from the data, confirm.
cmp <- list()
w1_extra <- rep(TRUE, 212)
cmp[[1]] <- c(!is.na(svy$W1_endtime), w1_extra)
cmp[[2]] <- c(!is.na(svy$W2_endtime), !w1_extra)
cmp[[3]] <- c(!is.na(svy$endtime_w3), !w1_extra)
cmp[[4]] <- c(!is.na(svy$endtime), !w1_extra)
cmp[[5]] <- c(!is.na(svy$W5_endtime), !w1_extra)
cmp[[6]] <- c(!is.na(svy$W6_endtime), !w1_extra)
cmp[[7]] <- c(!is.na(svy$W7_endtime), !w1_extra)
cmp[[8]] <- c(!is.na(svy$endtime_w8), !w1_extra)
# how many completed waves 2, 3, 4?
(tab <- table(cmp[[2]] & cmp[[3]] & cmp[[4]]))
# Attrition share; the 1339 / 1037 figures are hard-coded from the table above
# -- NOTE(review): recompute from `tab` rather than hard-coding.
(1339 - 1037)/1339
# res holds, for each wave pair (x = wave of comparison, y = reference wave):
# n = number completing, mis = attrition share
res <- expand.grid(x = 1:8, y = 1:8, n = NA, mis = NA)
for (i in 1:8){
  for (j in 1:8){
    sl <- which(res$x==i & res$y==j)
    if (i == j){
      # Diagonal: completion of a single wave
      res$n[sl] <- sum(cmp[[i]])
      res$mis[sl] <- 1 - mean(cmp[[i]])
    }
    if (i != j){
      # Off-diagonal: completion of wave i among completers of wave j
      xy <- cmp[[i]][ cmp[[j]] ]
      res$n[sl] <- sum(xy)
      res$mis[sl] <- 1 - mean(xy)
    }
  }
}
# Tile labels: "N (attrition %)"; diagonal shows only "N=..."
# (display() comes from the papeR package)
res$label <- paste0(res$n, "\n(", display(res$mis, pct=TRUE), "%)")
res$label[res$x==res$y] <- paste0("N=", res$n[res$x==res$y])
# Lower-triangle heat map of attrition between wave pairs
p <- ggplot(res[res$x>=res$y,], aes(x=x, y=y, fill=mis))
pq <- p + geom_tile() + theme_linedraw() +
  geom_text(aes(label=label), color="white", size=5) +
  scale_fill_continuous("% attrition",
                        labels=scales::percent_format(accuracy=1)) +
  scale_x_continuous("Respondent wave of comparison",
                     breaks=1:8, expand=c(0,0)) +
  scale_y_continuous("Respondent's first wave", breaks=1:8,
                     expand=c(0,0)) +
  theme(panel.grid = element_blank(),
        panel.border = element_blank())
pq
# Save the figure in both vector and raster form
ggsave(pq, file="graphs/appendix_fig2.pdf", width=10, height=6)
ggsave(pq, file="graphs/appendix_fig2.png", width=10, height=6)
# TABLE 4: ISSUE OPINIONS SCALE
#===============================================================================
# Principal component analysis (2 factors, varimax rotation) of the pre-treatment
# policy items; missing responses are mean-imputed. Only the loadings are shown.
psych::principal(svy %>% dplyr::select("policy1_gc_pre", "policy3_nafta_pre", "policy4_pp_pre",
    "policy5_biz1_pre", "policy6_iso_pre", "policy8_biz2_pre", "policy7_ss",
    "policy9_nk_pre", "policy10_harass_pre", "policy11_islam_pre", "policy12_cc_pre",
    "policy13_fbi_pre", "policy14_imm_pre"),
  nfactors = 2, rotate = "varimax", missing=TRUE, impute = "mean")$loadings
# TABLE 5: IMMIGRATION SCALE
#===============================================================================
# Same PCA on the three immigration items, control-group respondents only
principal(filter(svy, W3_PATA306_treatment_w3 == "Control") %>%
    dplyr::select(policy14_imm, imm2, imm3),
  nfactors = 2, rotate = "varimax")$loadings
# FIGURE 5: AGENDA-SETTING
#===============================================================================
# agenda setting: for each of the 21 topics, compute the Democrat-minus-Republican
# difference in the share of respondents naming the topic in waves 2/3.
agendas <- array(NA, 21)
for(i in 1:21) {
  # Recode the two wave items to 0/1 mention indicators and combine: the inner
  # gsub(2, 0, .) maps code 2 to 0, the outer gsub(2, 1, .) collapses the sum so
  # naming the topic in either wave counts once.
  # NOTE(review): gsub() on numeric vectors works via character coercion here;
  # confirm the survey codes really are 1 = named, 2 = not named.
  tmp <- as.numeric(gsub(2, 1, (as.numeric(gsub(2, 0,
    get(paste0("W2_PATA2_1_m_", i), svy))) +
    as.numeric(gsub(2, 0, get(paste0("W3_PATA300_", i, "_w3"), svy))))))
  agendas[i] <- mean(tmp[which(svy$partylean == "Democrat")], na.rm = TRUE) -
    mean(tmp[which(svy$partylean == "Republican")], na.rm = TRUE)
}
# Topic labels in the same order as the questionnaire items indexed above
df <- data.frame(
  topics = c("Economy/unemployment", "Relationship with North Korea",
    "Relationship with Western countries", "Intl trade imbalances",
    "Immigration", "Terrorism", "Inequality",
    "Racism", "Morality and values", "Health care",
    "Crime", "Islam", "Fake news", "Political polarization",
    "Donald Trump and his administration", "Gun control",
    "Women's rights", "Identity politics", "Alt-right movement",
    "Black Lives Matter", "Free speech"),
  lean = agendas,
  color = ifelse(agendas>0, "blue", "red")
)
# Order topics by partisan lean; blue = more Democratic, red = more Republican
df <- df[order(df$lean),]
df$topics <- factor(df$topics, levels=df$topics)
# Horizontal diverging bar chart with topic labels drawn inside the plot area
p <- ggplot(df, aes(x=topics, y=lean, fill=color))
pq <- p + geom_col() +
  coord_flip() +
  scale_fill_manual(values=c("blue", "red")) +
  theme_minimal() +
  geom_text(data=df[df$lean>0,],
    aes(label=topics, x=topics, y=-0.01), hjust=1, size=3) +
  geom_text(data=df[df$lean<0,],
    aes(label=topics, x=topics, y=0.01), hjust=0, size=3) +
  theme(axis.title.y = element_blank(),
    axis.text.y = element_blank(),
    legend.position="none",
    panel.grid.major.y = element_blank()) +
  scale_y_continuous("Partisan asymmetry in agenda setting, by topic")
pq
# Save the figure in both vector and raster form
ggsave(pq, file="graphs/appendix_fig5.pdf", width=10, height=4)
ggsave(pq, file="graphs/appendix_fig5.png", width=10, height=4)
# TABLE 6: DESCRIPTIVE STATISTICS
#===============================================================================
# Dropping observations where treatment is missing
svy <- svy[!is.na(svy$W3_PATA306_treatment_w3),]
# Bin respondent age into four groups (right-closed intervals, lowest age included)
svy$age_labels <- cut(svy$age, breaks = c(min(svy$age, na.rm = TRUE), 29, 44, 59,
                                          max(svy$age, na.rm = TRUE)),
                      labels = rev(c("60+", "45-59", "30-44", "18-29")),
                      include.lowest = TRUE, right = TRUE)
# Convert labelled survey variables (haven) to factors for the summary table
svy$pid3 <- as_factor(svy$W1_pid3)
svy$gender <- as_factor(svy$W1_gender)
svy$educ_factor <- as_factor(svy$W1_educ)
svy$raceeth <- as_factor(svy$raceeth)
# LaTeX frequency table via papeR::summarize + xtable; hline.after places
# separators between the variable groups
print.xtable(xtable(papeR::summarize(svy,
    variable.labels = c("Party ID", "Gender", "Race", "Education level", "Age group"),
    type = "factor",
    variables = c("pid3", "gender", "raceeth", "educ_factor", "age_labels"))),
  include.rownames = FALSE, hline.after = c(3, 5, 9, 15),
  only.contents = TRUE, include.colnames = FALSE)
# TABLES 7 & 8: COVARIATE BALANCE
#===============================================================================
# Dropping observations where treatment is missing (redundant if the Table 6
# block already ran, but keeps this section self-contained)
svy <- svy[!is.na(svy$W3_PATA306_treatment_w3),]
# Candidate covariates; only the subsets selected by the index vectors below
# actually enter the balance tests
vars <- c("party7", "age", "agesq", "female", "raceeth", "educ",
          "ideo", "income", "employ", "state", "polint", "freq_tv", "freq_np",
          "freq_rad", "freq_net", "freq_disc", "log_news_pre", "diet_mean_pre")
# Fox News vs Control: drop the HuffPost arm, then run a blocked
# difference-in-means test (estimatr) for every remaining covariate
dat <- svy %>% select(W3_PATA306_treatment_w3, W3_Browser_treatment_w3, vars[1:2], vars[4],
                      vars[6:8], vars[12:18]) %>%
  filter(W3_PATA306_treatment_w3 != "HuffPost")
dat %>% select(-W3_PATA306_treatment_w3, -W3_Browser_treatment_w3) %>%
  map(~ difference_in_means(.x ~ W3_PATA306_treatment_w3, blocks = W3_Browser_treatment_w3,
                            data = dat)) %>% map_df(tidy, .id = "var") %>%
  select(var, estimate, p.value) %>% knitr::kable("latex", digits = 3,
    caption = "Balance: Fox News treatment vs. Control")
# HuffPost vs Control: same test with the Fox News arm dropped
dat <- svy[svy$W3_PATA306_treatment_w3 != "FoxNews",
           c("W3_PATA306_treatment_w3", "W3_Browser_treatment_w3",
             vars[1:2], vars[4], vars[6:8], vars[12:18])]
dat %>% select(-W3_PATA306_treatment_w3, -W3_Browser_treatment_w3) %>%
  map(~ difference_in_means(.x ~ W3_PATA306_treatment_w3, blocks = W3_Browser_treatment_w3,
                            data = dat)) %>% map_df(tidy, .id = "var") %>%
  select(var, estimate, p.value) %>% knitr::kable("latex", digits = 3,
    caption = "Balance: HuffPost treatment vs. Control")
|
61bf0aca0c088cfc777b2f9a2ae6198cbe752ef6 | 033c440e58943c1b4092eb30398ed50c839093f2 | /man/TS_load_example_data.Rd | 75b5236a9c78f37b0c8da016670d797e3c27cb07 | [
"MIT"
] | permissive | Ylefol/TimeSeriesAnalysis | 462c9e0cf6f430ca2d92182a613e71768739453e | 2a612f23d451ec90dac354fd11a13b69ea680370 | refs/heads/master | 2023-07-06T10:05:29.277659 | 2023-06-29T07:50:57 | 2023-06-29T07:50:57 | 522,526,978 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 922 | rd | TS_load_example_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/object.R
\name{TS_load_example_data}
\alias{TS_load_example_data}
\title{Add counts and sample data of examples to a TimeSeriesObject}
\usage{
TS_load_example_data(time_object)
}
\arguments{
\item{time_object}{A timeseries object}
}
\value{
The timeseries object with the raw count matrix added to it as well as the sample data
}
\description{
A function that takes an existing TimeSeriesObject and adds the specified
example data to it. The added data are the raw count matrix and the sample data.
}
\examples{
TS_object <- new('TimeSeries_Object',
group_names=c('IgM','LPS'),group_colors=c("#e31a1c","#1f78b4"),DE_method='DESeq2',
DE_p_filter='padj',DE_p_thresh=0.05,DE_l2fc_thresh=1,
PART_l2fc_thresh=4,sem_sim_org='org.Hs.eg.db',Gpro_org='hsapiens')
TS_object <- TS_load_example_data(TS_object)
}
|
f9d76cf1ffc108de4de3788b134bf68e8591960c | aba27c2349e7d48cb704159997bd9aea582856dd | /Fifa_Analysis_Final | a056951a20989e396fe8bc1231020a1298c870a7 | [] | no_license | elcheffito/Advanced-Programming-Fifa-Analysis | 195b9a2c60dd7f05efcef524df5f76641dc4c55b | 34e8dd337fdd53f774a142b68dc75344e3181ba6 | refs/heads/master | 2020-11-25T21:38:33.283431 | 2019-12-20T22:21:13 | 2019-12-20T22:21:13 | 228,857,515 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,720 | Fifa_Analysis_Final | # Analysis of Football players' Fifa ratings based on their market value, wage and other characteristics
# 7,784: Skills: Programming with Advanced Computer Languages
# Alice Miceli - Dominik Nellen - Julian Staubli - Oliver Radon - Sandro Roth
# [FIFA 19 player dataset](https://www.kaggle.com/karangadiya/fifa19/)
# Install necessary packages
install.packages("styler")
install.packages("tidyverse")
install.packages("readxl")
install.packages("xts")
install.packages("zoo")
install.packages("dplyr")
install.packages("rmarkdown")
install.packages("ggplot2")
install.packages("tidyverse")
install.packages("plyr")
install.packages("stargazer")
install.packages("plot3D")
install.packages("plotly")
install.packages ("maps")
install.packages("WDI")
install.packages("countrycode")
install.packages("viridis")
install.packages("conflicted")
# Load the necessary packages
library("readxl")
library("zoo")
library("xts")
library("plyr")
library("dplyr")
library("styler")
library("rmarkdown")
library("ggplot2")
library("readr")
library("stargazer")
library("tidyverse")
library("plot3D")
library("plotly")
library("maps")
library("WDI")
library("countrycode")
library("conflicted")
require("maps")
require("viridis")
###########################################################################
###########################################################################
### ###
### 1: Import and tidy the data ###
### ###
###########################################################################
###########################################################################
# Please set working directory (setwd())
# Load the dataset
Fifa_data <- read_excel("data.xls")
# Tidy data
# Delete unnecessary columns (e.g. link to picture of player)
Fifa_data <- Fifa_data[,-c(1, 2, 5, 7, 11, 20, 21)]
# Delete players with no information on wage and value.
# BUG FIX: the old two-step removal computed both index vectors on the original
# data frame but applied the second one AFTER rows had already been dropped
# (stale row indices), and `x[-integer(0), ]` would have deleted every row when
# no player matched. Computing one index vector and guarding the empty case
# fixes both problems.
drop_rows <- which(Fifa_data$Value == 0 | Fifa_data$Wage == 0)
if (length(drop_rows) > 0) {
  Fifa_data <- Fifa_data[-drop_rows, ]
}
# Delete duplicate players (keep the first occurrence of each name)
Fifa_data <- distinct(Fifa_data, Name, .keep_all = TRUE)
# Change column name "Overall" to "Player_Rating"
colnames(Fifa_data)[4] = "Player_Rating"
# Create new columns that show the players' market value in millions and their wage in thousands
Fifa_data$Market_Value <- Fifa_data$Value/1000000
Fifa_data$Wage_in_k <- Fifa_data$Wage/1000
###########################################################################
###########################################################################
### ###
### 2: Overview of relevant data ###
### ###
###########################################################################
###########################################################################
# Calculate the average, minimum and maximum of the players' ratings
mean_Player_Rating <- mean(Fifa_data$Player_Rating)
max_Player_Rating <- max(Fifa_data$Player_Rating)
min_Player_Rating <- min(Fifa_data$Player_Rating)
# Calculate the average, minimum and maximum of the players' market values
mean_Value <- mean(Fifa_data$Market_Value)
max_Value <- max(Fifa_data$Market_Value)
min_Value <- min(Fifa_data$Market_Value)
# Calculate the average, minimum and maximum of the players' wages
mean_Wage <- mean(Fifa_data$Wage_in_k)
max_Wage <- max(Fifa_data$Wage_in_k)
min_Wage <- min(Fifa_data$Wage_in_k)
# Calculate the quantiles and the standard deviation of the players' ratings
sd_Player_Rating <- sd(Fifa_data$Player_Rating)
quant_Player_Rating <- quantile(Fifa_data$Player_Rating, c(0.25, 0.5, 0.75))
# Calculate the quantiles and the standard deviation of the players' market values
sd_Value <- sd(Fifa_data$Market_Value)
quant_Value <- quantile(Fifa_data$Market_Value, c(0.25, 0.5, 0.75))
# Calculate the quantiles and the standard deviation of the players' wages
sd_Wage <- sd(Fifa_data$Wage_in_k)
quant_Wage <- quantile(Fifa_data$Wage_in_k, c(0.25, 0.5, 0.75))
# Gather and print the statistical data of players' ratings, market values (in millions)
# and wages (in thousands). One row per measure (rating / value / wage); the row
# names come from the rbind of the three named quantile vectors.
Data_Summary <- data.frame(Mean = c(mean_Player_Rating, mean_Value, mean_Wage),
                           Std = c(sd_Player_Rating, sd_Value, sd_Wage),
                           Min = c(min_Player_Rating, min_Value, min_Wage),
                           Q = rbind(quant_Player_Rating, quant_Value, quant_Wage),
                           Max = c(max_Player_Rating, max_Value, max_Wage))
Data_Summary
###########################################################################
###########################################################################
### ###
### 3: Data regression functions ###
### ###
###########################################################################
###########################################################################
# Distinguish the relationship between the players' ratings in Fifa and their market value
# Plot the players' ratings against the players' market values
plot(Fifa_data$Market_Value, Fifa_data$Player_Rating,
     col = "darkblue",
     type = "p",
     cex = 0.5,
     pch = 20,
     cex.main = 0.9,
     main = "Players' ratings and their market values",
     xlab = "Market value (in millions)",
     ylab = "Player rating")
# Define a linear, quadratic and cubic regression model
linear_model <- lm(Player_Rating ~ Market_Value, data = Fifa_data)
quadratic_model <- lm(Player_Rating ~ Market_Value + I(Market_Value^2), data = Fifa_data)
cubic_model <- lm(Player_Rating ~ poly(Market_Value, degree = 3, raw = TRUE), data = Fifa_data)
# Plot the linear regression line
abline(linear_model,
       col = "red",
       lwd = 2)
# Sort the players according to their market value so that lines() draws the
# fitted curves left-to-right instead of criss-crossing
order_id <- order(Fifa_data$Market_Value)
# Add the quadratic and cubic model regression line
lines(x = Fifa_data$Market_Value[order_id],
      y = fitted(quadratic_model)[order_id],
      col = "darkgreen",
      lwd = 2)
lines(x = Fifa_data$Market_Value[order_id],
      y = fitted(cubic_model)[order_id],
      col = "violet",
      lwd = 2)
# Define a linear-log model (rating against the log of market value)
linearlog_model <- lm(Player_Rating ~ log(Market_Value), data = Fifa_data)
# Add the linear-log model regression line
lines(Fifa_data$Market_Value[order_id],
      fitted(linearlog_model)[order_id],
      col = "darkred",
      lwd = 2)
# Add a legend to the plot
legend("bottomright", legend=c("Linear regression",
                               "Quadratic regression",
                               "Cubic regression",
                               "Linear-log regression"),
       col=c("red", "darkgreen", "violet", "darkred"), lwd = 2)
# Get the statistical summaries of the 4 regression models
summary(linear_model)
summary(quadratic_model)
summary(cubic_model)
summary(linearlog_model)
# The linear-log model has the highest R squared (0.8388, as reported when the
# summaries above are run) of all the 4 regression models.
# Therefore, it most adequately describes the relationship.
# Additionally, adding further variables can increase the explanatory power of the regression model
# Add players' ages and wages as additional variables to the linear-log regression model
Multi_linearlog_model <- lm(Player_Rating ~ log(Market_Value) + Age + Wage_in_k, data = Fifa_data)
summary(Multi_linearlog_model)
# Adding these variables increases the R squared to 0.957 (highly accurate regression model).
###########################################################################
###########################################################################
### ###
### 4: Analyze Swiss players ###
### ###
###########################################################################
###########################################################################
# Identify Swiss players and create a new column ("TRUE"/"FALSE" as character,
# matching the style used elsewhere in this script)
Fifa_data$Swiss_Player <- as.character(Fifa_data$Nationality == "Switzerland")
Swiss <- Fifa_data$Swiss_Player == "TRUE"
# How good are the Swiss players rated in Fifa in an international comparison?
# Plot all players except the Swiss.
# BUG FIX: `[-Swiss]` negated a *logical* vector, which R coerces to c(-1, 0, ...)
# and therefore only dropped the first row of the data; `[!Swiss]` correctly
# excludes every Swiss player.
plot(Fifa_data$Market_Value[!Swiss], Fifa_data$Player_Rating[!Swiss],
     col = "green",
     pch = 20,
     cex = 0.5,
     cex.main = 1.2,
     xlim = c(0,120),
     ylim = c(60,95),
     main = "The ratings and market values of Swiss players",
     xlab = "Market value (in millions)",
     ylab = "Player's rating")
# Add the Swiss players in red color
points(Fifa_data$Market_Value[Swiss], Fifa_data$Player_Rating[Swiss],
       pch = 4,
       cex = 1,
       col = "red")
# Add a legend to the plot
legend("bottomright", legend=c("Swiss players",
                               "Non Swiss players"),
       col=c("red", "green"), pch = c(4, 20))
###########################################################################
###########################################################################
### ###
### 5: Analyze a specific country ###
### ###
###########################################################################
###########################################################################
# Identify players from a specific country and plot them
# Additionally plot all other players in a different color
# User can input a country of choice and see their ratings in international comparison
# Define a function that searches and gathers the players' rating and market value of a specified country (user input necessary)
# Scatter plot of player rating vs market value highlighting one nationality
# (user input) against all other players.
#
# Args:
#   Nationality_Players: character scalar; must match a value of Fifa_data$Nationality
# Side effects: draws a base-graphics plot.
# Raises: an error ("Please make a valid input") for unknown nationalities.
marketvalue_by_Nationality <- function (Nationality_Players){
  if(Nationality_Players %in% Fifa_data$Nationality) {
    # Identify the players that match the input (logical row mask; the old
    # intermediate character column and the dead bare expression were removed)
    id <- Fifa_data$Nationality == Nationality_Players
    # Plot all players except the ones from the country input.
    # BUG FIX: `[-id]` negated a *logical* vector; R coerces that to c(-1, 0, ...),
    # which only drops the first row. `[!id]` excludes every matching player.
    plot(Fifa_data$Market_Value[!id], Fifa_data$Player_Rating[!id],
         col = "yellow",
         type = "p",
         pch = 20,
         cex = 0.5,
         cex.main = 0.9,
         # FIX: corrected the typo "Payers'" in the plot title
         main = "Players' ratings and their market values",
         xlab = "Market value in millions",
         ylab = "Player rating")
    # Add the players from the defined country to the plot in a different colour
    points(Fifa_data$Market_Value[id], Fifa_data$Player_Rating[id],
           pch = 4,
           cex = 0.8,
           col = "darkblue")
    # Add a legend to the plot
    legend("bottomright", legend=c(Nationality_Players,
                                   "Other countries"),
           col=c("darkblue", "yellow"), pch = c(4, 20))
  }
  # Request user to enter a valid input if initial input is invalid
  else {stop("Please make a valid input")}
}
# User can change the input according to a country of choice (e.g. Croatia)
marketvalue_by_Nationality("Croatia")
###########################################################################
###########################################################################
### ###
### 6: 3D Analysis ###
### ###
###########################################################################
###########################################################################
# Generate a 3D analysis
# Analyze players' ratings, market value and wage of a specific country (user can enter input)
# Visualize the resulting values in a 3D plot
# Define a function that searches for the market values, the wages and ratings of players of a defined country (user can choose the country)
# 3D scatter plot (plot3D::scatter3D) of market value vs wage vs rating for the
# players of one nationality (user input).
#
# Args:
#   Nationality_Players: character scalar; must match a value of Fifa_data$Nationality
# Side effects: draws a 3D scatter plot.
# Raises: an error ("Please make a valid input") for unknown nationalities.
rating_by_Nationality <- function (Nationality_Players){
  if(Nationality_Players %in% Fifa_data$Nationality) {
    # Identify the players that match the input (logical row mask).
    # FIX: dropped the dead bare `Fifa_data$country_players` expression and the
    # throwaway character column; a direct logical comparison is equivalent.
    id <- Fifa_data$Nationality == Nationality_Players
    # Plot the players' ratings against the players' market values and wages
    x <- Fifa_data$Market_Value[id]
    y <- Fifa_data$Wage_in_k[id]
    z <- Fifa_data$Player_Rating[id]
    scatter3D(x, y, z, phi = 0, bty = "g", type = "h",
              ticktype = "detailed", pch = 19, cex = 0.5,cex.lab=0.7, cex.axis=0.5,
              xlab = "Value in millions", ylab = "Wage in thousands", zlab = "Player rating",
              main = c(Nationality_Players, "Player rating, market value and wage analysis"), cex.main = 0.9)
  }
  # Request user to enter a valid input if initial input is invalid
  else {stop("Please make a valid input")}
}
# Change input according to a country of choice (e.g. France)
rating_by_Nationality("France")
###########################################################################
###########################################################################
### ###
### 7: Boxplot of age distribution ###
### ###
###########################################################################
###########################################################################
# Analyze the median, the minimum and maximum age of a specific country (input can be chosen)
# Show the distribution of the age in an interactive boxplot
# Define a function that searches for the age of players of a defined country
# Print the min, mean and max age of players from one country (user input) and
# return an interactive plotly boxplot of the age distribution.
#
# Args:
#   Nationality_Players: character scalar; must match a value of Fifa_data$Nationality
# Returns: a plotly object (side effect: prints the three summary numbers).
# Raises: an error ("Please make a valid input") for unknown nationalities.
Average_Age_by_Nationality <- function (Nationality_Players){
  # Guard clause: request user to enter a valid input if the nationality does
  # not occur in the data set
  if (!(Nationality_Players %in% Fifa_data$Nationality)) {
    stop("Please make a valid input")
  }
  # Identify the players that match the input (logical row mask)
  id <- Fifa_data$Nationality == Nationality_Players
  # Print min, mean and max age for the given country
  print(min(Fifa_data$Age[id]))
  print(mean(Fifa_data$Age[id]))
  print(max(Fifa_data$Age[id]))
  # Plot the age distribution in an interactive boxplot.
  # FIX: previously this call sat *after* the if/else and relied on `id`
  # leaking out of the `if` branch; with the guard clause above, `id` is
  # guaranteed to exist when the plot is built.
  p <- plot_ly(data.frame(Fifa_data$Age[id]), type = "box", y = Fifa_data$Age[id],
               color = I("red"), x = Nationality_Players, marker = list(color = "blue"))
  p
}
# Change input according to a country of choice (e.g. Serbia)
Average_Age_by_Nationality("Serbia")
###########################################################################
###########################################################################
### ###
### 8: Calculate the highest wage per position ###
### ###
###########################################################################
###########################################################################
# Determine which player has the highest wage on a given position and what the wage is (input can be chosen)
# Define a function that searches for the wage of a player on a given position
# Print the highest wage (in thousands) paid on a given position and the name(s)
# of the player(s) earning it.
#
# Args:
#   Input_Position: character scalar; must match a value of Fifa_data$Position
# Raises: an error ("Please make a valid input") for unknown positions.
max_wage_by_position <- function (Input_Position){
  # Guard clause: reject positions that do not occur in the data set
  if (!(Input_Position %in% Fifa_data$Position)) {
    stop("Please make a valid input")
  }
  # Rows of players on the requested position
  on_position <- Fifa_data$Position == Input_Position
  # Highest wage (in thousands) among those players
  top_wage <- max(Fifa_data$Wage_in_k[on_position], na.rm = TRUE)
  # Name(s) of the player(s) earning that wage on that position
  top_players <- Fifa_data$Name[which(Fifa_data$Wage_in_k == top_wage & on_position)]
  # Report the wage first, then the matching name(s)
  print(top_wage)
  print(top_players)
}
# Print all valid positions which exist and can be entered in function by the user
print(unique(Fifa_data$Position))
# Choose a position and find out which player has the highest wage and what the wage is (in thousands)
max_wage_by_position("LCM")
###########################################################################
### ###
### 9: Market value and wage per club ###
### ###
###########################################################################
###########################################################################
# Determine the average market value and age for a specifiy club (input can be chosen)
# Plot the players' market value and wage for a specific club
# Define a function that searches for the market value and the wage of players for a specific club
# Scatter plot of wage vs market value for the players of one club (user input),
# with a legend stating the club's rounded average value and wage.
#
# Args:
#   Input_Club: character scalar; must match a value of Fifa_data$Club
# Side effects: draws a base-graphics plot.
# Raises: an error ("Please make a valid input") for unknown clubs.
Value_wage_by_club <- function (Input_Club){
  if(Input_Club %in% Fifa_data$Club) {
    # Identify the players that match the input (logical row mask).
    # FIX: removed the dead bare `Fifa_data$right_club` expression and the
    # throwaway character column; a direct logical comparison is equivalent.
    id <- Fifa_data$Club == Input_Club
    # Average market value and wage for the specified club, rounded for the legend
    value_id <- round(mean(Fifa_data$Market_Value[id], na.rm = TRUE), digits = 1)
    wage_id <- round(mean(Fifa_data$Wage_in_k[id], na.rm = TRUE), digits = 1)
    # Plot the players' market value and wage
    plot(x = Fifa_data$Wage_in_k[id],
         y = Fifa_data$Market_Value[id],
         main = c(Input_Club, "Market value and wage"),
         ylab = "Market value (in millions)",
         xlab = "Wage (in thousands)",
         ylim = c(0,120),
         pch = 20,
         col = "blue")
    # Add a legend stating the averages
    legend("topleft", legend = c(paste("Average market value: ", value_id, "millions"),
                                 paste("Average wage: ", wage_id, "thousand")))
  }
  # Request user to enter a valid input if initial input is invalid
  else {stop("Please make a valid input")}
}
# Choose a club to see the market values and the wages of the players (e.g. Arsenal)
Value_wage_by_club("Arsenal")
###########################################################################
###########################################################################
### ###
### 10: World map with average player ratings per country ###
### ###
###########################################################################
###########################################################################
# Give an overview of the average Fifa rating per country on a world map
# Rename the column "Nationality" to "region" in the Fifa data set,
# in order to match the name of the variable with the map_data("world") data set
colnames(Fifa_data)[3] = "region"
# Specify which package to use in case of conflict
conflict_prefer("mutate", "dplyr")
conflict_prefer("summarize", "dplyr")
# Change the country names in Fifa_data to the spellings used by map_data("world")
# (several football associations are merged into one map region, e.g. England,
# Wales and Scotland into "UK")
Score_Country <- Fifa_data %>%
  mutate(region = ifelse(region == "United States", "USA", region)) %>%
  mutate(region = ifelse(region == "China PR", "China", region)) %>%
  mutate(region = ifelse(region == "England", "UK", region)) %>%
  mutate(region = ifelse(region == "Wales", "UK", region)) %>%
  mutate(region = ifelse(region == "Scotland", "UK", region)) %>%
  mutate(region = ifelse(region == "Republic of Ireland", "Ireland", region)) %>%
  mutate(region = ifelse(region == "Northern Ireland", "Ireland", region)) %>%
  mutate(region = ifelse(region == "DR Congo", "Democratic Republic of the Congo", region)) %>%
  mutate(region = ifelse(region == "Congo", "Republic of Congo", region)) %>%
  mutate(region = ifelse(region == "Korea Republic", "South Korea", region)) %>%
  mutate(region = ifelse(region == "Korea DPR", "North Korea", region)) %>%
  mutate(region = ifelse(region == "Central African Rep.", "Central African Republic", region)) %>%
# Calculate the mean of the Fifa player rating per region
  select(Player_Rating, region) %>%
  group_by(region) %>%
  summarize(
    n = n(),
    mean_Player_Rating = mean(Player_Rating, na.rm = TRUE)
  ) %>%
  ungroup()
# Access the coordinates for each country
world_map <- map_data("world")
# Remove Antarctica as not relevant for this data set
world_map <- subset(world_map, region!="Antarctica")
# Left join world map with mean player rating per country
Average_Score.map <- left_join(Score_Country, world_map, by = "region")
# Create map with countries coloured by mean of the player rating
Average_Score_map <- ggplot(Average_Score.map, aes(map_id = region, fill = mean_Player_Rating)) +
  geom_map(map = Average_Score.map, color = "black") +
  expand_limits(x = Average_Score.map$long, y = Average_Score.map$lat) +
  scale_fill_viridis_c(option = "C")
# Create empty map in order to display all countries (also the countries that do not have Fifa players)
Average_Score_map <- Average_Score_map + geom_map(dat=world_map, map = world_map,
                                   aes(map_id=region), fill="white", color="black")
# Put filled world map over empty world map
Average_Score_map <- Average_Score_map + geom_map(map = world_map,
                                  aes(map_id = region, fill = mean_Player_Rating),
                                  colour = "black")
# Fit limits to the full world extent and print the map
Average_Score_map <- Average_Score_map + expand_limits(x = world_map$long, y = world_map$lat)
Average_Score_map
| |
9d6b910bb3b633ce13a77c0f0e15c417c0ff347e | 5183d8d039e97a90ed8044f3169ffc328f4e0903 | /plot3.R | 31a9d5e41fac293fac278763b101dde0b9f25baf | [
"MIT"
] | permissive | unistbig/CellEnrich | c0edc0cc93bcc18f97d2da85a6f604e3ea899fd5 | 27af32d461a867e780310a2b5c22f1d13bd6488f | refs/heads/master | 2023-06-23T17:17:46.580861 | 2020-07-23T12:58:23 | 2020-07-23T12:58:23 | 239,447,156 | 1 | 3 | NOASSERTION | 2023-06-19T00:50:38 | 2020-02-10T06:55:08 | R | UTF-8 | R | false | false | 5,610 | r | plot3.R | # plot3.R
# compare between methods
# Diverging bar chart comparing pathway findings per cell type: pathways found
# only by the chosen CellEnrich parameter(s), only by Fisher, or by both
# ('Intersect'). Fisher counts are drawn downwards (negated).
#
# Args:
#   compare: data frame with (at least) columns Cell, Pathway, Param
#   Params:  CellEnrich parameter value(s) to compare against 'Fisher'
# Returns: a ggplot object.
plot3 <- function(compare, Params){
  Params <- c(Params, 'Fisher')
  compare2 <- compare %>%
    inner_join(
      # For each (Cell, Pathway): reported by a single method ('Unique') or
      # by several ('Duplicate')?
      compare %>%
        filter(Param %in% Params) %>%
        group_by(Cell, Pathway) %>%
        summarise(Count = n()) %>%
        mutate(Count2 = ifelse(Count == 1, 'Unique', 'Duplicate')) %>%
        select(-Count)
    ) %>%
    filter(Param %in% Params) %>%
    mutate(Param = ifelse(Count2 == 'Unique', Param, 'Intersect')) %>%
    distinct() %>%
    select(-Count2) %>%
    group_by(Cell, Param) %>%
    summarise(Count = n()) %>%
    # Fisher bars point downwards; all non-Fisher parameters are relabelled
    mutate(Count = ifelse(Param=='Fisher', -Count, Count)) %>%
    mutate(Param = ifelse(!Param %in% c('Fisher', 'Intersect'), 'CellEnrich', Param))
  # FIX: removed the unused `maxV` computation (dead code).
  # NOTE(review): the y-axis is hard-coded to [-50, 50]; ggplot2's ylim()
  # *removes* bars outside the limits rather than clipping them -- confirm the
  # counts never exceed 50, or switch to coord_cartesian(ylim = ...).
  gobj <- ggplot(compare2, aes(x = Cell, y = Count, fill = Param)) +
    geom_bar(
      stat = 'identity',
      position = 'identity',
      width = 0.6,
      colour = '#2d3436'
    ) +
    ylim(-50, 50) +
    scale_fill_manual(values = c('#74b9ff', '#fdcb6e', '#00b894')) +
    labs(title = paste(Params, collapse = ' VS '))
  return(gobj)
}
# UNIQ FUNCTION: ad-hoc pipeline (operates on the global `compare` table) that
# writes method-unique pathway rows to stdout as CSV.
compare %>% inner_join(
  # Per (Pathway, Param): how often the pair occurs for the two selected methods
  compare %>%
    filter(Param %in% c('Fisher', '0.5')) %>%
    group_by(Pathway, Param) %>%
    summarise(Count = n())
) %>%
  # Keep rows whose (Pathway, Param) pair occurs exactly once
  # -- NOTE(review): this counts occurrences per pathway/method, not pathways
  # unique to one method; confirm this matches the intended "UNIQ" semantics.
  filter(Count==1) %>%
  # Relabel every non-Fisher parameter value as 'CellEnrich'
  mutate(Param = ifelse(Param=='Fisher', Param, 'CellEnrich')) %>%
  select(-Count) %>%
  write.csv(quote = FALSE, row.names = FALSE)
# Variant of plot3: unique findings keep their method label with a 'U' suffix
# (e.g. 'FisherU') instead of being collapsed into an 'Intersect' category, so
# the chart separates shared from method-unique pathways per cell type.
# Fisher-side counts are drawn downwards (negated).
#
# Args:
#   compare: data frame with (at least) columns Cell, Pathway, Param
#   Params:  CellEnrich parameter value(s) to compare against 'Fisher'
# Returns: a ggplot object.
plot4 <- function(compare, Params){
  Params <- c(Params, 'Fisher')
  compare2 <- compare %>%
    inner_join(
      # For each (Param, Pathway): reported once ('Unique') or repeatedly?
      compare %>%
        filter(Param %in% Params) %>%
        group_by(Param, Pathway) %>%
        summarise(Count = n()) %>%
        mutate(Count2 = ifelse(Count == 1, 'Unique', 'Duplicate')) %>%
        select(-Count)
    ) %>%
    filter(Param %in% Params) %>%
    mutate(Param = ifelse(grepl('Fisher', Param),Param, 'CellEnrich' )) %>%
    # Tag unique findings with a 'U' suffix so they get their own fill colour
    mutate(Param = ifelse(Count2 == 'Unique', paste0(Param, 'U'), Param)) %>%
    select(-Count2) %>%
    group_by(Cell, Param) %>%
    summarise(Count = n()) %>%
    mutate(Count = ifelse(grepl('Fisher',Param), -Count, Count))
  # FIX: removed the unused `maxV` computation and the large commented-out
  # draft of an alternative pipeline (dead code).
  # NOTE(review): y-axis hard-coded to [-50, 50]; ggplot2's ylim() drops bars
  # outside the limits -- confirm counts never exceed 50.
  gobj <- ggplot(compare2, aes(x = Cell, y = Count, fill = Param)) +
    geom_bar(
      stat = 'identity',
      position = 'identity',
      width = 0.6,
      colour = '#2d3436'
    ) +
    ylim(-50, 50) +
    scale_fill_manual(values = c('#ffeaa7', '#fdcb6e', '#74b9ff', '#0984e3')) +
    labs(title = paste(Params, collapse = ' VS '))
  return(gobj)
}
plot3(compare, 0.5)
plot3(compare, 0.3)
plot3(compare, 0.1)
plot4(compare, 0.5)
plot4(compare, 0.3)
plot4(compare, 0.1)
# plot5 boxplot odd ratio?
compare <- read.csv('mixture.csv')
compare[which(compare$Cell=='NA?VE'),'Cell'] <- 'NAIVE'
plot5 <- function(value){
Params <- paste0( c('CELLENRICH', 'SCMERGE'), value)
compare2 <- compare %>%
inner_join(
compare %>%
filter(Param %in% Params) %>%
group_by(Cell, Pathway) %>%
dplyr::summarise(Count = n()) %>%
mutate(Count2 = ifelse(Count == 1, 'Unique', 'Duplicate')) %>%
select(-Count)
) %>%
filter(Param %in% Params) %>%
mutate(Param = ifelse(Count2 == 'Unique', Param, 'Intersect')) %>%
distinct() %>%
select(-Count2) %>%
group_by(Cell, Param) %>%
dplyr::summarise(Count = n()) %>%
mutate(Count = ifelse(Param == paste0('SCMERGE', value), -Count, Count)) %>%
mutate(Param = ifelse(!Param %in% c(paste0('SCMERGE', value), 'Intersect'), 'CellEnrich', Param))
maxV <- max(abs(compare2$Count))
gobj <- ggplot(compare2, aes(x = Cell, y = Count, fill = Param)) +
geom_bar(
stat = 'identity',
width = 0.6,
colour = '#2d3436'
) +
ylim(-50, 50) +
scale_fill_manual(values = c('#74b9ff', '#fdcb6e', '#00b894')) +
labs(title = paste(Params, collapse = ' VS '))
return(gobj)
}
plot5(0.1)
plot5(0.3)
plot5(0.5)
Cells <- unique(compare$Cell)
res <- data.frame()
for(i in 1:length(Cells)){
res <- rbind(res,
compare %>%
filter(grepl('0.1',Param)) %>%
inner_join(
compare %>%
filter(Cell==Cells[i]) %>%
filter(grepl('0.1',Param)) %>%
arrange(Pathway) %>%
group_by(Cell, Pathway) %>%
dplyr::summarise(Count = n()) %>%
filter(Count == 1) %>% select(-Count)
)
)
}
res %>%
mutate(Param = ifelse(grepl('CELLENRICH', Param), 'CellEnrich', 'scMerge')) %>%
arrange(Param) %>%
write.csv(quote = FALSE, row.names = FALSE, file = 'tmp.csv')
compare %>% inner_join(
compare %>%
filter(grepl('0.3', Param)) %>%
group_by(Pathway, Param) %>%
dplyr::summarise(Count = n())
) %>%
filter(Count==1) %>%
#mutate(Param = ifelse(Param=='Fisher', Param, 'CellEnrich')) %>%
select(-Count) %>%
write.csv(quote = FALSE, row.names = FALSE, file = 'tmp.csv')
|
f6ae5ed7f2f5c166b5b4a3eb79144c2cc543b9e8 | 2b5d2142b792c3499e351c39b4ea72a36354b1b2 | /tests/testthat/test-io-ext-params.R | 9da1b7621d2d1115ed0480f1f598000273d6943f | [] | no_license | tonyelhabr/teproj | 862037146090c0a0d4cfb82a076ee84254eda94d | 4c2332dbfa3aa5e972e179ca576730b7868a95cd | refs/heads/master | 2021-06-04T09:42:15.814399 | 2020-06-15T17:19:23 | 2020-06-15T17:19:23 | 116,820,254 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,346 | r | test-io-ext-params.R |
context("io-ext-params")
require("datasets")
suppressWarnings(require("tibble"))
test_that("import NSE", {
idx_1 <- 1.1
idx_2 <- 3.0
idxs <- c(idx_1:idx_2)
df <- data.frame(one = idxs, two = letters[idxs], stringsAsFactors = FALSE)
df2 <- tibble::as_tibble(df)
path <- export_ext_csv(df)
expect_true(file.exists(path))
expected <- df2
# expected$one <- as.integer(expected$one)
rm("df")
actual <- import_ext_csv(df)
# actual$one <- as.integer(actual$one)
# expect_equal(actual, expected)
expect_equivalent(actual, expected)
# expected <- df2
# expect_equal(actual, expected)
unlink(path)
})
test_that("backup", {
path <- export_ext_csv(iris)
unlink(path)
path_2 <- gsub("\\.csv", "_2\\.csv", path)
path <- export_ext(iris, ext = "csv", backup = TRUE, path_backup = path_2)
expect_true(file.exists(path))
expect_true(file.exists(path_2))
unlink(path)
unlink(path_2)
})
test_that("overwrite", {
path <- export_ext_csv(iris)
expect_true(file.exists(path))
path_2 <- export_ext(iris, ext = "csv", overwrite = TRUE)
expect_equal(path, path_2)
expect_true(file.exists(path))
unlink(path)
# unlink(path_2)
})
test_that("ggsave params", {
viz_iris <-
ggplot2::qplot(data = iris, x = Petal.Length, y = Petal.Width)
path <- export_ext_png(viz_iris)
unlink(path)
})
|
5b08f975151fee1a5024d1bb4265bfecd5a2bcab | 2bcc6e76ea7591d028f14e2cb24e8bc60d44959d | /man/compile_html_exercises.Rd | 5e4fd0d51a100af3317318bfe42dd67e1b418594 | [] | no_license | msperlin/afedR | 03e50bdbf53d3388d05c6a0d0da7d7e5d79c452c | 8bd5f365b6af0a10efb9c4c1883965bec34bc32c | refs/heads/master | 2022-09-10T16:55:50.490206 | 2022-09-01T17:55:48 | 2022-09-01T17:55:48 | 220,323,368 | 9 | 24 | null | null | null | null | UTF-8 | R | false | true | 1,762 | rd | compile_html_exercises.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exams_fcts_html.R
\name{compile_html_exercises}
\alias{compile_html_exercises}
\title{Compiles exercises from book afedR}
\usage{
compile_html_exercises(
students_names,
students_ids = paste0("Exam ", 1:length(students_names)),
class_name = "Sample class",
exercise_name = paste0("Sample Exercise"),
links_in_html = dplyr::tibble(text = "Analyzing Financial and Economic Data with R",
url = "https://www.msperlin.com/blog/publication/2020_book-afedr-en/"),
chapters_to_include = 1:13,
solution = FALSE,
dir_out = "html exams",
language = "en"
)
}
\arguments{
\item{students_names}{Names of students (a vector)}
\item{students_ids}{Ids of students (a vector)}
\item{class_name}{The name of the class}
\item{exercise_name}{The name of the exercises}
\item{links_in_html}{A dataframe with links to be added in the html page. This can
be anything that helps the students. The dataframe must have two columns: "text" with the text to
appear in the html and "url" with the actual link (see default options for details).}
\item{chapters_to_include}{Chapter to include in exercise (1-13)}
\item{dir_out}{Folder to copy exercise html files}
\item{language}{Selection of language ("en" only so far)}
}
\value{
TRUE, if sucessfull
}
\description{
This function uses the \link{exam} package to create exercises in the html or pdf format with
random selections. This means that each student will receive a different version of the same
exercise. All exercise files are taken from book "Analysing Financial and Economic Data with R".
}
\examples{
\dontrun{
afedR_build_exam(students_names = 'George', chapters_to_include = 2,
dir_out = tempdir())
}
}
|
b8fb200033b62b8bfaac42b8871f3208fda7ef91 | 9776cb29013107879510b64a1ab00fbf5b1a5fc6 | /other_plots.R | 66f43d63ca7fa2d39f3c3b8e125e48f1197c1337 | [] | no_license | tayrone/medulloblastoma_miscellaneous_analyses | 351a2f5493db10a6e6043e0793b0a353f2535cfc | d8fb28030fef0bd206c94ce28de7d307dd248975 | refs/heads/master | 2022-11-10T18:17:36.311080 | 2020-06-30T18:05:22 | 2020-06-30T18:05:22 | 262,129,751 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,116 | r | other_plots.R | library(ggplot2)
load("./interanalysis_files/rdata_files/5_dm_regulons.RData")
regulon_methylation <- function(x, threshold){
x <- as.data.frame(x)
regulon_elements <- x["in_regulon", ]
if((regulon_elements$dm/(regulon_elements$not_dm + regulon_elements$dm)) > threshold &
(regulon_elements$dm/(regulon_elements$not_dm + regulon_elements$dm)) <= (threshold + 0.05)){
return (TRUE)
}else{
return (FALSE)
}
}
plot_data <- data.frame(threshold = NULL, hm_regulons = NULL)
threshold <- seq(0, 0.95, 0.05)
for(i in threshold){
hm_regulons <- sapply(tables, regulon_methylation, threshold = i)
hm_regulons <- names(tables)[hm_regulons]
current_values <-
data.frame(threshold = i,
hm_regulons = length(intersect(hm_regulons, dm_regulons)))
plot_data <- rbind(plot_data, current_values)
}
ggplot(plot_data, aes(x = threshold, y = hm_regulons)) +
geom_col(orientation = "x", position = position_nudge(x = 0.025)) +
labs(x = "Proporção de elementos diferencialmente metilados, no regulon",
y = "Número de regulons") +
theme_minimal()
|
321c04a73b87dd8cda6cfad452c892fda2f6fedf | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/11562_0/rinput.R | 61b3bf5ee2cc5af2be3edbd7f99c47adfef56bb3 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
testtree <- read.tree("11562_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11562_0_unrooted.txt") |
4eaefbe2abcb154bcc9391681b49b4d193c6a024 | 1ca02e118c539347ac1cec3e12e4ded668d79f18 | /man/PISAShinyApp.Rd | b9721f85a748b222d906ea8abb70ebb16d73bc84 | [] | no_license | michaelgeobrown/RESEV552 | 6823654d20106668f95f82bb0da812b8df7cbecc | 790229196199b577ca53c3bc1287583cdef6d2ff | refs/heads/master | 2021-08-16T23:11:39.586865 | 2020-04-01T17:06:55 | 2020-04-01T17:06:55 | 145,609,608 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 857 | rd | PISAShinyApp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RESEV552.R
\name{PISAShinyApp}
\alias{PISAShinyApp}
\title{PISA Shiny App}
\format{Three Shiny Outputs about the Variable Choice for a Specific Dataset
\describe{
\item{Description}{A Brief Description of the Chosen Variable}
\item{Table}{A Table of the Responses for the Chosen Variable (Summary of the Repsonses if the Variable is Numeric)}
\item{Plot}{A Bar Plot of the 10 Most Popular Repsonses for the Chosen Variable (Histogram if the Variable is Numeric)}
\item{Plot}{A Bar Plot of the 10 Most Popular Repsonses for the Chosen Variable (Histogram if the Variable is Numeric)}
}}
\usage{
PISAShinyApp()
}
\value{
A Shiny App with choices of Dataset, Type of Variable and Specific Variable
}
\description{
This is a Shiny App which describes all of the PISA 2015 datasets
}
|
38ec5b3bd58effce6cc415d1883a6a11c42a3275 | 9a8c406d54c96421752ca2eb05421b2f5c49d2c0 | /plot1.R | 2a9d515c325a63af302cb998e97dc0da43843998 | [] | no_license | mgk2014/ExData_Plotting1 | 8ef8be73d8e9c17dcb85e44addf47ca4b3581212 | a99e5efafeebcaa1eeccf6266f99959dd222e199 | refs/heads/master | 2021-01-16T21:47:58.063396 | 2014-08-09T00:14:40 | 2014-08-09T00:14:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 719 | r | plot1.R | # plot1.R
# Coursera - course 4, Project 1
# Exploratary Data Analysis
# student: mgk2010
# github URL - https://github.com/mgk2014/ExData_Plotting1
#
# purpose: create plot 1 for the assignment. This plot leverages the common source
# helper script to read data-set filter it for the selected dates
#
# source the helper script
source("read_power_data.R")
# get the subset power data
selectedPowerData <- get_power_data();
# open a PNG device
png(file = "plot1.png")
# create the plot
with(selectedPowerData, hist(Global_active_power, col="red",
xlab = "Global Active Power (kilowatts)",
main = "Global Active Power"))
# write the file
dev.off()
|
fa5b881d35296135027b9c2a657b5bd6a902042f | df5b20d6f0958f4e2d817cc2d17c1d7397235cf9 | /man/eWrapper.Rd | 1e7ea93cfcbe8b335e5b3f6625fb5c418f8ce7d7 | [] | no_license | joshuaulrich/IBrokers | 95e29522f1f9cd6bd2eb9a615b00c1b29aaa582a | ac8f12cff2f884044061fb458d4902372be881c4 | refs/heads/master | 2023-07-06T13:40:11.976460 | 2023-06-30T15:09:12 | 2023-06-30T15:09:12 | 32,220,781 | 65 | 61 | null | 2023-04-20T15:18:07 | 2015-03-14T16:23:55 | R | UTF-8 | R | false | false | 4,681 | rd | eWrapper.Rd | \name{eWrapper}
\alias{eWrapper}
\alias{eWrapper.MktData.CSV}
\alias{eWrapper.RealTimeBars}
\alias{eWrapper.RealTimeBars.CSV}
\alias{eWrapper.data}
\alias{eWrapper.MktDepth.CSV}
\title{ eWrapper Closure For Message Processing }
\description{
Create an eWrapper closure to allow for custom
incoming message management.
}
\usage{
eWrapper(debug = FALSE, errfile=stderr())
eWrapper.data(n)
eWrapper.MktData.CSV(n=1)
eWrapper.RealTimeBars.CSV(n=1)
}
\arguments{
\item{debug}{ should debugging be enabled }
\item{errfile}{ where error messages are directed (stderr) }
\item{n}{ number of contracts being watched }
}
\details{
\pkg{IBrokers} implements an eWrapper scheme similar
to that provided by the official Java API.
The general idea is that each real-time data
capture function must manage all incoming signals
correctly, while allowing for the end user to
create custom handlers for each specific event.
Internal to the \code{reqRealTimeBars},
\code{reqMktData}, and \code{reqMktDepth}
functions is a single call to the CALLBACK routine
passed to it. By default this is \code{twsCALLBACK} (see also).
A standard argument to this callback is an
eventWrapper --- which is an instance of eWrapper.
eWrapper is an \R closure that contains a list
of functions to manage all incoming message type, as
found in \code{.twsIncomingMSG}. Each message has a corresponding
function in the eWrapper designed
to handle the particular details of each incoming message type.
There is also an embedded environment in which data
can be saved and retrieved via a handful of accessor functions mimicking
the standard \R tools.
The data environment is \code{.Data}, with accessor
methods \code{get.Data}, \code{assign.Data}, and \code{remove.Data}.
These methods can be called from the closure object \code{eWrapper$get.Data},
\code{eWrapper$assign.Data}, etc.
The basic eWrapper call simply produces a visually informative
display of the incoming stream. E.g. bidSize data would be represented
with a \emph{bidSize} label, instead of the internal TWS code(s) returned
by the TWS.
By creating an instance of an eWrapper, accomplished by calling
it as a function call, one can then modify any or all the particular
methods embedded in the object.
This allows for rapid customization, as well as a built in assurance
that all incoming messages will be handled appropriately without
additional programmer time and resources.
An example of this ability to modify the object is given in
the \code{eWrapper.MktData.CSV} code. This object produces
output deisgned to be space efficient, as well as easily read back into
any R session as a standard CSV file.
Setting \code{debug=NULL} will cause empty function objects
to be created within the eWrapper object returned. This object
can be treated as a template to implement only the methods
that are needed. By default, all functions silently return
the entire message they would normally parse. This includes
\emph{empty} functions created by setting debug to NULL.
\code{eWrapper.data()} allows for data states to be maintained
from call to call, as an xts history of updates/messages is stored
within the object. This is designed to minimize calling overhead by
removing unneeded function calls from each message parsed.
Additional, but creating methods that update the internal environment
of the eWrapper object, it is possible to maintain a snapshot of
last k values for any field of interest. This is directly applicable to
implementing an automated strategy from within a custom \code{twsCALLBACK}
method.
}
\value{
A list of functions [and optionally data] to be used for the \code{eventWrapper} argument
to \code{reqMktData} and \code{reqMktDepth}
}
\author{ Jeffrey A. Ryan }
\note{
It is possible to also attach data to the closure object, allowing for
a single in-memory object to contain current top of book data. This is
exemplified in the \code{eWrapper.MktData.CSV} code, and can be extended
in the user's own direction.
}
\seealso{ \code{\link{twsCALLBACK}}, \code{\link{processMsg}} }
\examples{
myWrapper <- eWrapper()
str(myWrapper)
# remove tickPrice action
myWrapper$tickPrice <- function(msg, timestamp, file, ...) {}
# add new tickPrice action
myWrapper$tickPrice <- function(msg, timestamp, file, ...) { cat("tickPrice",msg) }
# add new data into the object, and retrieve
myWrapper$assign.Data("myData", 1010)
myWrapper$get.Data("myData")
\dontrun{
tws <- twsConnect()
reqMktData(tws, twsSTK("SBUX"))
reqMktData(tws, twsSTK("SBUX"), eventWrapper=myWrapper)
twsDisconnect(tws)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ utilities }
|
00009aa0c9d0a67199cbd216c9da26db947b1312 | 9213f0339c60c788fe9d9f1edce6bf48d183764f | /ANALYSES/MANHATTEN_PLOTS/eye/sliding_window_Eye_stringent.R | 3d2261f9892959bcbfeda3665902c30aac1c33f1 | [] | no_license | HullUni-bioinformatics/Diplotaxodon_twilight_RAD | d47ad9d29db831ee868ef95e59aeab445d921a3d | 7983db82847072fbb56193b522e57078e5b7e49a | refs/heads/master | 2021-01-12T05:52:29.795840 | 2017-07-31T12:00:27 | 2017-07-31T12:00:27 | 77,223,728 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 31,772 | r | sliding_window_Eye_stringent.R | #record the default parameters for plotting
default_par <- par()
# set working directory
setwd(dir = '/media/chrishah/STORAGE/RAD/popgen/sliding_window_plots/DIPLOTAXODON_FOR_PAPER/eye')
svg(filename = 'sliding_window_Eye_stringent.svg')
mat <- matrix(c(1,2,3,4,5,6,7), 7)
layout(mat, widths=c(1), heights=c(1,1,1,1,1,1,1))
par(mar=c(0.2, 0, 0, 0), oma = c(3,3,1,1))
# read files
# ---- Panel: Di_1 vs Di_2 ----------------------------------------------------
# Read the pairwise sliding-window Fst table and draw one panel: alternating
# scaffold backgrounds, candidate-window highlights, the smoothed Fst trace,
# and horizontal reference lines.
Di_1_Di_2.tsv <- read.delim(file = 'Di_1-Di_2.tsv', header = TRUE, sep = "\t")
pop <- Di_1_Di_2.tsv

# Empty plotting frame spanning the six concatenated scaffolds (x axis in kb);
# the single white point only establishes the coordinate system.
plot(71177, 0, axes = TRUE, cex = 0.5, ylab = "", xlab = "", ylim = c(0, 1),
     xlim = c(71177/1000, 18454481/1000), yaxt = 'n', xaxt = 'n', col = 'white')
axis(side = 2, at = c(0, 0.5, 1), labels = TRUE, las = 1, cex.axis = 0.8)

# Per-scaffold x-offsets (kb) used to lay the scaffolds end to end on one axis.
scf_offset <- c(scaffold_12 = 0.0, scaffold_39 = 8622.592,
                scaffold_148 = 13413.792, scaffold_159 = 15067.237,
                scaffold_197 = 16680.637, scaffold_215 = 17603.551)

# Alternating white/grey backgrounds, one rectangle per scaffold (bp coords).
bg_from <- c(71177, 80328, 43527, 23794, 29497, 135232)
bg_to <- c(8693769, 4871528, 1696972, 1637194, 952411, 850930)
bg_col <- c('white', 'grey85', 'white', 'grey85', 'white', 'grey85')
for (i in seq_along(scf_offset)) {
  rect(bg_from[i]/1000 + scf_offset[[i]], 0, bg_to[i]/1000 + scf_offset[[i]], 1,
       col = bg_col[i], border = 'NA')
}

# Highlighted windows (light green); index refers to scf_offset position.
cand_idx <- c(1, 1, 2, 2, 3, 4, 5, 6)
cand_from <- c(3749847, 7628845, 1227431, 1721937, 493741, 544470, 75383, 526905)
cand_to <- c(3849847, 7728845, 1327431, 1821937, 593741, 644470, 200472, 626905)
for (i in seq_along(cand_idx)) {
  off <- scf_offset[[cand_idx[i]]]
  rect(cand_from[i]/1000 + off, 0, cand_to[i]/1000 + off, 1,
       col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
}

# Subset of windows overplotted in saturated green (the "stringent" set,
# judging by the output file name).
str_idx <- c(1, 2, 3, 4, 5, 6)
str_from <- c(3749847, 1721937, 493741, 544470, 75383, 526905)
str_to <- c(3849847, 1821937, 593741, 644470, 200472, 626905)
for (i in seq_along(str_idx)) {
  off <- scf_offset[[str_idx[i]]]
  rect(str_from[i]/1000 + off, 0, str_to[i]/1000 + off, 1,
       col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
}

# Smoothed Fst trace, scaffold by scaffold.
for (scf in names(scf_offset)) {
  sub <- subset(pop, pop$Chr == scf)
  x_kb <- sub$BP/1000 + scf_offset[[scf]]
  lines(x_kb, sub$Smoothed.AMOVA.Fst, type = 'b', lty = 1, pch = 20, cex = 0.8, lwd = 0.6)
  points(x_kb, sub$Smoothed.AMOVA.Fst, cex = 0.8, pch = 20, ylab = "", xlab = "",
         yaxt = 'n', xaxt = 'n')
}

# Reference lines: genome-wide mean Fst (dashed) plus guides at 0, 0.5 and 1.
abline(h = mean(pop$AMOVA.Fst), lty = 5, lwd = 1)
abline(h = 0.5, lty = 3, lwd = 0.2)
abline(h = 0, lty = 1, lwd = 0.2)
abline(h = 1, lty = 1, lwd = 0.2)
# ---- Panel: Di_1 vs Di_4 ----------------------------------------------------
# Read the pairwise sliding-window Fst table and draw one panel: alternating
# scaffold backgrounds, candidate-window highlights, the smoothed Fst trace,
# and horizontal reference lines.
Di_1_Di_4.tsv <- read.delim(file = 'Di_1-Di_4.tsv', header = TRUE, sep = "\t")
pop <- Di_1_Di_4.tsv

# Empty plotting frame spanning the six concatenated scaffolds (x axis in kb);
# the single white point only establishes the coordinate system.
plot(71177, 0, axes = TRUE, cex = 0.5, ylab = "", xlab = "", ylim = c(0, 1),
     xlim = c(71177/1000, 18454481/1000), yaxt = 'n', xaxt = 'n', col = 'white')
axis(side = 2, at = c(0, 0.5, 1), labels = TRUE, las = 1, cex.axis = 0.8)

# Per-scaffold x-offsets (kb) used to lay the scaffolds end to end on one axis.
scf_offset <- c(scaffold_12 = 0.0, scaffold_39 = 8622.592,
                scaffold_148 = 13413.792, scaffold_159 = 15067.237,
                scaffold_197 = 16680.637, scaffold_215 = 17603.551)

# Alternating white/grey backgrounds, one rectangle per scaffold (bp coords).
bg_from <- c(71177, 80328, 43527, 23794, 29497, 135232)
bg_to <- c(8693769, 4871528, 1696972, 1637194, 952411, 850930)
bg_col <- c('white', 'grey85', 'white', 'grey85', 'white', 'grey85')
for (i in seq_along(scf_offset)) {
  rect(bg_from[i]/1000 + scf_offset[[i]], 0, bg_to[i]/1000 + scf_offset[[i]], 1,
       col = bg_col[i], border = 'NA')
}

# Highlighted windows (light green); index refers to scf_offset position.
cand_idx <- c(1, 1, 2, 2, 3, 4, 5, 6)
cand_from <- c(3749847, 7628845, 1227431, 1721937, 493741, 544470, 75383, 526905)
cand_to <- c(3849847, 7728845, 1327431, 1821937, 593741, 644470, 200472, 626905)
for (i in seq_along(cand_idx)) {
  off <- scf_offset[[cand_idx[i]]]
  rect(cand_from[i]/1000 + off, 0, cand_to[i]/1000 + off, 1,
       col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
}

# Subset of windows overplotted in saturated green (the "stringent" set,
# judging by the output file name).
str_idx <- c(1, 2, 3, 4, 5, 6)
str_from <- c(3749847, 1721937, 493741, 544470, 75383, 526905)
str_to <- c(3849847, 1821937, 593741, 644470, 200472, 626905)
for (i in seq_along(str_idx)) {
  off <- scf_offset[[str_idx[i]]]
  rect(str_from[i]/1000 + off, 0, str_to[i]/1000 + off, 1,
       col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
}

# Smoothed Fst trace, scaffold by scaffold.
for (scf in names(scf_offset)) {
  sub <- subset(pop, pop$Chr == scf)
  x_kb <- sub$BP/1000 + scf_offset[[scf]]
  lines(x_kb, sub$Smoothed.AMOVA.Fst, type = 'b', lty = 1, pch = 20, cex = 0.8, lwd = 0.6)
  points(x_kb, sub$Smoothed.AMOVA.Fst, cex = 0.8, pch = 20, ylab = "", xlab = "",
         yaxt = 'n', xaxt = 'n')
}

# Reference lines: genome-wide mean Fst (dashed) plus guides at 0, 0.5 and 1.
abline(h = mean(pop$AMOVA.Fst), lty = 5, lwd = 1)
abline(h = 0.5, lty = 3, lwd = 0.2)
abline(h = 0, lty = 1, lwd = 0.2)
abline(h = 1, lty = 1, lwd = 0.2)
# ---- Panel: Di_1 vs Di_5 ----------------------------------------------------
# Read the pairwise sliding-window Fst table and draw one panel: alternating
# scaffold backgrounds, candidate-window highlights, the smoothed Fst trace,
# and horizontal reference lines.
Di_1_Di_5.tsv <- read.delim(file = 'Di_1-Di_5.tsv', header = TRUE, sep = "\t")
pop <- Di_1_Di_5.tsv

# Empty plotting frame spanning the six concatenated scaffolds (x axis in kb);
# the single white point only establishes the coordinate system.
plot(71177, 0, axes = TRUE, cex = 0.5, ylab = "", xlab = "", ylim = c(0, 1),
     xlim = c(71177/1000, 18454481/1000), yaxt = 'n', xaxt = 'n', col = 'white')
axis(side = 2, at = c(0, 0.5, 1), labels = TRUE, las = 1, cex.axis = 0.8)

# Per-scaffold x-offsets (kb) used to lay the scaffolds end to end on one axis.
scf_offset <- c(scaffold_12 = 0.0, scaffold_39 = 8622.592,
                scaffold_148 = 13413.792, scaffold_159 = 15067.237,
                scaffold_197 = 16680.637, scaffold_215 = 17603.551)

# Alternating white/grey backgrounds, one rectangle per scaffold (bp coords).
bg_from <- c(71177, 80328, 43527, 23794, 29497, 135232)
bg_to <- c(8693769, 4871528, 1696972, 1637194, 952411, 850930)
bg_col <- c('white', 'grey85', 'white', 'grey85', 'white', 'grey85')
for (i in seq_along(scf_offset)) {
  rect(bg_from[i]/1000 + scf_offset[[i]], 0, bg_to[i]/1000 + scf_offset[[i]], 1,
       col = bg_col[i], border = 'NA')
}

# Highlighted windows (light green); index refers to scf_offset position.
cand_idx <- c(1, 1, 2, 2, 3, 4, 5, 6)
cand_from <- c(3749847, 7628845, 1227431, 1721937, 493741, 544470, 75383, 526905)
cand_to <- c(3849847, 7728845, 1327431, 1821937, 593741, 644470, 200472, 626905)
for (i in seq_along(cand_idx)) {
  off <- scf_offset[[cand_idx[i]]]
  rect(cand_from[i]/1000 + off, 0, cand_to[i]/1000 + off, 1,
       col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
}

# Subset of windows overplotted in saturated green (the "stringent" set,
# judging by the output file name).
str_idx <- c(1, 2, 3, 4, 5, 6)
str_from <- c(3749847, 1721937, 493741, 544470, 75383, 526905)
str_to <- c(3849847, 1821937, 593741, 644470, 200472, 626905)
for (i in seq_along(str_idx)) {
  off <- scf_offset[[str_idx[i]]]
  rect(str_from[i]/1000 + off, 0, str_to[i]/1000 + off, 1,
       col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
}

# Smoothed Fst trace, scaffold by scaffold.
for (scf in names(scf_offset)) {
  sub <- subset(pop, pop$Chr == scf)
  x_kb <- sub$BP/1000 + scf_offset[[scf]]
  lines(x_kb, sub$Smoothed.AMOVA.Fst, type = 'b', lty = 1, pch = 20, cex = 0.8, lwd = 0.6)
  points(x_kb, sub$Smoothed.AMOVA.Fst, cex = 0.8, pch = 20, ylab = "", xlab = "",
         yaxt = 'n', xaxt = 'n')
}

# Reference lines: genome-wide mean Fst (dashed) plus guides at 0, 0.5 and 1.
abline(h = mean(pop$AMOVA.Fst), lty = 5, lwd = 1)
abline(h = 0.5, lty = 3, lwd = 0.2)
abline(h = 0, lty = 1, lwd = 0.2)
abline(h = 1, lty = 1, lwd = 0.2)
# ---- Pairwise Fst panel: Di_2 vs Di_4 ----
# Scaffolds are concatenated on one x axis (kb) by adding a fixed per-scaffold
# offset to each position. Alternating white/grey background rectangles mark
# scaffold extents, light-green rectangles mark candidate windows, and a
# bright-green rectangle marks the focal window. Drawing order, coordinates,
# and colors match the original unrolled code exactly.
Di_2_Di_4.tsv <- read.delim(file = 'Di_2-Di_4.tsv', header = TRUE, sep = "\t")
pop <- Di_2_Di_4.tsv
# Empty canvas spanning all concatenated scaffolds; y is Fst in [0, 1].
plot(71177, 0, axes=T, cex=0.5, ylab = "", xlab = "", ylim = c(0,1), xlim = c(71177/1000,18454481/1000), yaxt = 'n', xaxt = 'n', col = 'white')
axis(side = 2, at = c(0,0.5,1), labels = T, las=1, cex.axis=0.8)
# Per-scaffold layout tables (window positions in bp; offsets in kb).
panel_offset <- c(scaffold_12 = 0.0, scaffold_39 = 8622.592,
                  scaffold_148 = 13413.792, scaffold_159 = 15067.237,
                  scaffold_197 = 16680.637, scaffold_215 = 17603.551)
panel_span <- list(scaffold_12 = c(71177, 8693769),
                   scaffold_39 = c(80328, 4871528),
                   scaffold_148 = c(43527, 1696972),
                   scaffold_159 = c(23794, 1637194),
                   scaffold_197 = c(29497, 952411),
                   scaffold_215 = c(135232, 850930))
panel_bg <- c(scaffold_12 = 'white', scaffold_39 = 'grey85',
              scaffold_148 = 'white', scaffold_159 = 'grey85',
              scaffold_197 = 'white', scaffold_215 = 'grey85')
light_windows <- list(scaffold_12 = list(c(3749847, 3849847), c(7628845, 7728845)),
                      scaffold_39 = list(c(1227431, 1327431), c(1721937, 1821937)),
                      scaffold_148 = list(c(493741, 593741)),
                      scaffold_159 = list(c(544470, 644470)),
                      scaffold_197 = list(c(75383, 200472)),
                      scaffold_215 = list(c(526905, 626905)))
bright_window <- list(scaffold_12 = c(3749847, 3849847),
                      scaffold_39 = c(1721937, 1821937),
                      scaffold_148 = c(493741, 593741),
                      scaffold_159 = c(544470, 644470),
                      scaffold_197 = c(75383, 200472),
                      scaffold_215 = c(526905, 626905))
# Background and highlight rectangles, scaffold by scaffold.
for (scf in names(panel_offset)) {
  off <- panel_offset[[scf]]
  span <- panel_span[[scf]]
  rect(span[1]/1000+off, 0, span[2]/1000+off, 1, col = panel_bg[[scf]], border = 'NA')
  for (w in light_windows[[scf]]) {
    rect(w[1]/1000+off, 0, w[2]/1000+off, 1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
  }
  bw <- bright_window[[scf]]
  rect(bw[1]/1000+off, 0, bw[2]/1000+off, 1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
}
# Smoothed Fst trace (line + points) for each scaffold, drawn on top.
for (scf in names(panel_offset)) {
  sub <- subset(pop, pop$Chr == scf)
  x_kb <- sub$BP/1000 + panel_offset[[scf]]
  lines(x_kb, sub$Smoothed.AMOVA.Fst, type='b', lty=1, pch=20, cex = 0.8, lwd = 0.6)
  points(x_kb, sub$Smoothed.AMOVA.Fst, cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
}
# Reference lines: genome-wide mean Fst (dashed) and fixed levels 0, 0.5, 1.
abline(h=mean(pop$AMOVA.Fst), lty=5, lwd=1)
abline(h=0.5, lty=3, lwd=0.2)
abline(h=0, lty=1, lwd=0.2)
abline(h=1, lty=1, lwd=0.2)
# ---- Pairwise Fst panel: Di_2 vs Di_5 ----
# Same layout as the other pairwise panels: scaffolds concatenated on one x
# axis (kb) via fixed offsets, alternating background rectangles, light-green
# candidate windows, and a bright-green focal window per scaffold. Drawing
# order, coordinates, and colors match the original unrolled code exactly.
Di_2_Di_5.tsv <- read.delim(file = 'Di_2-Di_5.tsv', header = TRUE, sep = "\t")
pop <- Di_2_Di_5.tsv
# Empty canvas spanning all concatenated scaffolds; y is Fst in [0, 1].
plot(71177, 0, axes=T, cex=0.5, ylab = "", xlab = "", ylim = c(0,1), xlim = c(71177/1000,18454481/1000), yaxt = 'n', xaxt = 'n', col = 'white')
axis(side = 2, at = c(0,0.5,1), labels = T, las=1, cex.axis=0.8)
# Per-scaffold layout tables (window positions in bp; offsets in kb).
panel_offset <- c(scaffold_12 = 0.0, scaffold_39 = 8622.592,
                  scaffold_148 = 13413.792, scaffold_159 = 15067.237,
                  scaffold_197 = 16680.637, scaffold_215 = 17603.551)
panel_span <- list(scaffold_12 = c(71177, 8693769),
                   scaffold_39 = c(80328, 4871528),
                   scaffold_148 = c(43527, 1696972),
                   scaffold_159 = c(23794, 1637194),
                   scaffold_197 = c(29497, 952411),
                   scaffold_215 = c(135232, 850930))
panel_bg <- c(scaffold_12 = 'white', scaffold_39 = 'grey85',
              scaffold_148 = 'white', scaffold_159 = 'grey85',
              scaffold_197 = 'white', scaffold_215 = 'grey85')
light_windows <- list(scaffold_12 = list(c(3749847, 3849847), c(7628845, 7728845)),
                      scaffold_39 = list(c(1227431, 1327431), c(1721937, 1821937)),
                      scaffold_148 = list(c(493741, 593741)),
                      scaffold_159 = list(c(544470, 644470)),
                      scaffold_197 = list(c(75383, 200472)),
                      scaffold_215 = list(c(526905, 626905)))
bright_window <- list(scaffold_12 = c(3749847, 3849847),
                      scaffold_39 = c(1721937, 1821937),
                      scaffold_148 = c(493741, 593741),
                      scaffold_159 = c(544470, 644470),
                      scaffold_197 = c(75383, 200472),
                      scaffold_215 = c(526905, 626905))
# Background and highlight rectangles, scaffold by scaffold.
for (scf in names(panel_offset)) {
  off <- panel_offset[[scf]]
  span <- panel_span[[scf]]
  rect(span[1]/1000+off, 0, span[2]/1000+off, 1, col = panel_bg[[scf]], border = 'NA')
  for (w in light_windows[[scf]]) {
    rect(w[1]/1000+off, 0, w[2]/1000+off, 1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
  }
  bw <- bright_window[[scf]]
  rect(bw[1]/1000+off, 0, bw[2]/1000+off, 1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
}
# Smoothed Fst trace (line + points) for each scaffold, drawn on top.
for (scf in names(panel_offset)) {
  sub <- subset(pop, pop$Chr == scf)
  x_kb <- sub$BP/1000 + panel_offset[[scf]]
  lines(x_kb, sub$Smoothed.AMOVA.Fst, type='b', lty=1, pch=20, cex = 0.8, lwd = 0.6)
  points(x_kb, sub$Smoothed.AMOVA.Fst, cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
}
# Reference lines: genome-wide mean Fst (dashed) and fixed levels 0, 0.5, 1.
abline(h=mean(pop$AMOVA.Fst), lty=5, lwd=1)
abline(h=0.5, lty=3, lwd=0.2)
abline(h=0, lty=1, lwd=0.2)
abline(h=1, lty=1, lwd=0.2)
# ---- Pairwise Fst panel: Di_4 vs Di_5 ----
# Same layout as the other pairwise panels: scaffolds concatenated on one x
# axis (kb) via fixed offsets, alternating background rectangles, light-green
# candidate windows, and a bright-green focal window per scaffold. Drawing
# order, coordinates, and colors match the original unrolled code exactly.
Di_4_Di_5.tsv <- read.delim(file = 'Di_4-Di_5.tsv', header = TRUE, sep = "\t")
pop <- Di_4_Di_5.tsv
# Empty canvas spanning all concatenated scaffolds; y is Fst in [0, 1].
plot(71177, 0, axes=T, cex=0.5, ylab = "", xlab = "", ylim = c(0,1), xlim = c(71177/1000,18454481/1000), yaxt = 'n', xaxt = 'n', col = 'white')
axis(side = 2, at = c(0,0.5,1), labels = T, las=1, cex.axis=0.8)
# Per-scaffold layout tables (window positions in bp; offsets in kb).
panel_offset <- c(scaffold_12 = 0.0, scaffold_39 = 8622.592,
                  scaffold_148 = 13413.792, scaffold_159 = 15067.237,
                  scaffold_197 = 16680.637, scaffold_215 = 17603.551)
panel_span <- list(scaffold_12 = c(71177, 8693769),
                   scaffold_39 = c(80328, 4871528),
                   scaffold_148 = c(43527, 1696972),
                   scaffold_159 = c(23794, 1637194),
                   scaffold_197 = c(29497, 952411),
                   scaffold_215 = c(135232, 850930))
panel_bg <- c(scaffold_12 = 'white', scaffold_39 = 'grey85',
              scaffold_148 = 'white', scaffold_159 = 'grey85',
              scaffold_197 = 'white', scaffold_215 = 'grey85')
light_windows <- list(scaffold_12 = list(c(3749847, 3849847), c(7628845, 7728845)),
                      scaffold_39 = list(c(1227431, 1327431), c(1721937, 1821937)),
                      scaffold_148 = list(c(493741, 593741)),
                      scaffold_159 = list(c(544470, 644470)),
                      scaffold_197 = list(c(75383, 200472)),
                      scaffold_215 = list(c(526905, 626905)))
bright_window <- list(scaffold_12 = c(3749847, 3849847),
                      scaffold_39 = c(1721937, 1821937),
                      scaffold_148 = c(493741, 593741),
                      scaffold_159 = c(544470, 644470),
                      scaffold_197 = c(75383, 200472),
                      scaffold_215 = c(526905, 626905))
# Background and highlight rectangles, scaffold by scaffold.
for (scf in names(panel_offset)) {
  off <- panel_offset[[scf]]
  span <- panel_span[[scf]]
  rect(span[1]/1000+off, 0, span[2]/1000+off, 1, col = panel_bg[[scf]], border = 'NA')
  for (w in light_windows[[scf]]) {
    rect(w[1]/1000+off, 0, w[2]/1000+off, 1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
  }
  bw <- bright_window[[scf]]
  rect(bw[1]/1000+off, 0, bw[2]/1000+off, 1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
}
# Smoothed Fst trace (line + points) for each scaffold, drawn on top.
for (scf in names(panel_offset)) {
  sub <- subset(pop, pop$Chr == scf)
  x_kb <- sub$BP/1000 + panel_offset[[scf]]
  lines(x_kb, sub$Smoothed.AMOVA.Fst, type='b', lty=1, pch=20, cex = 0.8, lwd = 0.6)
  points(x_kb, sub$Smoothed.AMOVA.Fst, cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
}
# Reference lines: genome-wide mean Fst (dashed) and fixed levels 0, 0.5, 1.
abline(h=mean(pop$AMOVA.Fst), lty=5, lwd=1)
abline(h=0.5, lty=3, lwd=0.2)
abline(h=0, lty=1, lwd=0.2)
abline(h=1, lty=1, lwd=0.2)
# read files
# ---- Composite "support" panel ----
# Same concatenated-scaffold layout as the pairwise Fst panels above, but the
# plotted trace is avg_rank_rel_smoothed down-weighted by the rank standard
# deviation via (1 - std_rank/5892.5). NOTE(review): 5892.5 is presumably the
# maximum possible std_rank so the weight lies in [0, 1] — TODO confirm.
Diplo_1M_smoothed_V_EY_D_p_TL_incl_support.txt <- read.delim(file = 'Diplo_1M_smoothed_V_EY_D_p_TL-incl_support.txt', header = T, sep = "\t")
pop = Diplo_1M_smoothed_V_EY_D_p_TL_incl_support.txt
# Blank canvas. Unlike the panels above, this bottom panel also draws an
# x axis labeled with scaffold numbers at precomputed midpoint positions (kb).
plot(71177, 0, axes=T, cex=0.5, ylab = "", xlab = "", ylim = c(0,1), xlim = c(71177/1000,18454481/1000), yaxt = 'n', xaxt = 'n', col = 'white')
axis(side = 2, at = c(0,0.5,1), labels = T, las=1, cex.axis=0.8)
axis(side = 1, at = c(4382.473,11098.52,14284.0415,15897.731,17171.591,18096.632), labels = c(12,39,148,159,197,215), las=3, cex.axis=1)
# Background and highlight rectangles, scaffold by scaffold. The bare `scf`
# lines just echo the current scaffold name when the script is run
# interactively (they print nothing when source()d).
scf <- 'scaffold_12'
scf
rect(71177/1000+0.0,0,8693769/1000+0.0,1, col = 'white', border = 'NA')
rect(3749847/1000+0.0,0,3849847/1000+0.0,1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
rect(7628845/1000+0.0,0,7728845/1000+0.0,1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
rect(3749847/1000+0.0,0,3849847/1000+0.0,1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
scf <- 'scaffold_39'
scf
rect(80328/1000+8622.592,0,4871528/1000+8622.592,1, col = 'grey85', border = 'NA')
rect(1227431/1000+8622.592,0,1327431/1000+8622.592,1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
rect(1721937/1000+8622.592,0,1821937/1000+8622.592,1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
rect(1721937/1000+8622.592,0,1821937/1000+8622.592,1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
scf <- 'scaffold_148'
scf
rect(43527/1000+13413.792,0,1696972/1000+13413.792,1, col = 'white', border = 'NA')
rect(493741/1000+13413.792,0,593741/1000+13413.792,1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
rect(493741/1000+13413.792,0,593741/1000+13413.792,1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
scf <- 'scaffold_159'
scf
rect(23794/1000+15067.237,0,1637194/1000+15067.237,1, col = 'grey85', border = 'NA')
rect(544470/1000+15067.237,0,644470/1000+15067.237,1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
rect(544470/1000+15067.237,0,644470/1000+15067.237,1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
scf <- 'scaffold_197'
scf
rect(29497/1000+16680.637,0,952411/1000+16680.637,1, col = 'white', border = 'NA')
rect(75383/1000+16680.637,0,200472/1000+16680.637,1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
rect(75383/1000+16680.637,0,200472/1000+16680.637,1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
scf <- 'scaffold_215'
scf
rect(135232/1000+17603.551,0,850930/1000+17603.551,1, col = 'grey85', border = 'NA')
rect(526905/1000+17603.551,0,626905/1000+17603.551,1, col = '#a6f7baff', border = '#a6f7baff', lwd = '0.5')
rect(526905/1000+17603.551,0,626905/1000+17603.551,1, col = '#16e74dff', border = '#16e74dff', lwd = '0.5')
# Weighted smoothed-rank trace per scaffold. Note this file uses lowercase
# column names (chrom/bp) unlike the pairwise files (Chr/BP). The commented-out
# points()/lines() calls are alternate unweighted/unsmoothed versions kept for
# reference.
scf <- 'scaffold_12'
sub <- subset(pop, pop$chrom == scf)
#points(sub$bp/1000+0.0, sub$avg_rank_rel, col='grey',cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
#points(sub$bp/1000+0.0, sub$avg_rank_rel*(1-(sub$std_rank/5892.5)), col='blue', cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
lines(sub$bp/1000+0.0, sub$avg_rank_rel_smoothed*(1-(sub$std_rank/5892.5)), type='b', col='black', lty=1, pch=20, cex = 0.8, lwd = 1)
#lines(sub$bp/1000+0.0, sub$avg_rank_rel, type='b', col='red', lty=1, pch=20, cex = 0.8, lwd = 0.6)
scf <- 'scaffold_39'
sub <- subset(pop, pop$chrom == scf)
#points(sub$bp/1000+8622.592, sub$avg_rank_rel, col='grey',cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
#points(sub$bp/1000+8622.592, sub$avg_rank_rel*(1-(sub$std_rank/5892.5)), col='blue', cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
lines(sub$bp/1000+8622.592, sub$avg_rank_rel_smoothed*(1-(sub$std_rank/5892.5)), type='b', col='black', lty=1, pch=20, cex = 0.8, lwd = 1)
#lines(sub$bp/1000+8622.592, sub$avg_rank_rel, type='b', col='red', lty=1, pch=20, cex = 0.8, lwd = 0.6)
scf <- 'scaffold_148'
sub <- subset(pop, pop$chrom == scf)
#points(sub$bp/1000+13413.792, sub$avg_rank_rel, col='grey',cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
#points(sub$bp/1000+13413.792, sub$avg_rank_rel*(1-(sub$std_rank/5892.5)), col='blue', cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
lines(sub$bp/1000+13413.792, sub$avg_rank_rel_smoothed*(1-(sub$std_rank/5892.5)), type='b', col='black', lty=1, pch=20, cex = 0.8, lwd = 1)
#lines(sub$bp/1000+13413.792, sub$avg_rank_rel, type='b', col='red', lty=1, pch=20, cex = 0.8, lwd = 0.6)
scf <- 'scaffold_159'
sub <- subset(pop, pop$chrom == scf)
#points(sub$bp/1000+15067.237, sub$avg_rank_rel, col='grey',cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
#points(sub$bp/1000+15067.237, sub$avg_rank_rel*(1-(sub$std_rank/5892.5)), col='blue', cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
lines(sub$bp/1000+15067.237, sub$avg_rank_rel_smoothed*(1-(sub$std_rank/5892.5)), type='b', col='black', lty=1, pch=20, cex = 0.8, lwd = 1)
#lines(sub$bp/1000+15067.237, sub$avg_rank_rel, type='b', col='red', lty=1, pch=20, cex = 0.8, lwd = 0.6)
scf <- 'scaffold_197'
sub <- subset(pop, pop$chrom == scf)
#points(sub$bp/1000+16680.637, sub$avg_rank_rel, col='grey',cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
#points(sub$bp/1000+16680.637, sub$avg_rank_rel*(1-(sub$std_rank/5892.5)), col='blue', cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
lines(sub$bp/1000+16680.637, sub$avg_rank_rel_smoothed*(1-(sub$std_rank/5892.5)), type='b', col='black', lty=1, pch=20, cex = 0.8, lwd = 1)
#lines(sub$bp/1000+16680.637, sub$avg_rank_rel, type='b', col='red', lty=1, pch=20, cex = 0.8, lwd = 0.6)
scf <- 'scaffold_215'
sub <- subset(pop, pop$chrom == scf)
#points(sub$bp/1000+17603.551, sub$avg_rank_rel, col='grey',cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
#points(sub$bp/1000+17603.551, sub$avg_rank_rel*(1-(sub$std_rank/5892.5)), col='blue', cex=0.8, pch=20, ylab = "", xlab = "", yaxt = 'n', xaxt = 'n')
lines(sub$bp/1000+17603.551, sub$avg_rank_rel_smoothed*(1-(sub$std_rank/5892.5)), type='b', col='black', lty=1, pch=20, cex = 0.8, lwd = 1)
#lines(sub$bp/1000+17603.551, sub$avg_rank_rel, type='b', col='red', lty=1, pch=20, cex = 0.8, lwd = 0.6)
# Reference lines. NOTE(review): 0.344191087299 is presumably a precomputed
# null/mean relative rank — TODO confirm where it comes from; 0.95 is a guide
# line, plus fixed levels 0, 0.5, 1.
abline(h=0.344191087299, lty=5, lwd=1)
abline(h=0.95, lty=3, lwd=0.2)
abline(h=0.5, lty=3, lwd=0.2)
abline(h=0, lty=1, lwd=0.2)
abline(h=1, lty=1, lwd=0.2)
# Close the graphics device opened earlier in the script (above this excerpt).
dev.off()
#reset to default
# NOTE(review): calling par() after dev.off() applies the saved settings to a
# freshly opened default device — confirm this is the intended behavior.
par(default_par)
32d7345cc5ef91c545e97daa1ce739619762593a | 50d0854fdd9750de3a2dc54233ed0ccfbb3d6735 | /data_processing_lj.R | 1ae8110226c7d37719606c11348b12b75c60ee2e | [] | no_license | leahrjones/workforce_data_team | 1ec38d64fb5ad8eaef50d2aca2c6cac05d68713e | 6836eef713d5f14a45d34539cbec802f19040fb8 | refs/heads/master | 2023-03-31T01:00:07.876751 | 2021-04-02T01:03:46 | 2021-04-02T01:03:46 | 352,823,989 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,326 | r | data_processing_lj.R | # load packages -----------------------------------
library(readxl)
library(readr)
library(dplyr)
library(janitor)
library(here)
library(purrr)
library(glue)
library(lubridate)
# Download the statewide 5102 workforce data from data.ca.gov. This may take a
# few minutes. type_convert() re-guesses column types after the initial read.
all_years_5102 <- readr::read_csv(url('https://data.ca.gov/dataset/e620a64f-6b86-4ce0-ab4b-03d06674287b/resource/aba87ad9-f6b0-4a7e-a45e-d1452417eb7f/download/calhr_5102_statewide_2011-2020.csv')) %>%
  type_convert()

# !!!!!!!!!!!!!!! ENTER THE RANGE OF YEARS WITH 5102 REPORTS !!!!!!!!!!!!!!!
# IF YOU JUST WANT ONE YEAR, INPUT THAT ONE YEAR AS FIRST AND SECOND
first_year <- 2011
second_year <- 2011

# Build the date bounds used by the filter below (YYYY-01-01 .. YYYY-12-31).
first_date <- first_year %>% glue('-01-01')
second_date <- second_year %>% glue('-12-31')
# NOTE(review): year_range enumerates every day in the range; the active code
# below never uses it (only the commented-out legacy code did) — kept so any
# downstream code that expects it still works.
year_range <- as.Date(as.Date(first_date):as.Date(second_date), origin="1970-01-01")

# Save the original column names so the output file can use them verbatim.
names_all_5102_report <- names(all_years_5102)

# Clean up the column names to make them easier to work with in R.
all_years_5102 <- all_years_5102 %>%
  clean_names()

# Sanity check: count NAs in the date column (should come out as 0, i.e.
# every record carries a date).
sum(is.na(all_years_5102$as_of_date))

# Keep only the records that fall inside the requested year range.
my_years_5102 <- all_years_5102 %>% filter(between(as_of_date, as.Date(first_date), as.Date(second_date)))
View(my_years_5102)

# write the processed data to a new file -----------------------------------
# Revert to the original column names (the output dataset should have the
# same column names as the source datasets).
names(my_years_5102) <- names_all_5102_report

# Write the filtered data to the working directory.
write_csv(x = my_years_5102,
          file = "my_years_5102.csv",
          col_names = TRUE)

# Also write a copy of the data directly to the shiny folder, since all of the
# code/data for the app needs to be contained within a single folder in order
# to load to shinyapps.io.
# Fix: only create the folder when it does not already exist, so re-running
# the script does not emit a "'calhr_5102_shiny' already exists" warning.
if (!dir.exists("calhr_5102_shiny")) {
  dir.create("calhr_5102_shiny")
}
write_csv(x = my_years_5102,
          file = here('calhr_5102_shiny',
                      glue('calhr_5102_',
                           first_year,
                           '_to_',
                           second_year,
                           '.csv')))

#DONE!
# Left to do:
# - check the data outputs to make sure they are correct
# - answer the unanswered questions in the comments above
# BELOW IS DAVID'S ORIGINAL CODE (legacy, kept commented out for reference)
#---------------------------------------------------------------
#convert csv into a readable data frame
#df_5102_report <- map_df(.x = year_range,
# .f = ~ all_years_5102,
# col_types = 'text') %>%
# type_convert()
#it works, but there are 10X as many observations... huh???
#it seems to be repeating... why??
#upon further thought, I don't think we need this code anymore... just go straight to sorting...
###below is David's original df code
# read data into R -----------------------------------
#df_5102_report <- map_df(.x = year_range,
# .f = ~ read_excel(here('02_data_raw',
# glue('calhr-5102-statewide-', .x, '.xlsx')),
# col_types = 'text')) %>%
# type_convert()
# to check an individual year's file
# year <- 2019
# df_year <- read_excel(path = here('02_data_raw', glue('calhr-5102-statewide-', year, '.xlsx')),
# col_types = 'text') #%>%
# #type_convert()
# head(df_year) # view the first couple of records
# tail(df_year) # view the last couple of records
# re-format data -----------------------------------
# fix dates
# check the number of NAs in the original dataset (to be sure there's a value for each record)
#sum(is.na(df_5102_report$as_of_date))
# convert the dates (it's okay if there are warning messages from this step, as long as the checks below look okay)
#df_5102_report <- df_5102_report %>%
# mutate(as_of_date = case_when(!is.na(mdy(as_of_date)) ~ mdy(as_of_date),
# !is.na(excel_numeric_to_date(as.numeric(as_of_date))) ~
# excel_numeric_to_date(as.numeric(as_of_date)),
# TRUE ~ NA_Date_))
# check to make sure the conversion worked
#sum(is.na(df_5102_report$as_of_date)) # should be the same as the number above, probably zero
#range(df_5102_report$as_of_date) # check to make sure the new dates are within the correct range
# write the processed data to a new file -----------------------------------
# revert back to the original names (assuming that we want the output dataset to have the same column names as the source datasets)
#names(df_5102_report) <- names_df_5102_report
# write the data to the '03_data_processed' folder
# NOTE: writing the data to a gzip file rather than a regular csv to save space - you can
# read/write using this format directly with R using the readr package, and you can extract
# it to a regular csv using 7zip (or some other software)
#write_csv(x = df_5102_report,
# file = here('03_data_processed',
# glue('calhr_5102_statewide_',
# year_range[1],
# '-',
# year_range[length(year_range)],
# '.csv.gz')))
# also writing a copy of the data directly to the shiny folder, since all of the code/data for
# the app needs to be contained within a single folder in order to load to shinyapps.io
#write_csv(x = df_5102_report,
# file = here('05_shiny_app',
# 'data',
# glue('calhr_5102_statewide_',
# year_range[1],
# '-',
# year_range[length(year_range)],
# '.csv.gz')))
|
1d714b4cdc284b5c037fc2ea89fde6e6fe532179 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/vosonSML/examples/CollectDataTwitter.Rd.R | 935bb461eb492cb8890b8dc8534dffc1f4a7c239 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,359 | r | CollectDataTwitter.Rd.R | library(vosonSML)
### Name: CollectDataTwitter
### Title: Note: this function is DEPRECATED and will be removed in a
### future release. Please use the 'Collect' function
### Aliases: CollectDataTwitter
### Keywords: SNA data mining twitter
### ** Examples
## Not run:
##D # Firstly specify your API credentials
##D my_api_key <- "1234567890qwerty"
##D my_api_secret <- "1234567890qwerty"
##D my_access_token <- "1234567890qwerty"
##D my_access_token_secret <- "1234567890qwerty"
##D
##D # Authenticate with the Twitter API using \code{AuthenticateWithTwitterAPI}
##D AuthenticateWithTwitterAPI(api_key=my_api_key, api_secret=my_api_secret,
##D access_token=my_access_token, access_token_secret=my_access_token_secret)
##D
##D # Collect tweets data using \code{myTwitterData}
##D myTwitterData <- CollectDataTwitter(searchTerm="#auspol",
##D numTweets=150,writeToFile=FALSE,verbose=FALSE)
##D
##D # Create an 'actor' network using \code{CreateActorNetwork}
##D g_actor_twitter <- CreateActorNetwork(myTwitterData)
##D
##D # Create a 'bimodal' network using \code{CreateBimodalNetwork}
##D g_bimodal_twitter <- CreateBimodalNetwork(myTwitterData)
##D
##D # Create a 'semantic' network using \code{CreateSemanticNetwork}
##D g_semantic_twitter <- CreateSemanticNetwork(myTwitterData)
##D
## End(Not run)
|
0de35de110d6ef2897954fbbdadd06f2a439fa78 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sidier/examples/pop.dist.Rd.R | 79349b7529a609d340248565c3d7ddd9ca68ae9b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,347 | r | pop.dist.Rd.R | library(sidier)
### Name: pop.dist
### Title: Distances among populations
### Aliases: pop.dist
### ** Examples
cat(" H1 H2 H3 H4 H5",
"Population1 1 2 1 0 0",
"Population2 0 0 0 4 1",
"Population3 0 1 0 0 3",
file = "4_Example3_HapPerPop_Weighted.txt", sep = "\n")
cat("H1 H2 H3 H4 H5",
"H1 0 1 2 3 1",
"H2 1 0 3 4 2",
"H3 2 3 0 1 1",
"H4 3 4 1 0 2",
"H5 1 2 1 2 0",
file = "4_Example3_IndelDistanceMatrixMullerMod.txt", sep = "\n")
example3_2 <- read.table("4_Example3_IndelDistanceMatrixMullerMod.txt"
,header=TRUE)
# Checking row names to estimate NameIniHaplotypes,NameEndHaplotypes:
row.names(read.table(file="4_Example3_IndelDistanceMatrixMullerMod.txt"))
## [1] "H1" "H2" "H3" "H4" "H5" NameIniHaplotypes=1. NameEndHaplotypes=2
# Checking row names to estimate NameIniPopulations, and NameEndPopulations
row.names(read.table(file="4_Example3_HapPerPop_Weighted.txt"))
## [1] "Population1" "Population2" "Population3"
## NameIniPopulations=1 NameEndPopulations =11
# Reading files. Distance matrix must contain haplotype names. Abundance
# matrix must contain both, haplotype and population names:
pop.dist (DistFile="4_Example3_IndelDistanceMatrixMullerMod.txt",
HaploFile="4_Example3_HapPerPop_Weighted.txt", outType="O",
NameIniHaplotypes=1,NameEndHaplotypes=2,NameIniPopulations=1,
NameEndPopulations=11)
|
5861f7105c17c4e786f428549f9615809cbf4ece | 86cb2d9a9c8aab4cfe59493d3a187a239451efd7 | /plots/plotNovelJunction.R | 98de499b243aa73047f27355c8a62df646d5e8f5 | [] | no_license | wckdouglas/tgirtERCC | 198878608cb9480847a907f7d22f5f234e791077 | fd807759c158b24d56a282bdbde3313406d9a2c1 | refs/heads/master | 2021-01-17T11:34:30.967484 | 2017-03-10T19:36:45 | 2017-03-10T19:36:45 | 41,743,074 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,047 | r | plotNovelJunction.R | #!/usr/bin/env Rscript
library(readr)
library(dplyr)
library(tidyr)
library(stringr)
library(cowplot)
library(R.utils)
library(stringi)
library(tgirtABRF)
datapath <- '/Users/wckdouglas/cellProject/result/junction'
filename <- 'novelJunctionCounts_old.tsv'
figurepath <- '/Users/wckdouglas/cellProject/figures'
# Map junction category codes to display labels for the legend.
# Fix: the displayed label was misspelled "Antisesne"; corrected to
# "Antisense" here AND in the matching factor levels below so the two stay
# consistent (a mismatch would silently produce NA factor levels).
assignCat <- function(x){
	ifelse(x == 'Novel','Unannotated splice junctions',
		ifelse(x == 'Sense','Annotated splice junctions','Antisense to annotated splice junctions'))
}

# Read the junction counts, derive antisense counts (all known minus sense),
# convert raw counts to within-sample percentages, relabel the categories,
# and fix the stacking order for the bar chart.
df <- datapath %>%
	str_c(filename,sep='/') %>%
	read_tsv(col_names=c('name','categories','count')) %>%
	spread(categories,count) %>%
	mutate(antisense = allKnown - sense ) %>%   # antisense = all known - sense
	select(-allKnown) %>%
	gather(categories,count,-name) %>%
	mutate(categories = capitalize(categories)) %>%
	# Decode sample metadata from the sample name.
	mutate(prep = getPrep(name) ) %>%
	mutate(templ = getTemplate(name)) %>%
	mutate(repl = getReplicate(name)) %>%
	mutate(name = paste0(templ,repl)) %>%
	select(-templ,-repl) %>%
	# Percentage of each junction type within each sample/prep.
	group_by(name,prep) %>%
	do(data.frame(categories = .$categories,
			count = .$count*100/sum(.$count))) %>%
	mutate(categories = assignCat(categories)) %>%
	# Level order controls the stacking order in the plot below.
	mutate(categories = factor(categories,levels = c('Unannotated splice junctions',
							'Antisense to annotated splice junctions',
							'Annotated splice junctions'))) %>%
	tbl_df
# Stacked bar chart: percentage of junctions per sample, colored by junction
# type, with panels split by library prep.
# NOTE(review): the `order` aesthetic is ignored by ggplot2 >= 2.0 — stacking
# order comes from the factor levels of `fill`; confirm whether that aes line
# is still needed.
p <- ggplot(data=df,aes(x=name,y=count,
                        fill=factor(categories,levels=rev(levels(categories))),
                        order=factor(categories,levels = (levels(categories))))) +
	geom_bar(stat='identity') +
	facet_grid(.~prep,scale='free_x',space='free_x') +
	theme(axis.text.x = element_text(angle=90,face='bold',color='black',vjust=0.5,hjust=1)) +
	theme(strip.text.x = element_text(face='bold',color='black')) +
	theme(text = element_text(size=20)) +
	theme(legend.position='bottom')+
	labs(x= ' ',y='Percentage',fill='Junction Type')
# Save the figure and report where it went.
figurename <- str_c(figurepath,'junctionType.pdf',sep='/')
ggsave(p,file=figurename,width=10)
message('Plotted ', figurename)
|
9c86d78efbc8b202110cc8e7c74f9b1d66a60050 | 1db8664321f34ba8981a713e998d4226d6084ece | /R Files/PaleoResilience_RunSimulations_ 2018-11-14.R | cf4f4b3beb738449f723e95af246a22503d9d85c | [] | no_license | allisonstegner/PaleoResilience | 00ef9e69eafee7c968e0f5ad08111f72c5a7dd5e | fecffc804be691443f9b1ef2edf8c9b109450f42 | refs/heads/master | 2020-03-18T04:42:33.530725 | 2018-12-12T00:07:35 | 2018-12-12T00:07:35 | 134,301,653 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,047 | r | PaleoResilience_RunSimulations_ 2018-11-14.R | #######################################
# Paleoecological Resilience Indicator functions
# Stegner et al.
# updated 08 July 2018
#######################################
source("Grass-Wood model 30June2018.R")
source("PaleoResilience functions 07July2018.R")
# Set Grass-Wood model parameters________________________
# Parameter meanings are defined in "Grass-Wood model 30June2018.R" —
# presumably h/r are model rates, K_* control the carrying-capacity forcing,
# sigma_sd is process noise, and V0 is the initial state. TODO confirm
# against the sourced model file.
h = 0.5
r=0.25
delta_t = 1            # time step
gens = 10000           # number of generations simulated
K_Start = 1
K_Pulse_amt = -0.4     # magnitude of the carrying-capacity perturbation
pulse_time = 1000
sigma_sd = 0.005
V0 = 1
# Beta-distribution shape parameters derived from a target mean/variance.
beta_ps<-estBetaParams(mu=0.15, var=0.015)
phi = 0.05
# Set taphonomic parameters______________________________
# Sampling/taphonomy settings consumed by rep.ews(); see the sourced
# PaleoResilience functions file for definitions.
exRStime=6000
nreps=5                # replicate simulations per scenario
nsamp=200              # samples drawn per simulated record
AC.buff=0.1
samp.freq2=0.4
steps<-c(1,1,1,1)
a0=2
a1=0.025
sd.pct=0.05
AC.samp=0.4
# Run time series iterations________________________________
# Scenario 1: constant ("linear") age model at 5 yr/cm deposition.
# linear (5yr/cm)
TAtop=5
TAbottom=5
agemodel="linearTA"
windows<-c(2500,600,50,50)
# NOTE(review): `breakpoint` is passed below but is first assigned later in
# this script (broken-stick section). This only works because R evaluates
# arguments lazily — TODO confirm rep.ews() never evaluates `breakpoint`
# when agemodel="linearTA".
# run simulations for gradually-forced critical transitions
system.time(Xct1<-rep.ews(TStype="TSct",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0,a1,cutoff=5000,cutoff2=2000,trim.type="to.RS",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
# run simulations for gradually-forced non-critical transitions
system.time(Xdc1<-rep.ews(TStype="TSdc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.set.bounds",start=1000,q=1,sd.pct=sd.pct,AC.samp=AC.samp))
# run simulations for abruptly-forced critical transitions
system.time(Xrs1<-rep.ews(TStype="TSrs",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.RS",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
# run simulations for no change scenario
system.time(Xnc1<-rep.ews(TStype="TSnc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.set.bounds",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
# linear (20yr/cm) #
# Same four scenarios as above but with coarser, constant time averaging.
TAtop=20
TAbottom=20
agemodel="linearTA"
windows<-c(2500,150,50,50)
system.time(Xct2<-rep.ews(TStype="TSct",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0,a1,cutoff=5000,cutoff2=2000,trim.type="to.RS",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xdc2<-rep.ews(TStype="TSdc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.set.bounds",start=1000,q=1,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xrs2<-rep.ews(TStype="TSrs",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.RS",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xnc2<-rep.ews(TStype="TSnc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.set.bounds",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
# broken stick 1 #
# Two-segment age model: 20 yrs/cm below the breakpoint, 5 yrs/cm above it.
TAtop=5
TAbottom=20
breakpoint=2500
agemodel="brokenstick"
windows<-c(2500,400,50,50)
system.time(Xct3<-rep.ews(TStype="TSct",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0,a1,cutoff=5000,cutoff2=2000,trim.type="to.RS",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xdc3<-rep.ews(TStype="TSdc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.set.bounds",start=1000,q=1,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xrs3<-rep.ews(TStype="TSrs",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.RS",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xnc3<-rep.ews(TStype="TSnc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.set.bounds",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
# broken stick 2 #
# Same two-segment model with the breakpoint moved closer to the transition.
TAtop=5
TAbottom=20
breakpoint=4000
agemodel="brokenstick"
windows<-c(2500,300,50,50)
system.time(Xct4<-rep.ews(TStype="TSct",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0,a1,cutoff=5000,cutoff2=2000,trim.type="to.RS",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xdc4<-rep.ews(TStype="TSdc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.set.bounds",start=1000,q=1,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xrs4<-rep.ews(TStype="TSrs",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.RS",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(Xnc4<-rep.ews(TStype="TSnc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff=5000,cutoff2=2000,trim.type="to.set.bounds",start=1000,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
# Plot Figure 4 (SD for each age model)_____________________________________
# settings for Figures 4 and 5
# Same base colour with decreasing opacity for the four scenario histograms.
colorCT<-rgb(0.1,0.3,0.4,1)
colorDC<-rgb(0.1,0.3,0.4,0.7)
colorRS<-rgb(0.1,0.3,0.4,0.5)
colorNC<-rgb(0.1,0.3,0.4,0.3)
mains2<-c("","","","")
ind="sd"
dev.new(width=9,height=5)
par(oma=c(6,4,3,1),mar=c(0.75,0.5,0,0.5))
# 4 rows (scenarios) x 5 columns (untransformed + four age models), column-wise.
nf<-layout(matrix(c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),nrow=4,ncol=5,byrow=F))
letters.list<-c("a)","b)","c)","d)")
plot.taph.hists(Xct1,Xdc1,Xrs1,Xnc1,indicator=ind,yaxis=T,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Untransformed",type.label=F,taph.ind=1)
letters.list<-c("e)","f)","g)","h)")
plot.taph.hists(Xct1,Xdc1,Xrs1,Xnc1,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Linear 5 yrs/cm",type.label=F,taph.ind=2)
letters.list<-c("i)","j)","k)","l)")
plot.taph.hists(Xct2,Xdc2,Xrs2,Xnc2,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Linear 20 yrs/cm",type.label=F,taph.ind=2)
letters.list<-c("m)","n)","o)","p)")
plot.taph.hists(Xct3,Xdc3,Xrs3,Xnc3,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Broken Stick (2500)",type.label=F,taph.ind=2)
letters.list<-c("q)","r)","s)","t)")
plot.taph.hists(Xct4,Xdc4,Xrs4,Xnc4,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Broken Stick (4000)",type.label=T,taph.ind=2)
mtext("Frequency",2,line=2,outer=T,cex=1.2)
mtext("Kendall's tau",1,line=2.5,outer=T,cex=1.2)
# Plot Figure 5 (AC for each age model)_____________________________________
# settings for Figures 4 and 5
# Identical layout to Figure 4 but for the autocorrelation ("ac") indicator.
colorCT<-rgb(0.1,0.3,0.4,1)
colorDC<-rgb(0.1,0.3,0.4,0.7)
colorRS<-rgb(0.1,0.3,0.4,0.5)
colorNC<-rgb(0.1,0.3,0.4,0.3)
mains2<-c("","","","")
ind<-"ac"
dev.new(width=9,height=5)
par(oma=c(6,4,3,1),mar=c(0.75,0.5,0,0.5))
nf<-layout(matrix(c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),nrow=4,ncol=5,byrow=F))
# Plot col 1: untransformed time series
letters.list<-c("a)","b)","c)","d)")
plot.taph.hists(Xct1,Xdc1,Xrs1,Xnc1,indicator=ind,yaxis=T,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Untransformed",type.label=F,taph.ind=1)
letters.list<-c("e)","f)","g)","h)")
plot.taph.hists(Xct1,Xdc1,Xrs1,Xnc1,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Linear 5 yrs/cm",type.label=F,taph.ind=2)
letters.list<-c("i)","j)","k)","l)")
plot.taph.hists(Xct2,Xdc2,Xrs2,Xnc2,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Linear 20 yrs/cm",type.label=F,taph.ind=2)
letters.list<-c("m)","n)","o)","p)")
plot.taph.hists(Xct3,Xdc3,Xrs3,Xnc3,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Broken Stick (2500)",type.label=F,taph.ind=2)
letters.list<-c("q)","r)","s)","t)")
plot.taph.hists(Xct4,Xdc4,Xrs4,Xnc4,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=letters.list,title="Broken Stick (4000)",type.label=T,taph.ind=2)
mtext("Frequency",2,line=2,outer=T,cex=1.2)
mtext("Kendall's tau",1,line=2.5,outer=T,cex=1.2)
# Plot Figure 6 (SD for age models and subsampling)____________________________________________
ind<-"sd"
mains2<-c("","","","")
ymax<-1.05
dev.new(width=10,height=6)
par(oma=c(6,4,4,1),mar=c(0.75,0.5,0,0.5))
# 4 rows x 6 columns: three sampling treatments for each of two age models.
nf<-layout(matrix(c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24),nrow=4,ncol=6,byrow=F))
# Left half (blue): Linear 20 yrs/cm age model.
colorCT<-rgb(0,0,1,1)
colorDC<-rgb(0,0,1,0.7)
colorRS<-rgb(0,0,1,0.4)
colorNC<-rgb(0,0,1,0.2)
title2<-"Age-Depth"
letters.list<-c("a)","b)","c)","d)")
plot.taph.hists(Xct2,Xdc2,Xrs2,Xnc2,indicator=ind,yaxis=T,mains=mains2,ymax=ymax,labs2=NULL,letters=letters.list,title=title2,type.label=F,taph.ind=2)
letters.list<-c("e)","f)","g)","h)")
title2<-"AD+Even Samp"
plot.taph.hists(Xct2,Xdc2,Xrs2,Xnc2,indicator=ind,yaxis=F,mains=mains2,ymax=ymax,labs2=NULL,letters=letters.list,title=title2,type.label=F,taph.ind=3)
letters.list<-c("i)","j)","k)","l)")
title2<-"AD+Targeted Samp"
plot.taph.hists(Xct2,Xdc2,Xrs2,Xnc2,indicator=ind,yaxis=F,mains=mains2,ymax=ymax,labs2=NULL,letters=letters.list,title=title2,type.label=F,taph.ind=4)
mtext("Linear 20 yrs/cm",3,line=2.25,outer=T,cex=1.2,adj=0.2)
mtext("Frequency",2,line=2,outer=T,cex=1.2)
mtext("Kendall's tau",1,line=2.25,outer=T,cex=1.2)
# Right half (red): Broken Stick (4000) age model.
colorCT<-rgb(1,0,0,1)
colorDC<-rgb(1,0,0,0.7)
colorRS<-rgb(1,0,0,0.4)
colorNC<-rgb(1,0,0,0.2)
title2<-"Age-Depth"
letters.list<-c("m)","n)","o)","p)")
plot.taph.hists(Xct4,Xdc4,Xrs4,Xnc4,indicator=ind,yaxis=F,mains=mains2,ymax=ymax,labs2=NULL,letters=letters.list,title=title2,type.label=F,taph.ind=2)
title2<-"AD+Even Samp"
letters.list<-c("q)","r)","s)","t)")
plot.taph.hists(Xct4,Xdc4,Xrs4,Xnc4,indicator=ind,yaxis=F,mains=mains2,ymax=ymax,labs2=NULL,letters=letters.list,title=title2,type.label=F,taph.ind=3)
title2<-"AD+Targeted Samp"
letters.list<-c("u)","v)","w)","x)")
plot.taph.hists(Xct4,Xdc4,Xrs4,Xnc4,indicator=ind,yaxis=F,mains=mains2,ymax=ymax,labs2=NULL,letters=letters.list,title=title2,type.label=T,taph.ind=4)
mtext("Broken Stick (4000)",3,line=2,outer=T,cex=1.1,adj=0.8)
# Generate Supplemental_____________________________________________
# create table
# Stack the Kendall's-tau summary statistics for all four age models, once for
# the SD indicator and once for the AC indicator.
Supp1<-rbind(Kt.summary.stats(Xct1,Xdc1,Xrs1,Xnc1,3,"sd"),
             Kt.summary.stats(Xct2,Xdc2,Xrs2,Xnc2,3,"sd"),
             Kt.summary.stats(Xct3,Xdc3,Xrs3,Xnc3,3,"sd"),
             Kt.summary.stats(Xct4,Xdc4,Xrs4,Xnc4,3,"sd"))
Supp2<-rbind(Kt.summary.stats(Xct1,Xdc1,Xrs1,Xnc1,3,"ac"),
             Kt.summary.stats(Xct2,Xdc2,Xrs2,Xnc2,3,"ac"),
             Kt.summary.stats(Xct3,Xdc3,Xrs3,Xnc3,3,"ac"),
             Kt.summary.stats(Xct4,Xdc4,Xrs4,Xnc4,3,"ac"))
# Keep a subset of the summary columns (drops columns 4 and 7).
supp1s<-Supp1[,c(1,2,3,5,6,8,9,10)]
supp2s<-Supp2[,c(1,2,3,5,6,8,9,10)]
# Plot Supplemental
plot.supp(Xct1,Xdc1,Xrs1,Xnc1,"sd","Linear 5 yr/cm, Standard Deviation")
plot.supp(Xct2,Xdc2,Xrs2,Xnc2,"sd","Linear 20 yr/cm, Standard Deviation")
plot.supp(Xct3,Xdc3,Xrs3,Xnc3,"sd","Broken Stick (2500), Standard Deviation")
plot.supp(Xct4,Xdc4,Xrs4,Xnc4,"sd","Broken Stick (4000), Standard Deviation")
plot.supp(Xct1,Xdc1,Xrs1,Xnc1,"ac","Linear 5 yr/cm, Autocorrelation Time")
plot.supp(Xct2,Xdc2,Xrs2,Xnc2,"ac","Linear 20 yr/cm, Autocorrelation Time")
plot.supp(Xct3,Xdc3,Xrs3,Xnc3,"ac","Broken Stick (2500), Autocorrelation Time")
plot.supp(Xct4,Xdc4,Xrs4,Xnc4,"ac","Broken Stick (4000), Autocorrelation Time")
# Set up for plotting figures 2, 3___________________________________
# single runs of GW model, all 4 time series types
# Column 2 of single_run() output is the K forcing, column 3 the state
# variable (tree cover) -- matches the axis labels used in Figure 2 below.
single_test = single_run(r=r, gens=gens, delta_t=delta_t, K_Start=K_Start, K_Pulse_amt=K_Pulse_amt, V0=V0, pulse_time=pulse_time,driver_press_topo="gradual",q=5)
TS<-single_test[,3]
TSct<-cbind(c(1:length(TS)),TS)   # two-column matrix: time step, tree cover
Kct<-single_test[,2]
# Change-point estimate on the critical-transition run; BP.Loc gives the
# transition time (timeCT) used as the reference line in later panels.
bp.out<-CE.Normal.Mean(as.data.frame(TS),Nmax=1)
timeCT<-bp.out$BP.Loc
single_test = single_run(r=r, gens=gens, delta_t=delta_t, K_Start=K_Start, K_Pulse_amt=K_Pulse_amt, V0=V0, pulse_time=pulse_time,driver_press_topo="gradual",q=1)
TS<-single_test[,3]
TSdc<-cbind(c(1:length(TS)),TS)
Kdc<-single_test[,2]
single_test = single_run(r=r, gens=gens, delta_t=delta_t, K_Start=K_Start, K_Pulse_amt=K_Pulse_amt, V0=V0, pulse_time=exRStime,driver_press_topo="abrupt",q=5)
TS<-single_test[,3]
TSrs<-cbind(c(1:length(TS)),TS)
Krs<-single_test[,2]
# No-change control: zero forcing amplitude (K_Pulse_amt=0).
single_test = single_run(r=r, gens=gens, delta_t=delta_t, K_Start=K_Start, K_Pulse_amt=0, V0=V0, pulse_time=50,driver_press_topo="gradual",q=5)
TS<-single_test[,3]
TSnc<-cbind(c(1:length(TS)),TS)
Knc<-single_test[,2]
#Age models
# single runs of GW model, all 4 time series types
# Short run used only to illustrate the exponential (Carpenter) age model.
single_test = single_run(r=0.25, gens=300, delta_t=1, K_Start=1, K_Pulse_amt=-0.4, V0=V0, pulse_time=3,driver_press_topo="gradual",q=5)
TS<-single_test[,3]
TSctexp<-cbind(c(1:length(TS)),TS)
CTexp<-trimtoRS2(TSctexp,cutoff=150,cutoff2=50,trim.type="to.RS",start=60)
oTSexp<-CTexp$trimTS
tailexp<-CTexp$tail.length
am1<-Carpenter_timeavgTS(oTSexp,a0,a1,tailexp)       # exponential age model
XX<-trimtoRS2(TSct,cutoff=5000,cutoff2=3000,trim.type="to.RS",start=1000)
oTS<-XX$trimTS
tail<-XX$tail.length    # NOTE(review): shadows base::tail in this workspace
am2<-timeavgTS(oTS,TAbottom=5,TAtop=5,sd.pct=sd.pct)                          # linear 5 yr/cm
am3<-timeavgTS(oTS,TAbottom=20,TAtop=20,sd.pct=sd.pct)                        # linear 20 yr/cm
am4<-brokenstick.timeavgTS(oTS,TAbottom=20,TAtop=5,breakpoint=2500,sd.pct=sd.pct)
am5<-brokenstick.timeavgTS(oTS,TAbottom=20,TAtop=5,breakpoint=4000,sd.pct=sd.pct)
adTS<-am3$adTS
regTS<-sampleTS(adTS,sample.by="distribute.samples",samp.freq1=NULL,nsamp,timeCT)   # even sampling
acTS<-sampleTSatAC(adTS,AC.buffer=0.10,AC.samp=0.4,timeCT,XX$tail.length)           # targeted sampling
# Plot Figure 2__________________________________________________
dev.new(width=10,height=6)
# Custom layout: four stacked time-series panels on top, age-model panels
# (drawn by the next section) beneath, with spacer cells.
X<-c(1,1,1,2,2,2,3,3,3,4,4,4,
     1,1,1,2,2,2,3,3,3,4,4,4,
     5,5,5,5,5,6,6,7,7,7,7,7,
     8,8,8,8,8,9,9,10,10,10,10,10,11)
matx<-matrix(X,nrow=12,ncol=4)
nf<-layout(matx)
layout.show(nf)
par(mar=c(0,5,0.25,0.5),oma=c(6,2,3,2),mgp=c(2.4,1,0))
# plot 4 time series types with CPT results
# Panel a): no-change control; red dashes mark the K forcing at intervals.
plot(TSnc,type="l",ylab="",ylim=c(-0.05,1.1),xaxt="n",xlab="",las=1,col="gray40",lwd=1.5,xlim=c(-300,10000),yaxt="n")
text(-350,1,"a)",cex=1.75)
axis(2,at=seq(0,1,0.5),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
tempK<-cbind(1:gens,Knc)
int<-seq(1,gens,275)    # thin the K series so the dashes are readable
points(tempK[int,],pch="-",col="red",cex=2)
#lines(1:gens,Knc,col="red",lty=6,lwd=2)
legend("bottomright",c("Tree cover","K parameter"),lty=c(1,2),col=c("gray40","red"),bty="n")
# Panel b): abruptly-forced critical transition.
plot(TSrs,type="l",ylab="",ylim=c(-0.05,1.1),xaxt="n",xlab="",las=1,col="gray40",lwd=1.5,xlim=c(-300,10000),yaxt="n")
text(-350,1,"b)",cex=1.75)
axis(2,at=seq(0,1,0.5),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
tempK<-cbind(1:gens,Krs)
int<-seq(1,gens,275)
points(tempK[int,],pch="-",col="red",cex=2)
#lines(1:gens,Krs,col="red",lty=3,lwd=2)
# Panel c): gradually-forced non-critical transition.
plot(TSdc,type="l",ylab="",ylim=c(-0.05,1.1),xaxt="n",xlab="",las=1,col="gray40",lwd=1.5,xlim=c(-300,10000),yaxt="n")
text(-350,1,"c)",cex=1.75)
axis(2,at=seq(0,1,0.5),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
tempK<-cbind(1:gens,Kdc)
int<-seq(1,gens,275)
points(tempK[int,],pch="-",col="red",cex=2)
#lines(1:gens,Kdc,col="red",lty=3,lwd=2)
# Panel d): gradually-forced critical transition (x axis drawn here only).
plot(TSct,type="l",ylab="",xlab="time steps",ylim=c(-0.05,1.1),las=1,col="gray40",lwd=1.5,xlim=c(-300,10000),yaxt="n",xaxt="n")
text(-350,1,"d)",cex=1.75)
axis(2,at=seq(0,1,0.5),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
axis(1,hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
mtext("Time Steps (Years)",1,line=2,cex=1)
tempK<-cbind(1:gens,Kct)
int<-seq(1,gens,275)
points(tempK[int,],pch="-",col="red",cex=2)
#lines(1:gens,Kct,col="red",lty=3,lwd=2)
mtext("Tree cover (proportional) (gray) ",2,line=-1,outer=T,adj=0.2)
mtext("K parameter (red)",2,line=-1,outer=T,col="red",adj=0.85)
# Plot age models (panels e-g: depth-varying time averaging; panel h: Carpenter)
# Panel e): overlays the two linear age models (5 and 20 yrs/cm).
plot(am2$adTS[,1],am2$TAbins[1:(length(am2$TAbins)-1)],type="l",ylim=c(0,26),ylab="",xlab="Time steps",lwd=1.5,col="black",lty=1,xlim=c(0,7000),xaxt="n",las=1,yaxt="n")
axis(2,at=seq(0,35,10),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
axis(1,hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
lines(am3$adTS[,1],am3$TAbins[1:(length(am3$TAbins)-1)],lwd=1.5,lty=1,col="gray40")
text(300,24.5,"e)",cex=1.75)
mtext("Time averaging (yrs/cm)",2,line=2.25,cex=1,outer=FALSE)
mtext("Time Steps (Years)",1,line=2.5,cex=1,outer=FALSE)
# plot spacer (empty panel to pad the layout)
plot(c(0,1),c(0,1),type="n",bty="n",xaxt="n",yaxt="n",ylab="",xlab="")
# Panel f): broken-stick age model with breakpoint at 2500.
plot(am4$adTS[,1],am4$TAbins[1:(length(am4$TAbins)-1)],type="l",ylim=c(0,26),ylab="",xlab="Time steps",lwd=1.5,col="black",lty=1,xlim=c(0,7000),xaxt="n",las=1,yaxt="n")
axis(2,at=seq(0,35,10),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
# fixed: stray empty argument (double comma) removed from this axis() call
axis(1,hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
text(300,24.5,"f)",cex=1.75)
mtext("Time averaging (yrs/cm)",2,line=2.25,cex=1,outer=FALSE)
mtext("Time Steps (Years)",1,line=2.5,cex=1,outer=FALSE)
# Panel g): broken-stick age model with breakpoint at 4000; x labels offset.
plot(am5$adTS[,1],am5$TAbins[1:(length(am5$TAbins)-1)],type="l",ylim=c(0,26),ylab="",xlab="Time steps",lwd=1.5,col="black",lty=1,xlim=c(0,7000),xaxt="n",las=1,yaxt="n")
axis(2,at=seq(0,35,10),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
axis(1,at=seq(1000,7000,2000),labels=seq(2000,8000,2000),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
text(300,24.5,"g)",cex=1.75)
mtext("Time Steps (Years)",1,line=2.5,cex=1,outer=FALSE)
mtext("Time averaging (yrs/cm)",2,line=2.25,cex=1,outer=FALSE)
# plot spacer
plot(c(0,1),c(0,1),type="n",bty="n",xaxt="n",yaxt="n",ylab="",xlab="")
# Panel h): Carpenter (exponential) age model; note the reversed x axis.
plot(am1$adTS[,1],am1$TAvect,type="l",xlim=c(330,75),las=1,ylab="",xlab="",lwd=1.5,xaxt="n",yaxt="n")
axis(2,at=seq(0,8,2),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
axis(1,at=seq(100,250,50),labels=seq(100,250,50),hadj=0.6,las=1,tcl=-0.25,cex.axis=1.5)
text(340,6.6,"h)",cex=1.75,pos=4)
mtext("Time averaging (yrs/cm)",2,line=2.25,cex=1,outer=FALSE)
mtext("Time Steps (Years)",1,line=2.5,cex=1,outer=FALSE)
# Plot Figure 3__________________________________________________
# Rebuild the four record variants for the broken-stick (4000) age model.
XX<-trimtoRS2(TSct,cutoff=5000,cutoff2=3000,trim.type="to.RS",start=1000)
oTS<-XX$trimTS
tail<-XX$tail.length    # NOTE(review): shadows base::tail in this workspace
adTS<-am5$adTS
regTS<-sampleTS(adTS,sample.by="distribute.samples",samp.freq1=NULL,nsamp,timeCT)   # even sampling
acTS<-sampleTSatAC(adTS,AC.buffer=0.10,AC.samp=0.4,timeCT,XX$tail.length)           # targeted sampling
variant.cols<-c("gray20","gray35","gray60","gray75")   # one shade per variant
dev.new(width=10,height=5.1)
par(mar=c(0,3.5,0.45,1),oma=c(4,3,3,2),mgp=c(2.4,1,0))
# Custom layout: 4 raw panels, 4 detrended panels, 2 wide EWS panels.
plot.matx<-c(1,1,2,2,3,3,4,4,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,5,5,6,6,7,7,8,8,9,9,9,9,10,10,10,10,9,9,9,9,10,10,10,10)
nf<-layout(matrix(plot.matx, nrow = 8, ncol = 6, byrow = FALSE))
# Plot examples of paleo transformation (panels a-d: the four record variants)
# Panel a): untransformed record, with tick rows marking sample placement.
plot(oTS[,1],oTS[,2],pch=16,cex=0.7,col=variant.cols[1],ylab="",las=1,yaxt="n",xlim=c(500,9500),xaxt="n",ylim=c(0,1.2))
axis(2,at=seq(0.2,1,0.4),las=1,cex.axis=1.5)
text(700,1.25,"a)",cex=1.75,pos=1)
# ticks for the evenly-distributed samples ("E")
Xs<-XX$tail.length+regTS[,1]
Ys<-rep(1.16,length(Xs))
points(Xs,Ys,pch="|",cex=0.8)
text(max(Xs)-75,Ys[1],"E",pos=4,cex=1)
# ticks for the targeted samples ("T")
Xs<-XX$tail.length+acTS[,1]
Ys<-rep(1.02,length(Xs))
points(Xs,Ys,pch="|",col="black",cex=0.8)
text(max(Xs)-75,Ys[1],"T",pos=4,cex=1)
# Panel b): time-averaged (age-depth) record.
plot(adTS[,1]+oTS[1,1],adTS[,2],pch=16,cex=0.7,col=variant.cols[2],ylab="",las=1,yaxt="n",xlim=c(500,9500),xaxt="n",ylim=c(0,1.2))
axis(2,at=seq(0.2,1,0.4),las=1,cex.axis=1.5)
text(700,1.25,"b)",cex=1.75,pos=1)
# Panel c): evenly sampled record.
plot(regTS[,1]+oTS[1,1],regTS[,2],pch=16,cex=0.7,col=variant.cols[3],ylab="",las=1,yaxt="n",xlim=c(500,9500),xaxt="n",ylim=c(0,1.2))
axis(2,at=seq(0.2,1,0.4),las=1,cex.axis=1.5)
text(700,1.25,"c)",cex=1.75,pos=1)
# Panel d): targeted-sampling record (x axis drawn on this bottom panel).
plot(acTS[,1]+oTS[1,1],acTS[,2],pch=16,cex=0.7,col=variant.cols[4],ylab="",las=1,yaxt="n",xlim=c(500,9500),xaxt="n",ylim=c(0,1.2))
axis(2,at=seq(0.2,1,0.4),las=1,cex.axis=1.5)
text(700,1.25,"d)",cex=1.75,pos=1)
axis(1,tcl=-0.25,padj=-0.5,cex.axis=1.5)
mtext("Time Steps (Years)",1,line=2.5,cex=1.2,outer=FALSE)
# fixed: 'out=T' relied on partial argument matching; spelled out explicitly
mtext("Tree Cover (proportion)",2,line=-0.25,cex=1.2,outer=TRUE)
#plot detrended time series
# Panels e-h: same four variants after Gaussian detrending (detrendTS).
# NOTE(review): adTS/regTS/acTS are overwritten in place by their detrended
# versions here -- later code sees the detrended series.
oTS<-detrendTS(TSct,method="gaussian")
plot(oTS[,1],oTS[,2],col=variant.cols[1],xaxt="n",pch=16,cex=0.7,las=1,ylab="",yaxt="n",ylim=c(-0.25,0.25))
axis(2,seq(-0.2,0.2,0.2),las=1,tcl=-0.5,hadj=0.75,cex.axis=1.5)
text(min(oTS[,1])*100,0.19,"e)",cex=1.75)
adTS<-detrendTS(adTS,method="gaussian")
plot(adTS[,1]+TSct[1,1],adTS[,2],col=variant.cols[2],xaxt="n",pch=16,cex=0.7,las=1,ylab="",yaxt="n",ylim=c(-0.25,0.25))
axis(2,seq(-0.2,0.2,0.2),las=1,tcl=-0.5,hadj=0.75,cex.axis=1.5)
text(min(oTS[,1])*100,0.19,"f)",cex=1.75)
mtext("Detrended Proportional Tree Cover",2,line=2.75,cex=1.2,adj=0.75)
regTS<-detrendTS(regTS,method="gaussian")
plot(regTS[,1]+TSct[1,1],regTS[,2],col=variant.cols[3],xaxt="n",pch=16,cex=0.7,las=1,ylab="",yaxt="n",ylim=c(-0.25,0.25))
axis(2,seq(-0.2,0.2,0.2),las=1,tcl=-0.5,hadj=0.75,cex.axis=1.5)
text(min(oTS[,1])*100,0.19,"g)",cex=1.75)
acTS<-detrendTS(acTS,method="gaussian")
plot(acTS[,1]+TSct[1,1],acTS[,2],col=variant.cols[4],xaxt="n",pch=16,cex=0.7,las=1,ylab="",yaxt="n",ylim=c(-0.25,0.25))
axis(2,seq(-0.2,0.2,0.2),las=1,tcl=-0.5,hadj=0.75,cex.axis=1.5)
axis(1,tcl=-0.25,padj=-0.5,cex.axis=1.5)
text(min(oTS[,1])*100,0.19,"h)",cex=1.75)
mtext("Time Steps (Years)",1,line=2.5,cex=1.2,outer=F)
#plot SD EWS
# Panel i): rolling standardized SD for each variant; Kendall's tau of the
# pre-transition trend is collected in 'Ktau' and shown in the legend.
Ktau<-matrix(NA,nrow=5,ncol=2)
Ktau[1,2]<-"tau"   # legend header row
Xsd<-std.sd(oTS,2500,1)
temp.matx<-cbind(Xsd$std.SD,Xsd$windows2[,1])
temp.matx2<-temp.matx[which(temp.matx[,2]<timeCT),]   # keep pre-transition windows
Ktau[2,2]<-round(cor(temp.matx2[,1],temp.matx2[,2],method="kendall"),digits=2)
plot(Xsd$midpoints,Xsd$std.SD,xaxt="n",type="l",las=1,ylab="",ylim=c(0,5),col=variant.cols[1],yaxt="n",xlim=c(1000,9000),lwd=3)
text(1200,4.8,"i)",cex=1.75)
axis(2,seq(0,5,1),las=1,tcl=-0.25,hadj=0.5,cex.axis=1.5)
mtext("Standardized SD",2,line=2,cex=1.2,outer=F)
abline(v=timeCT-TSct[1,1])   # vertical line at the change-point estimate
Xsd<-std.sd(adTS,150,1)
temp.matx<-cbind(Xsd$std.SD,Xsd$windows2[,1])
temp.matx2<-temp.matx[which(temp.matx[,2]<timeCT),]
Ktau[3,2]<-round(cor(temp.matx2[,1],temp.matx2[,2],method="kendall"),digits=2)
lines(Xsd$midpoints,Xsd$std.SD,col=variant.cols[2],lwd=3)
Xsd<-std.sd(regTS,100,1)
temp.matx<-cbind(Xsd$std.SD,Xsd$windows2[,1])
temp.matx2<-temp.matx[which(temp.matx[,2]<timeCT),]
Ktau[4,2]<-round(cor(temp.matx2[,1],temp.matx2[,2],method="kendall"),digits=2)
lines(Xsd$midpoints,Xsd$std.SD,col=variant.cols[3],lwd=3)
Xsd<-std.sd(acTS,100,1)
temp.matx<-cbind(Xsd$std.SD,Xsd$windows2[,1])
temp.matx2<-temp.matx[which(temp.matx[,2]<timeCT),]
Ktau[5,2]<-round(cor(temp.matx2[,1],temp.matx2[,2],method="kendall"),digits=2)
lines(Xsd$midpoints,Xsd$std.SD,col=variant.cols[4],lwd=3)
legend("topright",c(Ktau[,2]),col=c(rgb(0,0,0,0),variant.cols),pch=16,bty="n")
#plot AC EWS
# Panel j): same structure for autocorrelation time (ACtime).
Ktau<-matrix(NA,nrow=5,ncol=2)
Ktau[1,2]<-"tau"
Xac<-ACtime(oTS,2500,1)
temp.matx<-cbind(Xac$ACtime,Xac$windows2[,1])
temp.matx2<-temp.matx[which(temp.matx[,2]<timeCT),]
Ktau[2,2]<-round(cor(temp.matx2[,1],temp.matx2[,2],method="kendall"),digits=2)
plot(Xac$midpoints,Xac$ACtime,xaxt="n",type="l",las=1,ylab="",yaxt="n",ylim=c(0,40),col=variant.cols[1],xlim=c(1000,9000),lwd=3)
text(1200,39,"j)",cex=1.75)
abline(v=timeCT-TSct[1,1])
axis(1,tcl=-0.25,padj=-0.5,cex.axis=1.5)
axis(2,seq(0,40,5),las=1,tcl=-0.25,hadj=0.5,cex.axis=1.5)
mtext("Autocorrelation Time",2,line=2,cex=1.2,outer=F)
Xac<-ACtime(adTS,150,1)
temp.matx<-cbind(Xac$ACtime,Xac$windows2[,1])
temp.matx2<-temp.matx[which(temp.matx[,2]<timeCT),]
Ktau[3,2]<-round(cor(temp.matx2[,1],temp.matx2[,2],method="kendall"),digits=2)
lines(Xac$midpoints,Xac$ACtime,col=variant.cols[2],lwd=3)
Xac<-ACtime(regTS,100,1)
temp.matx<-cbind(Xac$ACtime,Xac$windows2[,1])
temp.matx2<-temp.matx[which(temp.matx[,2]<timeCT),]
Ktau[4,2]<-round(cor(temp.matx2[,1],temp.matx2[,2],method="kendall"),digits=2)
lines(Xac$midpoints,Xac$ACtime,col=variant.cols[3],lwd=3)
Xac<-ACtime(acTS,100,1)
temp.matx<-cbind(Xac$ACtime,Xac$windows2[,1])
temp.matx2<-temp.matx[which(temp.matx[,2]<timeCT),]
Ktau[5,2]<-round(cor(temp.matx2[,1],temp.matx2[,2],method="kendall"),digits=2)
lines(Xac$midpoints,Xac$ACtime,col=variant.cols[4],lwd=3)
mtext("Time Steps (Years)",1,line=2.5,cex=1.2,outer=F)
legend("topright",c(Ktau[,2]),col=c(rgb(0,0,0,0),variant.cols),pch=16,bty="n")
# SIMULATIONS USING THE EXPONENTIAL AGE MODEL#######
# set model parameters_____________________________________________
# NOTE: these overwrite the globals used by the earlier sections -- the
# exponential-age-model runs use a much shorter simulation (gens = 300).
h = 0.5
r=0.25
# c=1
delta_t = 1
gens = 300
K_Start = 1
K_Pulse_amt = -0.4
pulse_time = 3
sigma_sd = 0.005
V0 = 1
FRI = 1
beta_ps<-estBetaParams(mu=0.15, var=0.015)
# set taphonomic parameters____________________________________
# NOTE: the original assigned nsamp (25 then 200) and a0/a1 twice; the dead
# first assignments have been removed -- final values are unchanged.
agemodel="Carpenter"     # exponential sedimentation (Carpenter) age model
a0=2                     # Carpenter age-model coefficients
a1=0.025
title<-"Exponential"
windows<-c(75,20,10,10)  # rolling-window widths for the four record variants
exRStime=210             # pulse time for the abruptly-forced ("TSrs") runs
nsamp=200                # number of samples distributed along each record
AC.buff=0.1
samp.freq2=0.4
steps<-c(1,1,1,1)
cutoff=150               # trimming bounds (model time steps), see trimtoRS2
cutoff2=50
start=60
# Run simulations____________________________________
# occasionally this x1 simulation fails. Just re-run
# NOTE(review): TAbottom/TAtop/breakpoint below still hold the values from the
# broken-stick section; presumably they are ignored by the "Carpenter" age
# model -- confirm against rep.ews().
system.time(x1<-rep.ews(TStype="TSct",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0,a1,cutoff,cutoff2,trim.type="to.RS",start,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(x2<-rep.ews(TStype="TSdc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff,cutoff2,trim.type="to.set.bounds",start,q=1,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(x3<-rep.ews(TStype="TSrs",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff,cutoff2,trim.type="to.RS",start,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
system.time(x4<-rep.ews(TStype="TSnc",nreps=nreps, TAbottom=TAbottom,TAtop=TAtop,sample.by="distribute.samples",nsamp,samp.freq1=NULL,samp.freq2=NULL,AC.buff,windows=windows,steps=steps,agemodel=agemodel,breakpoint=breakpoint,det_method="gaussian",a0=a0,a1=a1,cutoff,cutoff2,trim.type="to.set.bounds",start,q=5,sd.pct=sd.pct,AC.samp=AC.samp))
# Rename to the scenario names used by the plotting helpers.
Xct<-x1
Xdc<-x2
Xrs<-x3
Xnc<-x4
# plot Figure 7_____________________________________________________
dev.new(width=7,height=5.5)
par(oma=c(4,4,4,2),mar=c(0.75,0.5,0,0.5))
nf<-layout(matrix(c(1,2,3,4,5,6,7,8,9,10,11,12),nrow=4,ncol=3,byrow=F))
colorCT<-rgb(0.1,0.3,0.4,1)
colorDC<-rgb(0.1,0.3,0.4,0.7)
colorRS<-rgb(0.1,0.3,0.4,0.5)
colorNC<-rgb(0.1,0.3,0.4,0.3)
mains2<-c("","","","")
ind<-"sd"
main<-""
plot.taph.hists(Xct,Xdc,Xrs,Xnc,indicator=ind,yaxis=T,mains=mains2,ymax=1.05,labs2=NULL,letters=c("a)","b)","c)","d)"),title="Untransformed",type.label=F,taph.ind=1)
plot.taph.hists(Xct,Xdc,Xrs,Xnc,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=c("e)","f)","g)","h)"),title="Sedimentation",type.label=F,taph.ind=2)
plot.taph.hists(Xct,Xdc,Xrs,Xnc,indicator=ind,yaxis=F,mains=mains2,ymax=1.05,labs2=NULL,letters=c("i)","j)","k)","l)"),title="Sed.+Even Sampling",type.label=T,taph.ind=3)
mtext("Frequency",2,line=2.25,outer=T,cex=1.1)
mtext("Kendall's tau",1,line=2.5,outer=T,cex=1.1)
# Exponential Supplemental___________________________________
plot.supp(Xct,Xdc,Xrs,Xnc,"sd","Exponential, Standard Deviation")
plot.supp(Xct,Xdc,Xrs,Xnc,"ac","Exponential, Autocorrelation Time")
# Exponential Supplemental table
Supp1<-Kt.summary.stats(Xct,Xdc,Xrs,Xnc,3,"sd")
Supp2<-Kt.summary.stats(Xct,Xdc,Xrs,Xnc,3,"ac")
# Keep a subset of the summary columns (drops columns 4 and 7).
supp1<-Supp1[,c(1,2,3,5,6,8,9,10)]
supp2<-Supp2[,c(1,2,3,5,6,8,9,10)]
|
8ae6e3f756fc33cc31652677a2b711ad486b1e98 | 4201e9b754760dc35fc0aeef9df5a8b9d801c47f | /bin/R-3.5.1/src/library/base/R/ifelse.R | 24ed767517ef59127908bd0e75155fe713b51b05 | [
"LGPL-2.1-only",
"GPL-2.0-only",
"GPL-2.0-or-later",
"LGPL-3.0-only",
"GPL-3.0-only",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lifebit-ai/exomedepth | cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e | 5a775ae5e2a247aeadc5208a34e8717c7855d080 | refs/heads/master | 2020-03-27T12:55:56.400581 | 2018-10-11T10:00:07 | 2018-10-11T10:00:07 | 146,578,924 | 0 | 0 | MIT | 2018-08-29T09:43:52 | 2018-08-29T09:43:51 | null | UTF-8 | R | false | false | 2,066 | r | ifelse.R | # File src/library/base/R/ifelse.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2017 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
## Vectorised conditional: returns a value shaped like 'test' (attributes
## preserved), filled element-wise from 'yes' where 'test' is TRUE and from
## 'no' where it is FALSE; NA tests give NA.  'yes' and 'no' are recycled to
## length(test).
ifelse <- function (test, yes, no)
{
    if(is.atomic(test)) { # do not lose attributes
        if (typeof(test) != "logical")
            storage.mode(test) <- "logical"
        ## quick return for cases where 'ifelse(a, x, y)' is used
        ## instead of 'if (a) x else y'
        if (length(test) == 1 && is.null(attributes(test))) {
            if (is.na(test)) return(NA)
            else if (test) {
                if (length(yes) == 1) {
                    yat <- attributes(yes)
                    ## a lone "srcref" attribute on a function still allows
                    ## the fast path (srcref carries no semantic content)
                    if (is.null(yat) || (is.function(yes) &&
                                         identical(names(yat), "srcref")))
                        return(yes)
                }
            }
            else if (length(no) == 1) {
                nat <- attributes(no)
                if (is.null(nat) || (is.function(no) &&
                                     identical(names(nat), "srcref")))
                    return(no)
            }
        }
    }
    else ## typically a "class"; storage.mode<-() typically fails
        test <- if(isS4(test)) methods::as(test, "logical") else as.logical(test)
    ## start from 'test' itself so shape/attributes carry over to the answer
    ans <- test
    ok <- !is.na(test)
    ## fill TRUE positions from 'yes' and FALSE positions from 'no',
    ## recycling each to the full answer length first
    if (any(test[ok]))
        ans[test & ok] <- rep(yes, length.out = length(ans))[test & ok]
    if (any(!test[ok]))
        ans[!test & ok] <- rep(no, length.out = length(ans))[!test & ok]
    ans
}
|
122d1381540456ee41f220635201a9ed316b25b2 | 4e97c19349e0bfcc96e1ba74cd7a02e053c8d320 | /plot1.R | cca3ba02e2aa209a5e7b6562acf663ef73885dc6 | [] | no_license | segregorio/ExData_Plotting1 | 966ee6e15944a2b445c85b3d387fc03dd6def20c | b48803544b0af7443a8b25d020e8d6043fd09dfd | refs/heads/master | 2020-12-25T05:30:14.489212 | 2016-02-01T00:33:33 | 2016-02-01T00:33:33 | 50,783,171 | 0 | 0 | null | 2016-01-31T16:20:15 | 2016-01-31T16:20:15 | null | UTF-8 | R | false | false | 932 | r | plot1.R | ## Load dplyr package
library(dplyr)
## Plot a histogram of Global Active Power for Feb 1-2, 2007 as plot1.png.
##
## Reads the UCI "household_power_consumption" data set, skipping straight to
## the rows of interest, draws the histogram to a PNG device and returns the
## data so it can be manipulated further.
##
## Args:
##   datafile: path to the semicolon-separated data file (default keeps the
##             original hard-coded behaviour).
## Returns: the two-day data.frame (2880 one-minute records), with the
##          original column names applied.
plot1 <- function(datafile = "household_power_consumption.txt") {
    ## Locate the last record before the window of interest so read.table can
    ## skip straight to it; 2880 = number of 1-minute records in Feb 1-2, 2007.
    skip.to <- grep("31/1/2007;23:59:00", readLines(datafile))
    data <- read.table(datafile, na.strings = c("?"), header = FALSE, sep = ";",
                       skip = skip.to, nrows = 2880)
    ## Read the header row into memory (fixed comment typo: "trhe" -> "the")
    header <- read.table(datafile, nrows = 1, header = FALSE,
                         sep = ';', stringsAsFactors = FALSE)
    ## Add header to data set
    colnames(data) <- unlist(header)
    ## Set active device to PNG; guarantee it is closed even if hist() errors.
    png('plot1.png')
    on.exit(dev.off(), add = TRUE)
    ## Plot histogram of Global_active_power column
    hist(data$Global_active_power, col = "red", main = "Global Active Power",
         xlab = "Global Active Power (kilowatts)")
    ## Return the data so we can manipulate it further.
    data
}
6d386f5425a1ebc63dd9676970abdc521d946687 | 8327aedc9fca9c1d5f11c160d440ecc082fb915d | /R/functions_ncdfExtract.r | 537ad8033a8124de8a2057819b758f4f7ad27a6f | [] | no_license | SESjo/SES | f741a26e9e819eca8f37fab71c095a4310f14ed3 | e0eb9a13f1846832db58fe246c45f107743dff49 | refs/heads/master | 2020-05-17T14:41:01.774764 | 2014-04-17T09:48:14 | 2014-04-17T09:48:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,850 | r | functions_ncdfExtract.r | #' Create [Chl]_surf variable by extracting relevant values from NetCDF files.
#'
#' @param stat The 'statdives' object.
#' @param chldir The directory where to find the NetCDF files with [chl] values.
#' @param append Should the variable be returned with the entire statdives object ?
#' @family chl
#' @export
extractChl <- function(stat, chldir, append=TRUE) {
findDefaultVars(c("Lat", "Lon"), stat, type.obj="stat", type="check")
findVars("Date", stat, type="check")
chl <- rep(NA, nrow(stat))
chlgrid <- ncgrid(list.files(chldir, "*.nc", full.names=TRUE)[1])
chlPix <- idPixel(stat, chlgrid, append=FALSE)
for (date in unique(stat$Date)){
chlDate <- importChl(date, chldir)
if (all(is.na(chlDate))) next
chl[stat$Date == date] <- chlDate$Chl[stat$Pixel.id[stat$Date == date]]
}
return(chl)
}
#' Create Biomass variable by extracting relevant values from NetCDF files
#'
#' When the time resolution of the biomass NetCDF files is > 1day, then the biomass is extracted in the pixel where the
#' averaged daily location of seal belongs (Beware, implies day/night same location).
#'
#' @param stat The 'statdives' object.
#' @param tdr The 'tdr' object.
#' @param biomdir The directory where to find the NetCDF files with biomass values.
#' @export
extractBiom <- function(stat, tdr, biomdir) {
findDefaultVars(c("Lat", "Lon"), stat, type.obj="stat", type="check")
findVars("Date", stat)
findVars(c("Layer", "is.Day", "Pixel.id", "Date"), tdr,
varnames=c("tdrLayer", "tdris.Day", "tdrPixel.id", "tdrDate"))
biom <- rep(NA, nrow(tdr))
ncfiles <- list.files(biomdir, "*.nc", full.names=TRUE)
ncres <- median(diff(text2posx(ncfiles), lag=1, units="day"))
message(paste("Time resolution of micronekton biomass input is", ncres, "day(s)"))
if (ncres == 7){
tmp <- aggregate(cbind(Lat, Lon), by=list(Date=Date), mean)
biomgrid <- ncgrid(ncfiles[1])
tmp <- idPixel(tmp, biomgrid)
pixelstot <- na.omit(unique(tmp))
}
for (date in unique(Date)){
biomDate <- importSEAPOpred(date, biomdir)
if (all(is.na(biomDate))) next
if (ncres == 1){pixels <- na.omit(unique(Pixel.id[Date == date]))}
else if (ncres == 7){pixels <- na.omit(unique(pixelstot$Pixel.id[pixelstot$Date == date]))}
for (pix in pixels){
if (ncres == 1){cond <- tdrDate == date & tdrPixel.id == pix}
else if (ncres == 7){cond <- tdrDate == date} # Pixel.id is recomputed according to the daily averaged locations
layers <- unique(tdrLayer[cond])
is.day <- unique(tdris.Day[cond])
for (layer in layers){
for (day in is.day){
val <- layerBiom(biomDate[pix, 3:8], layers=layer, is.day=day)
biom[cond & tdrLayer==layer & tdris.Day==day] <- val
}
}
}
}
return(unlist(biom))
}
#' Compute the biomass in each layer during the day and night periods.
#'
#' @param grp Atomic vector giving the functional groups biomass in the following order:
#' \code{c(epi, meso, mmeso, bathy, mbathy, hmbathy)}.
#' @param all.col Should the function return all columns: \code{Layer} and \code{is.Day}
#' @param layers Should the function focus on a specific layer (to choose in
#' \code{c("Bathy", "Epi", "Meso")}). Default is all layers.
#' @param is.day Should the function focus on a specific period (to choose in
#' \code{c(TRUE, FALSE)}).
#' @export
#' @examples
#' layerBiom(1:6) # Should be c(4, 10, 7, 15, 1, 5)
layerBiom <- function(grp, all.col=FALSE, layers=NULL, is.day=NULL){
  # 'grp' must supply the six functional-group biomasses, in the order
  # c(epi, meso, mmeso, bathy, mbathy, hmbathy). It may be an atomic vector
  # or a one-row data frame (length() is then the number of columns).
  # Previously a short input silently produced NA biomasses; fail fast instead.
  if (length(grp) < 6)
    stop("'grp' must contain the 6 functional group biomasses.", call. = FALSE)
  # All layer/period combinations in a fixed order, so that the positional
  # assignments below line up: rows 1-3 are night, rows 4-6 are day.
  tab <- expand.grid(Layer=c("Bathy", "Epi", "Meso"), is.Day=c(FALSE, TRUE))
  tab$Biom <- rep(NA, nrow(tab))
  # Daytime: the migrant groups (mmeso, mbathy, hmbathy) sit with their home
  # layer at depth, so each layer sums its resident + migrant components.
  tab$Biom[tab$is.Day] <- c(sum(grp[4:6]), grp[1], sum(grp[2:3]))
  # Nighttime: migrants have moved up one (or two) layers -- e.g. the Epi
  # layer then holds epi + mmeso + hmbathy.
  tab$Biom[!tab$is.Day] <- c(grp[4], sum(grp[c(1,3,6)]), sum(grp[c(2,5)]))
  # Optional restriction to specific layers and/or day-night periods.
  if (!is.null(layers)) tab <- tab[tab$Layer %in% layers, ]
  if (!is.null(is.day)) tab <- tab[tab$is.Day %in% is.day, ]
  # Return the full table or just the biomass column, as requested.
  if (all.col) return(tab)
  else return(tab$Biom)
}
|
f42fff3f2ba3823026bc6847e0b1f00cf52297b8 | 38d52a7e16b96555f277cb879a69d3f1ba086dad | /man/list_top_trx_holders.Rd | 18e59cfd8258988430c97db40930982b97ca0c0e | [
"MIT"
] | permissive | next-game-solutions/tronr | c7ec41a0785536670942c653f0f1500f09e7e692 | e7eb8b1d07e1c0415881ca3259358f707d78b181 | refs/heads/main | 2023-06-19T03:06:34.302241 | 2021-07-12T22:01:05 | 2021-07-12T22:01:05 | 305,829,963 | 7 | 0 | NOASSERTION | 2021-07-12T22:01:06 | 2020-10-20T20:48:08 | JavaScript | UTF-8 | R | false | true | 1,306 | rd | list_top_trx_holders.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_top_trx_holders.R
\name{list_top_trx_holders}
\alias{list_top_trx_holders}
\title{List top TRX holders}
\usage{
list_top_trx_holders(n = 20, max_attempts = 3L)
}
\arguments{
\item{n}{(double): number of top accounts to retrieve.}
\item{max_attempts}{(integer, positive): specifies the
maximum number of additional attempts to call a URL if the
first attempt fails (i.e. its call status is different from
\code{200}). Additional attempts are implemented with an exponential
backoff. Defaults to \code{3}.}
}
\value{
A tibble with \code{n} rows and the following columns:
\itemize{
\item \code{request_time} (POSIXct): date and time when the request was made;
\item \code{address} (character): account address (in \code{base58check} format);
\item \code{trx_balance} (double): TRX balance;
\item \code{total_tx} (integer): total number of transactions associated with
the respective \code{account};
\item \code{tron_power} (double): amount of TRX frozen
(see \href{https://tronprotocol.github.io/documentation-en/introduction/overview/#2-srs-and-committee}{official documentation}
for details).
}
}
\description{
Returns the accounts with the largest TRX balances.
}
\examples{
r <- list_top_trx_holders(10)
print(r)
}
|
11720e399e2301fe9498b719695c5aed42ae13d5 | 1ff2ef7dc528a0e30fd5a316bab8f22d58fa296e | /R/spanCV.R | 4e5186aa7b3614ef0dc268a41e8d690485ad6c92 | [] | no_license | feiyusun/statTarget | 53a803a45e0a62b602dede861a39ab964dbabbde | eddecf3e479e547f2375977e084fee5a3c14b865 | refs/heads/master | 2022-11-30T16:48:30.226065 | 2020-08-13T13:21:14 | 2020-08-13T13:21:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 409 | r | spanCV.R | ### Modified from code by ucfagls http://www.r-bloggers.com/whats-wrong-with-loess-for-palaeo-data/
# Generalized cross-validation (GCV) score of a fitted loess model.
#
# Args:
#   x: an object of class "loess" as returned by stats::loess().
# Returns:
#   A list with components 'span' (the smoothing parameter used for the fit)
#   and 'gcv' (the GCV score; smaller is better when tuning 'span').
loessGCV <- function(x) {
    # Accept only objects produced by stats::loess().
    # Fix: the message previously started with "Error: ", which stop()
    # already prepends, yielding "Error: Error: ...".
    if (!inherits(x, "loess"))
        stop("argument must be a loess object", call. = FALSE)
    span <- x$pars$span      # smoothing parameter of the fit
    n <- x$n                 # number of observations
    traceL <- x$trace.hat    # trace of the smoother ("hat") matrix
    # Residual variance estimate; the (n - 1) denominator follows the
    # reference implementation this function was adapted from.
    sigma2 <- sum(resid(x)^2)/(n - 1)
    # GCV = n * sigma^2 / (n - trace(L))^2 (Craven & Wahba form).
    gcv <- n * sigma2/(n - traceL)^2
    list(span = span, gcv = gcv)
}
|
cf83735daf7be1257c024f58c61a9cbbc50f478f | 6a611cb69253264aa4ad95c44d09480aa75f7c6a | /R/foreignassistanceplannedtable.R | 691fa803a5aedb3ecb1dacb066e5bb337e42ea8c | [
"MIT"
] | permissive | annarouw/kffplannedforeignassistance | c57ba92e57087c4c99134a89a2e5ae97f1cc97cf | a6c775aad231cdbbc5cdd20f0fbb3e45853bf365 | refs/heads/main | 2023-02-27T08:38:14.084310 | 2021-02-03T18:17:15 | 2021-02-03T18:17:15 | 367,223,103 | 0 | 0 | MIT | 2021-05-14T02:01:26 | 2021-05-14T02:01:25 | null | UTF-8 | R | false | false | 20,263 | r | foreignassistanceplannedtable.R | #' Foreign Assistance Planned Data Function
#'
#' This function allows users to query the data based on variables they are interested in to create a table using foreignassistance.gov's planned dataset.
#' @param years Fiscal years included. Defaults to 'all'
#' @param appropriation_type Request, appropriation, or actual appropriaion. Defaults to 'all'. To select one type, put your selection in quotation marks (eg "request"). To select multiple, use c() to create a list (eg c("request", "appropriation", "appropriated_actual"))
#' @param sectors_included Do you want to include the sectors variable? Defaults to FALSE. To see sectors variable, replace with TRUE.
#' @param sectors Which sectors do you want to select? Defaults to 'all'. Possible selections include Family Planning and Reproductive Health, Maternal and Child Health, Other Public Health Threats, Tuberculosis, Water Supply and Sanitation, HIV/AIDS, Malaria, Pandemic Influenza and Other Emerging Threats (PIOET), Nutrition, and Health - General. See appropriation_type for instructions on how to select multiple values.
#' @param agencies_included Do you want to include the agencies variable? Defaults to FALSE. To see agencies variable, replace with TRUE.
#' @param agencies Which agencies do you want to select? Defaults to 'all'. Possible selections include U.S. Department of State and U.S. Agency for International Development, U.S. Department of Health and Human Services, U.S. Department of Defense, and U.S. Department of Agriculture. See appropriation_type for instructions on how to select multiple values.
#' @param accounts_included Do you want to include the accounts variable? Defaults to FALSE. To see accounts variable, replace with TRUE.
#' @param accounts Which accounts do you want to select? Defaults to 'all'. See appropriation_type for instructions on how to select multiple values. Note: Not all observations include an account value.
#' @param locations_included Do you want to include recipient location variable? Defaults to FALSE. To see locations variable, replace with TRUE.
#' @param locations Which locations do you want to select? Defaults to 'all'. Country names are based off U.S. naming conventions (eg Burma, not Myanmar). Locations includes country names, regions, and worldwide for global funding.
#' @param location_types_included Do you want to include location type variable? Location type classifies locations as a country, region, or global program. Defaults to FALSE. To see location type variable, replace with TRUE.
#' @param location_types Which location types do you want to select? Defaults to 'all'. Possible selections include Country, Region, or Worldwide. See appropriation_type for instructions on how to select multiple values.
#' @param regions_included Do you want to include the regions variable? This variable classifies countries into either USAID or WHO regions. Defaults to FALSE. To see regions variable, replace with TRUE.
#' @param region_classifications Which regional classification system do you want to use? Possible selections included USAID or WHO. Does not allow for both classifications to be used simultaneously. Defaults to USAID.
#' @param regions Which regions do you want to select? Defaults to 'all'. Possible selections vary based on USAID or WHO selection. See appropriation_type for instructions on how to select multiple values. Note: Not all countries have a regional classification, based on WHO and USAID coding
#' @param incomes_included Do you want to include the incomes variable? This variable classifies countries' income level using World Bank data. Defaults to FALSE. To see incomes variable, replace with TRUE.
#' @param incomes Which income levels do you want to select? Defaults to 'all'. Possible values include Low-income, Lower-middle income, Upper-middle income, and High-income.
#' @param group_by How do you want to group the data? This parameter is very important to remember if you want to group the data by certain variables. Selecting the variable will only include it in the table view, but will not necessarily group by that variable. Table is automatically grouped by fiscal year and appropriation type.
#'
#'
#' @keywords foreignassistance
#'
#' @export
#'
#' @examples
#'
#' @import dplyr
#' @import readr
#' @import janitor
foreign_assistance_planned_table <- function(years = 'all',
                                             appropriation_type = 'all',
                                             sectors_included = FALSE,
                                             sectors = 'all',
                                             agencies_included = FALSE,
                                             agencies = 'all',
                                             accounts_included = FALSE,
                                             accounts = 'all',
                                             locations_included = FALSE,
                                             locations = 'all',
                                             location_types_included = FALSE,
                                             location_types = 'all',
                                             regions_included = FALSE,
                                             region_classifications = 'USAID',
                                             regions = 'all',
                                             incomes_included = FALSE,
                                             incomes = 'all',
                                             group_by = 'year') ##possible inputs: year, appropriation_type, sectors, agencies, accounts, locations, location_types, regions, incomes
{
  ##LOAD DATA ----------------------------------------------
  # Downloads the full foreignassistance.gov planning dataset on every call,
  # keeps Health rows only, and reshapes the three funding phases (request,
  # appropriation, appropriated_actual) into a long 'appropriation_phase'
  # column with NA amounts coerced to 0.
  budget_data <- read.csv("https://www.foreignassistance.gov/downloads/BudgetData/Budget Planning Data - All Reporting Agencies.csv") %>%
    clean_names() %>%
    filter(category == "Health") %>%
    pivot_longer(cols = c(request, appropriation, appropriated_actual), names_to = "appropriation_phase", values_to = "value") %>%
    mutate(value = ifelse(is.na(value), 0, value))
  ##ADDING MISSING VARIABLES --------------------------------------------
  ##Location type
  # A recipient is classified as a Region when its name contains a continent
  # keyword, as Worldwide for global funding, otherwise as a Country.
  budget_data_regions <- sort(unique(budget_data$location))[which(str_detect(sort(unique(budget_data$location)), "Asia|America|Europe|Oceania"))]
  budget_data <- budget_data %>%
    mutate(location_type = ifelse(location %in% budget_data_regions, "Region",
                                  ifelse(location == "Worldwide", "Worldwide",
                                         "Country")))
  rm(budget_data_regions)
  ##Region classification
  # Hard-coded country-to-region/income lookups follow; country names use
  # U.S. conventions (e.g. Burma), matching the dataset.
  ##WHO -- Data retrieved from: https://www.who.int/countries
  who_eastern_mediterranean_region <- c("Afghanistan", "Bahrain", "Djibouti", "Egypt", "Iran", "Iraq", "Jordan", "Kuwait", "Lebanon", "Libya", "Morocco", "Oman", "Pakistan", "Qatar", "Saudi Arabia", "Somalia", "Sudan", "Syria", "Tunisia", "United Arab Emirates", "Yemen")
  who_african_region <- c("Algeria", "Angola", "Benin", "Botswana", "Burkina Faso", "Burundi", "Cabo Verde", "Cameroon", "Central African Republic", "Chad", "Comoros", "Congo (Brazzaville)", "Cote d'Ivoire", "Congo (Kinshasa)", "Equatorial Guinea", "Eritrea", "Eswatini", "Ethiopia", "Gabon", "Gambia", "Ghana", "Guinea", "Guinea-Bissau", "Kenya", "Lesotho", "Liberia", "Madagascar", "Malawi", "Mali", "Mauritania", "Mauritius", "Mozambique", "Namibia", "Niger", "Nigeria", "Rwanda", "Sao Tome and Principe", "Senegal", "Seychelles", "Sierra Leone", "South Africa", "South Sudan", "Togo", "Uganda", "Tanzania", "Zambia", "Zimbabwe")
  who_americas_region <- c("Antigua and Barbuda", "Argentina", "Bahamas", "Barbados", "Belize", "Bolivia", "Brazil", "Canada", "Chile", "Colombia", "Costa Rica", "Cuba", "Dominica", "Dominican Republic", "Ecuador", "El Salvador", "Grenada", "Guatemala", "Guyana", "Haiti", "Honduras", "Jamaica", "Mexico", "Nicaragua", "Panama", "Paraguay", "Peru", "Saint Kitts and Nevis", "Saint Lucia", "Saint Vincent and the Grenadines", "Suriname", "Trinidad and Tobago", "Uruguay", "Venezuela")
  who_south_east_asia_region <- c("Bangladesh", "Bhutan", "North Korea", "India", "Indonesia", "Maldives", "Burma", "Nepal", "Sri Lanka", "Thailand", "Timor-Leste")
  who_western_pacific_region <- c("Australia", "Brunei Darussalam", "Cambodia", "China", "Cook Islands", "Fiji", "Japan", "Kiribati", "Laos", "Malaysia", "Marshall Islands", "Micronesia, Federated States of", "Mongolia", "Nauru", "New Zealand", "Niue", "Palau", "Papua New Guinea", "Philippines", "South Korea", "Samoa", "Singapore", "Solomon Islands", "Tonga", "Tuvalu", "Vanuatu", "Vietnam")
  who_european_region <- c("Albania", "Andorra", "Armenia", "Austria", "Azerbaijan", "Belarus", "Belgium", "Bosnia and Herzegovina", "Bulgaria", "Croatia", "Cyprus", "Czech Republic", "Denmark", 'Estonia', "Finland", "France", "Georgia", "Germany", "Greece", "Hungary", "Iceland", "Ireland", "Israel", "Italy", 'Kazakhstan', "Kyrgyzstan", "Latvia", "Lithuania", "Luxembourg", "Malta", "Monaco", "Montenegro", "Netherlands", "North Macedonia", "Norway", "Poland", "Portugal", "Moldova", 'Romania', "Russia", 'San Marino', "Serbia", "Slovakia", "Slovenia", "Spain", "Sweden", "Switzerland", "Tajikistan", "Turkey", "Turkmenistan", "Ukraine", "United Kingdom", "Uzbekistan")
  ##USAID -- Data retrieved from: https://www.usaid.gov/where-we-work
  usaid_africa_region <- c("Angola", "Benin", "Botswana", "Burkina Faso", "Burundi", "Cameroon", "Central African Republic", "Chad", "Cote d'Ivoire", "Congo (Kinshasa)", "Congo (Brazzaville)", "Djibouti", "Eswatini", "Ethiopia", 'Ghana', "Guinea", "Kenya", 'Lesotho', "Liberia", "Madagascar", "Malawi", "Mali", "Mauritania", "Mozambique", "Namibia", "Niger", "Nigeria", "Rwanda", "Senegal", "Sierra Leone", "Somalia", "South Africa", "South Sudan", "Sudan", "Tanzania", "Gambia", "Uganda", 'Zambia', "Zimbabwe")
  usaid_asia_region <- c("Afghanistan", "Bangladesh", "Burma", "Cambodia", "China", "India", "Indonesia", "Kazakhstan", "Kyrgyzstan", "Laos", "Maldives", "Mongolia", "Nepal", "Pacific Islands", "Pakistan", "Philippines", "Sri Lanka", "Tajikistan", "Thailand", "Timor-Leste", "Turkmenistan", "Uzbekistan", "Vietnam")
  usaid_europe_and_eurasia_region <- c("Albania", "Armenia", "Azerbaijan", "Belarus", "Bosnia and Herzegovina", "Cyprus", "Georgia", "Kosovo", "Moldova", "Montenegro", "North Macedonia", "Russia", "Serbia", "Ukraine")
  usaid_latin_america_and_the_caribbean_region <- c("Bolivia", "Brazil", "Colombia", "Cuba", "Dominican Republic", "Ecuador", "El Salvador", 'Guatemala', "Haiti", "Honduras", "Jamaica", "Mexico", "Nicaragua", "Panama", "Paraguay", "Peru", "Venezuela")
  usaid_middle_east_region <- c("Egypt", 'Iraq', "Jordan", "Lebanon", "Libya", "Morocco", "Syria", "Tunisia", "West Bank and Gaza", "Yemen")
  ##Income -- Data retrieved from: https://datahelpdesk.worldbank.org/knowledgebase/articles/906519
  wb_low_income <- c("Afghanistan", "Burkina Faso", "Burundi", "Central African Republic", "Chad", "Congo (Kinshasa)", "Eritrea", "Ethiopia", "Gambia", "Guinea", "Guinea-Bissau", "Haiti", "North Korea", "Liberia", "Madagascar", "Malawi", "Mali", 'Mozambique', "Niger", "Rwanda", "Sierra Leone", "Somalia", "South Sudan", "Sudan", "Syria", "Tajikistan", "Togo", "Uganda", "Yemen")
  wb_lower_middle_income <- c("Angola", "Algeria", "Bangladesh", 'Benin', 'Bhutan', "Bolivia", "Burma", "Cabo Verde", "Cambodia", "Cameroon", "Congo (Brazzaville)", "Comoros", "Cote d'Ivoire", 'Djibouti', 'Egypt', "El Salvador", "Eswatini", "Ghana", "Honduras", "India", "Kenya", "Kiribati", "Kyrgyzstan", "Laos", "Lesotho", "Mauritania", "Micronesia, Federated States of", "Moldova", "Mongolia", "Morocco", "Nepal", "Nicaragua", "Nigeria", "Pakistan", "Papua New Guinea", "Philippines", "Sao Tome and Principe", "Senegal", "Solomon Islands", "Sri Lanka", "Tanzania", "Timor-Leste", "Tunisia", "Ukraine", "Uzbekistan", "Vanuatu", "Vietnam", "West Bank and Gaza", "Zambia", "Zimbabwe")
  wb_upper_middle_income <- c("Albania", "Argentina", "Armenia", "Azerbaijan", "Belarus", "Belize", "Bosnia and Herzegovina", "Botswana", "Brazil", "Bulgaria", "China", "Colombia", "Costa Rica", "Cuba", "Dominica", "Dominican Republic", "Equatorial Guinea", "Ecuador", "Fiji", "Gabon", "Georgia", "Grenada", 'Guatemala', "Guyana", "Indonesia", "Iran", "Iraq", "Jamaica", "Jordan", "Kazakhstan", "Kosovo", "Lebanon", "Libya", "Malaysia", "Maldives", "Marshall Islands", "Mexico", "Montenegro", "Namibia", "North Macedonia", "Paraguay", "Peru", "Russia", "Samoa", "Serbia", "South Africa", "Saint Lucia", "Saint Vincent and the Grenadines", "Suriname", "Thailand", "Tonga", "Turkey", "Turkmenistan", "Tuvalu", "Venezuela")
  wb_high_income <- c("Andorra", "Antigua and Barbuda", "Aruba", "Australia", "Austria", "Bahamas", "Bahrain", "Barbados", "Belgium", "Bermuda", "British Virgin Islands", "Brunei Darussalam", "Canada", "Cayman Islands", "Channel Islands", "Chile", "Croatia", "Curaçao", "Cyprus", "Czech Republic", "Denmark", "Estonia", "Faroe Islands", "Finland", "France", "French Polynesia", "Germany", "Gibraltar", "Greece", "Greenland", "Guam", "Hong Kong SAR, China", "Hungary", "Iceland", "Ireland", "Isle of Man", "Israel", "Italy", "Japan", "South Korea", "Kuwait", "Latvia", "Liechtenstein", "Luxembourg", "Macao SAR, China", "Malta", "Mauritius", "Monaco", "Nauru", "Netherlands", "New Caledonia", "New Zealand", "Northern Mariana Islands", "Norway", "Oman", "Palau", "Panama", "Poland", "Portugal", "Romania", "Qatar", "San Marino", "Saudi Arabia", "Seychelles", "Singapore", "Slovakia", "Slovenia", "Spain", "Saint Kitts and Nevis", "Sweden", "Switzerland", "Taiwan, China", "Trinidad and Tobago", "Turks and Caicos Islands", "United Arab Emirates", "Uruguay")
  ##Adding to budget_data df
  # Normalise two dataset spellings first, then map each location to its WHO
  # region, USAID region and World Bank income level (NA when no match --
  # e.g. Kosovo and West Bank and Gaza have no WHO region).
  budget_data <- budget_data %>%
    mutate(location = ifelse(str_detect(location, "Ivoire"), "Cote d'Ivoire",
                             ifelse(str_detect(location, "Bahamas"), "Bahamas", location))) %>%
    mutate(who_region = ifelse(location %in% who_eastern_mediterranean_region, "WHO Eastern Mediterranean Region",
                               ifelse(location %in% who_african_region, "WHO African Region",
                                      ifelse(location %in% who_americas_region, "WHO Region of the Americas",
                                             ifelse(location %in% who_south_east_asia_region, "WHO South-East Asia Region",
                                                    ifelse(location %in% who_western_pacific_region, "WHO Western Pacific Region",
                                                           ifelse(location %in% who_european_region, "WHO European Region", NA))))))) %>% ##Only country without a WHO region: Kosovo and West Bank and Gaza
    mutate(usaid_region = ifelse(location %in% usaid_africa_region, "USAID Africa Region",
                                 ifelse(location %in% usaid_asia_region, "USAID Asia Region",
                                        ifelse(location %in% usaid_europe_and_eurasia_region, "USAID Europe and Eurasia Region",
                                               ifelse(location %in% usaid_latin_america_and_the_caribbean_region, "USAID Latin America and Caribbean Region",
                                                      ifelse(location %in% usaid_middle_east_region, "USAID Middle East Region", NA)))))) %>%
    mutate(income = ifelse(location %in% wb_low_income, "Low-income",
                           ifelse(location %in% wb_lower_middle_income, "Lower-middle income",
                                  ifelse(location %in% wb_upper_middle_income, "Upper-middle income",
                                         ifelse(location %in% wb_high_income, "High-income", NA)))))
  rm(who_african_region, who_americas_region, who_eastern_mediterranean_region, who_european_region, who_south_east_asia_region, who_western_pacific_region, usaid_africa_region, usaid_asia_region, usaid_europe_and_eurasia_region, usaid_latin_america_and_the_caribbean_region, usaid_middle_east_region, wb_high_income, wb_low_income, wb_lower_middle_income, wb_upper_middle_income)
  ##FILTERS -------------------------------------------------------------
  # Each filter is applied only when its argument differs from 'all'.
  # NOTE(review): 'years != "all"' is a vectorised comparison; when the user
  # supplies several values, if() sees a length > 1 condition, which
  # suppressWarnings() hides on older R but is an *error* on R >= 4.2.
  # Consider !identical(years, 'all') instead -- same note for every
  # suppressWarnings(if(...)) block below.
  ##year
  suppressWarnings(if(years != 'all') {
    budget_data <- budget_data %>%
      filter(i_fiscal_year %in% years)
  })
  ##appropriation_type
  suppressWarnings(if(appropriation_type != 'all') {
    budget_data <- budget_data %>%
      filter(appropriation_phase %in% appropriation_type)
  })
  ##sector
  suppressWarnings(if(sectors != 'all') {
    budget_data <- budget_data %>%
      filter(sector %in% sectors)
  })
  ##agency
  suppressWarnings(if(agencies != 'all') {
    budget_data <- budget_data %>%
      filter(agency %in% agencies)
  })
  ##account
  suppressWarnings(if(accounts != 'all') {
    budget_data <- budget_data %>%
      filter(account %in% accounts)
  })
  ##location
  suppressWarnings(if(locations != 'all') {
    budget_data <- budget_data %>%
      filter(location %in% locations)
  })
  ##location_types
  suppressWarnings(if(location_types != 'all') {
    budget_data <- budget_data %>%
      filter(location_type %in% location_types)
  })
  ##regions
  # The region filter goes through whichever classification system was chosen
  # ('WHO' or 'USAID'); the two cannot be combined.
  suppressWarnings(if(regions != 'all' & region_classifications == "WHO") {
    budget_data <- budget_data %>%
      filter(who_region %in% regions)
  } else {
    if(regions != 'all' & region_classifications == "USAID") {
      budget_data <- budget_data %>%
        filter(usaid_region %in% regions)
    }
  })
  ##incomes
  suppressWarnings(if(incomes != 'all') {
    budget_data <- budget_data %>%
      filter(income %in% incomes)
  })
  ##SELECTED COLUMNS ---------------------------------------------------
  # Fiscal year, phase and value are always kept; each *_included flag adds
  # its column to the output view.
  selected_columns <- c('i_fiscal_year', 'appropriation_phase', 'value')
  if(sectors_included == TRUE) {
    selected_columns <- c(selected_columns, 'sector')
  }
  if(agencies_included == TRUE) {
    selected_columns <- c(selected_columns, 'agency')
  }
  if(accounts_included == TRUE) {
    selected_columns <- c(selected_columns, 'account')
  }
  if(locations_included == TRUE) {
    selected_columns <- c(selected_columns, 'location')
  }
  if(location_types_included == TRUE) {
    selected_columns <- c(selected_columns, 'location_type')
  }
  if(regions_included == TRUE & region_classifications == "USAID") {
    selected_columns <- c(selected_columns, 'usaid_region')
  }
  if(regions_included == TRUE & region_classifications == "WHO") {
    selected_columns <- c(selected_columns, 'who_region')
  }
  if(incomes_included == TRUE) {
    selected_columns <- c(selected_columns, 'income')
  }
  budget_data <- budget_data %>%
    select(selected_columns)
  ##GROUPING VARIABLES ----------------------------------------------
  # Data are always grouped by fiscal year and phase; further grouping
  # variables are added only when named in 'group_by'.
  '%!in%' <- function(x,y)!('%in%'(x,y))
  budget_data <- budget_data %>%
    group_by(i_fiscal_year, appropriation_phase)
  # NOTE(review): in the blocks below, '{ stop }' evaluates the stop function
  # object without calling it, so the TRUE branch is a no-op; the net effect
  # is simply "add the grouping variable when it is listed in 'group_by'".
  # A plain if('sectors' %in% group_by) { ... } would be clearer.
  if('sectors' %!in% group_by) {
    stop } else {
      budget_data <- budget_data %>%
        group_by(sector, .add = TRUE)
    }
  if('agencies' %!in% group_by) {
    stop } else {
      budget_data <- budget_data %>%
        group_by(agency, .add = TRUE)
    }
  if('accounts' %!in% group_by) {
    stop } else {
      budget_data <- budget_data %>%
        group_by(account, .add = TRUE)
    }
  if('locations' %!in% group_by) {
    stop } else {
      budget_data <- budget_data %>%
        group_by(location, .add = TRUE)
    }
  if('location_types' %!in% group_by) {
    stop } else {
      budget_data <- budget_data %>%
        group_by(location_type, .add = TRUE)
    }
  if('regions' %in% group_by & region_classifications == 'USAID') {
    budget_data <- budget_data %>%
      group_by(usaid_region, .add = TRUE)
  }
  if('regions' %in% group_by & region_classifications == "WHO") {
    budget_data <- budget_data %>%
      group_by(who_region, .add = TRUE)
  }
  if('incomes' %!in% group_by) {
    stop } else {
      budget_data <- budget_data %>%
        group_by(income, .add = TRUE)
    }
  ##CREATE TABLE ---------------------------------------------
  # Sum the values within each group, then spread fiscal years into columns.
  # NOTE(review): the final statement is an assignment, so the table is
  # returned *invisibly*; a bare 'table' (or return(table)) on the last line
  # would make interactive calls print the result.
  table <- budget_data %>%
    mutate(value = sum(value, na.rm = T)) %>%
    unique() %>%
    pivot_wider(names_from = i_fiscal_year, values_from = value)
}
|
a38504675e25da5d6883ea2f882fd45a1a345b1c | 4f9bd7c555f8ada9a19b509b575e520b7ea985a7 | /mosaico.R | 922a333a908d3649fe9aabb3a8f626624b376cdf | [] | no_license | geofis/geomorpho90m-tools | cfb3591e7876c67888807bd10c39c9db8c2d7fb1 | 37eff7882a796f21185a7b1debac02414331c62a | refs/heads/master | 2020-08-28T17:09:32.928022 | 2020-05-24T00:35:27 | 2020-05-24T00:35:27 | 217,764,761 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 478 | r | mosaico.R | mosaico <- function(carpeta = '', nombre = ''){
#mosaico(carpeta = 'geom_90M_n00w090', nombre = 'geom')
#mosaico(carpeta = 'vrm_90M_n00w090', nombre = 'vrm')
require(gdalUtils)
archivos <- list.files(path = carpeta, pattern = '*.tif', full.names = T)
mosaiconombre <- paste0(carpeta, '/', nombre, '_mosaico.tif')
mosaico <- mosaic_rasters(
gdalfile = archivos,
dst_dataset = mosaiconombre,
co = 'COMPRESS=LZW',
output_Raster = T)
return(mosaico)
}
|
2106dfff04f01553777873e21cd330519d6fbc09 | 814ed8580545178088156f6521f07a3920d1dcd4 | /man/autoplot.linear_stack.Rd | 9b8030110dfba9dbe2c6c6eb78a14ee8355a7a4c | [
"MIT"
] | permissive | tidymodels/stacks | 3e81d4e5909d13b97005d0a85bb9c189d6cc6970 | 8fd4867be98db8f08dc9e5e52237476d9a470c62 | refs/heads/main | 2023-08-03T23:54:30.472356 | 2023-07-24T19:36:51 | 2023-07-24T19:36:51 | 271,888,081 | 286 | 22 | NOASSERTION | 2023-04-21T01:10:11 | 2020-06-12T20:51:21 | R | UTF-8 | R | false | true | 1,558 | rd | autoplot.linear_stack.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{autoplot.linear_stack}
\alias{autoplot.linear_stack}
\title{Plot results of a stacked ensemble model.}
\usage{
\method{autoplot}{linear_stack}(object, type = "performance", n = Inf, ...)
}
\arguments{
\item{object}{A \code{linear_stack} object outputted from \code{\link[=blend_predictions]{blend_predictions()}}
or \code{\link[=fit_members]{fit_members()}}.}
\item{type}{A single character string for plot type with values "performance",
"members", or "weights".}
\item{n}{An integer for how many members weights to plot when
\code{type = "weights"}. With multi-class data, this is the total number of weights
across classes; otherwise this is equal to the number of members.}
\item{...}{Not currently used.}
}
\value{
A \code{ggplot} object.
}
\description{
Plot results of a stacked ensemble model.
}
\details{
A "performance" plot shows the relationship between the lasso penalty and the
resampled performance metrics. The latter includes the average number of
ensemble members. This plot can be helpful for understanding what penalty
values are reasonable.
A "members" plot shows the relationship between the average number of
ensemble members and the performance metrics. Each point is for a different
penalty value.
Neither of the "performance" or "members" plots are helpful when a single
penalty is used.
A "weights" plot shows the blending weights for the top ensemble members. The
results are for the final penalty value used to fit the ensemble.
}
|
83f4fd24ee5b11c7206c018189770e721c29e0d6 | 7e1d6c1822045ee656a6a41c063631760466add3 | /R/setSliderColor.R | f49a0d8685f5908a8a7120d3d7e93741bd390136 | [
"MIT"
] | permissive | jcheng5/shinyWidgets | c784a3c9e4da76c0c7f23e8362fe647044beb6d2 | f17f91f6c2e38ee8d7c6be6484ccb474ebef6417 | refs/heads/master | 2020-04-29T18:20:46.195001 | 2019-03-18T16:10:00 | 2019-03-18T16:56:59 | 176,321,196 | 3 | 0 | NOASSERTION | 2019-03-18T15:59:35 | 2019-03-18T15:59:35 | null | UTF-8 | R | false | false | 2,539 | r | setSliderColor.R | #' @title Color editor for sliderInput
#'
#' @description Edit the color of the original shiny's sliderInputs
#'
#' @param color The \code{color} to apply. This can also be a vector of colors if you want to customize more than 1 slider. Either
#' pass the name of the color such as 'Chartreuse ' and 'Chocolate 'or the HEX notation such as \code{'#7FFF00'} and \code{'#D2691E'}.
#' @param sliderId The \code{id} of the customized slider(s). This can be a vector like \code{c(1, 2)}, if you want to modify the 2 first sliders.
#' However, if you only want to modify the second slider, just use the value 2.
#'
#' @note See also \url{https://www.w3schools.com/colors/colors_names.asp} to have an overview of all colors.
#'
#' @seealso See \code{\link{chooseSliderSkin}} to update the global skin of your sliders.
#'
#' @export
#'
#'
#' @examples
#' \dontrun{
#'
#' if (interactive()) {
#'
#' library(shiny)
#' library(shinyWidgets)
#'
#' ui <- fluidPage(
#'
#' # only customize the 2 first sliders and the last one
#' # the color of the third one is empty
#' setSliderColor(c("DeepPink ", "#FF4500", "", "Teal"), c(1, 2, 4)),
#' sliderInput("obs", "My pink slider:",
#' min = 0, max = 100, value = 50
#' ),
#' sliderInput("obs2", "My orange slider:",
#' min = 0, max = 100, value = 50
#' ),
#' sliderInput("obs3", "My basic slider:",
#' min = 0, max = 100, value = 50
#' ),
#' sliderInput("obs3", "My teal slider:",
#' min = 0, max = 100, value = 50
#' ),
#' plotOutput("distPlot")
#' )
#'
#' server <- function(input, output) {
#'
#' output$distPlot <- renderPlot({
#' hist(rnorm(input$obs))
#' })
#' }
#'
#' shinyApp(ui, server)
#'
#' }
#'
#'
#' }
setSliderColor <- function(color, sliderId) {
  # some tests to control inputs
  stopifnot(!is.null(color))
  stopifnot(is.character(color))
  stopifnot(is.numeric(sliderId))
  stopifnot(!is.null(sliderId))
  # the css class for ionrangeslider starts from 0
  # therefore need to remove 1 from sliderId
  sliderId <- sliderId - 1
  # create custom css background for each slider
  # selected by the user.
  # Note: after the decrement above, 'i' is the 0-based CSS index, so
  # 'color[i+1]' maps back to the 1-based position in 'color'; callers must
  # therefore supply placeholder entries ("") for sliders they skip, as
  # documented in the roxygen example above.
  sliderCol <- lapply(sliderId, FUN = function(i) {
    paste0(".js-irs-", i, " .irs-single,",
           " .js-irs-", i, " .irs-bar-edge,",
           " .js-irs-", i, " .irs-bar{
           background: ", color[i+1], ";
           }"
    )
  })
  # insert this custom css code in the head
  # of the shiny app (HTML() marks the rules as trusted, un-escaped markup)
  custom_head <- tags$head(tags$style(HTML(as.character(sliderCol))))
  return(custom_head)
}
|
5036438827f81261d0ae33c7534104473f468666 | e62157a3cc13b6a65e2fbfb4926d75a8522ac5e7 | /code/strucchange_1.1-1/strucchange/R-ex/breakpoints.R | 03ad6e8b00778bf43042991d761dd5b8761ef744 | [] | no_license | ethorondor/BSPT | a87622b257a14deea65885abfd0db9bfe4b23f5b | f1eef59335fcb6ae250241b247cfb641f26ced03 | refs/heads/master | 2023-03-15T00:27:51.024680 | 2023-02-14T01:30:11 | 2023-02-14T01:30:11 | 599,842,461 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,767 | r | breakpoints.R | ### Name: breakpoints
### Title: Dating Breaks
### Aliases: breakpoints breakpoints.formula breakpoints.breakpointsfull
### breakpoints.Fstats summary.breakpoints summary.breakpointsfull
### plot.breakpointsfull plot.summary.breakpointsfull print.breakpoints
### print.summary.breakpointsfull lines.breakpoints
### Keywords: regression
### ** Examples
require(ts)
## Nile data with one breakpoint: the annual flows drop in 1898
## because the first Ashwan dam was built
data(Nile)
plot(Nile)
## F statistics indicate one breakpoint
fs.nile <- Fstats(Nile ~ 1)
plot(fs.nile)
breakpoints(fs.nile)
lines(breakpoints(fs.nile))
## or
bp.nile <- breakpoints(Nile ~ 1)
summary(bp.nile)
## the BIC also chooses one breakpoint
plot(bp.nile)
breakpoints(bp.nile)
## fit null hypothesis model and model with 1 breakpoint
fm0 <- lm(Nile ~ 1)
fm1 <- lm(Nile ~ breakfactor(bp.nile, breaks = 1))
plot(Nile)
lines(fitted(fm0), col = 3)
lines(fitted(fm1), col = 4)
lines(bp.nile)
## confidence interval
ci.nile <- confint(bp.nile)
ci.nile
lines(ci.nile)
## UK Seatbelt data: a SARIMA(1,0,0)(1,0,0)_12 model
## (fitted by OLS) is used and reveals (at least) two
## breakpoints - one in 1973 associated with the oil crisis and
## one in 1983 due to the introduction of compulsory
## wearing of seatbelts in the UK.
data(UKDriverDeaths)
seatbelt <- log10(UKDriverDeaths)
seatbelt <- cbind(seatbelt, lag(seatbelt, k = -1), lag(seatbelt, k = -12))
colnames(seatbelt) <- c("y", "ylag1", "ylag12")
seatbelt <- window(seatbelt, start = c(1970, 1), end = c(1984,12))
plot(seatbelt[,"y"], ylab = expression(log[10](casualties)))
## testing
re.seat <- efp(y ~ ylag1 + ylag12, data = seatbelt, type = "RE")
plot(re.seat)
## dating
bp.seat <- breakpoints(y ~ ylag1 + ylag12, data = seatbelt, h = 0.1)
summary(bp.seat)
lines(bp.seat, breaks = 2)
## minimum BIC partition
plot(bp.seat)
breakpoints(bp.seat)
## the BIC would choose 0 breakpoints although the RE and supF test
## clearly reject the hypothesis of structural stability. Bai &
## Perron (2003) report that the BIC has problems in dynamic regressions.
## due to the shape of the RE process of the F statistics choose two
## breakpoints and fit corresponding models
bp.seat2 <- breakpoints(bp.seat, breaks = 2)
fm0 <- lm(y ~ ylag1 + ylag12, data = seatbelt)
fm1 <- lm(y ~ breakfactor(bp.seat2)/(ylag1 + ylag12) - 1, data = seatbelt)
## plot
plot(seatbelt[,"y"], ylab = expression(log[10](casualties)))
time.seat <- as.vector(time(seatbelt))
lines(time.seat, fitted(fm0), col = 3)
lines(time.seat, fitted(fm1), col = 4)
lines(bp.seat2)
## confidence intervals
ci.seat2 <- confint(bp.seat, breaks = 2)
ci.seat2
lines(ci.seat2)
|
8a470e6bdce263fc2e8da4b401e455eac5378d6e | 317bba03e6bb5d3e959890569a89c2fbb2b378c2 | /Transform/Recode/indicatorVariable.R | 7ee6a800e125d4303d617bfed97eb81f4d34af23 | [] | no_license | jimcrozier/rattle | 5d402963be454fb7d619490008dc773ed91453f7 | 14e873cdfe68a3549887e2300d7dee1a3f6c2197 | refs/heads/master | 2021-01-21T04:03:25.890240 | 2014-02-14T17:53:53 | 2014-02-14T17:53:53 | 16,208,223 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 745 | r | indicatorVariable.R | # Remap variables.
# Turn a factor into indicator variables.
crs$dataset[, make.names(paste("TIN_Species_", levels(crs$dataset[["Species"]]), sep=""))] <- diag(nlevels(crs$dataset[["Species"]]))[crs$dataset[["Species"]],]
# Note the user selections.
# The following variable selections have been noted.
crs$input <- c("Sepal.Width", "Petal.Length", "Petal.Width", "BE4_Sepal.Length",
"TIN_Species_versicolor", "TIN_Species_virginica")
crs$numeric <- c("Sepal.Width", "Petal.Length", "Petal.Width", "TIN_Species_versicolor",
"TIN_Species_virginica")
crs$categoric <- "BE4_Sepal.Length"
crs$target <- NULL
crs$risk <- NULL
crs$ident <- NULL
crs$ignore <- c("Sepal.Length", "Species", "TIN_Species_setosa")
crs$weights <- NULL
|
18ce4f29151daef79ad3bdaf7852a2f13b706f7d | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /carSurv/man/carSurv-package.Rd | 0c849d8c9524044b91babfbe9231a35d955996a0 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,827 | rd | carSurv-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/carSurv-package.r
\docType{package}
\name{carSurv-package}
\alias{carSurv-package}
\title{Correlation-Adjusted Regression Survival Scores}
\description{
Contains functions to estimate the Correlation-Adjusted Regression Survival (CARS) Scores.
The main function is \code{\link{carSurvScore}}, which estimates CARS scores of each variable.
The higher the absolute values of CARS scores, the higher the variable importance.
Additionally there is the function \code{\link{carVarSelect}} to select cut-off thresholds
to separate variables associated with survival from noise variables. There are two possible
cut-off threshold options: False non-discovery rate q-values and empirical quantiles of the
raw scores.
}
\details{
Package: carSurv \cr
\cr
Type: Package \cr
\cr
Version: 1.0.0 \cr
\cr
Date: 2018-02-24 \cr
\cr
License: GPL-3
}
\references{
Welchowski, T. and Zuber, V. and Schmid, M., (2018), Correlation-Adjusted Regression Survival Scores for High-Dimensional Variable Selection, <arXiv:1802.08178>
Zuber, V. and Strimmer, K., (2011), High-Dimensional Regression and Variable
Selection Using CAR Scores, Statistical Applications in Genetics and Molecular Biology
Schaefer, J. and Strimmer, K., (2005), A Shrinkage Approach to Large-Scale Covariance Matrix Estimation and Implications for Functional Genomics,
Statistical Applications in Genetics and Molecular Biology
Van der Laan, M. J. and Robins, J. M., (2003), Unified Methods for Censored Longitudinal Data and Causality, Springer Series in Statistics
Strimmer, K., (2008), A unified approach to false discovery rate estimation, BMC Bioinformatics
}
\author{
Thomas Welchowski (Maintainer) \email{welchow@imbie.meb.uni-bonn.de}
}
|
f922fbb14f1378c21471e2b366d65d930624f563 | 314d5d99f71e74319ae100c90a77ba580efd1226 | /man/FirebaseOauthProviders.Rd | 7ea981fda67a6d3e38998eec314bf43dd7b33187 | [] | no_license | cran/firebase | ca514d0bec39bf6050306e4df7ec3a3d6b415b0f | 5977f22ad406c001155d3253eb952ad6bee3f630 | refs/heads/master | 2023-08-10T17:14:19.580533 | 2023-07-07T11:20:02 | 2023-07-07T11:20:02 | 251,640,532 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 9,053 | rd | FirebaseOauthProviders.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-oauth.R
\name{FirebaseOauthProviders}
\alias{FirebaseOauthProviders}
\title{OAuth Providers}
\value{
An object of class \code{FirebaseOauthProviders}.
}
\description{
Use OAuth provides such as Github or Facebook to allow users to conveniently sign in.
}
\examples{
library(shiny)
library(firebase)
ui <- fluidPage(
useFirebase(),
actionButton("signin", "Sign in with Microsoft", icon = icon("microsoft")),
plotOutput("plot")
)
server <- function(input, output, session){
f <- FirebaseOauthProviders$
new()$
set_provider("microsoft.com")
observeEvent(input$signin, {
f$launch()
})
output$plot <- renderPlot({
f$req_sign_in()
plot(cars)
})
}
\dontrun{shinyApp(ui, server)}
}
\section{Super classes}{
\code{\link[firebase:Firebase]{firebase::Firebase}} -> \code{\link[firebase:FirebaseAuth]{firebase::FirebaseAuth}} -> \code{FirebaseOauthProviders}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-FirebaseOauthProviders-new}{\code{FirebaseOauthProviders$new()}}
\item \href{#method-FirebaseOauthProviders-set_provider}{\code{FirebaseOauthProviders$set_provider()}}
\item \href{#method-FirebaseOauthProviders-launch}{\code{FirebaseOauthProviders$launch()}}
\item \href{#method-FirebaseOauthProviders-clone}{\code{FirebaseOauthProviders$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="firebase" data-topic="Firebase" data-id="expose_app"><a href='../../firebase/html/Firebase.html#method-Firebase-expose_app'><code>firebase::Firebase$expose_app()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="clear"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-clear'><code>firebase::FirebaseAuth$clear()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="delete_user"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-delete_user'><code>firebase::FirebaseAuth$delete_user()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="expose_auth"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-expose_auth'><code>firebase::FirebaseAuth$expose_auth()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="get_access_token"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-get_access_token'><code>firebase::FirebaseAuth$get_access_token()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="get_delete_user"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-get_delete_user'><code>firebase::FirebaseAuth$get_delete_user()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="get_id_token"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-get_id_token'><code>firebase::FirebaseAuth$get_id_token()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="get_sign_out"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-get_sign_out'><code>firebase::FirebaseAuth$get_sign_out()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="get_signed_in"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-get_signed_in'><code>firebase::FirebaseAuth$get_signed_in()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="get_signed_up"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-get_signed_up'><code>firebase::FirebaseAuth$get_signed_up()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="is_signed_in"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-is_signed_in'><code>firebase::FirebaseAuth$is_signed_in()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="print"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-print'><code>firebase::FirebaseAuth$print()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="req_sign_in"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-req_sign_in'><code>firebase::FirebaseAuth$req_sign_in()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="req_sign_out"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-req_sign_out'><code>firebase::FirebaseAuth$req_sign_out()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="request_id_token"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-request_id_token'><code>firebase::FirebaseAuth$request_id_token()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="set_language_code"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-set_language_code'><code>firebase::FirebaseAuth$set_language_code()</code></a></span></li>
<li><span class="pkg-link" data-pkg="firebase" data-topic="FirebaseAuth" data-id="sign_out"><a href='../../firebase/html/FirebaseAuth.html#method-FirebaseAuth-sign_out'><code>firebase::FirebaseAuth$sign_out()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-FirebaseOauthProviders-new"></a>}}
\if{latex}{\out{\hypertarget{method-FirebaseOauthProviders-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FirebaseOauthProviders$new(
persistence = c("session", "local", "memory"),
config_path = "firebase.rds",
language_code = NULL,
session = shiny::getDefaultReactiveDomain()
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{persistence}}{How the auth should persit: \code{none}, the user has to sign in at every visit,
\code{session} will only persist in current tab, \code{local} persist even when window is closed.}
\item{\code{config_path}}{Path to the configuration file as created by \code{\link{firebase_config}}.}
\item{\code{language_code}}{Sets the language to use for the UI.
Supported languages are listed \href{https://github.com/firebase/firebaseui-web/blob/master/LANGUAGES.md}{here}.
Set to \code{browser} to use the default browser language of the user.}
\item{\code{session}}{A valid shiny session.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
Initialiases Firebase Email Link
Initialises the Firebase application client-side.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-FirebaseOauthProviders-set_provider"></a>}}
\if{latex}{\out{\hypertarget{method-FirebaseOauthProviders-set_provider}{}}}
\subsection{Method \code{set_provider()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FirebaseOauthProviders$set_provider(provider, ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{provider}}{The provider to user, e.g.: \code{microsoft.com}, \code{yahoo.com} or \code{google.com}.}
\item{\code{...}}{Additional options to pass to \href{https://github.com/firebase/snippets-web/blob/69c85abdc7cd6990618720cd33aa0d1ee357c652/snippets/auth-next/microsoft-oauth/auth_msft_provider_params.js#L8-L13}{setCustomParameters}.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
Define provider to use
}
\subsection{Returns}{
self
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-FirebaseOauthProviders-launch"></a>}}
\if{latex}{\out{\hypertarget{method-FirebaseOauthProviders-launch}{}}}
\subsection{Method \code{launch()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FirebaseOauthProviders$launch(
flow = c("popup", "redirect"),
get_credentials = FALSE
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{flow}}{Authentication flow, either popup or redirect.}
\item{\code{get_credentials}}{Whether to extract underlying oauth credentials.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
Launch sign in with Google.
}
\subsection{Returns}{
self
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-FirebaseOauthProviders-clone"></a>}}
\if{latex}{\out{\hypertarget{method-FirebaseOauthProviders-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FirebaseOauthProviders$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
89e48fcac82f83ef89adbc533486e00db865fcba | b5ed33ca7c89e1600ea0ecff83c21b599a596975 | /code/packages.R | c9b7502a5ed2fbf1b7b46b8e60358d94d1e62ea4 | [] | no_license | malfaro2/humedales | dad8746c155d9dcae38ed87b2c6b31cb5902bd83 | 460349dd0325a0849ca6af97672aec6f5800b543 | refs/heads/main | 2023-08-13T20:31:11.797476 | 2021-09-20T16:02:48 | 2021-09-20T16:02:48 | 364,150,898 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 86 | r | packages.R | library(readr)
library(tidyverse)
library(readxl)
library(knitr)
library(equatiomatic) |
bfbccb921d5d2794bfb1dd3936328f4a01efc831 | f0a0213ac538ad31a43edea0f242e040076d3be9 | /scripts/01_climatology.R | 1f55c7ede79282e861d75f7fdfa6081ff8e4b455 | [] | no_license | fernandoprudencio/CAworkshop | cf00e6e738014dc333c993695363e11ba1d8ccff | 1e4094d4fdb547bfec0147f51d03369e75145da7 | refs/heads/master | 2023-01-15T23:05:31.820070 | 2020-11-27T02:45:13 | 2020-11-27T02:45:13 | 309,123,953 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,163 | r | 01_climatology.R | #' workshop #2: calculate climatology
#' @author Fernando Prudencio
rm(list = ls())
####' Installing packages
pkg <- c("tidyverse", "raster", "ncdf4", "Hmisc")
sapply(
pkg,
function(x) {
is.there <- x %in% rownames(installed.packages())
if (is.there == FALSE) {
install.packages(x, dependencies = T)
}
}
)
####' Load packages
library(tidyverse)
library(raster)
library(ncdf4)
library(Hmisc)
####' Create time series within a dataframe
df <- tibble(
date = seq(as.Date("1981-01-01"), as.Date("2016-12-01"), by = "1 month")
) %>%
mutate(id = 1:n())
####' Build a function to calculate climatology
fun.clim <- function(month, years.omit, data) {
grd.mt <- df %>%
filter(
str_sub(date, 6, 7) == month &
str_sub(date, 1, 4) %nin% years.omit
)
data[[grd.mt$id]] %>%
"*"(1) %>%
mean(na.rm = T) %>%
return()
}
####' Apply fun.clim() function
grd.clim <- sapply(
sprintf("%02d", 1:12),
FUN = fun.clim,
years.omit = c(2005, 2010, 2016),
data = brick("data/raster/PISCOpm.nc")
) %>%
stack() %>%
"*"(1)
####' Write raster
writeRaster(grd.clim, "data/raster/PISCOpc.tif", overwrite = TRUE) |
54d80d86bfb9947138132560685f5161bb792191 | 49929f68b8ab13ca187a6b58a0dcbb5348fc5b1b | /server.R | 05aa6a866ce902f2c0ee9a53f9e5c37bb19d80a3 | [] | no_license | LOGp/Data-Products | fab2f55de50f00797aaa046782a537948accb0b2 | 10678b1b38b134e463a4db5dd5acc16a01777306 | refs/heads/master | 2020-05-30T23:51:52.699510 | 2014-08-24T21:43:00 | 2014-08-24T21:43:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 244 | r | server.R | library(shiny)
library(ggplot2)
# Load data
load(file="data.RData")
shinyServer(
function(input, output) {
output$application = renderPlot ({
app = input$dat
typ = input$typ
date = iput$date
qplot()
})
}
)
|
2178456b4c7aa2b7a3a4ea242755a683ddf42f34 | 20202d04488fbd8e35f5a4f45fdf2c21e4c45974 | /Stats for Data Science/MP4/Q2.R | 4c3f928c85561eabeeadcb89664401e51e1e39e2 | [] | no_license | dixitomkar1809/R_Mini_Projects | 5b493a81d03af10ad148ce889863166f37a691bf | 03c805166924dd91b6e663c7f924813c2642840c | refs/heads/master | 2020-03-28T04:14:04.608009 | 2019-01-23T02:29:01 | 2019-01-23T02:29:01 | 147,702,101 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 875 | r | Q2.R | setwd("MP4/")
#read the file for GPA and ACT scores
data=read.csv(file="VOLTAGE.csv", header = TRUE, sep=',')
#separate the data for remote and local locations
remote=subset(data, location=="0")
local=subset(data, location=="1")
set.seed(1234)
#Boxplots of Voltage readings at local and remote
par(mfrow = c(1, 1))
boxplot(remote$voltage,local$voltage,ylab="Voltage Readings",names=c("Remote","Local"), main="Boxplots of Voltage readings")
# Summary Statistics for Remote
summary(remote$voltage)
sd(remote$voltage)
IQR(remote$voltage)
#Summary Statistics for Local
summary(local$voltage)
sd(local$voltage)
IQR(local$voltage)
#t-test to find the 95% CI
t.test(remote$voltage, local$voltage, alternative = "two.sided", conf.level = 0.95, var.equal = FALSE)
#normal qqPlots
par(mfrow=c(1,2))
qqnorm(remote$voltage, main = "Remote")
qqnorm(local$voltage, main = "Local")
|
c258bf11991b69912ea9942e42c7205b408f2f3a | 12ea9c6128e10ffd7e92b1e557fab2904688e3c7 | /server.R | 6b19fea3aded169ac10a6fdb2b0d51914ae51607 | [] | no_license | jesse-jesse/Uncert.spatialmap | 367cae4950fe3c8f0d6249f73e76e47f66ad51e9 | 333bdaa094778f77506986abd9cbe904e8a9cfcc | refs/heads/master | 2021-01-01T03:46:35.060075 | 2020-01-02T11:53:35 | 2020-01-02T11:53:35 | 56,819,721 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,959 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# #install rgdl & Leaflet library for polygons
#install.packages("magrittr")
library(magrittr)
#install.packages("rgdal")
library(rgdal)
#install.packages("leaflet")
library(leaflet)
#install.packages("dplyr")
library(dplyr)
#install.packages("colorRamps")
library(colorRamps)
#install.packages("graphics")
library(graphics)
#install.packages("RColorBrewer")
library(RColorBrewer)
#install.packages("foreign")
library(foreign)
#install.packages("maptools")
library(maptools)
#install.packages("ggplot2")
library(ggplot2)
##load shape file
SLA <- readOGR(dsn= "/Users/robertj9/L.GitHub/L.uncert_spatial_map/Qld.shape_files",
layer = "SLA_QLD_06", verbose = FALSE)
#load file with estimates
data <- read.csv("/Users/robertj9/L.Github/L.uncert_spatial_map/est.datafile.10feb2016.csv")
#add SIR values to data file?
SLA$estimate <- data$est
## add risk to data file - this isn't necessary because when using the palette colorbin, x must be numeric and cannot be character. Althought I will leave it in in case I want to use a categorical palett in the future. A categorical palette may also be easier for defining the risk cut offs, rather than having to specify the bin cut offs.
data <- data %>%
mutate(Risk = ifelse(est < 0.769,
yes = "Very Low",
no = ifelse(est < 0.909,
yes = "Low",
no = ifelse(est < 1.1,
yes = "Average",
no = ifelse(est <1.3,
yes = "High",
no = ifelse(est >1.31,
yes = "Very High",
no = "Very High"))))))
##SLA$estimate <- data$est
SLA$ci.u <- data$ci.u
SLA$ci.l <- data$ci.l
SLA$ci.length <- data$ci.length
SLA$Risk <- data$Risk
#Legend labels
legend.lab <- c("Very High"," ", "High"," ", "Average", " ", "Low", " ", "Very Low")
#create a colour palette _______________________________________
pal1 <- colorBin( c("#CCCC00","#FFFFFF", "#993399"), SLA$estimate, bins = c( 0.0, 0.769, 0.839, 0.909, 1.1, 1.2, 1.3, 2.06), pretty = FALSE)
#pal2 <- colorQuantile("Blues", SLA$estimate, n=5)
#Draw Map
my.map <- leaflet(SLA) %>%
addPolygons(
stroke = FALSE, fillOpacity = 1, smoothFactor = 0.2,
color = ~pal1(SLA$estimate)
) %>%
addLegend("bottomleft", values = SLA$estimate, title = "Spatial Health Map", colors= c( "#993399", "#B970B6", "#D6A9D3", "#F2E2F0", "#FFFFFF","#FBF7E1", "#EFE8A4", "#E0DA66", "#CCCC00" ), labels = legend.lab, opacity = 1)
my.map
#----
shinyServer(
function(input, output) {
my.map = my.map
output$my.map <- renderLeaflet(my.map)
}
)
|
db30b8a0bf780e482054476417d280b5c3046ed1 | f8c629bfb3c94df91f791a1158d061df4fba3e3d | /man/SNSequate-package.Rd | a470678cd0c4238c4508811a2f64a50a00ba8529 | [] | no_license | jagonzalb/SNSequate | 2333484235d07b8863818641d1e268755247e5f6 | 0eaea7530c54c1e5c0571776551fd1e0ebcd923b | refs/heads/master | 2021-01-24T18:27:00.458163 | 2017-03-10T14:31:34 | 2017-03-10T14:31:34 | 84,445,082 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,951 | rd | SNSequate-package.Rd | \name{SNSequate-package}
\alias{SNSequate-package}
\alias{SNSequate}
\docType{package}
\title{Standard and Nonstandard Statistical Models and Methods for Test
Equating
}
\description{The package contains functions to perform various models and
methods for test equating. It currently implements the traditional
mean, linear and equipercentile equating methods, as well as the
mean-mean, mean-sigma, Haebara and Stocking-Lord IRT linking methods.
It also supports newest methods such that local equating, kernel
equating (using Gaussian, logistic and uniform kernels), and IRT
parameterlinking methods based on asymmetric item characteristic
functions. Functions to obtain both standard error of equating (SEE)
and standard error of equating difference between two equating
functions (SEED) are also implmented for the kernel method of
equating.
}
\details{
\tabular{ll}{
Package: \tab SNSequate\cr
Type: \tab Package\cr
Version: \tab 1.1-1\cr
Date: \tab 2014-08-08\cr
License: \tab GPL (>= 2)\cr
}
}
\author{Jorge Gonzalez Burgos
Maintainer: Jorge Gonzalez Burgos <jgonzale@mat.puc.cl>
}
\references{
Estay, G. (2012). \emph{Characteristic Curves Scale Transformation Methods Using
Asymmetric ICCs for IRT Equating}. Unpublished MSc. Thesis. Pontificia Universidad
Catolica de Chile.
Gonzalez, J. (2013). Statistical Models and Inference for the True Equating Transformation in the
Context of Local Equating. \emph{Journal of Educational Measurement, 50(3),} 315-320.
Gonzalez, J. (2014). SNSequate: Standard and Nonstandard Statistical Models and Methods for Test
Equating. \emph{Journal of Statistical Software, 59(7),} 1-30.
Holland, P. and Thayer, D. (1989). The kernel method of equating score distributions.
(Technical Report No 89-84). Princeton, NJ: Educational Testing Service.
Holland, P., King, B. and Thayer, D. (1989). The standard error of equating for the kernel method
of equating score distributions (Tech. Rep. No. 89-83). Princeton, NJ: Educational Testing Service.
Kolen, M., and Brennan, R. (2004). \emph{Test Equating, Scaling and Linking}.
New York, NY: Springer-Verlag.
Lord, F. (1980). \emph{Applications of Item Response Theory to Practical Testing Problems}.
Lawrence Erlbaum Associates, Hillsdale, NJ.
Lord, F. and Wingersky, M. (1984). Comparison of IRT True-Score and Equipercentile Observed-Score Equatings.
\emph{Applied Psychological Measurement,8(4),} 453--461.
van der Linden, W. (2011). Local Observed-Score Equating. In A. von Davier (Ed.)
\emph{Statistical Models for Test Equating, Scaling, and Linking}. New York, NY: Springer-Verlag.
van der Linden, W. (2013). Some Conceptual Issues in Observed-Score Equating.
\emph{Journal of Educational Measurement, 50(3),} 249-285.
Von Davier, A., Holland, P., and Thayer, D. (2004). \emph{The Kernel Method of Test Equating}.
New York, NY: Springer-Verlag.
}
\keyword{ package }
|
6c067da58de23fafcc2c391c954f07c600e49dba | 5fcc3f8421fa41dbb443204d206961ab18b1d45e | /man/setNodeColor.Rd | 7e448fb6586507fa9e741f72c9625067d37002bc | [
"MIT"
] | permissive | fengweijp/RCyjs | 192f369e1024661686bc10b19578587824660f1c | 0f22b40382b63f4882d7204b54b650bbfbb59333 | refs/heads/master | 2021-10-26T16:10:46.523267 | 2019-04-13T18:38:52 | 2019-04-13T18:38:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 718 | rd | setNodeColor.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCyjs-class.R
\docType{methods}
\name{setNodeColor,RCyjs-method}
\alias{setNodeColor,RCyjs-method}
\alias{setNodeColor}
\title{setNodeColor}
\usage{
\S4method{setNodeColor}{RCyjs}(obj, nodeIDs, newValues)
}
\arguments{
\item{obj}{an RCyjs instance}
\item{nodeIDs}{a character string (one or more)}
\item{newValues}{a character string, legal CSS color names (one or more)}
}
\value{
no value returned
}
\description{
\code{setNodeColor} set the specified nodes to the specifed color
}
\examples{
if(interactive()){
g <- simpleDemoGraph()
rcy <- RCyjs(title="setNodeColor", graph=g)
layout(rcy, "cose")
setNodeColor(rcy, 80)
}
}
|
17bf784e70b4a76953d585a68b5bdab321864498 | 5febc1e3f2dd766ff664f8e0ae79002072359bde | /man/scdb_ls_loaded.Rd | 0eab8d6948d8dfe2638afb38334feb36caea1e9b | [
"MIT"
] | permissive | tanaylab/metacell | 0eff965982c9dcf27d545b4097e413c8f3ae051c | ff482b0827cc48e5a7ddfb9c48d6c6417f438031 | refs/heads/master | 2023-08-04T05:16:09.473351 | 2023-07-25T13:37:46 | 2023-07-25T13:37:46 | 196,806,305 | 89 | 30 | NOASSERTION | 2023-07-25T13:38:07 | 2019-07-14T07:20:34 | R | UTF-8 | R | false | true | 334 | rd | scdb_ls_loaded.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scdb.r
\name{scdb_ls_loaded}
\alias{scdb_ls_loaded}
\title{scdb_ls_loaded - list loaded object of a certain type}
\usage{
scdb_ls_loaded(objt)
}
\arguments{
\item{objt}{- either mat,}
}
\description{
scdb_ls_loaded - list loaded object of a certain type
}
|
a38cc053008061f6f8d7857e2a0f6cb3cb73aefa | 5e9c7e704d9777390134c0a754afee36ac4f7789 | /GSE16532.r | 0ec12e8ccd625cdc205fd80f78ae6f802be7bb56 | [] | no_license | nevinwu/disease-network | 696ef882adde9f0a33a0def3f0183723b99241d4 | 9ad3a9b13d28e4ebb42b8e6ad884cff222fc071f | refs/heads/main | 2023-08-04T05:19:07.172716 | 2021-09-17T10:07:01 | 2021-09-17T10:07:01 | 401,303,275 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,980 | r | GSE16532.r | ################################################################################
## altmae_GSE16532.r
## Francisco Martínez Picó - francisco9896@gmail.com
################################################################################
# Dataset info avaliable in:
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE16532
Sys.info()[c('nodename', 'user')]
rm(list = ls())
R.version.string # 'R version 4.0.3 (2020-10-10)'
# LOAD PACKAGES -----------------------------------------------------------
library(GEOquery)
library(limma)
library(dplyr)
library(ggplot2)
library(hgug4112a.db)
# Project locations (absolute paths; adjust when relocating the project).
functions_path <- '/Users/francisco/Desktop/TFM/functions'
dataset_path <- '/Users/francisco/Desktop/TFM/datasets/GSE16532_altmae'
results_path <- '/Users/francisco/Desktop/TFM/datasets/results/results_sva_2'
# Raw/intermediate data live in a 'data' subfolder of the dataset folder.
data_path <- paste0(dataset_path, '/data')

# Load the shared helper function(s) from the functions folder.
source(paste0(functions_path, '/function_plot_PCAscores.r'))

# Show where we are, then move into the dataset folder (relative paths below
# depend on this working directory).
getwd()
setwd(dataset_path)
# READ PLATFORM FILE ------------------------------------------------------
# Platform = Agilent-014850 (GEO platform GPL4133): one annotation row per
# probe. skip = 1 drops the preamble line, comment.char = '#' skips embedded
# comment lines and quote = '' disables quote interpretation in the fields.
# (T/F replaced by TRUE/FALSE: T and F are ordinary, reassignable variables.)
gpl_name <- '/GPL4133.txt'

gpl <- read.delim(file = paste0(data_path, gpl_name),
                  header = TRUE,
                  sep = '\t',
                  comment.char = '#',
                  skip = 1,
                  quote = '',
                  stringsAsFactors = FALSE)

dim(gpl) # [1] 45220 22
# READ EXPERIMENTAL DESIGN ------------------------------------------------
# Parse the sample metadata out of the '!Sample_*' header lines of the GEO
# series matrix file.
series_name <- '/GSE16532_series_matrix.txt'
series_file <- paste0(data_path, series_name)

# Read the whole header at once instead of looping line-by-line over an open
# connection: no vector growing inside a loop and no connection to leak.
series_lines <- readLines(series_file)

# Helper: split a '!Sample_*' header line on tabs, drop the leading row label
# and strip the quoting around each field (same gsub pattern as before).
parse_header_row <- function(header_line) {
  values <- unlist(strsplit(header_line, '\t'))[-1]
  gsub('\\\'', '', values)
}

# The original while-loop kept only the LAST matching line for these two
# fields; tail(..., 1) mirrors that behavior exactly.
titles <- parse_header_row(
  tail(series_lines[startsWith(series_lines, '!Sample_title')], 1))
accession <- parse_header_row(
  tail(series_lines[startsWith(series_lines, '!Sample_geo_accession')], 1))
# All characteristics lines are kept (one per metadata field).
characteristics <- series_lines[startsWith(series_lines, '!Sample_characteristics')]

# Now we parse the info: each characteristics line becomes one column whose
# name is taken from the 'key: value' prefix shared by its fields.
ed <- data.frame(lapply(characteristics, function(x) {
  values <- parse_header_row(x)
  parts <- strsplit(values, ': ')
  name <- parts[[1]][[1]]
  # vapply instead of sapply: the result type is pinned to character(1).
  values <- vapply(parts, function(p) p[2], character(1))
  out <- list()
  out[[name]] <- values
  out
}))

# BUG FIX: the original called data.table() here, but data.table is never
# attached (only GEOquery/limma/dplyr/ggplot2/hgug4112a.db are loaded), so
# that call errors at runtime. data.frame() is equivalent in this context:
# the column names were already sanitized (e.g. 'sample.type') by the
# data.frame() call above, and ed is rebuilt column-by-column right below.
ed <- data.frame(sample = accession, title = titles, ed,
                 stringsAsFactors = FALSE)

# To homogenize between dataframes:
ed <- data.frame(sample = ed$sample, condition = ed$sample.type, title = ed$title,
                 tissue = ed$tissue, phase = ed$phase, group = ed$group)
# Recode the experimental arm to the label used throughout the analysis.
ed$condition[ed$condition == 'experimental'] <- 'RIF'
rownames(ed) <- ed$sample
# DOWNLOAD RAW DATA FOR ALTMAE (GSE16532) ----------------------------------
# Fetch the supplementary raw files (the per-array .gpr files) from GEO into
# ./data/; getGEOSuppFiles creates a subfolder named after the accession.
getGEOSuppFiles('GSE16532', baseDir = './data/')
# if 'error: Timeout of 60 seconds was reached' then 'options(timeout = 300)'.
# NOTE(review): the path below mentions GSE71331 — it looks copied from a
# sibling script; for this script the files should land under ./data/GSE16532.
# Downloaded in '/raw_datasets/altmae_GSE71331/data/GSE71331'
# (Remember to unzip it and prepare paths)
# READ EXPRESSION DATA ----------------------------------------------------
# One GenePix .gpr file per array. list.files() returns them sorted by name;
# this is assumed to match the row order of ed — TODO(review) confirm.
my_files <- list.files(path = 'data/GSE16532_RAW', full.names = TRUE)

# Targets frame for limma: file name plus condition label per array.
# ed$condition replaces the positional ed[, 2] (same column — see the
# data.frame(sample, condition, ...) construction above) so this keeps
# working even if ed's column order changes.
target_info <- data.frame(FileName = my_files,
                          RIF_CONTROL = ed$condition,
                          stringsAsFactors = FALSE)
# View(target_info)

# Check colnames in file:
scan('data/GSE16532_RAW/GSM414976.gpr', nlines = 1, what = 'c', sep = '\t')

# Map the GenePix columns onto limma's E (foreground) / Eb (background)
# slots, and carry the probe annotation columns along.
columns <- list(E = 'F532 Median', Eb = 'B532 Median')
gse16532raw <- read.maimages(files = target_info, columns = columns,
                             source = 'agilent',
                             annotation = c('Ref', 'GeneName', 'ControlType'))
# Use View(gse16532raw$E) and check with files that parsing is correct.
# SAVE RAW DATA, ED AND GPL ------------------------------------------------
# Checkpoint the parsed design (ed), the platform annotation (gpl) and the
# raw limma object so later sections can be re-run without re-downloading or
# re-parsing. version = 2 keeps the .rda files readable by R < 3.5.
# NOTE(review): these file names must match the load() calls below EXACTLY,
# including upper/lower case, or the reload fails on case-sensitive
# file systems (it only works on macOS by accident).
save(ed, file = paste0(data_path, '/ed_altmae.rda'), version = 2)
save(gpl, file = paste0(data_path, '/GPL4133_altmae.rda'), version = 2)
save(gse16532raw, file = paste0(data_path, '/gse16532raw.rda'), version = 2)
# PRE-NORMALIZATION ANALYSIS ----------------------------------------------
# Reload the checkpointed objects so this section can run standalone.
load(paste0(data_path, '/ed_altmae.rda'), verbose = TRUE)
# BUG FIX: the save() above writes 'GPL4133_altmae.rda' (upper case) but the
# original load used 'gpl4133_altmae.rda', which only works on
# case-insensitive file systems (macOS) — match the saved name exactly.
load(paste0(data_path, '/GPL4133_altmae.rda'), verbose = TRUE)
load(paste0(data_path, '/gse16532raw.rda'), verbose = TRUE)
# RAW PLOTS ---------------------------------------------------------------
#### MDplot ####
# Mean-difference plots of the raw intensities for two example arrays, one
# per group (column indices follow the file order read above).
plotMD(gse16532raw, column = 1,
       main = 'MD plot Altmae (GSE16532): raw control-1')
plotMD(gse16532raw, column = 7,
       main = 'MD plot Altmae (GSE16532): raw rif-2')
# Console output observed when running the plots (pasted verbatim; the NaNs
# presumably come from log-transforming non-positive raw values):
# Warning message:
# In plotMD.EListRaw(gse16532raw,
# column = 1, main = 'MD plot Altmae (GSE414976):
# raw control-1') : NaNs produced
#### Boxplot ####
# Per-array distribution of log2 foreground intensities; the background (Eb)
# variant is kept below, commented out.
# boxplot(data.frame(log2(gse16532raw$Eb)), main = 'Green background')
boxplot(data.frame(log2(gse16532raw$E)), main = 'Raw data')
# RAW PCA -----------------------------------------------------------------
# PCA on log-transformed raw intensities, samples as observations.
# BUG FIX: the original used log2(E) + 1. Adding a constant AFTER the log is
# a no-op for PCA (prcomp centers each column), while leaving log2() exposed
# to zero intensities (log2(0) = -Inf breaks prcomp). log2(E + 1) — the
# standard pseudocount — was almost certainly the intent.
pca_raw <- prcomp(t(log2(gse16532raw$E + 1)))

# Percentage of variance explained by the first two components.
var_raw <- round(summary(pca_raw)$importance[2, c(1, 2)] * 100, 1)

toplot_raw <- data.frame(pca_raw$x[, c(1, 2)], stringsAsFactors = FALSE)

# Symmetric axis limits so PC1 and PC2 share the same scale.
lim_raw <- max(abs(c(min(toplot_raw), max(toplot_raw))))
axis_limits_raw <- c(-lim_raw, lim_raw)

# Hard-coded group labels: 5 control arrays followed by 4 RIF arrays.
# TODO(review): derive this from ed$condition instead, once the control
# samples' label in ed$condition is confirmed.
toplot_raw$color <- c(rep('control', 5), rep('RIF', 4))

# aes_string() is deprecated in ggplot2; prcomp names its first two score
# columns PC1/PC2, so a plain aes() mapping is equivalent.
ggplot(data = toplot_raw, aes(x = PC1, y = PC2)) +
  geom_point(aes(color = color), size = 3) +
  scale_color_manual(name = 'RIF', values = c('#D95F02', '#1B9E77')) +
  xlab(paste0('PC1', ': ', var_raw[1], '%')) +
  ylab(paste0('PC2', ': ', var_raw[2], '%')) +
  ggtitle('PCA: Altmae (GSE16532)') +
  xlim(axis_limits_raw) + ylim(axis_limits_raw) +
  theme_light() +
  theme(legend.position = 'bottom',
        axis.title = element_text(size = 18),
        axis.text = element_text(size = 15),
        plot.title = element_text(size = 22, hjust = 0.5),
        legend.title = element_blank(),
        legend.text = element_text(size = 13))
# OUTLIER DETECTION (ARRAY QUALITY METRICS) -------------------------------
# NOTE(review): uses `dataset_path` (presumably defined earlier in the
# script), not the `data_path` used by the save/load calls above -- verify.
outdir = paste0(dataset_path, '/arrayQuality_report')
# We need to create an altmae_eset object first:
altmae_eset = ExpressionSet(assayData = assayDataNew(exprs = gse16532raw$E))
# Now we can run arrayQualityMetrics (aqm) on it:
arrayQualityMetrics(expressionset = altmae_eset, 
                    outdir = outdir, 
                    force = TRUE, 
                    do.logtransform = TRUE) # Since data is not processed yet.
# Check index.html file in outdir for results.
# CORRECTING BACKGROUND ---------------------------------------------------
# Background-correct the raw intensities (normexp with offset = 50).
gse16532 = backgroundCorrect(gse16532raw, method = 'normexp', offset = 50)
# check if offset needed, we use 50 as default
# ANNOTATION --------------------------------------------------------------
eset = as.matrix(gse16532$E) # expression info
dim(eset) # [1] 45.220 9
#### Filtering probes: controls and NAs ####
# Probe-level metadata, row-aligned with `eset`.
probesInfo = data.frame('ProbeName' = gse16532$genes$Ref, # Probes
                        'GeneSymbol' = gse16532$genes$GeneName, 
                        'Control' = gse16532$genes$ControlType, # Control?
                        stringsAsFactors = F)
# Remove possible NAs
probesInfo_noNA = probesInfo[!is.na(probesInfo$ProbeName), ]
gpl_noNA = gpl[!is.na(gpl$SPOT_ID), ]
# Sanity check: platform annotation and probe info must stay row-aligned.
all(gpl_noNA$SPOT_ID == probesInfo_noNA$ProbeName) # TRUE
eset_noNA = eset[!is.na(probesInfo$ProbeName),]
dim(eset_noNA) # 45.015 rows
dim(probesInfo_noNA) # 45.015 rows
# Remove controls (ControlType == 'false' means a regular probe).
probesInfo_noctrl = probesInfo_noNA[probesInfo_noNA$Control == 'false',]
gpl_noctrl = gpl_noNA[probesInfo_noNA$Control == 'false',]
eset_noctrl = eset_noNA[probesInfo_noNA$Control == 'false',]
#### Check if they have the same number of rows ####
dim(probesInfo_noctrl) # Probe info
dim(gpl_noctrl) # Annotation info
dim(eset_noctrl) # Expression info
all(gpl_noctrl$SPOT_ID == probesInfo_noctrl$ProbeName) # TRUE
# GROUP PROBESETS INFORMATION ---------------------------------------------
# Condense replicate probes by their average
exprbyprobe = avereps(eset_noctrl, ID = probesInfo_noctrl$ProbeName)
which(rownames(exprbyprobe) == '') # 0
which(is.na(rownames(exprbyprobe))) # 0
# NOTE(review): exprbyprobe is not used below -- the gene-level averaging
# starts again from eset_noctrl; confirm that is intended.
# GROUP GENES BY PROBESET ID ----------------------------------------------
# gpl_noctrl does not have NAs but '' symbols. We need to remove those empty
# values.
# NOTE(review): x[-which(...)] drops ALL rows when which() returns nothing;
# fine as long as some '' symbols exist, but fragile.
indexNA_symbol = which(gpl_noctrl$GENE_SYMBOL == '')
gpl_noctrl_notNA = gpl_noctrl[-indexNA_symbol,]
eset_noctrl_notNA = eset_noctrl[-indexNA_symbol,]
dim(eset_noctrl_notNA)
dim(gpl_noctrl_notNA)
# Still we have 32.696 genes
# Average all probes mapping to the same gene symbol.
exprbygene = avereps(eset_noctrl_notNA, ID = gpl_noctrl_notNA$GENE_SYMBOL)
dim(exprbygene) # 19.749 genes
colnames(exprbygene) = ed$sample
# PLOT EXPRESSION BY GENE -------------------------------------------------
#### Raw expression ####
# Per-sample boxplots of the gene-level intensities (before log2).
toplot = melt(exprbygene)
p1 = ggplot(toplot, aes(x = Var2, y = value)) + 
  geom_boxplot() + ggtitle('Gene raw expression data') + xlab('') + 
  ylab('') + theme_bw() + 
  theme(plot.title = element_text(size = 35), 
        legend.position = 'none', 
        axis.text.y = element_text(size = 25, color = 'darkgrey'), 
        axis.text.x = element_blank(), axis.ticks.x = element_blank(), 
        legend.text = element_text(size = 25), 
        legend.title = element_blank())
# APPLY LOG2 TRANSFORMATION -----------------------------------------------
expr_log2 = log2(exprbygene)
colnames(expr_log2) = ed$sample
#### Plot log2 data ####
toplot = melt(expr_log2)
p2 = ggplot(toplot, aes(x = Var2, y = value)) + geom_boxplot() + 
  ggtitle('Log2 expression data') + xlab('') + ylab('') + theme_bw() + 
  theme(plot.title = element_text(size = 35), legend.position = 'none', 
        axis.text.y = element_text(size = 25, color = 'darkgrey'), 
        axis.text.x = element_blank(), axis.ticks.x = element_blank(), 
        legend.text = element_text(size = 25), 
        legend.title = element_blank())
# QUANTILE NORMALIZATION --------------------------------------------------
# Final step of normalization; `dat` is the expression matrix used downstream.
dat = normalizeBetweenArrays(expr_log2, method = 'quantile')
sum(is.na(rownames(dat))) # 0
sum(rownames(dat) == '') # 0
dim(dat) # 19.749
# NORMALIZED PLOTS --------------------------------------------------------
toplot = melt(dat)
p3 = ggplot(toplot, aes(x = Var2, y = value)) + geom_boxplot() + 
  ggtitle('Quantile normalized expression data') + xlab('') + ylab('') + 
  theme_bw() + 
  theme(plot.title = element_text(size = 35), 
        legend.position = 'none', 
        axis.text.y = element_text(size = 25, color = 'darkgrey'), 
        axis.text.x = element_blank(), 
        axis.ticks.x = element_blank(), 
        legend.text = element_text(size = 25), 
        legend.title = element_blank())
#### Normalized MD-plots ####
plotMD(dat, column = 1, main = 'MD plot Altmae (GSE16532): RIF-1')
#### Boxplot ####
boxplot(dat, main = 'Normalized data')
# NORMALIZED PCA ----------------------------------------------------------
pca_norm = prcomp(t(dat))
#### Components 1 & 2 ####
var_norm = round(summary(pca_norm)$importance[2, c(1,2)] * 100, 1)
toplot_norm = data.frame(pca_norm$x[, c(1,2)], stringsAsFactors = F)
lim_norm = max(abs(c(min(toplot_norm), max(toplot_norm))))
axis_limits_norm = c(-lim_norm, lim_norm)
# toplot$color = c(paste0(rep('Control-'), 1:2), paste0(rep('RIF-'), 1:3))
# Group labels in array order: 4 RIF then 5 controls.
# NOTE(review): the raw PCA above labels the same arrays as 5 controls then
# 4 RIF -- one of the two orderings must be wrong; check against ed.
toplot_norm$color = c(rep('RIF', 4), rep('control', 5))
ggplot(data = toplot_norm, 
  aes_string(x = colnames(toplot_norm)[1], y = colnames(toplot_norm)[2])) + 
  geom_point(aes(color = color), size = 3) + 
  scale_color_manual(name = 'RIF', values = c('#D95F02', '#1B9E77')) + 
  # geom_text_repel(label = rownames(pData(gse26787raw)), size = 3) + 
  xlab(paste0('PC1', ': ', var_norm[1], '%')) + 
  ylab(paste0('PC2', ': ', var_norm[2], '%')) + 
  ggtitle('PCA: Altmae (GSE16532)') + 
  xlim(axis_limits_norm) + ylim(axis_limits_norm) + 
  theme_light() + 
  theme(legend.position = 'bottom', 
        axis.title = element_text(size = 18), 
        axis.text = element_text(size = 15), 
        plot.title = element_text(size = 22, hjust = 0.5), 
        legend.title = element_blank(), 
        legend.text = element_text(size = 13))
#### Components 3 & 4 ####
# FIX(review): this section previously referenced an object `pca` that is
# not defined in this script (the normalized PCA above is `pca_norm`) and
# used a 12-element colour vector although this data set has 9 arrays
# (4 RIF + 5 control, see the PC1/PC2 block above). Both look like
# copy-paste leftovers from another data set and are corrected here.
var = round(summary(pca_norm)$importance[2, c(3 ,4)] * 100, 1) # Proportion of variance
toplot = data.frame(pca_norm$x[, c(3, 4)], stringsAsFactors = F)
lim = max(abs(c(min(toplot), max(toplot))))
axis_limits = c(-lim, lim)
# toplot$color = c(paste0(rep('Control-'), 1:2), paste0(rep('RIF-'), 1:3))
# Label order follows the PC1/PC2 block above -- verify against ed$RIF_CONTROL.
toplot$color = c(rep('RIF', 4), rep('control', 5))
ggplot(data = toplot, 
       aes_string(x = colnames(toplot)[1], y = colnames(toplot)[2])) +
  geom_point(aes(color = color), size = 3) +
  scale_color_manual(name = 'RIF', values = brewer.pal(n = 5, 'Dark2')) +
  xlab(paste0('PC3', ': ', var[1], '%')) +
  ylab(paste0('PC4', ': ', var[2], '%')) +
  ggtitle('PCA: Altmae (GSE16532)') +
  xlim(axis_limits) + ylim(axis_limits) +
  theme_light() +
  theme(legend.position = 'bottom', 
        axis.title = element_text(size = 18), 
        axis.text = element_text(size = 15), 
        plot.title = element_text(size = 22, hjust = 0.5), 
        legend.title = element_blank(), 
        legend.text = element_text(size = 13))
# boxplot(data.frame(dat), main = 'Normalized data')
# SAVE .RDA ---------------------------------------------------------------
# Final deliverable: normalized expression matrix `dat` + design table `ed`.
save(dat, ed, file = paste0(results_path, '/altmae.rda'), version = 2)
# Round-trip check that the .rda file re-loads correctly.
load(paste0(results_path, '/altmae.rda'), verbose = T)
head(dat)
head(ed)
# plot_PCAscores() is a project helper defined elsewhere in the repository.
plot_PCAscores(dat = dat, ed = ed, 
               condition1 = 'condition', 
               components = c(1,2), 
               colors = c('#D95F02', '#1B9E77'), 
               title = 'PCA: Altmae(GSE16532)')
|
a736a5b8a7e661701e59038ab77a0568eb956354 | e5b4b4ef5fb8c7597ea3a168ccba13cabba98236 | /man/sirExpDeter.Rd | d8e2438fe62abfe6871d003f574a8eb7635af9a4 | [
"MIT"
] | permissive | sbfnk/fitR | 7508517407485069c05820a6567b12b00006008b | 96b9d505fc2ef3dd5cdb6b8055eedefdf793c694 | refs/heads/main | 2023-07-24T00:21:46.294065 | 2023-07-05T12:19:55 | 2023-07-05T12:19:55 | 20,992,998 | 7 | 11 | MIT | 2023-07-05T11:10:04 | 2014-06-19T08:17:26 | R | UTF-8 | R | false | true | 1,061 | rd | sirExpDeter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitR-package.r
\name{sirExpDeter}
\alias{sirExpDeter}
\title{A simple deterministic SIR model with constant population size and parameters
on the exponential scale}
\format{
A \code{\link{fitmodel}} object, that is a list with the following
elements:
}
\description{
A simple deterministic SIR model with constant population size, uniform prior
and Poisson observation. The parameters are transformed using an exponential
transformation.
}
\details{
\itemize{
\item \code{name} character.
\item \code{stateNames} character vector.
\item \code{thetaNames} character vector.
\item \code{simulate} \R-function.
\item \code{rPointObs} \R-function.
\item \code{dprior} \R-function.
\item \code{dPointObs} \R-function.
}
Look at the documentation of \code{\link{fitmodel}} for more details about
each of these elements.
You can look at the code of the \R-functions by typing
\code{sirExpDeter$simulate} for instance. There are some comments included.
}
\keyword{internal}
|
16c92941f32e1a0280ff5e4ad7d1f858aa29a7a9 | a82cf7f860fce4cb9db1325c66b3fa8946454521 | /RCODE/Hypervolumes/bryo_hvol_overlap_species-v1.R | af5d7b556f98387da1dc748c632bc4c691f853f9 | [] | no_license | joaofgoncalves/MountainBryophytesSDM | 8f1951b91ba157d514c6cb6b7dcb941c56c59439 | 1e24aaf0dc03cd3401cb7a255390aa436925765a | refs/heads/master | 2021-12-23T06:48:54.785690 | 2021-12-07T01:08:18 | 2021-12-07T01:08:18 | 123,359,210 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,027 | r | bryo_hvol_overlap_species-v1.R |
## --------------------------------------------------------- ##
##
## Performs an overlap analysis between species hypervolumes
##
## João Gonçalves & Helena Hespanhol
## CIBIO/InBIO, FCUP
## Porto, 10/2018
##
## --------------------------------------------------------- ##
library(hypervolume)
library(raster)
library(dplyr)
library(magrittr)
library(rgdal)
library(ggplot2)
library(ggdendro)
# Convenience wrapper around the hypervolume package: build the set-operations
# object for a pair of hypervolumes and return its overlap statistics
# (Jaccard and Sorensen similarity, unique fractions). Any extra arguments
# in `...` are forwarded to hypervolume_set().
hv_overlap <- function(hv1, hv2, verbose=FALSE, ...){
  hypervolume_overlap_statistics(
    hypervolume_set(hv1, hv2, check.memory = FALSE, verbose = verbose, ...)
  )
}
# ------------------------------------------------------------------------- #
# Load data ----
# ------------------------------------------------------------------------- #
# Load hypervolume objects from previous analyses
# (provides the hvObj_BySpecies list used in the loop below)
load("./OUT/HyperVolumeBySpecies-v3-20181101.RData")
# Load shapefile
spData <- readOGR("./DATA/VECTOR/Bryophyte_dataset","And_Gri_Rac_PI_all_2")
# ------------------------------------------------------------------------- #
# Perform overlap analysis by species pairs (lower-tri matrix only) ----
# ------------------------------------------------------------------------- #
spCodesAll <- unique(spData$Cod_esp)
len <- length(spCodesAll)
# Logical mask of the strict lower triangle: each unordered species pair
# is processed exactly once.
ltrimat <- matrix(1:len^2,len,len) %>% lower.tri
# Pairwise overlap matrices; only the lower triangle is filled, rest is NA.
ovlp_jacc <- matrix(NA,len,len,dimnames = list(spCodesAll,spCodesAll))
ovlp_sors <- matrix(NA,len,len,dimnames = list(spCodesAll,spCodesAll))
tot2run <- sum(ltrimat)
pb <- txtProgressBar(1,tot2run,style = 3)
k <- 0
for(i in 1:len){
  for(j in 1:len){
    if(!ltrimat[i,j]){
      next
    }
    k <- k+1
    sp1 <- as.character(spCodesAll[i])
    sp2 <- as.character(spCodesAll[j])
    # List names suggest hypervolumes fitted with the SVM method upstream.
    hv1 <- hvObj_BySpecies[[paste("hv_svm_",sp1,sep="")]]
    hv2 <- hvObj_BySpecies[[paste("hv_svm_",sp2,sep="")]]
    hv_ovlp_ind <- hv_overlap(hv1, hv2)
    # Elements [1] and [2] of the statistics vector are stored as the
    # Jaccard and Sorensen overlaps respectively.
    ovlp_jacc[i,j] <- hv_ovlp_ind[1]
    ovlp_sors[i,j] <- hv_ovlp_ind[2]
    setTxtProgressBar(pb, k)
  }
}
save(ovlp_jacc, ovlp_sors, file = "./OUT/NicheOvlpDistances-NewVars_v3.RData")
# --------------------------------------------------------------------------- #
# Make dendrogram of species niche overlap distances ----
# --------------------------------------------------------------------------- #
# Convert overlap (similarity) to a distance as 1 - overlap; as.dist()
# reads the filled lower triangle of the matrices.
hc_jacc <- hclust((1-as.dist(ovlp_jacc)), method="complete")
hc_sors <- hclust((1-as.dist(ovlp_sors)), method="complete")
#plot(hc_jacc, horiz=TRUE, hang=-1)
ggd <- ggdendrogram(hc_jacc, rotate = TRUE, size = 3) + 
  labs(title="Dendrogram of species niche overlap", 
       subtitle="Jaccard distance between hypervolumes")
ggsave("./OUT/DendroSpeciesNicheOvlp_Jacc-NewVars_v3.png",ggd,height = 7, width=9)
ggd <- ggdendrogram(hc_sors, rotate = TRUE, size = 3) + 
  labs(title="Dendrogram of species niche overlap", 
       subtitle="Sorensen distance between hypervolumes")
ggsave("./OUT/DendroSpeciesNicheOvlp_Sors-NewVars_v3.png",ggd,height = 7, width=9)
|
95ff8a5ea0d51726cbad3a51163d141c57c8ad5b | 4f1f224fc502a4d4ef9494c4bb4c6f0c50787094 | /BSFG/man/BSFG_control.Rd | 21de7b1ec555abb2c9b000c63c282f40391fe615 | [] | no_license | xinxin63/SparseFactorMixedModel | 8ba0c30947175df964b840d2a8a70f51ba778311 | 60b3aeea4f90947e86b00e54066b77d86ec36a97 | refs/heads/master | 2020-04-05T20:10:39.915162 | 2017-05-03T03:37:38 | 2017-05-03T03:37:38 | 61,915,465 | 0 | 0 | null | 2016-06-24T22:21:26 | 2016-06-24T22:21:26 | null | UTF-8 | R | false | true | 3,659 | rd | BSFG_control.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSFG_master.R
\name{BSFG_control}
\alias{BSFG_control}
\title{Set BSFG run parameters}
\usage{
BSFG_control(sampler = c("fast_BSFG", "general_BSFG"),
Posterior_folder = "Posterior", simulation = c(F, T), scale_Y = c(T, F),
b0 = 1, b1 = 5e-04, epsilon = 0.1, prop = 1, k_init = 20,
h2_divisions = 100, h2_step_size = NULL, drop0_tol = 1e-14,
K_eigen_tol = 1e-10, burn = 100, thin = 2)
}
\arguments{
\item{sampler}{specify the sampler to use. fast_BSFG is often faster, but only allows one random
effect. If more are specified in \code{BSFG_init}, this is switched to general_BSFG.}
\item{Posterior_folder}{path to folder to save posterior samples. Samples of each parameter
are saved in chuncks to limit memory requirements.}
\item{simulation}{Is this a fit to simulated data? If so, a setup list will be expected providing
the true values}
\item{scale_Y}{Should the Y values be centered and scaled? Recommend, except for simulated data.}
\item{b0}{parameter of the \code{update_k} function. See Bhattacharya and Dunson 2011}
\item{b1}{parameter of the \code{update_k} function. See Bhattacharya and Dunson 2011}
\item{epsilon}{parameter of the \code{update_k} function. Smallest \eqn{\lambda_{ij}} that is
considered "large", signifying a factor should be kept. See Bhattacharya and Dunson 2011}
\item{prop}{proportion of \eqn{\lambda{ij}} elements in a column of \eqn{\Lambda} that must be smaller than
\code{epsilon} before factor is dropped. See Bhattacharya and Dunson 2011}
\item{h2_divisions}{A scalar or vector of length equal to number of random effects. In BSFG, random
effects are re-scaled as percentages of the total variation. Then a discrete prior spanning [0,1)
with \code{h2_divisions} equally spaced values is constructred for each variance component. If
\code{h2_divisions} is a scalar, the prior for each variance component has this number of divisions.
In the joint prior over all variance components, combinations of variance components with total variance != 1
are assigned a prior of zero and ignored.}
\item{h2_step_size}{Either NULL, or a scaler in the range (0,1] giving specifying the range of h2 values for a Metropolis-Hastings
update step for each h2 parameter vector. If NULL, h2's will be sampled based on the marginal probability
over all possible h2 vectors. If a scalar, a Metropolis-Hastings update step will be used for each h2 vector.
The trail value will be selected uniformly from all possible h2 vectors within this Euclidean distance from the current vector.}
\item{drop0_tol}{A scalar giving the a tolerance for the \code{drop0()} function that will be applied
to various symmetric (possibly) sparse matrices to try to fix numerical errors and increase sparsity.}
\item{K_eigen_tol}{A scalar giving the minimum eigenvalue of a K matrix allowed. During pre-processing,
eigenvalues of each K matrix will be calculated using \code{svd(K)}. Only eigenvectors of K with corresponding eigenvalues
greater than this value will be kept. If smaller eigenvalues exist, the model will be transformed
to reduce the rank of K, by multiplying Z by the remaining eigenvectors of K. This transformation
is undone before posterior samples are recorded, so posterior samples of \code{U_F} and \code{U_R} are
untransformed.}
\item{burn}{burnin length of the MCMC chain}
\item{thin}{thinning rate of the MCMC chain}
\item{k_init}{Initial number of factors.}
}
\description{
Function to create run_parameters list for initializing BSFG model
}
\seealso{
\code{\link{BSFG_init}}, \code{\link{sample_BSFG}}, \code{\link{print.BSFG_state}}
}
|
871a2e5fd2a891f492e2977d2eec9aa07ee0f419 | db87ffa8bbc8fb634aca73c361824c24cf4f9b9f | /A1/task3.r | ba4af0661c550f23d61150f15adce9f0cbc1ff74 | [] | no_license | DyassKhalid007/CS432-Data-Mining | cf2d0db36d8a394877ce9389ab84c5fd15f71661 | bbb9d66c7538939b1b3c20e80f0adac0951b050e | refs/heads/master | 2023-02-06T16:36:37.211906 | 2023-01-31T20:51:21 | 2023-01-31T20:51:21 | 164,698,050 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 439 | r | task3.r | #code to generate the correlation matrix
# Correlation matrix of the heart-disease variables (assumes all columns
# in heart.csv are numeric -- cor() errors otherwise).
heart = read.csv("heart.csv")
mydata.cor = cor(heart)
# Barplot of the target variable (counts of each outcome).
counts <- table(heart$target)
barplot(counts, main="Target Barplot",
        xlab="Number of targets")
# Barplot of sex broken down by target.
# FIX(review): the original call passed a trailing unnamed
# `table(heart$target)` which silently matched barplot's `width`
# argument, so the bar *widths* were the target counts. A two-way table
# gives the intended sex-by-target display; y-axis shows counts.
barplot(table(heart$target, heart$sex),
        main = "Histogram of sex with respect to target",
        xlab = "Sex",
        ylab = "Count",
        beside = TRUE,
        legend.text = TRUE)
|
bd5b81fb3b4b30585538bffdba279f388d4ad23d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/timetk/examples/tk_get_timeseries_unit_frequency.Rd.R | d7dd18667fe2b480f90f1c691b4cd7d08c28466d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 238 | r | tk_get_timeseries_unit_frequency.Rd.R | library(timetk)
### Name: tk_get_timeseries_unit_frequency
### Title: Get the timeseries unit frequency for the primary time scales
### Aliases: tk_get_timeseries_unit_frequency
### ** Examples
tk_get_timeseries_unit_frequency()
|
f057948ce35459ea88350ed1a19725479843edcb | 0e595bb86c1a6751c169a32383281ff233d27f40 | /man/pullSigQTL.Rd | 3b57d75998b3f21d5786769aad13d24f5cdd70e6 | [] | no_license | pinbo/qtlTools | bd4b5e684c7353eedacc5cf48aec3344a7caa5d2 | 96f6b61e255314f6a5a32e38105c160dc52c037a | refs/heads/master | 2022-01-04T23:47:17.044866 | 2018-10-02T20:52:47 | 2018-10-02T20:52:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,511 | rd | pullSigQTL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pullSigQTL.R
\name{pullSigQTL}
\alias{pullSigQTL}
\title{Method to summarize scanone results}
\usage{
pullSigQTL(cross, s1.output, perm.output, pheno.col = NULL, chr = NULL,
alpha = 0.05, returnQTLModel = TRUE, ...)
}
\arguments{
\item{cross}{The qtl cross.}
\item{s1.output}{The output from scanone}
\item{perm.output}{The permutation output from scanone}
\item{pheno.col}{Character or numeric vector indicating the phenotype to be tested.}
\item{chr}{The chromosome to be tested. Defaults to all chromosomes.}
\item{alpha}{The significance for permutations}
\item{returnQTLModel}{Logical, should a QTL model be returned (TRUE), or
should a culled output from qtlpvl::convert_scan1 be returned (FALSE)?}
\item{...}{additional arguments passed on to summary.scanone,
such as controlAcrossCol.}
}
\value{
Either QTL models or simplified and converted scanone summary.
}
\description{
\code{pullSigQTL} Uses qtlpvl to summarize the output of scanone,
then culls the output to only significant QTL peaks, based on permutations.
}
\examples{
\dontrun{
library(qtlTools)
data(fake.bc)
cross<-fake.bc
cross <- calc.genoprob(cross, step=2.5)
s1<-scanone(cross, method="hk", pheno.col=c("pheno1", "pheno2"))
perm<-scanone(cross, n.perm=100, method="hk",
pheno.col=c("pheno1", "pheno2"), verbose=FALSE)
pullSigQTL(cross, s1.output=s1, perm.output=perm)
pullSigQTL(cross, s1.output=s1, perm.output=perm, returnQTLModel=FALSE)
}
}
|
2d88cf0755f86b644e4f2b532af257929791cc5e | 36a2cfd1b36a4907232bca2bb3764e766cbf8c79 | /expected-points/archive/analysis/Exploratory Data Analysis.R | d3621e39c56d89c642b5af7ea30963bd158d9986 | [] | no_license | jkope892/research | 8afbe3e2b0324be33d0b54c1c0729d7a6705796c | 508c4fd9f6d6001f0acb22bc53f0f12b123fdac0 | refs/heads/master | 2023-05-02T12:57:50.174845 | 2021-05-24T04:45:39 | 2021-05-24T04:45:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,673 | r | Exploratory Data Analysis.R | library(here)
library(arrow)
library(tidyverse)
library(skimr)
setwd(here())
#pull in raw data
rosters <- read_parquet("data/rosters/rosters_1999_2019.pdata")
pbp <- read_parquet("data/pbp_data/pbp_reg_post_1999_2019.pdata")
#Pull in passer, rusher, and receiver positions
pbp <- pbp %>%
left_join(dplyr::select(rosters,
rusher_gsis_id = GSIS_ID,
rusher_gsis_name = Player,
rusher_gsis_pos = Position),
by = c("rusher_player_id" = "rusher_gsis_id")) %>%
left_join(dplyr::select(rosters,
receiver_gsis_id = GSIS_ID,
receiver_gsis_name = Player,
receiver_gsis_pos = Position),
by = c("receiver_player_id" = "receiver_gsis_id")) %>%
left_join(dplyr::select(rosters,
passer_gsis_id = GSIS_ID,
passer_gsis_name = Player,
passer_gsis_pos = Position),
by = c("passer_player_id" = "passer_gsis_id"))
#skim
skim(rosters)
skim(pbp)
#filter out bad data
explore <- pbp %>%
filter(!is.na(posteam), #not real plays
!is.na(alt_game_id)) #Pro Bowls
#skim again
skim(explore)
pass <- explore %>%
filter(play_type == "pass") %>%
group_by(season_year, season_type) %>%
summarise(count = n(),
comp_pct = mean(complete_pass, na.rm = TRUE),
adot = mean(air_yards, na.rm = TRUE))
library(RColorBrewer)
library(scales)
nb.cols <- 14
mycolors <- colorRampPalette(brewer.pal(9, "YlOrRd"))(nb.cols)
show_col(mycolors)
#completion rate over time
explore %>%
filter(season_year >= 2006 & receiver_gsis_pos %in% c("RB")) %>%
ggplot(aes(x=air_yards, y=complete_pass)) +
geom_smooth(aes(color = as.factor(season_year)), se = F) +
scale_colour_manual(values = mycolors) +
geom_smooth(color = "black", se = F) +
xlim(-5,30) +
theme_bw() +
facet_wrap(~receiver_gsis_pos)
explore %>%
filter(season_year >= 2006 & receiver_gsis_pos %in% c("TE")) %>%
ggplot(aes(x=air_yards, y=yards_gained)) +
geom_smooth(aes(color = as.factor(season_year)), se = F) +
scale_colour_manual(values = mycolors) +
geom_smooth(color = "black", se = F) +
xlim(-5,30) +
theme_bw() +
facet_wrap(~receiver_gsis_pos)
#completion rate over time
explore %>%
filter(rusher_gsis_pos %in% c("RB") & play_type == "run") %>%
ggplot(aes(x=yardline_100, y=rush_touchdown)) +
geom_smooth(aes(color = as.factor(season_year)), se = F) +
scale_colour_manual(values = mycolors) +
geom_smooth(color = "black", se = F) +
xlim(0,30) +
theme_bw() +
facet_wrap(~rusher_gsis_pos) |
cbdbab97fdd9cdad9e9f60cd2151157448ef7aa8 | 93953f679eff1f9fd5e9740b48e6ada23cc30427 | /theme_gxMetab.R | b6c9dab5c2820559fc124041de5a8aabed29529f | [] | no_license | cfbeuchel/imise_functions | 663f858b3108182fbd530d75537ea636c5db33fa | 2f253dc2c1b5f150d6c6fe3778feb0457bf22fa4 | refs/heads/master | 2023-03-03T14:52:36.304054 | 2021-02-15T11:22:23 | 2021-02-15T11:22:23 | 339,053,746 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,337 | r | theme_gxMetab.R | theme_gxMetab <- function(
bg.col = "grey100",
axis.col = "grey30",
text.col = "grey29",
strip.bg.col = "grey98",
grid.col = "grey30"
){
theme(
panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(),
panel.border = element_blank(),
panel.background = element_rect(fill = bg.col,
color=NA),
panel.grid.major = element_line(color = grid.col,
linetype = "dotted"),
axis.ticks = element_line(color = axis.col,
size=.6),
axis.ticks.length = unit(.20,"cm"),
axis.text = element_text(family = "sans",
colour = text.col,
face = "bold"),
axis.title = element_text(family = "sans",
colour = text.col,
face = "bold"),
strip.background = element_rect(color=NA,
fill=strip.bg.col),
strip.text = element_text(family = "sans",
colour = text.col,
face = "bold"),
legend.justification = "top"
)
}
|
ed4bc43c174392371f444d801270375079ba87bb | bd4b57d4f1677d6789513b52f1db752f756936ca | /R/gelman.prior.R | 6e704b92cb7f7ecc6aa058dbc76230cddc44e3cd | [] | no_license | cran/MCMCglmm | dd45441b7dac39b065e186e5960f68f8650c7133 | fdd8d46550344b9be1ed429ac8fea24ad0a40761 | refs/heads/master | 2023-07-06T00:24:58.456332 | 2023-06-30T20:00:02 | 2023-06-30T20:00:02 | 17,680,516 | 4 | 16 | null | null | null | null | UTF-8 | R | false | false | 1,058 | r | gelman.prior.R | gelman.prior<-function(formula, data, scale=1, intercept=scale, singular.ok=FALSE){
X1<-model.matrix(formula, data)
# Indices of inestimable fixed-effect columns. Initialised empty so the
# length(sing.rm) checks later in the function are always safe:
# previously sing.rm was only created inside the if-block below, so
# calling gelman.prior(..., singular.ok=TRUE) stopped with
# "object 'sing.rm' not found".
sing.rm<-integer(0)
if(singular.ok==FALSE){
# Regress a throw-away response on X1 (no intercept): NA coefficients
# flag columns that are not estimable (rank deficiency).
sing.rm<-lm(rnorm(nrow(X1))~X1-1)
sing.rm<-which(is.na(sing.rm$coef))
if(length(sing.rm)>0){
warning("some fixed effects are not estimable and have been removed. Use singular.ok=TRUE to sample these effects, but use an informative prior!")
}
}
# Rebuild the design matrix from rescaled data: numeric inputs are divided
# by 2*sd(x)*(n-1)/n (approximately twice their standard deviation,
# following Gelman's scaling), so a prior sd of `scale` is comparable
# across predictors.
X2<-get_all_vars(formula, data)
X2<-as.data.frame(lapply(X2, function(x){if(is.numeric(x)){scale(x, scale=sd(x)*2*(length(x)-1)/length(x))}else{x}}))
X2<-model.matrix(formula, data=X2)
# Centre 0/1 dummy columns; leave the intercept column (all ones)
# untouched when one is present.
if(all(X2[,1]==1)){
X2[,-1]<-apply(X2[,-1,drop=FALSE], 2, function(x){if(any(!x%in%c(0,1))){x}else{scale(x, center=sum(x)/length(x), scale=1)}})
}else{
X2<-apply(X2, 2, function(x){if(any(!x%in%c(0,1))){x}else{scale(x, center=sum(x)/length(x), scale=1)}})
}
# Drop the columns flagged as inestimable above (when singular.ok=FALSE).
if(length(sing.rm)>0){
X1<-X1[,-sing.rm]
X2<-X2[,-sing.rm]
}
# Project the diagonal prior covariance (intercept^2 for the intercept,
# scale^2 elsewhere) from the rescaled parameterisation back onto the
# original one via P, and return the resulting covariance matrix.
P<-solve(t(X1)%*%X1, t(X1)%*%X2)
I<-diag(nrow(P))*scale^2
I[1,1]<-intercept^2
P%*%I%*%t(P)
}
|
b979920de32333cc74903b5f9537e05ba6d5d060 | 1446c397e9d68266f72e23cb70e43ce724ece0d7 | /protein analysis.R | fa532854b7e0b0855fcc4175c19115acf1e4e408 | [] | no_license | Scavetta/QBM_MSc | 6ac3f2973ce0275be9836a7aaaaf67084fec0edd | e490a7f2db920b46590fb1ed12aafb4bab99bb96 | refs/heads/master | 2020-03-27T21:55:04.193781 | 2018-09-07T15:11:21 | 2018-09-07T15:11:21 | 147,186,628 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,158 | r | protein analysis.R | # SILAC analysis
# Rick Scavetta
# 04.09.2018
# QBM R workshop for MSc
# Clear workspace (environment)
rm(list = ls())
# Load packages
library(tidyverse)
# read in the data
protein.df <- read.delim("Protein.txt", stringsAsFactors = FALSE)
# examine the data:
# summary(protein.df)
# ncol(protein.df)
# dim(protein.df)
glimpse(protein.df)
# str(protein.df)
# print the data frame to the screen:
# protein.df
# use readr version:
# protein.df <- read_tsv("Protein.txt")
# Convert the data frame to a tibble:
# print the data frame to the screen:
protein.df <- as_tibble(protein.df)
protein.df
class(protein.df)
# Examine and Remove contaminats
protein.df %>%
filter(Contaminant == "+") -> prot.con
# total cont
nrow(prot.con)
# percentage cont
nrow(prot.con)/nrow(protein.df)*100
# Get a table
table(protein.df$Contaminant)/nrow(protein.df)
summary(protein.df$Contaminant)
# Using a logical vector to do math
# TRUE == T == 1
# FALSE == F == 0
sum(protein.df$Contaminant == "+")
# Remove contaminants:
protein.df %>%
filter(Contaminant != "+") -> protein.df
# Plot a histogram or a density plot of each ratio:
ggplot(protein.df, aes(x = Ratio.H.M)) +
geom_histogram()
# Transformations
# Log10 of intensities
protein.df$Intensity.H <- log10(protein.df$Intensity.H)
protein.df$Intensity.M <- log10(protein.df$Intensity.M)
protein.df$Intensity.L <- log10(protein.df$Intensity.L)
# Add intensities
protein.df$Intensity.H.M <- protein.df$Intensity.H + protein.df$Intensity.M
protein.df$Intensity.M.L <- protein.df$Intensity.M + protein.df$Intensity.L
# Log2 of ratios
protein.df$Ratio.H.M <- log2(protein.df$Ratio.H.M)
protein.df$Ratio.M.L <- log2(protein.df$Ratio.M.L)
# What is the shift?
shift.H.M <- mean(protein.df$Ratio.H.M, na.rm = T)
shift.M.L <- mean(protein.df$Ratio.M.L, na.rm = T)
# Adjust values:
protein.df$Ratio.H.M <- protein.df$Ratio.H.M - shift.H.M
protein.df$Ratio.M.L <- protein.df$Ratio.M.L - shift.M.L
# Plot a histogram or a density plot of each transformed ratio:
ggplot(protein.df, aes(x = Ratio.H.M)) +
geom_histogram()
ggplot(protein.df, aes(x = Ratio.M.L)) +
geom_histogram()
# Examine Data, Exercises 9.2 - 9.4:
# Get specific Uniprot IDs
# Using filter(), Exercise 9.2
protein.df %>%
filter(Uniprot %in% paste0(c("GOGA7", "PSA6", "S10AB"), "_MOUSE")) %>%
select(Uniprot, Ratio.M.L, Ratio.H.M)
# Using [], Exercise 10.1
protein.df[protein.df$Uniprot %in%
paste0(c("GOGA7", "PSA6", "S10AB"), "_MOUSE"),
c("Uniprot", "Ratio.M.L", "Ratio.H.M")]
# Get low p-value proteins:
# Using filter(), Exercise 9.3
protein.df %>%
filter(Ratio.H.M.Sig < 0.05) -> sig.H.M
# Using [], Exercise 10.2
protein.df[protein.df$Ratio.H.M.Sig < 0.05 &
!is.na(protein.df$Ratio.H.M.Sig), ]
# Get extreme log2 ratio proteins:
# Using filter(), Exercise 9.4
protein.df %>%
filter(Ratio.H.M > 2.0 | Ratio.H.M < -2.0)
# Using [], Exercise 10.3
protein.df[(protein.df$Ratio.H.M > 2.0 |
protein.df$Ratio.H.M < -2.0) &
!is.na(protein.df$Ratio.H.M), ]
# Proteins for top 20 HM and ML ratios
# Exercise 10.4
protein.df %>%
arrange(desc(Ratio.M.L)) %>%
filter(row_number()<21)
protein.df %>%
top_n(20, Ratio.M.L) -> topML
protein.df %>%
top_n(20, Ratio.H.M) -> topHM
# Intersection of top20 lists:
# Exercise 10.5
intersect(topML, topHM) %>%
select(Uniprot, Ratio.H.M, Ratio.M.L)
# Exercises 13.1 & 13.2:
# Make a plot coloured according to sig values:
protein.df$Ratio.H.M.Sig.Cat <- cut(protein.df$Ratio.H.M.Sig,
c(0, 1e-11, 1e-4, 0.05, 1))
# optionally, add c("<1e-11", "<1e-04", "<0.05", "NS") to cut().
protein.df$Ratio.M.L.Sig.Cat <- cut(protein.df$Ratio.M.L.Sig,
c(0, 1e-11, 1e-4, 0.05, 1))
glimpse(protein.df)
ggplot(protein.df, aes(x = Ratio.H.M, y = Intensity.H.M, col = Ratio.H.M.Sig.Cat)) +
geom_point(alpha = 0.5)
ggplot(protein.df, aes(x = Ratio.M.L, y = Intensity.M.L, col = Ratio.M.L.Sig.Cat)) +
geom_point(alpha = 0.5)
# Pattern Matching with Regular Expressions: Exercises chapter 18
desc <- protein.df$Description
# A character vector
# Which contain methyl
str_extract(desc, regex(".*methyl.*", ignore_case = TRUE)) # long, but clear
str_extract(desc, "methyl") # easier, but only lower case
str_extract(desc, ".*(M|m)ethyl.*") # short RegEx for both upper and lower case
str_extract(desc, "(M|m)ethyl.*ase") # greedy, "methylase and lysyl-hydroxylase"
str_extract(desc, "(M|m)ethyl.*?ase") # ungreedy, "methylase"
# Until the end of the name? More complex :/
# What rows contain “methyl”?
grep("(M|m)ethyl", desc)
str_which(desc, "(M|m)ethyl")
which(str_detect(desc, "(M|m)ethyl"))
# How many?
length(grep("(M|m)ethyl", desc))
# Does case (in)sensitivity make a difference?
identical(str_detect(desc, "methyl"), str_detect(desc, "Methyl"))
# Exercises 18.2 & 18.3:
protein.df %>%
filter(str_detect(Description, regex("ubiquitin", ignore_case = T))) %>%
select(Uniprot, Ratio.M.L, Ratio.H.M) %>%
filter(complete.cases(.)) %>%
ggplot(aes(Ratio.M.L, Ratio.H.M)) +
geom_point() +
labs(title = "Only Ubiquitins")
|
a81156ecb6e678a05ca225f00893d0493153d572 | 3ad67539049ea8ef2ad9bc25d2ba9bde4c783dec | /man/geom_arrow.Rd | f6cae2c47e025876a6e348b8f9a06b47f618e69e | [] | no_license | microly/autoscale | 803fb263e4a3519f0f8c05eb9a5008cb911068a1 | 949ccbfe6c99fadac1c91b7844129478575677d7 | refs/heads/master | 2020-08-11T14:51:38.695334 | 2019-10-12T05:45:21 | 2019-10-12T05:45:21 | 214,583,037 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 852 | rd | geom_arrow.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{geom_arrow}
\alias{geom_arrow}
\title{geom_arrow}
\usage{
geom_arrow(mapping = NULL, data = NULL, stat = "arrow",
position = "identity", ..., start = 0, direction = 1,
min.mag = 0, skip = 0, skip.x = skip, skip.y = skip,
arrow.angle = 15, arrow.length = 0.5, arrow.ends = "last",
arrow.type = "closed", arrow = grid::arrow(arrow.angle,
unit(arrow.length, "lines"), ends = arrow.ends, type = arrow.type),
lineend = "butt", na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE)
}
\arguments{
\item{inherit.aes}{}
}
\description{
geom_arrow
}
\examples{
library(tibble)
geo <- tibble(lon = 1:10, lat = 1:10, mag = 1:10, angle = 1:10)
# scale_mag_continuous <- scale_mag
ggplot(geo, aes(lon, lat)) +
geom_arrow(aes(mag = mag, angle = angle))
}
|
38c2192af65481f9724e111b29bfa52aec13ea84 | 58fe4cd84703ec0328afddb5d34ad3e37cf7dfd6 | /benchmarks/rodinia/lavaMD/Makefile.rd | e98f8bdd67b0bfe42aea4bbc12e9cc1862f12fe7 | [] | no_license | sljiaa/curd-llvm | b513ed5e3b96fae37e1d673400ace072f493dad8 | 238202cbe8acb9a21e43fae72c5d74b9d9c42750 | refs/heads/main | 2023-02-15T00:29:03.714232 | 2021-01-17T23:42:49 | 2021-01-17T23:42:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,597 | rd | Makefile.rd | CURD_DIR ?=../../../
CURD_CFLAGS ?=-I$(P4ROOT)/sw/gpgpu/samples/common/inc -v -keep -dr -rdc=true -I$(CURD_DIR) -lineinfo -arch=sm_35 --cudart=shared
CURD_FLAGS_LAZY=$(CURD_CFLAGS) -L$(CURD_DIR) $(CURD_DIR)/race_detection_lazy.o
CURD_FLAGS_EAGER=$(CURD_CFLAGS) -L$(CURD_DIR) $(CURD_DIR)/race_detection_eager.o
# Example
# target: dependencies
# command 1
# command 2
# .
# .
# .
# command n
ifdef OUTPUT
override OUTPUT = -DOUTPUT
endif
C_C = gcc
OMP_LIB = -lgomp
OMP_FLAG = -fopenmp
CUD_C = nvcc
# OMP_FLAG = -Xcompiler paste_one_here
CUDA_FLAG = $(CURD_CFLAGS)
# link objects (binaries) together
a.out: main.o \
./kernel/kernel_gpu_cuda_wrapper.o \
./util/num/num.o \
./util/timer/timer.o \
./util/device/device.o
$(CUD_C) $(CURD_FLAGS_LAZY) $(KERNEL_DIM) main.o \
./kernel/kernel_gpu_cuda_wrapper.o \
./util/num/num.o \
./util/timer/timer.o \
./util/device/device.o \
-lm \
-L/usr/local/cuda/lib64 \
-lcuda -lcudart \
$(OMP_LIB) \
-o lavaMD_lazy
$(CUD_C) $(CURD_FLAGS_EAGER) $(KERNEL_DIM) main.o \
./kernel/kernel_gpu_cuda_wrapper.o \
./util/num/num.o \
./util/timer/timer.o \
./util/device/device.o \
-lm \
-L/usr/local/cuda/lib64 \
-lcuda -lcudart \
$(OMP_LIB) \
-o lavaMD_eager
# compile function files into objects (binaries)
main.o: main.h \
main.c \
./kernel/kernel_gpu_cuda_wrapper.h \
./kernel/kernel_gpu_cuda_wrapper.cu \
./util/num/num.h \
./util/num/num.c \
./util/timer/timer.h \
./util/timer/timer.c \
./util/device/device.h \
./util/device/device.cu
$(C_C) $(KERNEL_DIM) $(OUTPUT) main.c \
-c \
-o main.o \
-O3
./kernel/kernel_gpu_cuda_wrapper.o: ./kernel/kernel_gpu_cuda_wrapper.h \
./kernel/kernel_gpu_cuda_wrapper.cu
$(CUD_C) $(KERNEL_DIM) ./kernel/kernel_gpu_cuda_wrapper.cu \
-c \
-o ./kernel/kernel_gpu_cuda_wrapper.o \
-O3 \
$(CUDA_FLAG)
./util/num/num.o: ./util/num/num.h \
./util/num/num.c
$(C_C) ./util/num/num.c \
-c \
-o ./util/num/num.o \
-O3
./util/timer/timer.o: ./util/timer/timer.h \
./util/timer/timer.c
$(C_C) ./util/timer/timer.c \
-c \
-o ./util/timer/timer.o \
-O3
./util/device/device.o: ./util/device/device.h \
./util/device/device.cu
$(CUD_C) $(CURD_CFLAGS) ./util/device/device.cu \
-c \
-o ./util/device/device.o \
-O3
# delete all object and executable files
clean:
rm -f *.o \
./kernel/*.o \
./util/num/*.o \
./util/timer/*.o \
./util/device/*.o \
lavaMD_*
|
d6e5eb5f35d9db0bf00afa0ca4756cbe750e2100 | 2f5801a43517535dc3db761ae1abbf86c1d8ef30 | /learn-to-pass.RF.R | 2cd2be2223fc7ba5fead5994402f0dcbced564ff | [
"MIT"
] | permissive | QEDan/MTLDATA-ODBC-VIZ | 5123730484f63d6c80623d4a4528e50aad08caad | 11b71872761328878ca83ae6890677b9291e59f1 | refs/heads/master | 2020-04-06T04:39:03.324611 | 2015-05-25T15:41:39 | 2015-05-25T15:41:39 | 35,701,718 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,452 | r | learn-to-pass.RF.R | library(randomForest)
library(mlbench)
library(caret)
set.seed(42)
game1 <- read.csv('Mtl-Ott-game1.csv')
game1$game <- 1
game2 <- read.csv('Mtl-Ott-game2.csv')
game2$game <- 2
game3 <- read.csv('Mtl-Ott-game3.csv')
game3$game <- 3
game4 <- read.csv('Mtl-Ott-game4.csv')
game4$game <- 4
game5 <- read.csv('Mtl-Ott-game5.csv')
game5$game <- 5
game6 <- read.csv('Mtl-Ott-game6.csv')
game6$game <- 6
events.all <- rbind(game1, game2, game3, game4, game5, game6)
events.passes <- subset(events.all, name=="pass")
events.passes$id <- as.factor(events.passes$id)
events.passes$period <- as.factor(events.passes$period)
events.passes$xPos <- abs(events.passes$xCoord)
events.passes$yPos <- abs(events.passes$yCoord)
netpos.X <- 100.0
netpos.Y <- 0.0
events.passes$netdist <- sqrt((events.passes$xPos-netpos.X)^2 + (events.passes$yPos-netpos.Y)^2)
events.passes$shorthand <- gsub("\\+", "", events.passes$shorthand)
events.passes$shorthand <- gsub("-", "", events.passes$shorthand)
events.passes$type <- factor(events.passes$type)
extractFeatures <- function(data) {
features <- c("period",
"team",
"zone",
"type",
"xPos",
"yPos",
"game",
"netdist",
"playerPosition")
return(data[,features])
}
trainEvents <- sample(1:dim(events.passes)[1], 2500)
valEvents <- setdiff(1:dim(events.passes)[1], trainEvents)
#model <- lm(outcome ~ period + team + shorthand + zone + type +
# xAdjCoord + yAdjCoord + playerPosition, data=events.passes)
rf <- randomForest(extractFeatures(events.passes[trainEvents,]),
events.passes[trainEvents,]$outcome, ntree=100,
mtry=5, importance=TRUE)
imp <- importance(rf, type=1)
print(imp)
prediction <- predict(rf, events.passes[valEvents,-13], type="prob")
dist <- function(prob, truth)
{
if (truth == "successful") t <- 1
else t <- 0
return(abs(t - prob))
}
distances <- mapply(dist, prediction[,2], events.passes[valEvents,]$outcome)
png('testValErrors.png')
hist(distances.train, col=rgb(1,0,0,0.5), probability=T, breaks=50, main="")
hist(distances.val, col=rgb(0,0,1,0.5), probability=T, add=T, breaks=50)
legend("topright", c("Training Errors", "Validation Errors"),
col=c("red", "blue"), lwd=10)
dev.off()
print(sum(distances.val <0.5)/sum(distances.val >= 0.0))
|
ddcaf896297c846c0f381325bf6007a01f347c65 | d403482d3a161a4e5b20dcc745b790a945730e69 | /Rdouban/R/get_movie_reviews.R | b916bf45ef4f5e5a0eda968c3aa0326a44b6a991 | [] | no_license | illy/Rdouban | 2039af8abcc0fb5894ec60bb9748f43182723837 | 285ff5b57cd66c25683148e4937f6b799f4f9e8f | refs/heads/master | 2020-12-24T20:11:00.791650 | 2013-04-05T10:05:22 | 2013-04-05T10:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,705 | r | get_movie_reviews.R | ##movieid=5308265
## x=get_movie_reviews(movieid=5308265,n=20)
get_movie_reviews<-function(movieid,n=100,verbose=TRUE,...){
strurl=paste0('http://movie.douban.com/subject/',movieid,'/reviews')
pagetree <- htmlParse(getURL(strurl))
title0<- sapply(getNodeSet(pagetree, '//head//title'),xmlValue)
title<-gsub('[0-9 \n\\(\\)]|的影评|的评论','',title0)
reviews_amount<-as.integer(gsub('[^0-9]','',title0))
rating<-sapply(getNodeSet(pagetree, '//div[@class="rating_list clearfix"]//span'),xmlValue)[-1]
rating<-as.integer(gsub('[0-5]星|[ -]','',rating))
names(rating)<-c('stars5','stars4','stars3','stars2','stars1')
cat('There is a total of',reviews_amount,'reviews...\n')
.get_review<-function(pagetree,verbose=TRUE,...){
urlsnode<-getNodeSet(pagetree, '//div[@class="ctsh"]//a')
urls<-unique(sapply(urlsnode,function(x) xmlGetAttr(x, "href")))
review_url<-urls[grep('/review/',urls)]
author_url<-urls[grep('/people/',urls)]
#urlsvalue<-gsub('[\n ]','',sapply(urlsnode,xmlValue))
#urlsvalue<-urlsvalue[nchar(urlsvalue)>0]
m=length(review_url)
rev<-c()
for(i in 1:m){
if(verbose==TRUE)
cat(' Getting long comments from ',review_url[i],'...\n')
reviewtree <- htmlParse(getURL(review_url[i]))
title <- sapply(getNodeSet(reviewtree, '//span[@property="v:summary"]'),xmlValue)
time<-sapply(getNodeSet(reviewtree, '//span[@property="v:dtreviewed"]'),xmlValue)
nickname<-sapply(getNodeSet(reviewtree, '//span[@property="v:reviewer"]'),xmlValue)
rating<-sapply(getNodeSet(reviewtree, '//span[@property="v:rating"]'),xmlValue)
review<-sapply(getNodeSet(reviewtree, '//span[@property="v:description"]'),xmlValue)
if(length(review)==0)
review<-sapply(getNodeSet(reviewtree, '//div[@property="v:description"]'),xmlValue)
useful<-sapply(getNodeSet(reviewtree, '//span[@class="useful"]//em'),xmlValue)
unuseful<-sapply(getNodeSet(reviewtree, '//span[@class="unuseful"]//em'),xmlValue)
if(length(useful)==0|length(unuseful)==0){
x0<-sapply(getNodeSet(reviewtree, '//div[@class="main-panel-useful"]//em'),xmlValue)
useful=x0[1]
unuseful=x0[2]
}
rev0<-c(title,review,time,nickname,rating,
useful,unuseful,review_url[i],author_url[i])
rev<-rbind(rev,rev0)
}
row.names(rev)<-NULL
rev
}
pages<-ceiling(min(n,reviews_amount)/20)
reviews_info<-.get_review(pagetree,verbose=verbose)
if(pages>1){
for(pg in 2:pages){
cat('Getting',(pg-1)*20+1,'--',pg*20,'reviews...\n')
strurl=paste0('http://movie.douban.com/subject/',movieid,
'/reviews?start=',(pg-1)*20,'&filter=&limit=20')
pagetree <- htmlParse(getURL(strurl))
reviews_info0<-.get_review(pagetree,verbose=verbose)
reviews_info<-rbind(reviews_info,reviews_info0)
}
}
row.names(reviews_info)<-NULL
reviews_info<-data.frame(title=reviews_info[,1],
review=reviews_info[,2],
time=reviews_info[,3],
nickname=reviews_info[,4],
rating=as.integer(reviews_info[,5]),
useful=as.integer(reviews_info[,6]),
unuseful=as.integer(reviews_info[,7]),
review_url=reviews_info[,8],
author_url=reviews_info[,9],
stringsAsFactors=F)
list(movie_title=title,
reviews_amount=reviews_amount,
rating=rating,
reviews_info=reviews_info)
} |
e5596369645f1a91ecc078d0fe91630dc6c675e4 | a4391097e7b9742bb66e5871cd2dd980b0ed7d60 | /R/umap_small.R | 2832c28812e3c21115ac62dcc096d33be09dee79 | [
"MIT"
] | permissive | JenniferSLyon/umap | e4a56c8cf2b48d19c400e09ae22f40451ff9143e | 524f4460e7a80bfdd7babb3f94c1b02b85bb914b | refs/heads/master | 2020-04-05T05:53:40.904708 | 2018-09-29T06:45:58 | 2018-09-29T06:45:58 | 156,616,109 | 0 | 0 | NOASSERTION | 2018-11-07T22:22:26 | 2018-11-07T22:22:26 | null | UTF-8 | R | false | false | 715 | r | umap_small.R | ## package umap
## functions to produce umap objects for extremely small datasets (0, 1, 2 items)
##' Create an embedding object compatible with package umap for very small inputs
##'
##' @keywords internal
##' @param d matrix
##' @param config list with settings
##'
##' @return list, one element of which is matrix with embedding coordinates
umap.small = function(d, config) {
warning("constructing layout for a very small input dataset", call.=FALSE)
embedding = matrix(0, ncol=config$n_components, nrow=nrow(d))
if (nrow(d)==2) {
## create two well-separate points
embedding[1,] = 5
embedding[2,] = -5
}
rownames(embedding) = rownames(d)
list(layout=embedding, config=config)
}
|
0226575e4383898050f92ca49eba0974fda8d587 | 8f8b98944155f4a8c73f3ce02b81be9d1d32eec8 | /tennis.R | 8314a5312095b09ac23c52471994ec9950dacba6 | [
"MIT"
] | permissive | kshirley/TennisRivals | 895d7250990e9daf3d5fd916edce69e647e6a2cd | 6568d8c716addb6bf29f0b502b1db27723813ef0 | refs/heads/master | 2020-07-11T19:05:23.605704 | 2019-08-27T05:30:12 | 2019-08-27T05:30:12 | 204,622,170 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,257 | r | tennis.R | # Get a list of every pair of players who have played 22 or more
# head to head matches in ATP or WTA tennis:
# First, download the data from Jeff Sackmann's github repo:
# for year in {1968..2019}; do wget https://raw.githubusercontent.com/JeffSackmann/tennis_atp/master/atp_matches_$year.csv --no-check-certificate; done
# for year in {1968..2018}; do wget https://raw.githubusercontent.com/JeffSackmann/tennis_wta/master/wta_matches_$year.csv --no-check-certificate; done
# R setup:
library(dplyr)
library(data.table)
library(tidyr)
setwd("~/tennis") # forgive me, Jenny Bryan
# read in the results files for ATP:
atp <- vector("list", length(1968:2019))
for (i in 1:length(atp)) {
atp[[i]] <- fread(paste0("data/atp/atp_matches_", 1967 + i, ".csv"), data.table = FALSE)
}
# combine all 52 years of data into a single data frame and sort chronologically:
atp <- bind_rows(atp)
atp <- arrange(atp, tourney_date)
n <- nrow(atp)
# 169,690 matches
# count unique matchups:
atp <- atp %>%
mutate(lower = pmin(winner_id, loser_id),
upper = pmax(winner_id, loser_id))
# count number of head to head matches for each pair of players
m <- atp %>% group_by(lower, upper) %>%
summarize(n = n()) %>%
arrange(desc(n)) %>%
as.data.frame()
N <- sum(m$n >= 22)
N
# only 35 pairs of players with 22 or more head-to-head matchups in this data
# filter match-level data to just these 35 matchups
df <- inner_join(atp, filter(m, n >= 22))
# for each matchup, compute cumulative win-loss record
h2h <- vector("list", N)
for (i in 1:N) {
h2h[[i]] <- filter(df, lower == m$lower[i], upper == m$upper[i]) %>%
select(winner_id, winner_name, loser_id, loser_name, lower, upper)
h2h[[i]] <- h2h[[i]] %>%
mutate(wins = cumsum(winner_id == lower),
losses = cumsum(winner_id == upper),
player1 = ifelse(winner_id == lower, winner_name, loser_name),
player2 = ifelse(winner_id == lower, loser_name, winner_name))
}
# look at the 22nd match between each pair of players:
match_22_atp <- lapply(h2h, slice, 22) %>%
bind_rows() %>%
select(wins:player2) %>%
mutate(W = pmax(wins, losses),
L = pmin(wins, losses),
W_player = ifelse(wins > losses, player1, player2),
L_player = ifelse(wins > losses, player2, player1)) %>%
select(W:L_player) %>%
arrange(desc(W))
match_22_atp
# read in the results files for ATP:
wta <- vector("list", length(1968:2018))
for (i in 1:length(wta)) {
wta[[i]] <- fread(paste0("data/wta/wta_matches_", 1967 + i, ".csv"),
data.table = FALSE, fill = TRUE) %>%
select(tourney_date, winner_id, winner_name, loser_id, loser_name)
}
# combine all 51 years of data into a single data frame and sort chronologically:
wta <- bind_rows(wta)
wta <- arrange(wta, tourney_date)
n <- nrow(wta)
# 111,598 matches
# count unique matchups:
wta <- wta %>%
mutate(lower = pmin(winner_id, loser_id),
upper = pmax(winner_id, loser_id))
# count number of head to head matches for each pair of players
m <- wta %>% group_by(lower, upper) %>%
summarize(n = n()) %>%
arrange(desc(n)) %>%
as.data.frame()
N <- sum(m$n >= 22)
N
# only 24 pairs of players with 22 or more head-to-head matchups in the WTA data
# filter match-level data to just these 35 matchups
df <- inner_join(wta, filter(m, n >= 22))
# for each matchup, compute cumulative win-loss record
h2h <- vector("list", N)
for (i in 1:N) {
h2h[[i]] <- filter(df, lower == m$lower[i], upper == m$upper[i]) %>%
select(winner_id, winner_name, loser_id, loser_name, lower, upper)
h2h[[i]] <- h2h[[i]] %>%
mutate(wins = cumsum(winner_id == lower),
losses = cumsum(winner_id == upper),
player1 = ifelse(winner_id == lower, winner_name, loser_name),
player2 = ifelse(winner_id == lower, loser_name, winner_name))
}
# look at the 22nd match between each pair of players:
match_22_wta <- lapply(h2h, slice, 22) %>%
bind_rows() %>%
select(wins:player2) %>%
mutate(W = pmax(wins, losses),
L = pmin(wins, losses),
W_player = ifelse(wins > losses, player1, player2),
L_player = ifelse(wins > losses, player2, player1)) %>%
select(W:L_player) %>%
arrange(desc(W))
match_22_wta
|
0ea9dc662a339c10a2872c65b43cd2a0baef9032 | 191bf5d3af0e4a7b3457843f38cb256b1e0ad57e | /R/directions.R | 73e9bbd3ae95ea6bcaf16d288893e170092d2de8 | [] | no_license | cran/CVarE | 0ec113fd600ca429008b2257c1618fede966454a | addcabfd6ebe305febe59966b727761a8dc696ca | refs/heads/master | 2023-03-14T17:55:15.651687 | 2021-03-11T14:00:06 | 2021-03-11T14:00:06 | 346,926,332 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,575 | r | directions.R | #' @export
directions <- function(object, k, ...) {
UseMethod("directions")
}
#' Computes projected training data \code{X} for given dimension `k`.
#'
#' Returns \eqn{B'X}. That is, it computes the projection of the \eqn{n x p}
#' design matrix \eqn{X} on the column space of \eqn{B} of dimension \eqn{k}.
#'
#' @param object an object of class \code{"cve"}, usually, a result of a call to
#' \code{\link{cve}} or \code{\link{cve.call}}.
#' @param k SDR dimension to use for projection.
#' @param ... ignored (no additional arguments).
#'
#' @return the \eqn{n\times k}{n x k} dimensional matrix \eqn{X B} where \eqn{B}
#' is the cve-estimate for dimension \eqn{k}.
#'
#' @examples
#' # create B for simulation (k = 1)
#' B <- rep(1, 5) / sqrt(5)
#' set.seed(21)
#' # creat predictor data x ~ N(0, I_p)
#' x <- matrix(rnorm(500), 100, 5)
#' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1) = x1 and err ~ N(0, 0.25^2)
#' y <- x %*% B + 0.25 * rnorm(100)
#' # calculate cve with method 'mean' for k = 1
#' set.seed(21)
#' cve.obj.mean <- cve(y ~ x, k = 1, method = 'mean')
#' # get projected data for k = 1
#' x.proj <- directions(cve.obj.mean, k = 1)
#' # plot y against projected data
#' plot(x.proj, y)
#'
#' @seealso \code{\link{cve}}
#'
#' @method directions cve
#' @aliases directions directions.cve
#' @export
directions.cve <- function(object, k, ...) {
if (!(k %in% names(object$res))) {
stop("SDR directions for requested dimension `k` not computed.")
}
return(object$X %*% object$res[[as.character(k)]]$B)
}
|
7d85835cb64e9eed03c635230773023d5ca82380 | 2b850f9fdfa54159f05553050ad21600e6a58246 | /man/jointsig.Rd | 6be525c9ce934aeed4c6ed0fd84b2c67506e15f6 | [] | no_license | cran/palaeoSig | f1dece921733d50a2915d47bd188ecdc32392ced | 29f25c31bf18651a5d50928ebe61f2b247f22962 | refs/heads/master | 2023-03-15T03:17:27.916002 | 2023-03-10T08:30:02 | 2023-03-10T08:30:02 | 17,698,185 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,497 | rd | jointsig.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jointsig.r, R/plot.js.R
\name{jointsig}
\alias{jointsig}
\alias{plot.js}
\title{Test if two variables jointly control changes in fossil data}
\usage{
jointsig(spp, fos, var1, var2, method = "randomTF", n = 99, r = 32, ...)
\method{plot}{js}(x, names.v1, names.v2, ...)
}
\arguments{
\item{spp}{Data frame of modern training set species data, transformed as
required, for example with sqrt}
\item{fos}{Data frame of fossil species data, with same species codes and
transformations as spp}
\item{var1}{Training set environmental variable 1}
\item{var2}{Training set environmental variable 2}
\item{method}{Which significance test to use.
Current option are randomTF and obs.cor.
The latter may give strange results - use with caution.}
\item{n}{number of random training sets used to generate the null model}
\item{r}{How many synthetic variables to make. More is better but slower}
\item{\dots}{Other arguments to plot}
\item{x}{Output from jointsig}
\item{names.v1}{Vector length 2 with names of the end members of the first
environmental variable, e.g., c("cold", "warm") for temperature.}
\item{names.v2}{Ditto for the second variable.}
}
\value{
A list with components
\itemize{
\item{PCA}{ The unconstrained ordination of the fossil data.}
\item{preds}{ A list of the containing the reconstructions for each
environmental variable.}
\item{MAX}{ Proportion of the variance explained by the first axis of the
unconstrained ordination. This is the maximum amount that a reconstruction
of a single variable can explain.}
\item{EX}{ The proportion of the variance in the fossil data explained by
each reconstruction.}
\item{sim.ex}{ The proportion of variance explained by each of the random
environmental variables.}
\item{sig}{ The p-value of each reconstruction.}
}
}
\description{
Generates synthetic variables with different proportion of two
environmental variables, and tests how much variance in the fossil data
reconstructions of these synthetic variables explain.
}
\details{
With \code{method="randomTF"}, the function calculates the proportion of
variance in the fossil data explained by transfer function reconstructions of
synthetic variables.
The synthetic variables are composed of two environmental variables, weighted
between -1 and +1, so to represent a circle.
This is compared with a null distribution of the proportion of variance
explained by reconstructions based on random environmental variables.
Any transfer function in the rioja library can be used.
With method="obs.cor", the aim is the same, but the function reports the
correlation between the species weighted average optima on the synthetic
variables and the species first axis scores.
This option has some pathological behaviour and should probably be avoided.
}
\section{Functions}{
\itemize{
\item \code{plot(js)}: Plot js object
}}
\examples{
require(rioja)
data(SWAP)
data(RLGH)
rlgh.js <- jointsig(
spp = sqrt(SWAP$spec),
fos = sqrt(RLGH$spec),
var1 = SWAP$pH,
var2 = sample(SWAP$pH),
method = "randomTF",
n = 49, r = 32, fun = WA, col = 1
)
# nonsense second variable
plot(rlgh.js, c("acid", "alkaline"), c("down", "up"))
}
\references{
Unpublished method - use with caution. Can give spurious results
with weighted averaging.
}
\seealso{
\code{\link{randomTF}},\code{\link{obs.cor}}
}
\author{
Richard Telford \email{richard.telford@bio.uib.no}
}
\keyword{hplot}
\keyword{htest}
\keyword{multivariate}
|
ece60ea3ed1aca5c89f148ea563f90872648e873 | e8872f92885dd58b1e74835ea1edab18950b7085 | /predictNextWord.R | f3c9e9f992aa2d5b636812146eab49a7df20d981 | [] | no_license | pjjefferies/Coursera-Data-Science-Capstone | 97c8464cd20f8fb7a83e0c8da9e71a2f6def4502 | 88c9df5dfb8e5b11ccfc88b8aa3bc8186dd5534b | refs/heads/master | 2021-01-09T05:34:43.613470 | 2019-01-31T06:19:30 | 2019-01-31T06:19:30 | 83,169,133 | 2 | 6 | null | null | null | null | UTF-8 | R | false | false | 36,802 | r | predictNextWord.R | #
#
#
library(tm)
#library(combinat)
source("CleanCorpus.R")
source("AddToPredictionDF.R")
predictNextWord <- function(wordsToPredictBy,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn = 1,
skipPenalty = 2,
removeStopWords=TRUE,
removeWordSuffixes=TRUE) {
#writeLines(paste0("pNW 1.0: ", newWordList))
# shortList <- data.frame(count=as.integer(),
# basis=as.character(),
# prediction=as.character(),
# word=as.character(),
# rowCount=as.integer(),
# freq=as.numeric(),
# cumFreq=as.numeric())
#print(newWordList)
wordsToPredictBy <- as.character(wordsToPredictBy)
aShortCorpus <- Corpus(VectorSource(c(wordsToPredictBy)))
aShortCleanCorpus <- CleanCorpus(aShortCorpus,
removeEmail=TRUE,
removeURL=TRUE,
removeHandles=TRUE,
removeHashtags=TRUE,
removeStopWords=removeStopWords,
appSpecWordsFile=FALSE,
removeWordSuffixes=removeWordSuffixes,
myBadWordsFile=FALSE,
convertPlainText=TRUE)
textOutOfCorpus <- aShortCleanCorpus[[1]]$content
#writeLines("pNW 1.1: newWordDF:")
#print(newWordDF)
#writeLines("end")
aLineOfWords <- stripWhitespace(trimws(strsplit(textOutOfCorpus, " ")[[1]]))
aLOWLen <- length(aLineOfWords)
#Take the last 6 words at most
aLineOfWords <- aLineOfWords[max(aLOWLen-min(5,aLOWLen), 1):aLOWLen]
aLOWLen <- length(aLineOfWords)
newWordDF <- data.frame(word=aLineOfWords,
stringsAsFactors=FALSE)
predictionDF <- data.frame(word=as.character(),
power=as.numeric(),
sourceAlgo=as.character(),
stringsAsFactors = FALSE)
### PREDICTION - PRIORITY 1 - 4-grams, 3-grams, 2-grams with words in-order together
# Find 2-Gram Matches
if(aLOWLen >= 1) {
predictorWords <- newWordDF[aLOWLen, "word", drop=TRUE]
if(predictorWords %in% predictorWordDF$word) { #Found a 2-gram match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="2"))
}
} else {
return(data.frame(word=c(FALSE), stringsAsFactors = FALSE))
}
# Second find 3-Gram; 2-Gram, Skip-1 Matches
if(aLOWLen >= 2) {
#3-Grams
predictorWords <- paste(newWordDF[(aLOWLen-1):aLOWLen, "word",
drop=TRUE],
collapse="+")
if(predictorWords %in% predictorWordDF$word) { #Found a 4/3/2-gram match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="3"))
}
#2-Grams, Skip-1
predictorWords <- newWordDF[(aLOWLen-1), "word", drop=TRUE]
if(predictorWords %in% predictorWordDF$word) { #Found a 4/3/2-gram match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=1,
sourceAlgo="21"))
}
} else {
if(nrow(predictionDF) > 0) {
predictionDF <- predictionDF[order(predictionDF$power,
decreasing = TRUE),,drop=FALSE]
predictionDF <- predictionDF[1:min(noWordsToReturn,
nrow(predictionDF)),,drop=FALSE]
return(predictionDF)
} else {
return(data.frame(word=c(FALSE), stringsAsFactors = FALSE))
}
}
# First find 4-Grams; 3-Grams, Skip-1 Matches
if(aLOWLen >= 3) {
#4-Grams
predictorWords <-
paste(newWordDF[(aLOWLen-2):aLOWLen, "word", drop=TRUE],
collapse="+")
if(predictorWords %in% predictorWordDF$word) { #Found a 4-gram match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=4,
sourceAlgo="4"))
}
#3-Grams, Skip-1A
predictorWords <- paste0(newWordDF[(aLOWLen-2), "word", drop=TRUE],"+",
newWordDF[aLOWLen, "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="31A"))
}
#3-Grams, Skip-1B
predictorWords <- paste0(newWordDF[(aLOWLen-2), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="31B"))
}
} else {
if(nrow(predictionDF) > 0) {
predictionDF <- predictionDF[order(predictionDF$power,
decreasing = TRUE),,drop=FALSE]
predictionDF <- predictionDF[1:min(noWordsToReturn,
nrow(predictionDF)),,drop=FALSE]
return(predictionDF)
} else {
return(data.frame(word=c(FALSE), stringsAsFactors = FALSE))
}
}
# First find 5-Gram; 4-Gram, Skip-1; 3-Gram, Skip-2 Matches
if(aLOWLen >= 4) {
#5-Grams
predictorWords <-
paste(newWordDF[(aLOWLen-3):aLOWLen, "word", drop=TRUE],
collapse="+")
if(predictorWords %in% predictorWordDF$word) { #Found a 5-gram match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=5,
sourceAlgo="5"))
}
#4-Grams, Skip-1A
predictorWords <- paste0(newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
paste(newWordDF[(aLOWLen-1):aLOWLen, "word",
drop=TRUE],
collapse="+"))
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="41A"))
}
#4-Grams, Skip-1B
predictorWords <- paste0(paste(newWordDF[(aLOWLen-3):(aLOWLen-2),
"word", drop=TRUE],
collapse="+"), "+",
newWordDF[aLOWLen, "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="41B"))
}
#4-Grams, Skip-1C
predictorWords <- paste(newWordDF[(aLOWLen-3):(aLOWLen-1), "word",
drop=TRUE], collapse="+")
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="41C"))
}
#3-Grams, Skip-2A
predictorWords <- paste0(newWordDF[aLOWLen-3, "word", drop=TRUE], "+",
newWordDF[aLOWLen, "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=1,
sourceAlgo="32A"))
}
#3-Grams, Skip-2B
predictorWords <- paste0(newWordDF[aLOWLen-3, "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=1,
sourceAlgo="32B"))
}
#3-Grams, Skip-2C
predictorWords <- paste0(newWordDF[aLOWLen-3, "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=1,
sourceAlgo="32C"))
}
} else {
if(nrow(predictionDF) > 0) {
#print(predictionDF)
predictionDF <- predictionDF[order(predictionDF$power,
decreasing = TRUE),,drop=FALSE]
predictionDF <- predictionDF[1:min(noWordsToReturn,
nrow(predictionDF)),,drop=FALSE]
return(predictionDF)
} else {
return(data.frame(word=c(FALSE), stringsAsFactors = FALSE))
}
}
# First find 5-Gram, Skip-1; 4-Gram, Skip-2 Matches
if(aLOWLen >= 5) {
#5-Grams, Skip-1A
predictorWords <- paste0(newWordDF[aLOWLen-4, "word", drop=TRUE], "+",
paste(newWordDF[(aLOWLen-2):aLOWLen, "word",
drop=TRUE],
collapse="+"))
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=4,
sourceAlgo="51A"))
}
#5-Grams, Skip-1B
predictorWords <- paste0(paste(newWordDF[(aLOWLen-4):(aLOWLen-3),
"word", drop=TRUE],
collapse="+"), "+",
paste(newWordDF[(aLOWLen-1):aLOWLen,
"word", drop=TRUE],
collapse="+"))
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=4,
sourceAlgo="51B"))
}
#5-Grams, Skip-1C
predictorWords <- paste0(newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE], "+",
newWordDF[aLOWLen, "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a 5-gram match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=4,
sourceAlgo="51C"))
}
#5-Grams, Skip-1D
predictorWords <- paste0(newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
paste(newWordDF[(aLOWLen-2):(aLOWLen-1),
"word", drop=TRUE],
collapse="+"))
if(predictorWords %in% predictorWordDF$word) { #Found a 5-gram match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=4,
sourceAlgo="51D"))
}
#4-Grams, Skip-2A
predictorWords <- paste0(newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
paste(newWordDF[(aLOWLen-1):aLOWLen, "word",
drop=TRUE],
collapse="+"))
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="42A"))
}
#4-Grams, Skip-2B
predictorWords <- paste0(newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE], "+",
newWordDF[aLOWLen, "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="42B"))
}
#4-Grams, Skip-2C
predictorWords <- paste0(newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE],"+",
newWordDF[(aLOWLen-1), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="42C"))
}
#4-Grams, Skip-2D
predictorWords <- paste0(newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[aLOWLen, "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="42D"))
}
#4-Grams, Skip-2E
predictorWords <- paste0(newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="42E"))
}
#4-Grams, Skip-2F
predictorWords <- paste0(newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=2,
sourceAlgo="42F"))
}
} else {
if(nrow(predictionDF) > 0) {
predictionDF <- predictionDF[order(predictionDF$power,
decreasing = TRUE),,drop=FALSE]
predictionDF <- predictionDF[1:min(noWordsToReturn,
nrow(predictionDF)),,drop=FALSE]
return(predictionDF)
} else {
return(data.frame(word=c(FALSE), stringsAsFactors = FALSE))
}
}
# First find 5-Gram, Skip-2 Matches
if(aLOWLen >= 6) {
#5-Grams, Skip-2A
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-0), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52A"))
}
#5-Grams, Skip-2B
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-0), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52B"))
}
#5-Grams, Skip-2C
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-0), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52C"))
}
#5-Grams, Skip-2D
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52D"))
}
#5-Grams, Skip-2E
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-0), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52E"))
}
#5-Grams, Skip-2F
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-0), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52F"))
}
#5-Grams, Skip-2G
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52G"))
}
#5-Grams, Skip-2H
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-0), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52H"))
}
#5-Grams, Skip-2I
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-1), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52I"))
}
#5-Grams, Skip-2J
predictorWords <- paste0(newWordDF[(aLOWLen-5), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-4), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-3), "word", drop=TRUE], "+",
newWordDF[(aLOWLen-2), "word", drop=TRUE])
if(predictorWords %in% predictorWordDF$word) { #Found a match
predictionDF <- rbind(predictionDF,
addToPredictionDF(predictorWords,
mCWordSpMatrix,
predictorWordDF,
predictedWordDF,
noWordsToReturn,
multiplier=3,
sourceAlgo="52J"))
}
} else {
if(nrow(predictionDF) > 0) {
predictionDF <- predictionDF[order(predictionDF$power,
decreasing = TRUE),,drop=FALSE]
predictionDF <- predictionDF[1:min(noWordsToReturn,
nrow(predictionDF)),,drop=FALSE]
return(predictionDF)
} else {
return(data.frame(word=c(FALSE), stringsAsFactors = FALSE))
}
}
# ### PREDICTION - PRIORITY 2 - Permutations of 4/3-grams match - with no skips
#
# combWordList <- c()
#
# #First do for 4-grams
# if(aLOWLen == 3) {
# #aLineOfWords contains list of words
# combWordPermList <- permn(aLineOfWords)
# for(aPerm in combWordPermList) {
# #print(aPerm)
# newCombList <- paste(aPerm, collapse="+")
# #if(newCombList == origWordList) next #skip if already checked in orig. order
# #print(newCombList)
# combWordList <- append(combWordList, c(newCombList))
# }
# }
#
# #Do the same for 3-grams
# if(aLOWLen >= 2) {
# #aLineOfWords contains list of words - take last two words and permutate
# combWordPermList <- permn(aLineOfWords[aLOWLen-(1:0)])
# #origWordList <- paste0(newWordList[1:3], collaps="+")
# for(aPerm in combWordPermList) {
# #print(aPerm)
# newCombList <- paste(aPerm, collapse="+")
# #if(newCombList == origWordList) next #skip if already checked in orig. order
# #print(newCombList)
# combWordList <- append(combWordList, c(newCombList))
# }
# }
#
# #Now see if any of permutations are in list of predictors
# for(predictorWords in combWordList) {
# if(predictorWords %in% predictorWordDF$word) { #Found a 4/3-gram permutation match
# predictionDF <- rbind(predictionDF,
# addToPredictionDF(predictorWords,
# mCWordSpMatrix,
# predictorWordDF,
# predictedWordDF,
# noWordsToReturn))
# }
# }
#
# #Return result if any were found with 4/3-gram permutations
# lengthToKeep <- min(nrow(predictionDF), noWordsToReturn)
# if(lengthToKeep > 0) {
# predictionDF <- predictionDF[seq(1:lengthToKeep), , drop=FALSE]
# }
# if(nrow(predictionDF) > 0) {
# return(predictionDF)
# }
if(nrow(predictionDF) > 0) {
predictionDF <- predictionDF[order(predictionDF$power,
decreasing = TRUE),,drop=FALSE]
predictionDF <- predictionDF[1:min(noWordsToReturn,
nrow(predictionDF)),,drop=FALSE]
return(predictionDF)
} else {
return(data.frame(word=c(FALSE), stringsAsFactors = FALSE))
}
}
|
805ed55e8a32785c0dcbfce9ad02cac15c0cb50d | 7f34c279bd937c9930490ba3b0f6290768d1cad5 | /DataAnalysis/analysis.r | 01680b33035245cf39fb6ac3525c1589a107743b | [] | no_license | Hayels406/visualisations | acf0af05bb0fadabdd210e3f762246f00ed76392 | 4f7b7712d34cf51eef289b0bc1c1123a5b7fb80b | refs/heads/master | 2021-01-19T07:33:37.477379 | 2017-04-07T15:45:38 | 2017-04-07T15:45:38 | 87,553,591 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 823 | r | analysis.r | library(reshape2)
library(ggplot2)
# Compare activity-frequency distributions between the 20- and
# 100-individual datasets using overlaid density plots, one per behaviour
# column (running, walking, idle).
data20 <- read.csv("20_Individual_152209_06042017.csv")
# NOTE(review): data50 is loaded but never used below -- either add the
# 50-individual group to the plots or drop this read.
data50 <- read.csv("50_Individual_151445_06042017.csv")
data100 <- read.csv("100_Individual_150952_06042017.csv")
# Stack the "running" frequencies of both groups with a group label N.
dataRunning <- data.frame(Frequency = c(data20$running,data100$running), N = (c(rep("20", nrow(data20)), rep("100", nrow(data100)))))
ggplot(dataRunning, aes(x = Frequency, fill = N)) + geom_density(alpha=.3)
# Same comparison for the "walking" behaviour.
dataWalking <- data.frame(Frequency = c(data20$walking,data100$walking), N = (c(rep("20", nrow(data20)), rep("100", nrow(data100)))))
ggplot(dataWalking, aes(x = Frequency, fill = N)) + geom_density(alpha=.3)
# And for "idle".
dataIdle <- data.frame(Frequency = c(data20$idle,data100$idle), N = (c(rep("20", nrow(data20)), rep("100", nrow(data100)))))
ggplot(dataIdle, aes(x = Frequency, fill = N)) + geom_density(alpha=.3) |
1c1b1dfef74bd8fb63329ffbb8676d7eed7ae309 | 1dc906f08967ae9c1afcf1e4f67cf09ea64360b5 | /man/byType.Rd | 26296920a6a41648b6f0b3db773897cad407ebc1 | [] | no_license | xinBrueck/raking | ee9635d7feeb673184ffe4cfb094ea67c8a85451 | 9c5ae243bebc4f90c895dd85333ae2ed9b71f717 | refs/heads/master | 2020-03-18T14:35:58.618699 | 2019-08-04T20:07:06 | 2019-08-04T20:07:06 | 134,856,439 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,172 | rd | byType.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/byType.R
\name{byType}
\alias{byType}
\title{Return indicator matrix}
\usage{
byType(dat, types, reqs)
}
\arguments{
\item{dat}{a dataframe representing a single categorical or numerical group to be processed}
\item{types}{representing the types of the input variable, possible values: "factor", "numeric","logic".
if factor: recode the factors into different groups
if numeric: cut the numeric variables into factors
if logic(indicator matrix): group different factors if needed}
\item{reqs}{rules as string for recode/cut/group based on the different types of input variable
if factor: examples as "c(0)='No';c(1,2,3,4)='Yes'", recode 0 into 'No', recode 1,2,3,4 into 'Yes'
if numeric: input the cuts
if logic(indicator matrix): input the categories to be grouped together}
}
\value{
A dataframe of the indicator matrix
}
\description{
This function takes in a dataframe representing a single categorical/numerical group.
Based on the type of data the input dataframe represents,
it performs the appropriate procedure (such as cut or recode) and returns an indicator matrix.
}
|
cdb8b66ea73c03737dabc7b90c62ba2b64e434fd | 1fb22fa3ac1f5a78e0cce797d6d7bdcf78d8236f | /man/plot_square_adj_mat.Rd | 58db41b89eeea97b73e27b4d9d5261a1178eb730 | [] | no_license | scramblingbalam/graphclass | 2fb0a77d6fa984a9dd6c56b39487ea43a8cbda34 | 930d2ff47319e5fd5c70c6d73afdea2763eee3ab | refs/heads/master | 2021-04-12T04:21:09.096338 | 2018-02-23T23:01:18 | 2018-02-23T23:01:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 835 | rd | plot_square_adj_mat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Plots.R
\name{plot_square_adj_mat}
\alias{plot_square_adj_mat}
\title{Plot a vectorized adjacency matrix with cell divisions}
\usage{
plot_square_adj_mat(edge_values, communities = NULL, type = "real",
community_labels = c(1:13, -1), main = "", cut_at, sel_cells)
}
\arguments{
\item{edge_values}{Vectorized adjacency matrix. Only undirected networks are supported for now.}
\item{communities}{Community of each node}
\item{type}{Either "real" for valued networks, "prob" for [0,1] valued networks or "prob_cells" for equal value on each cell}
\item{community_labels}{Name of each community that will appear on the plot.}
\item{main}{Title of the plot}
}
\description{
Plot a vectorized adjacency matrix with cell divisions
}
|
2162d4f6de2f0c8c06c3923dc821769202aefd73 | 58eff71efd36ab75fa078fecb638610124395570 | /man/fitNBtbCl.Rd | 89acc479e020e063aab32cae18061fe71161af5a | [] | no_license | dgrun/RaceID3_StemID2_package | ef3111a926c80c218f478cc7f54d24ce0cc267eb | 3747bac1e905d434bfec16c4b60dc8534994c6c1 | refs/heads/master | 2023-08-17T05:28:29.250319 | 2023-08-10T17:07:01 | 2023-08-10T17:07:01 | 140,938,971 | 49 | 9 | null | 2020-02-29T09:32:45 | 2018-07-14T11:00:26 | R | UTF-8 | R | false | true | 1,516 | rd | fitNBtbCl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarID_functions.R
\name{fitNBtbCl}
\alias{fitNBtbCl}
\title{Function for fitting a negative binomial noise model of technical and biological variability}
\usage{
fitNBtbCl(z, mu, rt, gamma = 2, x0 = 0.1, lower = 0, upper = 100)
}
\arguments{
\item{z}{Transcript count matrix with cells as columns and genes as rows.}
\item{mu}{Vector of mean expression values across cells in \code{z}.}
\item{rt}{Vector of dispersion parameters explaining global cell-to-cell variability of transcript counts across cells in \code{z}.}
\item{gamma}{Positive real number. Scale parameter of the Cauchy prior. Default is 2.}
\item{x0}{Real number greater than or equal to zero. Location parameter of the Cauchy prior.}
\item{lower}{Real number greater than or equal to zero. Lower bound for the maximum a posteriori inference of the biological noise. Default is 0.}
\item{upper}{Real number greater than or equal to zero. Upper bound for the maximum a posteriori inference of the biological noise. Default is 100.}
}
\value{
Vector of biological noise parameters across cells in \code{z}.
}
\description{
This function fits a negative binomial model to transcript counts of a group of cells thereby deconvoluting variability into sampling noise, global cell-to-cell variability of transcript counts, and residual variability, which corresponds to biological noise. Local mean and and global cell-to-cell variability of transcript counts are pre-computed arguments.
}
|
a5528ac7a4bf9e6b1e90e019949164fe32b91190 | 40e6ace678704f0de91ef25171e6aa1d28ecfe8e | /tests/testthat/test_hfdr.R | 3d8e4a113b7458426db2fdee23cb463e2b517f72 | [] | no_license | krisrs1128/structSSI | ac7508940bee2b3be597a52d39a18fd41658e4bb | 6aa603c1947032aeb24621567f7fc2a28ceac3c3 | refs/heads/master | 2020-12-24T15:04:58.439533 | 2020-07-12T17:46:10 | 2020-07-12T17:46:10 | 6,913,657 | 6 | 2 | null | 2020-05-20T23:21:10 | 2012-11-29T02:08:33 | R | UTF-8 | R | false | false | 2,984 | r | test_hfdr.R | library('ape')
library('igraph')
set.seed(130229)
# Smoke test on a random 10-tip tree (19 node hypotheses) with five clearly
# significant p-values.  Checks the S4 return class and that the first
# row's columns 3 and 4 of p.vals agree (presumably unadjusted vs.
# adjusted p-value -- confirm against the hypothesesTree documentation).
test_that("hfdr returns", {
  tree <- as.igraph(rtree(10))
  V(tree)$name <- paste("hyp", c(1:19))
  tree.el <- get.edgelist(tree)
  unadjp <- c(runif(5, 0, 0.01), runif(14, 0, 1))
  names(unadjp) <- paste("hyp", c(1:19))
  # The hierarchical adjustment procedure applied to this class
  adjust <- hFDR.adjust(unadjp, tree.el)
  expect_s4_class(adjust, "hypothesesTree")
  expect_lt(adjust@p.vals[1, 4], 1e-2)
  expect_equal(adjust@p.vals[1, 4], adjust@p.vals[1, 3])
})
# summary() of an adjusted tree should print a tip-discovery count and
# plot() should draw without raising any condition.
test_that("hfdr plots", {
  tree <- as.igraph(rtree(10))
  V(tree)$name <- paste("hyp", c(1:19))
  tree.el <- get.edgelist(tree)
  unadjp <- c(runif(5, 0, 0.01), runif(14, 0, 1))
  names(unadjp) <- paste("hyp", c(1:19))
  adjust <- hFDR.adjust(unadjp, tree.el)
  expect_output(summary(adjust), "Number of tip discoveries")
  expect_silent(plot(adjust))
})
# With every p-value at 1 nothing can be rejected: the adjustment should
# warn, and the root hypothesis should be marked "-" (not significant).
test_that("returns with warning when nothing significant", {
  tree <- as.igraph(rtree(10))
  V(tree)$name <- paste("hyp", c(1:19))
  tree.el <- get.edgelist(tree)
  unadjp <- rep(1, 19)
  names(unadjp) <- paste("hyp", c(1:19))
  expect_warning(hFDR.adjust(unadjp, tree.el))
  expect_equal(hFDR.adjust(unadjp, tree.el)@p.vals[1, "significance"], "-")
})
# Row names of the p.vals slot should be plain sequential indices rather
# than hypothesis names.
test_that("hfdr has no rownames", {
  tree <- as.igraph(rtree(10))
  V(tree)$name <- paste("hyp", c(1:19))
  tree.el <- get.edgelist(tree)
  unadjp <- c(runif(5, 0, 0.01), runif(14, 0, 1))
  names(unadjp) <- paste("hyp", c(1:19))
  adjust <- hFDR.adjust(unadjp, tree.el)
  expect_equal(rownames(adjust@p.vals), as.character(seq_len(19)))
})
# Names on the p-value vector must match the tree's node names exactly;
# a mismatch is an error, not a silent reorder.
test_that("throws error on mismatched names", {
  tree <- as.igraph(rtree(10))
  V(tree)$name <- paste("hyp", c(1:19))
  tree.el <- get.edgelist(tree)
  unadjp <- c(runif(5, 0, 0.01), runif(14, 0, 1))
  names(unadjp) <- paste("different", c(1:19))
  expect_error(hFDR.adjust(unadjp, tree.el))
})
# Integer node names should work as well as character ones, with
# hypothesisIndex enumerating them in order.
test_that("Works when names are ints", {
  tree <- as.igraph(rtree(10))
  V(tree)$name <- seq_len(19)
  tree.el <- get.edgelist(tree)
  unadjp <- c(runif(5, 0, 0.01), runif(14, 0, 1))
  names(unadjp) <- seq_len(19)
  adjust <- hFDR.adjust(unadjp, tree.el)
  expect_equal(seq_len(19), adjust@p.vals$hypothesisIndex)
})
# Each hypothesisName should appear at most once in p.vals (no duplicated
# rows for a single hypothesis).
test_that("names not constant", {
  tree <- as.igraph(rtree(10))
  V(tree)$name <- seq_len(19)
  tree.el <- get.edgelist(tree)
  unadjp <- c(runif(5, 0, 0.01), runif(14, 0, 1))
  names(unadjp) <- seq_len(19)
  adjust <- hFDR.adjust(unadjp, tree.el)
  expect_lte(max(table(adjust@p.vals$hypothesisName)), 1)
})
# Same smoke test as "hfdr returns" above, but on a larger tree (50 tips,
# 99 hypotheses) to exercise the adjustment on a deeper hierarchy.
# Fix: the description previously duplicated the first test's name
# ("hfdr returns"), making the two indistinguishable in test reports.
test_that("hfdr returns on a larger tree", {
  tree <- as.igraph(rtree(50))
  V(tree)$name <- paste("hyp", c(1:99))
  tree.el <- get.edgelist(tree)
  unadjp <- c(runif(10, 0, 0.01), runif(89, 0, 1))
  names(unadjp) <- paste("hyp", c(1:99))
  # The hierarchical adjustment procedure applied to this class
  adjust <- hFDR.adjust(unadjp, tree.el)
  expect_s4_class(adjust, "hypothesesTree")
  expect_lt(adjust@p.vals[1, 3], 1e-2)
  expect_equal(adjust@p.vals[1, 4], adjust@p.vals[1, 3])
})
|
7559b87df4398b5d623885e87a51410577ca8e0b | 62ac5381191b3b231af0da90d7a8fdd6447a7d27 | /Demographic Analysis/Demographic_Analysis.R | d9ca8bd50465cff69b0fb8d0d7421b749e6cf36d | [] | no_license | ksenkaya/R-Projects | 0ca8104948ea759f6b124bbbe72bb06b7db080d4 | 3512592c183a3f623d367c0273c21bca2094acaf | refs/heads/master | 2021-10-16T04:47:33.493379 | 2019-02-07T21:54:04 | 2019-02-07T21:54:04 | 113,780,220 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,919 | r | Demographic_Analysis.R |
read.csv(file.choose())
stats <- read.csv(file.choose())
stats
head(stats)
str(stats)
runif(stats)
rnorm(stats)
stats$Internet.users
stats$Internet.users[2]
stats[,1]
# Filtering
filter <- stats$Internet.users < 2
stats[filter,]
stats[stats$Birth.rate < 40,]
stats[stats$Birth.rate > 40 & stats$Internet.users < 2,]
stats[stats$Income.Group == "High income",]
levels(stats$Country.Name)
stats[stats$Country.Name == "Malta",]
# qplot()
library(ggplot2)
?qplot
qplot(data=stats, x=Internet.users)
qplot(data=stats, x=Income.Group, y=Birth.rate)
qplot(data=stats, x=Income.Group, y=Birth.rate, col=I(4), size=I(3))
qplot(data=stats, x=Income.Group, y=Birth.rate, geom = "boxplot")
qplot(data = stats, x=Internet.users, y=Birth.rate, size=I(4), color=I("red"))
qplot(data = stats, x=Internet.users, y=Birth.rate, size=I(5), color=Income.Group)
# Creating Data Frames
mydf <- data.frame(Countries_2012_Dataset, Codes_2012_Dataset,
Regions_2012_Dataset)
#head(mydf)
#colnames(mydf) <- c("Country", "Code", "Region")
rm(mydf)
mydf <- data.frame(Counry=Countries_2012_Dataset, Code=Codes_2012_Dataset, Region=Regions_2012_Dataset)
head(mydf)
tail(mydf)
summary(mydf)
# Merging Data Frames
head(mydf)
tail(mydf)
merged <- merge(stats, mydf, by.x = "Country.Code", by.y = "Code")
head(merged)
merged$Country <- NULL
str(merged)
tail(merged)
# Visualizing with new Split
qplot(data=merged, x=Internet.users, y=Birth.rate)
qplot(data=merged, x=Internet.users, y=Birth.rate,
color=Region)
# Shapes
qplot(data=merged, x=Internet.users, y=Birth.rate,
color=Region, size=I(5), shape=I(2))
# Transparency
qplot(data=merged, x=Internet.users, y=Birth.rate,
color=Region, size=I(5), shape=I(19),
alpha=I(0.6))
# Title
qplot(data=merged, x=Internet.users, y=Birth.rate,
col=Region, size=I(5), shape=I(19),
alpha=I(0.6),
main="Birth Rate vs Internet Users")
|
906edafa384c2ce91e6fb4d4c0fe2104453e4db0 | fcfc1ced66aef91e1b5b27782bd6829b3a431ced | /R/queue.R | 6065f80edb79211d54c612709fbbfa5556fd2f6b | [
"MIT"
] | permissive | otoomet/ipc | 6df7d227f1c4b522365dd894f733dfd8cd6efb5b | 23e352e2dfd03f70e8d38e0286e1ebc2950bec4f | refs/heads/master | 2020-09-01T19:12:38.876370 | 2019-11-01T17:45:29 | 2019-11-01T17:45:29 | 219,034,269 | 0 | 0 | NOASSERTION | 2019-11-01T17:42:20 | 2019-11-01T17:42:20 | null | UTF-8 | R | false | false | 1,661 | r | queue.R | #' A Class containing a producer and consumer
#'
#' Bundles the producer and consumer attached to one shared message source
#' so they can be created together and torn down together via destroy().
#'
#' @format NULL
#' @usage NULL
#' @export
Queue <- R6Class(
  "Queue",
  private = list(
    # Shared transport used by both producer and consumer; released in
    # destroy().
    source=NULL
  ),
  public = list(
    # The writing and reading ends of the queue, set by initialize().
    producer=NULL,
    consumer=NULL,
    # Store the source privately and expose the producer/consumer pair.
    initialize = function(source, prod, cons){
      private$source <- source
      self$producer <- prod
      self$consumer <- cons
    },
    # Stop the consumer and free the underlying source.  Call this when
    # the queue is no longer needed; shinyQueue() registers it to run
    # automatically at session end.
    destroy = function(){
      self$consumer$stop()
      private$source$destroy()
    }
  )
)
#' Create a Queue object
#' @param source The source for reading and writing the queue
#' @param producer The producer for the source
#' @param consumer The consumer of the source
#' @return A \code{Queue} object wrapping \code{source}, \code{producer}
#'   and \code{consumer}.
#' @details The default \code{producer} and \code{consumer} are built from
#'   \code{source}, so all three components share the same transport.  The
#'   caller is responsible for calling \code{destroy()} on the result; see
#'   \code{shinyQueue} for a session-managed variant.
#' @aliases Queue
#' @export
queue <- function(source = defaultSource()$new(),
                  producer = Producer$new(source),
                  consumer = Consumer$new(source)){
  Queue$new(source, producer, consumer)
}
#' Create a Queue object for use with Shiny
#' @param source The source for reading and writing the queue
#' @param producer The producer for the source
#' @param consumer The consumer of the source
#' @param session A Shiny session
#' @details
#' Creates a Queue object for use with shiny, backed by
#' ShinyTextSource, ShinyProducer and ShinyConsumer objects
#' by default. The object will be cleaned up and destroyed on
#' session end.
#' @export
shinyQueue <- function(source = defaultSource()$new(),
                       producer = ShinyProducer$new(source),
                       consumer = ShinyConsumer$new(source),
                       session=shiny::getDefaultReactiveDomain()){
  q <- Queue$new(source, producer, consumer)
  # Tie the queue's lifetime to the Shiny session: destroy() runs when the
  # session ends.  With no session (e.g. outside Shiny), the caller must
  # destroy the queue explicitly.
  if(!is.null(session))
    session$onEnded(q$destroy)
  q
}
|
c6bfba5ed0c4c412e2c87138092481befdaf59db | 6f6ba7b0b8c82b1e247a0b58de4af1e9642e25b5 | /plot4.R | 4ef6b6d315572d15236e91fe567efd82cf73832a | [] | no_license | juliajmg/ExData_Plotting1 | 992cecbfa8f88c44f6e64931aeedcc1b77a4f179 | 2989559228bfebbf28e76dfa3297643769523df5 | refs/heads/master | 2021-01-20T19:49:29.201195 | 2016-01-24T17:18:25 | 2016-01-24T17:18:25 | 50,297,531 | 0 | 0 | null | 2016-01-24T16:56:58 | 2016-01-24T16:56:57 | null | UTF-8 | R | false | false | 1,501 | r | plot4.R | ## Read the database and asign it to a variable.
## Read the household power consumption data and assign it to a variable.
## Fix: the raw file encodes missing values as "?", so declare na.strings;
## without it the affected columns are read as character (or, pre-R 4.0,
## factor) and downstream plotting breaks.  Also spell out `header`
## instead of relying on partial matching of `head`.
power <- read.table("~/DataScienceSpecialization/4ExploratoryDataAnalysis/household_power_consumption.txt",
                    sep = ";", na.strings = "?", stringsAsFactors = FALSE,
                    header = TRUE)
## Subset the two days needed for plotting.
epower <- subset(power, Date == "1/2/2007" | Date == "2/2/2007")
## Combine Date and Time into a single date-time column.
## as.POSIXct is used instead of strptime(): POSIXlt columns inside a data
## frame are fragile, and both classes plot identically on a time axis.
epower$Date <- as.POSIXct(paste(epower$Date, epower$Time),
                          format = "%d/%m/%Y %H:%M:%S")
## Ensure every plotted measurement column is numeric (these become no-ops
## once na.strings makes the columns numeric at read time, but are kept as
## a safety net).  Fix: Sub_metering_3 is plotted below but was never
## converted in the original script.
epower$Global_active_power <- as.numeric(epower$Global_active_power)
epower$Sub_metering_1 <- as.numeric(epower$Sub_metering_1)
epower$Sub_metering_2 <- as.numeric(epower$Sub_metering_2)
epower$Sub_metering_3 <- as.numeric(epower$Sub_metering_3)
## PLOT 4: 2x2 panel -- global active power, voltage, energy sub-metering,
## and global reactive power across the two days.
png("plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2,2))
with (epower,plot(Date, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = ""))
with (epower, plot(Date, Voltage, type = "l", ylab = "Voltage", xlab = "datetime"))
plot(epower$Date, epower$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(epower$Date, epower$Sub_metering_2, type = "l", col = "red")
lines(epower$Date, epower$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lwd = 1, cex = 0.75)
with (epower, plot(Date, Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime"))
dev.off()
|
486c68bb3b0fa3018ea656f934208eb4e0d364d4 | bafa382e2e631b0e169aa35297cc24dc698de99d | /man/simpleFunction.Rd | 1282298684641fdde2533ef3586356ebcfd127d5 | [] | no_license | doktorschiwago/Rllvm2 | 7ec81e97e43ee846a3d20ccd30401d13a5dc972a | a6402df6f7865eadfa02f0622a9526cce75961cb | refs/heads/master | 2021-01-21T14:04:20.559087 | 2015-04-10T22:56:50 | 2016-05-17T08:03:26 | 45,631,996 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 817 | rd | simpleFunction.Rd | \name{simpleFunction}
\alias{simpleFunction}
\title{Define the skeleton of an LLVM Function}
\description{
This function creates an LLVM \code{Function}
object and creates the initial \code{Block} and
populates it with local variables that access the parameter values.
}
\usage{
simpleFunction(.name, retType = VoidType, ..., .types = list(...), mod = Module())
}
\arguments{
\item{.name}{the name of the function/routine}
\item{retType}{the return type of the routine}
\item{\dots}{individual type objects for the parameters of the routine}
\item{.types}{the parameter types specified as a single object}
\item{mod}{the module in which to create the function}
}
%\value{}
\references{
LLVM Documentation \url{http://llvm.org/docs/}
}
\author{
DTL
}
%\seealso{}
%\examples{}
\keyword{programming}
|
4287d8b8f906ad4cb21fc55fab634b1ef0923149 | e64dd6fd3ad476ccbd0f006d2d77133f6ce67f82 | /R/internal.RNG_P_NN.R | 915725b8cd07ba1e635f82f58809ab3801f34c67 | [] | no_license | cran/miscor | 7e95802f7ae124437d9c31dc74f89237c899bc61 | 890fea5f9abd05671897d28e6888c67e0b0b0538 | refs/heads/master | 2021-01-17T01:06:24.750864 | 2017-04-02T14:27:10 | 2017-04-02T14:27:10 | 65,773,844 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,957 | r | internal.RNG_P_NN.R | ##########################################################################################################
#
# miscor: Miscellaneous Functions for the Correlation Coefficient
#
# Internal function: RNG_P_NN
#
# Function copied from the PoisNonNor package <cran.r-project.org/web/packages/PoisNonNor>
#
# Draws 'norow' observations of a mixed random vector with target
# correlation matrix 'cmat': the first n1 components are Poisson with
# rates 'lamvec', the remaining n2 components are continuous non-normal
# variables whose (skewness, kurtosis) pairs are the rows of 'rmat',
# generated via Fleishman power polynomials.  'mean.vec' and
# 'variance.vec' optionally shift/rescale the continuous components.
# Returns a norow x (n1 + n2) numeric matrix.
internal.RNG_P_NN <- function(lamvec = NULL, cmat, rmat = NULL, norow, mean.vec = NULL,
                              variance.vec = NULL) {
  # Number of Poisson (n1) and continuous non-normal (n2) components.
  n1 <- ifelse(is.null(lamvec), 0, length(lamvec))
  n2 <- ifelse(is.null(rmat), 0, dim(rmat)[1])
  # ---- input validation ----
  if ((!is.null(lamvec)) & (sum(lamvec > 0) < n1)) {
    stop("Specified lambda should be positive \n")
  }
  if ((!is.null(rmat)) & (dim(rmat)[2] != 2)) {
    stop("column of rmat must be 2\n")
  }
  # Fleishman feasibility: kurtosis must satisfy kurt >= skew^2 - 2.
  if ((!is.null(rmat)) & (sum(rmat[, 2] >= (rmat[, 1]^2 - 2)) < n2)) {
    stop("Specified skewness (skew) and kurtosis (kurt) parameter should be kurt >= (skew^2 - 2) \n")
  }
  if ((n1 + n2) != dim(cmat)[1]) {
    stop("Correlation matrix dimension is not consistent with number of variables!\n")
  }
  # Intermediate correlation matrix for the underlying normals; starts as
  # the identity and is replaced once the target correlations validate.
  cmat_N_N <- diag(1, (n1 + n2))
  pmat <- NULL
  if (n2 != 0) {
    # Fleishman polynomial coefficients, one row per continuous variable.
    pmat <- internal.Param.fleishman(rmat)
  }
  if (internal.Validate.correlation(cmat, pmat, lamvec)) {
    cmat_N_N <- internal.intercor.all(cmat, pmat, lamvec)
  }
  # Correlated standard normals that drive all components.
  X <- internal.rmvnorm(n = norow, rep(0, dim(cmat)[1]), cmat_N_N)
  data <- matrix(NA, nrow = norow, ncol = dim(cmat)[1])
  if (n1 > 0) {
    # Poisson components via the inverse-CDF transform of the normals.
    data[, 1:n1] <- t(qpois(t(pnorm(X[, 1:n1])), lamvec))
  }
  if (n2 > 0) {
    for (i in (n1 + 1):(n1 + n2)) {
      j <- i - n1
      # Fleishman cubic transform: a + b*Z + c*Z^2 + d*Z^3.
      data[, i] <- pmat[j, 1] + pmat[j, 2] * X[, i] + pmat[j, 3] * X[, i] * X[, i] + pmat[j, 4] * X[, i] * X[, i] * X[, i]
      if (!is.null(variance.vec)) {
        data[, i] <- data[, i] * sqrt(variance.vec[j])
      }
      if (!is.null(mean.vec)) {
        data[, i] <- data[, i] + mean.vec[j]
      }
    }
  }
  return(data)
}
|
a8451441cb2bb27d5bcd6e8524c28644584e9589 | a54e7ab7b2262c3689c5d7beeadbbf8756a08020 | /gradeg.r | 66fc4f9702b550b13bba37198fb3bdd902041fd2 | [] | no_license | ccollicutt/fio_scripts | 79b2adcc68d9c38f39d4816063afab91f9bf3dd2 | 7a24616556af81683634c1032b2cb81f388c6454 | refs/heads/master | 2021-01-18T05:16:27.356816 | 2012-09-19T06:23:35 | 2012-09-19T06:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,420 | r | gradeg.r |
dir <- "C:\\Temp\\"
m <- read.csv("data_emc.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("data_pg8.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("data_pg512.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("data_ssd.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("data_skytap.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("data_dtv.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("data_pharos.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("ptsmt.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("data_mlna.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
m <- read.csv("data_phs.csv")
file <- paste(dir,m$system[1],"_grade_",".png",sep="")
png(filename=file)
chart_grades(m)
dev.off()
|
8243541900823308fba8a89995210063bedd25ea | 6ea2b0e4c13776337ee422d8752800bf4ee52888 | /man/heatmap.send.Rd | 7bfd2114486a03835a6f10925d58ef6476e93865 | [] | no_license | lshep/sendplot | fda0e0eabdb6ca62a139240098ce8a986157611d | 13456f72af2bed0b44bf4490a356f566eb81199f | refs/heads/master | 2021-07-25T21:36:28.892753 | 2017-11-06T18:53:39 | 2017-11-06T18:53:39 | 109,736,411 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,420 | rd | heatmap.send.Rd | \name{heatmap.send}
\alias{heatmap.send}
\title{INTERACTIVE HEATMAP}
\description{
This function is a wrapper for the R stats package heatmap. This will
create an interactive heatmap image. NOTE: The majority of the code for
this function is verbatim from the R package stats heatmap
function. This function was designed to work as a wrapper to utilize
the same functionality and plotting as the heatmap function with
sendplot's interactive functionality.
}
\usage{
heatmap.send(x,Rowv = NULL,
Colv = if (symm) "Rowv" else NULL,
distfun = dist,hclustfun = hclust,
reorderfun = function(d,w) reorder(d, w),
add.expr,symm = FALSE,
revC = identical(Colv,"Rowv"),
scale = c("row", "column", "none"),
na.rm = TRUE, margins = c(5, 5),
ColSideColors,RowSideColors,
MainColor = heat.colors(12),
cexRow = 0.2 + 1/log10(nr),
cexCol = 0.2 + 1/log10(nc),
labRow = NULL,labCol = NULL,
main = NULL,xlab = NULL,ylab = NULL,
keep.dendro = FALSE,
verbose = getOption("verbose"),
x.labels=NA,y.labels=NA,xy.labels=NA,
x.links=NA, y.links=NA,
xy.links=NA,asLinks=NA,
x.images=NA, y.images=NA,
xy.images=NA,
spot.radius=5,source.plot=NA,
image.size="800x1100",
fname.root="test",dir="./", header="v3",
window.size = "800x1100",
...)
}
\arguments{
\item{x}{numeric matrix of the values to be plotted}
\item{Rowv}{determines if and how the row dendrogram should be computed
and reordered. Either a 'dendrogram' or a vector of values
used to reorder the row dendrogram or 'NA' to suppress any
row dendrogram (and reordering) or by default, 'NULL', see
heatmap argument}
\item{Colv}{determines if and how the column dendrogram should be
reordered. Has the same options as the 'Rowv' argument above
and additionally when 'x' is a square matrix, 'Colv =
"Rowv"' means that columns should be treated identically to
the rows}
\item{distfun}{function used to compute the distance (dissimilarity) between
both rows and columns. Defaults to 'dist'}
\item{hclustfun}{function used to compute the hierarchical clustering when
'Rowv' or 'Colv' are not dendrograms. Defaults to 'hclust'}
\item{reorderfun}{function(d,w) of dendrogram and weights for reordering the
row and column dendrograms. The default uses
'reorder.dendrogram'}
\item{add.expr}{expression that will be evaluated after the call to 'image'.
Can be used to add components to the plot}
\item{symm}{logical indicating if 'x' should be treated *symm*etrically;
can only be true when 'x' is a square matrix.}
\item{revC}{logical indicating if the column order should be 'rev'ersed
for plotting, such that e.g., for the symmetric case, the
symmetry axis is as usual}
\item{scale}{character indicating if the values should be centered and
scaled in either the row direction or the column direction,
or none. The default is '"row"' if 'symm' false, and
'"none"' otherwise}
\item{na.rm}{logical indicating whether 'NA''s should be removed}
\item{margins}{numeric vector of length 2 containing the margins (see
'par(mar= *)') for column and row names, respectively}
\item{ColSideColors}{(optional) character vector of length 'ncol(x)'
containing the color names for a horizontal side bar that may
be used to annotate the columns of 'x'}
\item{RowSideColors}{ (optional) character vector of length 'nrow(x)'
containing the color names for a vertical side bar that may
be used to annotate the rows of 'x'}
\item{MainColor}{color scale for values. Passed into 'image' function
as col argument}
\item{cexRow}{positive number, used as 'cex.axis' in for the row axis
labeling. The defaults currently only use number of rows}
\item{cexCol}{positive number, used as 'cex.axis' in for the column axis
labeling. The defaults currently only use number of columns}
\item{labRow}{character vectors with row labels to use;
these default to 'rownames(x)'}
\item{labCol}{character vectors with column labels to use;
these default to 'colnames(x)'}
\item{main}{main title; defaults to none}
\item{xlab}{x axis title; defaults to none}
\item{ylab}{y axis title; defaults to none}
\item{keep.dendro}{logical indicating if the dendrogram(s) should be kept as
part of the result (when 'Rowv' and/or 'Colv' are not NA)}
\item{verbose}{logical indicating if information should be printed}
\item{x.labels}{data frame of n x m which contains values relating to the x
axis of the heatmap plot. n should be equal to the second dimension of the x
argument.This information is displayed in the interactive plot
window. This may be left as NA.}
\item{y.labels}{data frame of n x m which contains values relating to the y
axis of the heatmap plot. n should be equal to the first dimension of the x
argument.This information is displayed in the interactive plot
window. This may be left as NA }
\item{xy.labels}{list of matricies. All matricies should be of n x m
where n is equal to the first dimension of the x argument and m is
equal to the second dimension of the x argument. This information is
displayed in the interactive plot window. This may be left NA}
\item{x.links}{data frame of n x m which contains web addresses
for links relating to the x axis of the heatmap plot. n should be
equal to the second dimension of the x argument. m columns contains
information regarding sample. This information is displayed as
hyperlinks in the interactive plot window. This may be left NA}
\item{y.links}{data frame of n x m which contains web addresses
for links relating to the y axis of the heatmap plot. n should be
equal to the first dimension of the x argument. This information is
displayed as hyperlinks in the interactive plot window. This may be
left as NA}
\item{xy.links}{list of matricies. All matricies should be of n x m
where n is equal to the first dimension of the x argument and m is
equal to the second dimension of the x argument. This information is
displayed in the interactive plot window as hyperlinks. The values
in these matricies should be complete web address}
\item{asLinks}{contains complete web address for points that should be
treated as hyperlinks. May be a data.frame or matrix of n x m where
n is equal to the first dimension of the x argument and m is equal
to the second dimension of the x argument, a vector of length equal
to the first dimension of the x argument that will be repeated, a
vector of length equal to the second dimension of the x argument
that will be repeated,a non NA value of length 1 that will be
repeated for all points, or a vector of length dim(x)[1]*dim(x)[2]}
\item{x.images}{data frame of n x m which contains paths for images
relating to the x axis of the heatmap plot. n should be
equal to the second dimension of the x argument. m columns contains
information regarding sample. This information is displayed as
images in the interactive plot window. This may be left NA}
\item{y.images}{data frame of n x m which contains paths for images
relating to the y axis of the heatmap plot. n should be
equal to the first dimension of the x argument. This information is
displayed as images in the interactive plot window. This may be
left as NA}
\item{xy.images}{list of matricies. All matricies should be of n x m
where n is equal to the first dimension of the x argument and m is
equal to the second dimension of the x argument. This information is
displayed in the interactive plot window as images. The values
in these matricies should be complete path of images}
\item{spot.radius}{radius of circle in pixels indicating area that
will be interactive around the center of graphed points}
\item{source.plot}{Indicates whether application should make a
postscript file and then convert to png file, or if the png file
should be made directly. This value is either ps, png, or NA. If NA
the operating system is checked and the appropriate file format is
output. Unix has a convert function that can convert a ps file to
png file; we by default use this setup because we feel the
postscript file maintains better quality. So on unix/linux systems
if source.plot is NA, source.plot will be set to ps. Windows does
not have this option, for this reason source.plot will be set to png
if left NA}
\item{image.size}{character indicating size of device.}
\item{fname.root}{Base name to use for all files created.}
\item{dir}{directory path to where files should be created. Default
creates files in working directory}
\item{header}{May either be v1,v2, or v3. This determines which
tooltip header will be in the html file. Each version has different
features or works well with different web browsers. see sp.header
for details.}
\item{window.size}{size of the html window. Only effective when header=v3}
\item{...}{additional arguments to the makeImap function}
}
\details{
The majority of the code for this function is verbatim from the R
package stats heatmap function. This function was designed to work as
a wrapper to utilize the same functionality and plotting as the
heatmap function with sendplot's interactive functionality. See
\code{\link{heatmap}} for more details on arguments and details
concerning the creation of plots.
%%
See \code{\link{sendplot}} for more information regarding the creation
of the interactive output with tool-tip content.
%%
Users are encouraged to read the package vignette which includes
a detailed discussion of all function arguments as well as several
useful examples.
}
\value{creates the static and interactive versions of heatmap}
\references{
http://www.R-project.org
http://www.onlamp.com/pub/a/onlamp/2007/07/05/writing-advanced-javascript.html
http://www.walterzorn.com/tooltip/tooltip\_e.htm
}
\note{
The majority of the code for this function is verbatim from the R
package stats heatmap function. This function was designed to work as
a wrapper to utilize the same functionality and plotting as the
heatmap function with sendplot's interactive functionality.
The interactive html plot currently only works in web browsers that
implement java script.
The code used to create the javascript embedded in the html file is a
modified version of javascript code from the open source
tooltip library; see the reference links.
}
\author{
Lori A. Shepherd and Daniel P. Gaile;
Authors of heatmap code used in our code: Andy Liaw, original; R. Gentleman, M. Maechler, W. Huber, revisions}
\seealso{\code{\link{initSplot}},\code{\link{makeImap}},\code{\link{makeSplot}},\code{\link{imagesend}},\code{\link{heatmap.send.legacy}}, \code{\link{sendplot}}, \code{\link{heatmap}} }
\examples{
## Load sendplot and supporting packages.
## Use library() (errors on failure) rather than require(), which only
## returns FALSE when the package is missing.
library(sendplot)
library(rtiff)
library(graphics)

## Example data: the built-in mtcars data set as a numeric matrix
x <- as.matrix(mtcars)

## Side-bar annotation colors for the rows (car models) and
## columns (specification variables)
rc <- rainbow(nrow(x), start = 0, end = .3)
cc <- rainbow(ncol(x), start = 0, end = .3)

## Tool-tip content: the raw cell value for every point, plus a
## human-readable description of each of the 11 mtcars columns
xy.labels <- list(value = x)
x.labels <- data.frame(label = colnames(x),
   description = c("Miles/(US) gallon",
                   "Number of cylinders",
                   "Displacement (cu.in.)",
                   "Gross horsepower",
                   "Rear axle ratio",
                   "Weight (lb/1000)",
                   "1/4 mile time",
                   "V/S",
                   "Transmission (0 = automatic, 1 = manual)",
                   "Number of forward gears",
                   "Number of carburetors")
   )

## Write all output files into a temporary directory
direct <- paste(tempdir(), "/", sep = "")
direct

## Create the static and interactive heatmap.
## Note: the full argument name 'margins' is spelled out instead of
## relying on partial argument matching ('margin=').
heatmap.send(x, scale = "column", xy.labels = xy.labels,
             x.labels = x.labels,
             RowSideColors = rc, ColSideColors = cc, margins = c(5, 10),
             xlab = "specification variables", ylab = "Car Models",
             main = "mtcars data",
             fname.root = "exHeat", dir = direct,
             font.size = 18, image.size = "600x900")
}
\keyword{methods}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.