blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c13c8431f874381d5b944b606c00859829ff91b
|
6de59664997ab124f8b99fc956bfcdf603b6fd52
|
/4_Graphs_R/a_amm_heatmaps.R
|
78f346cb96827b0deb1cb52a0be53e7d9045b12e
|
[] |
no_license
|
nstrasser/MasterThesisProject
|
716ce243ec69a7126216b5b0c43cc6fc57579e4d
|
43c521bd3044ba4c9a149b49061861191b66f25a
|
refs/heads/master
| 2022-12-08T14:05:08.067094
| 2020-08-31T16:18:05
| 2020-08-31T16:18:05
| 291,654,879
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,328
|
r
|
a_amm_heatmaps.R
|
library(ggplot2) # (Wickham, 2016)
library(tidyr) # (Wickham and Henry, 2020)
library(dplyr) # (Wickham et al., 2020)
library(cowplot) # (Wilke, 2019)
library(plotly)
library(grid)
library(gridExtra)
# Build one AMM heatmap for a single configuration subset.
#
# Args:
#   data_var:   data.frame for one scenario/configuration with columns
#               a_replicate, b_replicate, variance_overall, mutation_rate_a
#               and population_size.
#   config_var: unused. It was declared as a required argument but every call
#               site passes only data_var (only R's lazy evaluation prevented
#               an error); it now defaults to NULL for backward compatibility.
#
# Returns: a ggplot tile heatmap of AMM variance per (A-seed, B-seed) pair.
plot.heatmap <- function(data_var, config_var = NULL) {
  # Each subset carries a single mutation rate / population size; unique()
  # collapses the repeated column values for the subtitle.
  current_mutation_rate <- unique(data_var$mutation_rate_a)
  current_population_size <- unique(data_var$population_size)
  pl <- ggplot(data = data_var, aes(x = a_replicate, y = b_replicate, fill = variance_overall)) +
    geom_tile() +
    scale_fill_gradient(low = "red", high = "white") +
    theme(plot.title = element_text(size = 18),
          plot.subtitle = element_text(size = 18),
          axis.title = element_blank(),  # shared axis labels come from grid.arrange
          axis.text = element_text(size = 16),
          axis.text.x = element_text(angle = 90, hjust = 1),
          legend.text = element_text(size = 16),
          legend.title = element_text(size = 16)) +
    labs(fill = "AMM", subtitle = paste("Mutation Rate: ", current_mutation_rate, "\nPopulation Size: ", current_population_size, sep = ""))
  return(pl)
}
# Pre-aggregated AMM heatmap data produced by the Python processing step.
data_path <- "../3_a_LocalMachineDataProcessing_Python/equal_amm_heatmaps.csv"
data <- read.csv(data_path, na.strings="NONE")
# Fix the ordering of the seven configurations (alphabetical would misorder >9).
data$configuration <- factor(data$configuration, levels=c("CONFIG_1", "CONFIG_2", "CONFIG_3", "CONFIG_4", "CONFIG_5", "CONFIG_6", "CONFIG_7"))
theme_set(theme_cowplot())
# Scenario identifiers as they appear in the CSV's `scenario` column.
scenarios <- c("lockstep",
"oneOffLockstep",
"bFollowsA",
"independentAddition",
"noSelPressureBoth",
"matchingBitsLockstep")
# NOTE(review): `configs` is defined but never used below — the per-config
# subsets are written out explicitly instead; confirm it can be removed.
configs <- c("CONFIG_1", "CONFIG_2", "CONFIG_3",
"CONFIG_4", "CONFIG_5", "CONFIG_6", "CONFIG_7")
# One output figure per scenario: a 2-row grid of the seven config heatmaps.
for (sc in scenarios) {
# Map the scenario id to a human-readable title. `heading` is only assigned
# for the six known scenarios listed above.
if (sc == 'lockstep') { heading <- "Zero-Off Lockstep" }
else if (sc == 'oneOffLockstep') { heading <- "One-Off Lockstep" }
else if (sc == 'bFollowsA') { heading <- "One Follows" }
else if (sc == 'independentAddition') { heading <- "Additive Evolution" }
else if (sc == 'noSelPressureBoth') { heading <- "No Selection Pressure" }
else if (sc == 'matchingBitsLockstep') { heading <- "Matching-Bits Lockstep" }
# One subset per configuration for the current scenario.
data_c1 <- filter(data, scenario==sc, configuration=="CONFIG_1")
data_c2 <- filter(data, scenario==sc, configuration=="CONFIG_2")
data_c3 <- filter(data, scenario==sc, configuration=="CONFIG_3")
data_c4 <- filter(data, scenario==sc, configuration=="CONFIG_4")
data_c5 <- filter(data, scenario==sc, configuration=="CONFIG_5")
data_c6 <- filter(data, scenario==sc, configuration=="CONFIG_6")
data_c7 <- filter(data, scenario==sc, configuration=="CONFIG_7")
# plot.heatmap's second argument is unused, so each call passes only the data.
pl_c1 <- plot.heatmap(data_c1)
pl_c2 <- plot.heatmap(data_c2)
pl_c3 <- plot.heatmap(data_c3)
pl_c4 <- plot.heatmap(data_c4)
pl_c5 <- plot.heatmap(data_c5)
pl_c6 <- plot.heatmap(data_c6)
pl_c7 <- plot.heatmap(data_c7)
# Shared title and axis labels are attached at the grid level.
plot_complete <- grid.arrange(pl_c1, pl_c2, pl_c3, pl_c4, pl_c5, pl_c6, pl_c7, nrow = 2,
top=textGrob(paste("Scenario: ", heading, sep = ""), gp=gpar(fontsize=20, font=2)),
bottom=textGrob("A-Cell (Seed)", gp=gpar(fontsize=18)),
left=textGrob("B-Cell (Seed)", gp=gpar(fontsize=18), rot = 90))
ggsave(file=paste("./a_amm/heatmaps/amm_heatmaps_", sc, ".png", sep = ""), plot_complete, width = 16, height = 8)
}
|
ed07d9bd682d448618f743a042602f514e49aa36
|
54b4976030ae6a42e10282c8f41609ef266721c9
|
/R/lamp-laplace-distribution-method.R
|
5a28078c0004d039b082bd5044d06b264083fd0a
|
[] |
no_license
|
cran/ecd
|
b1be437b407e20c34d65bcf7dbee467a9556b4c1
|
18f3650d6dff442ee46ed7fed108f35c4a4199b9
|
refs/heads/master
| 2022-05-18T20:24:56.375378
| 2022-05-09T20:10:02
| 2022-05-09T20:10:02
| 48,670,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,109
|
r
|
lamp-laplace-distribution-method.R
|
#' Laplace distribution
#'
#' Implements some aspects of Laplace distribution (based on stats package) for
#' stable random walk simulation.
#'
#' @param n numeric, number of observations.
#' @param x numeric, vector of quantiles at which to evaluate the density.
#' @param b numeric, the scale parameter, where the variance is 2*b^2.
#'
#' @return numeric, standard convention is followed:
#' d* returns the density,
#' p* returns the distribution function,
#' q* returns the quantile function, and
#' r* generates random deviates.
#'
#' @keywords Laplace
#'
#' @author Stephen H-T. Lihn
#'
#' @importFrom stats dexp
#' @importFrom stats rexp
#' @importFrom stats runif
#'
#' @export rlaplace0
#' @export dlaplace0
#'
### <======================================================================>
rlaplace0 <- function(n, b=1) stats::rexp(n,1/b)*sign(stats::runif(n)-0.5)
### <---------------------------------------------------------------------->
#' @rdname rlaplace0
dlaplace0 <- function(x, b = 1) {
  # Laplace density f(x) = exp(-|x|/b) / (2b): half the Exponential(1/b)
  # density evaluated at |x|. 0.5 * y is bit-identical to y / 2.
  0.5 * stats::dexp(abs(x), 1/b)
}
### <---------------------------------------------------------------------->
|
6f5bd7279cde0e0d224d7451e541292cb9a0b21d
|
2a1d00ab9ac6fe11bff9557513f1f4a876a2afd4
|
/Post_Mid_term.R
|
0ff55fef955afacc39507855eb9fa3f1b67f32af
|
[] |
no_license
|
mohith10/R_for_Data_Science
|
9d9be3407a4f301ad3b4f41171197867fcf5945f
|
685477f55b4ceb9dfd247696ea066a6c63080b32
|
refs/heads/master
| 2020-04-17T17:34:18.150899
| 2019-05-17T16:00:08
| 2019-05-17T16:00:08
| 166,787,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
Post_Mid_term.R
|
#Ensemble Model - Powerful way of participating in challenges and contests
#Methods - Bagging and Boosting
#Steps - Get Data -> Clean Data -> Train Model -> Test Data -> Improve
#We partition the data to avoid overfitting - Basic Question
#In unsupervised learning we don't have any target variable.
#Bagging - Bootstrap Aggregating
|
666e61fc275e991f076adb3f26bef08584dfe398
|
6fe61e61e61f70c223c12a46a58846591d8494c2
|
/exploratory_analyses/02_reddit_scaled_up/scripts/04_topic_modeling_comments.R
|
76aea8b150eb140e0b02366095044f31fa2561b9
|
[] |
no_license
|
mllewis/LANGSCALES
|
a723eccc5ebdb20187b0ceaedeb324a282f5070f
|
ebe0749ba1f46bf628f4ae4820580d420d479cad
|
refs/heads/master
| 2020-07-24T08:10:20.422821
| 2020-02-20T19:55:00
| 2020-02-20T19:55:00
| 207,859,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,666
|
r
|
04_topic_modeling_comments.R
|
# train topic models for comments using mallet package
library(here)
library(mallet)
library(tidyverse)
library(tidytext)
library(glue)
# Repo-relative root of the scaled-up reddit analysis data directory.
LOCAL_PATH <- here("/exploratory_analyses/02_reddit_scaled_up/data/")
MIN_WORDS_PER_POST <- 100 # excluding stop words
NTOPICS <- 50 # number of LDA topics fitted per subreddit
# Fit a mallet LDA topic model to the comments of one subreddit and write the
# document-topic (gamma) and topic-word (beta) matrices to CSV.
#
# Args:
#   subreddit:       subreddit name; used to locate the tidy CSV and name outputs.
#   local_path:      data root (prefix for tidy/ inputs and topic_models/ outputs).
#   ntopics:         number of LDA topics.
#   nwords_per_post: minimum non-stop-word count for a comment to be modeled.
get_topic_model_subreddit <- function(subreddit, local_path, ntopics, nwords_per_post){
print(subreddit)
# tidy comments
subreddit_path <- glue("{local_path}tidy/{subreddit}_tidy_comments_posts.csv")
tidy_subreddit_data <- read_csv(subreddit_path, guess_max = 1000000) %>%
select(-body)
# Keep only real comments: drop deleted authors and removed bodies.
reddit_text <- reddit_text <- tidy_subreddit_data %>%
filter(author != "[deleted]",
text_type == "comment",
body_clean != "removed")
# unnest tokens; document id is "subreddit-post-comment"
unnested_reddit <- reddit_text %>%
select(subreddit, post_id, comment_id, body_clean) %>%
unite(document, subreddit, post_id, comment_id, sep = "-") %>%
distinct(document, .keep_all = T) %>%
unnest_tokens(word, body_clean)
# remove stop words (snowball lexicon only) and strip apostrophes
unnested_reddit_tidy <- unnested_reddit %>%
anti_join(stop_words %>% filter(lexicon == "snowball"), by = "word") %>%
mutate(word = str_replace(word, "'", "")) %>%
arrange(document)
# collapse posts to single line; remove posts with fewer than MIN_WORDS_PER_POST words
nested_reddit_tidy <- unnested_reddit_tidy %>%
group_by(document) %>%
summarize(text = paste(word, collapse = " ")) %>%
right_join(unnested_reddit_tidy %>%
count(document) %>%
filter(n >= nwords_per_post) %>%
select(document))
# create an empty file of "stopwords" (stop words were already removed above)
file.create(empty_file <- tempfile())
docs <- mallet.import(nested_reddit_tidy$document,
nested_reddit_tidy$text,
empty_file)
# train model (500 Gibbs sampling iterations)
mallet_model <- MalletLDA(num.topics = ntopics)
mallet_model$loadDocuments(docs)
mallet_model$train(500)
# save matrices: gamma = per-document topic proportions, beta = per-topic words
document_topic_model <- tidy(mallet_model, matrix = "gamma")
topic_word_model <- tidy(mallet_model, matrix = "beta")
doc_topic_model_outpath <- glue("{local_path}topic_models/{subreddit}_dt.csv")
write_csv(document_topic_model, doc_topic_model_outpath)
topic_word_model_outpath <- glue("{local_path}topic_models/{subreddit}_tw.csv")
write_csv(topic_word_model, topic_word_model_outpath)
}
# Derive the subreddit list from the raw comment files on disk.
target_subreddits <- glue("{LOCAL_PATH}raw/comments/")%>%
list.files() %>%
str_replace_all("_comments.csv","")
# NOTE(review): only the 17th subreddit is processed here — this looks like a
# resume/debug index; confirm whether walk(target_subreddits, ...) was intended.
walk(target_subreddits[17], get_topic_model_subreddit,
LOCAL_PATH,
NTOPICS,
MIN_WORDS_PER_POST)
|
3866654b59981236e5dd6bd98f956eb6578a2588
|
71181535e485db80d21f2e447c3208547b3114a2
|
/R/old/check_msats.R
|
85b69fe40b5b54138e1c1ca6d501a71c019c9845
|
[] |
no_license
|
mastoffel/pinniped_bottlenecks
|
f93ab44d3abf3f2bf125b968555a2b042517f6a8
|
3627d6596a90497a382e65c249b7245e1dbe022a
|
refs/heads/master
| 2021-08-27T16:24:49.777306
| 2021-08-06T09:00:23
| 2021-08-06T09:00:23
| 57,881,827
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 568
|
r
|
check_msats.R
|
# script to check all microsatellite datasets
library(readxl)
# sheet numbers to load
dataset_names <- excel_sheets("data/processed/seal_data_largest_clust_and_pop.xlsx")
# Read one sheet of the seals workbook by name.
load_dataset <- function(dataset_names) {
read_excel("data/processed/seal_data_largest_clust_and_pop.xlsx", sheet = dataset_names)
}
# load all datasets, one list element per sheet
all_seals <- lapply(dataset_names, load_dataset)
names(all_seals) <- dataset_names
all_seals[[2]]
# check coding of msats: per-locus value range (columns 4+ hold the loci)
lapply(all_seals, function(x) apply(x[, 4:ncol(x)], 2, range, na.rm = TRUE))
# BUG FIX: a trailing `lapply(all_seals)` was removed — lapply() without a
# FUN argument raises "argument \"FUN\" is missing" and aborted the script.
|
54c36be47e2ddc271ce5eff66a052ea80363291c
|
a2c6618e894166b23b3bda63e32e84a749fa9c62
|
/plot5.R
|
5c206275ae156ed9a0dd7c0be2e4d54d936951d7
|
[] |
no_license
|
csmahori/Exploratory-Data-Analysis-Project
|
110d54db5bf8cca23d46862c133b6951b12771e5
|
ee86ba1b4447cd5669c3793dce19603513ef1617
|
refs/heads/master
| 2020-03-17T10:20:32.549774
| 2018-06-06T10:07:30
| 2018-06-06T10:07:30
| 133,508,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 555
|
r
|
plot5.R
|
# plot5: PM2.5 emissions from motor-vehicle-related sources in Baltimore City
# (fips "24510"), one line per SCC.Level.Three category on a log10 y scale.
library(dplyr)   # filter / mutate / group_by / summarize (was never loaded)
library(ggplot2) # ggplot / geom_line / scale_y_log10 (was never loaded)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
table(SCC$EI.Sector)
# Keep SCC rows whose level-three description mentions motor or vehicles.
sub3 <- SCC[grep("[Mm]otor|[Vv]ehicles", SCC$SCC.Level.Three), ]
sub31 <- merge(NEI, sub3, by = "SCC")
# BUG FIX: the original `filter(sub31, fips = "24510")` passed a *named
# argument* instead of a comparison; filter() needs a logical expression.
sub32 <- filter(sub31, fips == "24510")
sub32 <- mutate(sub32, year = as.factor(year))
sub33 <- group_by(sub32, year, SCC.Level.Three)
sub33 <- summarize(sub33, total_emissions = sum(Emissions, na.rm = TRUE))
ggplot(sub33, aes(year, total_emissions, group = SCC.Level.Three, col = SCC.Level.Three)) +
  geom_line(size = 1.25) +
  scale_y_log10()
dev.copy(png, file = "plot5.png")
dev.off()
|
4d09cb2aad305ffec3b7ce5ac819a97fd60fac67
|
04d0a997364ad1bab775fb920edfe5b60cf6d740
|
/man/PseudoR2.Rd
|
988e42301b46bc94c031f95420d1ed1ce264fb58
|
[] |
no_license
|
mainwaringb/DescTools
|
a2dd23ca1f727e8bbfc0e069ba46f44567e4be24
|
004f80118d463c3cb8fc2c6b3e934534049e8619
|
refs/heads/master
| 2020-12-22T15:12:41.335523
| 2020-03-21T17:30:52
| 2020-03-21T17:30:52
| 236,836,652
| 0
| 0
| null | 2020-01-28T20:40:03
| 2020-01-28T20:40:02
| null |
UTF-8
|
R
| false
| false
| 5,162
|
rd
|
PseudoR2.Rd
|
\name{PseudoR2}
\alias{PseudoR2}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Pseudo R2 Statistics
%% ~~function to do ... ~~
}
\description{Although there's no commonly accepted agreement on how to assess the fit of a logistic regression, there are some approaches. The goodness of fit of the logistic regression model can be expressed by some variants of pseudo R squared statistics, most of which being based on the deviance of the model.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
PseudoR2(x, which = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{the \code{glm}, \code{polr} or \code{multinom} model object to be evaluated.
%% ~~Describe \code{x} here~~
}
\item{which}{character, one out of \code{"McFadden"}, \code{"McFaddenAdj"}, \code{"CoxSnell"}, \code{"Nagelkerke"}, \code{"AldrichNelson"},
\code{"VeallZimmermann"}, \code{"Efron"}, \code{"McKelveyZavoina"}, \code{"Tjur"}, \code{"all"}. Partial matching is supported.}
}
\details{Cox and Snell's \eqn{R^2} is based on the log likelihood for the model compared to the log likelihood for a baseline model. However, with categorical outcomes, it has a theoretical maximum value of less than 1, even for a "perfect" model.
Nagelkerke's \eqn{R^2} (also sometimes called Cragg-Uhler) is an adjusted version of the Cox and Snell's \eqn{R^2} that adjusts the scale of the statistic to cover the full range from 0 to 1.
McFadden's \eqn{R^2} is another version, based on the log-likelihood kernels for the intercept-only model and the full estimated model.
%% ~~ If necessary, more details than the description above ~~
Veall and Zimmermann concluded that from a set of six widely used measures the measure suggested by McKelvey and Zavoina had the closest correspondence to ordinary least square R2. The Aldrich-Nelson pseudo-R2 with the Veall-Zimmermann correction is the best approximation of the McKelvey-Zavoina pseudo-R2. Efron, Aldrich-Nelson, McFadden and Nagelkerke approaches severely underestimate the "true R2".
}
\value{the value of the specific statistic. \code{AIC}, \code{LogLik}, \code{LogLikNull} and \code{G2} will only be reported with option \code{"all"}.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
\item{McFadden}{McFadden pseudo-\eqn{R^2}}
\item{McFaddenAdj}{McFadden adjusted pseudo-\eqn{R^2}}
\item{CoxSnell}{Cox and Snell pseudo-\eqn{R^2} (also known as ML pseudo-\eqn{R^2})}
\item{Nagelkerke}{Nagelkerke pseudo\eqn{R^2} (also known as CraggUhler \eqn{R^2})}
\item{AldrichNelson}{AldrichNelson pseudo-\eqn{R^2}}
\item{VeallZimmermann}{VeallZimmermann pseudo-\eqn{R^2}}
\item{McKelveyZavoina}{McKelvey and Zavoina pseudo-\eqn{R^2}}
\item{Efron}{Efron pseudo-\eqn{R^2}}
\item{Tjur}{Tjur's pseudo-\eqn{R^2}}
\item{AIC}{Akaike's information criterion}
\item{LogLik}{log-Likelihood for the fitted model (by maximum likelihood)}
\item{LogLikNull}{log-Likelihood for the null model. The null model will include the offset, and an intercept if there is one in the model.}
\item{G2}{difference of the null deviance and the model deviance}
}
\references{
Aldrich, J. H. and Nelson, F. D. (1984): Linear Probability, Logit, and probit Models, \emph{Sage
University Press}, Beverly Hills.
Cox D R & Snell E J (1989) \emph{The Analysis of Binary Data} 2nd ed. London: Chapman and Hall.
Efron, B. (1978). Regression and ANOVA with zero-one data: Measures of residual variation. \emph{Journal of the American Statistical Association, 73}(361), 113--121.
Hosmer, D. W., & Lemeshow, S. (2000). \emph{Applied logistic regression} (2nd ed.). Hoboken, NJ: Wiley.
McFadden D (1979). Quantitative methods for analysing travel behavior of individuals: Some recent developments. In D. A. Hensher & P. R. Stopher (Eds.), \emph{Behavioural travel modelling} (pp. 279-318). London: Croom Helm.
McKelvey, R. D., & Zavoina, W. (1975). A statistical model for the analysis of ordinal level dependent variables. \emph{The Journal of Mathematical Sociology, 4}(1), 103--120
Nagelkerke, N. J. D. (1991). A note on a general definition of the coefficient of determination. \emph{Biometrika, 78}(3), 691--692.
Tjur, T. (2009) Coefficients of determination in logistic regression models -
a new proposal: The coefficient of discrimination. \emph{The American
Statistician},
63(4): 366-372
Veall, M.R., & Zimmermann, K.F. (1992) Evaluating Pseudo-R2's for binary probit models. \emph{Quality & Quantity}, 28, pp. 151-164
}
\author{Andri Signorell <andri@signorell.net>
%% ~~who you are~~
}
\seealso{\code{\link{logLik}}, \code{\link{AIC}}, \code{\link{BIC}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
r.glm <- glm(Survived ~ ., data=Untable(Titanic), family=binomial)
PseudoR2(r.glm)
PseudoR2(r.glm, c("McFadden", "Nagel"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{model}
|
442abb7e794bcdcbf6dab31b1dd4214c32aabb3d
|
fc4fba8b64c564ef49beaf1560ff01bf9bdc259f
|
/tools/analysis_from_radiant_not_in_use/base.R
|
097fc101c2c8e38e97d0f76b610695a9f3f92a01
|
[] |
no_license
|
nimrodbusany/Radiant4GoogleReps
|
a86665197fd507e95aa45525c5d313832d9a0348
|
9508bf9dbb0f158c9cad341d23e9692a7e30863c
|
refs/heads/master
| 2021-01-01T20:05:26.951890
| 2020-06-27T05:41:02
| 2020-06-27T05:41:02
| 24,627,872
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,118
|
r
|
base.R
|
# Alternative-hypothesis choices shared by every test UI in this file:
# display label -> value passed through to t.test()/pairwise.t.test().
base_alt <- list("Two sided" = "two.sided", "Less than" = "less", "Greater than" = "greater")
###############################
# Single mean
###############################
# Variable selector: only numeric/integer columns of the active dataset.
output$uiSm_var <- renderUI({
isNum <- "numeric" == getdata_class() | "integer" == getdata_class()
vars <- varnames()[isNum]
if(length(vars) == 0) return()
selectInput(inputId = "sm_var", label = "Variable (select one):", choices = vars,
selected = state_singlevar("sm_var",vars), multiple = FALSE)
})
# Left-hand input panel for the single-mean t-test.
output$ui_singleMean <- renderUI({
list(
wellPanel(
uiOutput("uiSm_var"),
selectInput(inputId = "sm_alternative", label = "Alternative hypothesis:",
choices = base_alt, selected = state_init_list("sm_alternative","two.sided", base_alt), multiple = FALSE),
sliderInput('sm_sigLevel',"Significance level:", min = 0.85, max = 0.99,
value = state_init('sm_sigLevel',.95), step = 0.01),
numericInput("sm_compValue", "Comparison value:", state_init('sm_compValue',0.0))
),
helpAndReport('Single mean','singleMean',inclMD("tools/help/singleMean.md"))
)
})
# Main panel: Summary/Plots tabs wired to .singleMean / summary_ / plots_.
output$singleMean <- renderUI({
# create inputs and outputs - function in radiant.R
statTabPanel("Base","Single mean",".singleMean","singleMean")
})
# Reactive: returns an explanatory string until a variable is chosen,
# otherwise the singleMean() result for the current inputs.
.singleMean <- reactive({
ret_text <- "This analysis requires a variable of type numeric or interval.\nPlease select another dataset."
if(is.null(input$sm_var)) return(ret_text)
# if(is.null(inChecker(c(input$sm_var)))) return(ret_text)
singleMean(input$datasets, input$sm_var, input$sm_compValue, input$sm_alternative, input$sm_sigLevel)
})
# Append the current analysis call to the report when the button is pressed.
observe({
if(is.null(input$singleMeanReport) || input$singleMeanReport == 0) return()
isolate({
inp <- list(input$datasets, input$sm_var, input$sm_compValue,
input$sm_alternative, input$sm_sigLevel)
updateReport(inp,"singleMean")
})
})
# One-sample t-test of sm_var against sm_compValue.
# `datasets` indexes the global `values` store; the htest result is augmented
# with the raw data (result$data) so the plot function can reuse it.
singleMean <- function(datasets, sm_var, sm_compValue = 0, sm_alternative = 'two.sided',
sm_sigLevel = .95) {
dat <- values[[datasets]][,sm_var]
result <- t.test(dat, mu = sm_compValue, alternative = sm_alternative,
conf.level = sm_sigLevel)
result$data <- data.frame(dat)
names(result$data) <- sm_var
result$data.name <- sm_var
result
}
# Summary tab content for the single-mean test: returns the result unchanged
# (the htest print method renders the summary).
summary_singleMean <- function(result = .singleMean()) result
# Histogram of the tested variable with vertical reference lines:
# red dashed = comparison value (null), solid = sample mean, thin dashed =
# confidence-interval bounds. Binwidth is 1/12 of the data range.
plots_singleMean <- function(result = .singleMean()) {
dat <- result$data
bw <- diff(range(dat, na.rm = TRUE)) / 12
p <- ggplot(dat, aes_string(x=result$data.name)) +
geom_histogram(colour = 'black', fill = 'blue', binwidth = bw, alpha = .1) +
geom_vline(xintercept = c(result$null.value), color = 'red', linetype = 'longdash', size = 1) +
geom_vline(xintercept = result$estimate, color = 'black', linetype = 'solid', size = 1) +
geom_vline(xintercept = result$conf.int, color = 'black', linetype = 'longdash', size = .5)
print(p)
}
###############################
# Compare means
###############################
# First selector: a factor (group comparison) or a numeric variable
# (paired comparison against other numeric variables).
output$uiCm_var1 <- renderUI({
isNumOrFct <- "numeric" == getdata_class() | "integer" == getdata_class() | "factor" == getdata_class()
vars <- varnames()[isNumOrFct]
if(length(vars) == 0) return()
selectInput(inputId = "cm_var1", label = "Select a factor or numerical variable:", choices = vars,
selected = state_singlevar("cm_var1",vars), multiple = FALSE)
})
# Second selector: numeric variable(s); shape depends on the type of cm_var1.
output$uiCm_var2 <- renderUI({
if(is.null(input$cm_var1)) return()
isNum <- "numeric" == getdata_class() | "integer" == getdata_class()
vars <- varnames()[isNum]
if(length(vars) == 0) return()
if(input$cm_var1 %in% vars) {
# when cm_var1 is numeric comparison for multiple variables are possible
vars <- vars[-which(vars == input$cm_var1)]
if(length(vars) == 0) return()
selectInput(inputId = "cm_var2", label = "Variables (select one or more):", choices = vars,
selected = state_multvar("cm_var2",vars), multiple = TRUE, selectize = FALSE)
} else {
# when cm_var1 is not numeric then comparisons are across levels/groups
selectInput(inputId = "cm_var2", label = "Variables (select one):", choices = vars,
selected = state_singlevar("cm_var2",vars), multiple = FALSE)
}
})
# Left-hand input panel; some controls only show on their relevant tab.
output$ui_compareMeans <- renderUI({
list(
wellPanel(
uiOutput("uiCm_var1"),
uiOutput("uiCm_var2"),
conditionalPanel(condition = "input.tabs_compareMeans == 'Summary'",
selectInput(inputId = "cm_alternative", label = "Alternative hypothesis:", choices = base_alt,
selected = state_init_list("cm_alternative","two.sided", base_alt))
),
conditionalPanel(condition = "input.tabs_compareMeans == 'Plots'",
checkboxInput('cm_jitter', 'Jitter', value = state_init("cm_jitter",FALSE))
)
),
helpAndReport('Compare means','compareMeans',inclMD("tools/help/compareMeans.md"))
)
})
# Main panel wiring (statTabPanel defined in radiant.R).
output$compareMeans <- renderUI({
statTabPanel("Base","Compare means",".compareMeans", "compareMeans")
})
# Reactive: message strings until both inputs exist, then compareMeans().
.compareMeans <- reactive({
ret_text <- "This analysis requires variables of type factor, numeric or interval.\nPlease select another dataset."
if(is.null(input$cm_var1)) return(ret_text)
if(is.null(input$cm_var2)) return("Please select a numeric or interval variable")
# if(is.null(inChecker(c(input$cm_var1, input$cm_var2)))) return(ret_text)
compareMeans(input$datasets, input$cm_var1, input$cm_var2, input$cm_alternative, input$cm_jitter)
})
# Append the current analysis call to the report on button press.
observe({
if(is.null(input$compareMeansReport) || input$compareMeansReport == 0) return()
isolate({
inp <- list(input$datasets, input$cm_var1, input$cm_var2, input$cm_alternative, input$cm_jitter)
updateReport(inp,"compareMeans")
})
})
# Pairwise t-tests (Bonferroni-adjusted, unpooled SD) across groups.
# If var1 is numeric the data are melted so each selected column becomes a
# "group" and the tests are run paired; if var1 is a factor the tests compare
# var2 across its levels, unpaired.
compareMeans <- function(datasets, var1, var2, cm_alternative, cm_jitter) {
vars <- c(var1,var2)
dat <- values[[datasets]][,vars]
dat <- na.omit(dat)
if(!is.factor(dat[,var1])) {
# numeric var1: wide -> long; melt() creates a `variable` grouping column
cm_paired <- TRUE
dat <- melt(dat)
var1 <- colnames(dat)[1]
var2 <- colnames(dat)[2]
} else {
cm_paired <- FALSE
colnames(dat)[1] <- "variable"
}
if(cm_paired) {
pwcomp <- with(dat,pairwise.t.test(get(var2), get('variable'), pool.sd = FALSE,
p.adj = "bonf", paired = TRUE, alternative = cm_alternative))
} else {
pwcomp <- with(dat,pairwise.t.test(get(var2), get('variable'), pool.sd = FALSE,
p.adj = "bonf", paired = FALSE, alternative = cm_alternative))
}
# Stash the inputs on the result so summary/plot functions can echo them.
pwcomp$vars <- paste0(vars, collapse=", ")
pwcomp$cm_alternative <- cm_alternative
pwcomp$cm_jitter <- cm_jitter
list("pwcomp" = pwcomp, "data" = data.frame(dat))
}
# Generate output for the summary tab: group means followed by a table of
# pairwise hypothesis tests with formatted p-values.
summary_compareMeans <- function(result = .compareMeans()) {
cat("Pairwise comparisons using t-tests (bonferroni adjustment)\n")
cat(paste0("Variables: ",result$pwcomp$vars,"\n\n"))
# cat("\nMeans table:\n")
means_tab <- ddply(result$data, c("variable"), colwise(mean))
colnames(means_tab) <- c("","mean")
print(means_tab, row.names = FALSE, right = FALSE)
# Symbol used to phrase the alternative hypothesis in the output table.
if(result$pwcomp$cm_alternative == "two.sided") {
h.sym <- "not equal to"
} else if(result$pwcomp$cm_alternative == "less") {
h.sym <- "<"
} else {
h.sym <- ">"
}
mod <- result[['pwcomp']]$p.value
dvar <- dimnames(mod)
var1 <- dvar[[1]]
var2 <- dvar[[2]]
# NOTE(review): nrow here uses length(var1)^2/2, which can undersize the
# frame; assignment by res[rnr, ] below grows it as needed, so this works.
res <- data.frame(matrix(ncol = 3, nrow = length(var1)*length(var1)/2))
colnames(res) <- c("Alternative hyp.", "Null hyp.", "p-value")
rnr <- 1
# Walk the p-value matrix; NA cells are the unused upper triangle.
for(i in var1) {
for(j in var2) {
if(is.na(mod[i,j])) next
res[rnr, 'Alternative hyp.'] <- paste(i, h.sym, j," ")
res[rnr, 'Null hyp.'] <- paste(i, "=", j, " ")
if(mod[i,j] < .001) {
pval = "< 0.001"
} else {
pval <- sprintf("%.3f", mod[i,j])
}
res[rnr, 'p-value'] <- pval
rnr <- rnr + 1
}
}
cat("\n")
print(res, row.names = FALSE, right = FALSE)
}
# Generate output for the plots tab: a boxplot (optionally jittered points)
# stacked above a density plot, one fill colour per group.
plots_compareMeans <- function(result = .compareMeans()) {
dat <- result$data
var1 <- colnames(dat)[1]
var2 <- colnames(dat)[-1]
plots <- list()
# p <- ggplot(dat, aes_string(x=var1, y=var2, fill=var1)) + geom_boxplot(alpha=.3, legend = FALSE)
p <- ggplot(dat, aes_string(x=var1, y=var2, fill=var1)) + geom_boxplot(alpha=.3)
if(result$pwcomp$cm_jitter) p <- p + geom_jitter()
plots[["Boxplot"]] <- p
plots[["Density"]] <- ggplot(dat, aes_string(x=var2, fill=var1)) + geom_density(alpha=.3)
do.call(grid.arrange, c(plots, list(ncol = 1)))
}
###############################
# Cross-tabs
###############################
# Grouping-factor selector: factor columns of the active dataset only.
output$uiCt_var1 <- renderUI({
isFct <- "factor" == getdata_class()
vars <- varnames()[isFct]
if(length(vars) == 0) return()
selectInput(inputId = "ct_var1", label = "Select a grouping factor:", choices = vars,
# selected = names(vars[vars == values$ct_var1]), multiple = FALSE)
selected = state_singlevar("ct_var1",vars), multiple = FALSE)
})
# Second factor selector: any factor except the one chosen as ct_var1.
output$uiCt_var2 <- renderUI({
if(is.null(input$ct_var1)) return()
isFct <- "factor" == getdata_class()
vars <- varnames()[isFct]
# if(!input$ct_var1 %in% vars) return()
# if(is.null(inChecker(input$ct_var1))) return()
vars <- vars[-which(vars == input$ct_var1)]
if(length(vars) == 0) return()
selectInput(inputId = "ct_var2", label = "Select a factor:", choices = vars,
# selected = names(vars[vars == values$ct_var2]), multiple = FALSE)
selected = state_singlevar("ct_var2",vars), multiple = FALSE)
})
# Left-hand input panel: which auxiliary tables/plots to show.
# (The "standarized" label typo is user-visible text; left unchanged here.)
output$ui_crosstab <- renderUI({
list(
wellPanel(
uiOutput("uiCt_var1"),
uiOutput("uiCt_var2"),
checkboxInput("ct_std_residuals", label = "Deviation (standarized)",
value = state_init('ct_std_residuals',FALSE)),
checkboxInput("ct_deviation", label = "Deviation (percentage)",
value = state_init('ct_deviation',FALSE)),
checkboxInput("ct_expected", label = "Expected values",
value = state_init('ct_expected',FALSE)),
conditionalPanel(condition = "input.tabs_crosstab == 'Summary'",
checkboxInput("ct_contrib", label = "Contribution to chisquare value",
value = state_init('ct_contrib',FALSE)))
),
helpAndReport('Cross-tabs','crosstab',inclMD("tools/help/crossTabs.md"))
)
})
# Pixel width for the cross-tab plot panel; 650 while .crosstab() is still
# returning its "not available" message string instead of a result list.
ct_plotWidth <- function() {
  res <- .crosstab()
  if (is.list(res)) res$plotWidth else 650
}
# Pixel height for the cross-tab plot panel (grows with the panel count
# computed in crosstab()); 650 until a result list is available.
ct_plotHeight <- function() {
  res <- .crosstab()
  if (is.list(res)) res$plotHeight else 650
}
# Main panel wiring; plot dimensions come from ct_plotWidth/ct_plotHeight.
output$crosstab <- renderUI({
# for input-output
statTabPanel("Base", "Cross-tabs",".crosstab","crosstab", "ct_plotWidth", "ct_plotHeight")
})
# Reactive: message string until both factors are chosen, then crosstab().
.crosstab <- reactive({
ret_text <- "This analysis requires variables of type factor.\nPlease select another dataset."
if(is.null(input$ct_var1) || is.null(input$ct_var2)) return(ret_text)
# if(is.null(inChecker(c(input$ct_var1, input$ct_var2)))) return(ret_text)
crosstab(input$datasets, input$ct_var1, input$ct_var2, input$ct_expected, input$ct_deviation,
input$ct_std_residuals, input$ct_contrib)
})
# Report button: record the call plus figure size scaled from pixels to inches.
observe({
if(is.null(input$crosstabReport) || input$crosstabReport == 0) return()
isolate({
inp <- list(input$datasets, input$ct_var1, input$ct_var2, input$ct_expected, input$ct_deviation,
input$ct_std_residuals, input$ct_contrib)
updateReport(inp,"crosstab", round(7 * ct_plotWidth()/650,2), round(7 * ct_plotHeight()/650,2))
})
})
# Cross-tabulate ct_var1 by ct_var2 for the chosen dataset and run a
# chi-square test of independence (no continuity correction).
# Returns the augmented test object, the raw table, fixed plot width, a
# height that grows with the number of requested panels, and the inputs.
crosstab <- function(datasets, ct_var1, ct_var2, ct_expected, ct_deviation, ct_std_residuals, ct_contrib) {
  # Complete cases only for the two selected factors.
  dat <- na.omit(values[[datasets]][, c(ct_var1, ct_var2)])
  # Dimension names label the table rows/cols in printed output.
  dnn <- c(paste0("Group(", ct_var1, ")"), paste0("Variable(", ct_var2, ")"))
  tab <- table(dat[, ct_var1], dat[, ct_var2], dnn = dnn)
  # chisq.test warns on small expected counts; those warnings are muted here.
  cst <- suppressWarnings(suppressMessages(chisq.test(tab, correct = FALSE)))
  # Attach the percentage deviation of observed from expected counts.
  cst$deviation <- (cst$observed - cst$expected) / cst$expected
  # One base panel plus one per requested auxiliary table.
  nrPlot <- 1 + sum(c(ct_expected, ct_deviation, ct_std_residuals))
  # Echo the inputs so downstream summary/plot functions can re-read them.
  cinp <- list(
    datasets = datasets,
    ct_var1 = ct_var1,
    ct_var2 = ct_var2,
    ct_expected = ct_expected,
    ct_deviation = ct_deviation,
    ct_std_residuals = ct_std_residuals,
    ct_contrib = ct_contrib
  )
  list(cst = cst, table = tab, plotWidth = 650, plotHeight = 400 * nrPlot, cinp = cinp)
}
# Summary tab: observed counts, the optional tables selected by the user,
# the chi-square test itself, and a small-expected-cell caveat.
summary_crosstab <- function(result = .crosstab()) {
cat("Observed values:\n")
print(result$cst$observed)
cinp <- result$cinp
if(cinp$ct_std_residuals) {
cat("\nDeviation (standardized):\n")
print(round(result$cst$residuals, 2)) # these seem to be the correct std.residuals
}
if(cinp$ct_deviation) {
cat("\nDeviation (percentage):\n")
print(round(result$cst$deviation, 2)) # % deviation
}
if(cinp$ct_expected) {
cat("\nExpected values:\n")
print(round(result$cst$expected,2))
# print(result$cst$expected, digits = 2)
}
if(cinp$ct_contrib) {
# Per-cell (O-E)^2/E terms that sum to the chi-square statistic.
cat("\nContribution to chisquare value:\n")
print((result$cst$observed - result$cst$expected)^2 / result$cst$expected, digits = 2)
# print(round((result$cst$observed - result$cst$expected)^2 / result$cst$expected), 2)
}
# if(cinp$ct_cellperc) {
# cat("\nCell percentages:\n")
# print(prop.table(result$table), digits = 2) # cell percentages
# }
# if(cinp$ct_rowperc) {
# cat("\nRow percentages:\n")
# print(prop.table(result$table, 1), digits = 2) # row percentages
# }
# if(cinp$ct_colperc) {
# cat("\nColumn percentages:\n")
# print(prop.table(result$table, 2), digits = 2) # column percentages
# }
print(result$cst)
# cat(paste("\n",sprintf("%.1f",100 * (sum(result$cst$expected < 5) / length(result$cst$expected))),"% of cells have expected values below 5\n\n"), sep = "")
cat(paste(sprintf("%.1f",100 * (sum(result$cst$expected < 5) / length(result$cst$expected))),"% of cells have expected values below 5\n\n"), sep = "")
}
# Plots tab: optional standardized-residual, percentage-deviation and
# expected-value panels followed by the stacked observed-proportions bar
# chart, arranged in one column.
plots_crosstab <- function(result = .crosstab()) {
cinp <- result$cinp
# Re-read the raw data for the plot (mirrors the subset made in crosstab()).
dat <- na.omit( values[[cinp$datasets]][,c(cinp$ct_var1,cinp$ct_var2)] )
# dat <- na.omit( getdata()[,c(cinp$ct_var1,cinp$ct_var2)] )
plots <- list()
# Reshape a matrix-like table to long form with its rownames as a column.
meltTable <- function(tab) {
tab <- data.frame(tab)
lab <- data.frame(rownames(tab))
names(lab) <- "rnames"
melt(cbind(lab,tab))
}
if(cinp$ct_std_residuals) {
tab <- meltTable(result$cst$residuals)
colnames(tab)[c(2,3)] <- c(cinp$ct_var1, cinp$ct_var2)
# Dashed reference lines at the 90%/95% two-sided normal cut-offs.
plots[['residuals']] <- ggplot(tab, aes_string(x = cinp$ct_var1, y = "value", fill = cinp$ct_var2)) +
geom_bar(stat="identity", position = "dodge", alpha = .3) +
geom_hline(yintercept = c(-1.96,1.96,-1.64,1.64), color = 'black', linetype = 'longdash', size = .5) +
geom_text(data = NULL, x = 1, y = 2.11, label = "95%") +
geom_text(data = NULL, x = 1, y = 1.49, label = "90%") +
labs(list(title = paste("Deviation (standardized) for ",cinp$ct_var2," versus ",cinp$ct_var1, sep = ""), x = cinp$ct_var1))
}
if(cinp$ct_deviation) {
tab <- meltTable(result$cst$deviation)
colnames(tab)[c(2,3)] <- c(cinp$ct_var1, cinp$ct_var2)
plots[['deviation']] <- ggplot(tab, aes_string(x = cinp$ct_var1, y = "value", fill = cinp$ct_var2)) +
geom_bar(stat="identity", position = "dodge", alpha = .3) + ylim(-1,1) +
labs(list(title = paste("Deviation (percentage) for ",cinp$ct_var2," versus ",cinp$ct_var1, sep = ""), x = cinp$ct_var1))
}
if(cinp$ct_expected) {
tab <- meltTable(result$cst$expected)
# Keep the row ordering consistent with the factor levels in the data.
tab$rnames <- factor(tab$rnames,levels=levels(dat[,1]))
plots[['expected']] <- ggplot(tab, aes_string(x = 'rnames', y = "value", fill = "variable")) +
# geom_bar(stat="identity", position = "dodge", alpha = .3) +
geom_bar(position = "fill", alpha = .3) +
labs(list(title = paste("Expected values for ",cinp$ct_var2," versus ",cinp$ct_var1, sep = ""),
x = "", y = "", fill = cinp$ct_var2))
}
# Always-shown panel: observed proportions of ct_var2 within each ct_var1.
plots[['stacked']] <- ggplot(dat, aes_string(x = cinp$ct_var1, fill = cinp$ct_var2)) + geom_bar(position = "fill", alpha=.3) +
labs(list(title = paste("Observed values for ",cinp$ct_var2," versus ",cinp$ct_var1, sep = ""),
x = "", y = "", fill = cinp$ct_var2))
# plots[['observed']] <- ggplot(dat, aes_string(x = cinp$ct_var1, fill = cinp$ct_var2)) + geom_histogram(position = "dodge", alpha=.3) +
# labs(list(title = paste("Crosstab of ",cinp$ct_var2," versus ",cinp$ct_var1, sep = ""),
# x = '', y = "Count", fill = cinp$ct_var2))
do.call(grid.arrange, c(plots, list(ncol = 1)))
}
|
c06f91a7806d349f73ea2026a396c9f3bfcf53b4
|
c0b29712073ce54f3d75e864bdd1f770c688d236
|
/script/archive/package_mng.R
|
039c8f9a8989367bbc8eddf61866e819a4629d9c
|
[
"MIT"
] |
permissive
|
achiral/rbioc
|
fb6f173430f974e68b5e7e3af6e3e464de9f4d78
|
1a0c5ab2d1eebe2161ba518853179aa7ae2c50a8
|
refs/heads/main
| 2023-08-31T00:43:54.542237
| 2021-10-21T03:18:10
| 2021-10-21T03:18:10
| 414,892,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 899
|
r
|
package_mng.R
|
# 分析・開発用のコード
#
# install_packages.R でインストールしたパッケージを普通にロードして使用する。
# 例:
# library(palmerpenguins)
# head(penguins)
setwd("/home/rstudio/project")
# パッケージはhome/rstudio/project/dev/packages.Rにて管理すると良い
# 新しいパッケージをライブラリにインストールするときは `renv::install()` 関数を使用
# renv::install("tidyverse")
# ライブラリの状態を記録するには `renv::snapshot()` 関数を使用
# ライブラリにインストールされたパッケージとそのバージョン情報を [`renv.lock`](./renv.lock) ファイルに記録
# GitHub にプッシュし、チームメンバー間で共有
# renv::snapshot()
# `renv.lock` ファイルのライブラリ状態を復元するときは`renv::restore()` 関数を使用
# renv::snapshot()
|
fef3489db80458b01486e3272fc8d8447d7aae38
|
948f76002c5f0422f906a2ff805dbfc9cf82988e
|
/R/00_clean_data.R
|
cccdb9f25bc68016fc3e5f4099b479b590d1be86
|
[] |
no_license
|
ziyint/Info_repo
|
f642213d94a0bf5c19b4f58f7565057d2be23f5f
|
d0fc4cb0c7cef7e75968a96cca223d935c068956
|
refs/heads/master
| 2023-01-24T20:37:49.024485
| 2020-11-24T01:16:14
| 2020-11-24T01:16:14
| 304,959,505
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 823
|
r
|
00_clean_data.R
|
#! /usr/local/bin/Rscript
# Read in dataset
met_data <- read.table('raw_data/data.txt', header = T)
met_intensity <- met_data[4:53]
# Calculate the number of samples detected each of metabolic features
total_notmissing <- apply(met_intensity, 2, function(x){return(length(which(!is.na(x))))})
# Remove metabolic features if signal is not detected in at least 10% of all samples
good_features <- which(total_notmissing>0.1*dim(met_data[1]))
final_met_int <- met_intensity[ , good_features]
# Final dataset
basic_info <- cbind(met_data[1:3], met_data[54:56])
clean_data <- cbind(basic_info, final_met_int)
# Make gender varible a factor variable and label each level
clean_data$gender <- factor(clean_data$gender,
level = c(0, 1),
labels = c("Male", "Female"))
write.csv(clean_data, "processed_data/clean_data.txt")
|
e87b145e1e0d4fc43908ed068212e00432a32368
|
80f1a3756899b0f6b36f4cdfc039883138b0abc8
|
/test_jags_part2.R
|
d58297b7611ef48fdd7e1127b93c3adbf883f487
|
[] |
no_license
|
nushiamme/AShankar_hummers
|
fffde730af3e55b43cbb18e6d093375b0d29e723
|
c859fddd8f71501fdcb6dee034f8034e69cb7b22
|
refs/heads/master
| 2023-06-22T18:01:58.306283
| 2023-06-19T20:02:04
| 2023-06-19T20:02:04
| 13,457,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,315
|
r
|
test_jags_part2.R
|
sink("Bayesian/LogN.jags")
cat("
model {
for (i in 1:N) {
y[i] ~ dbern (pr.hat[i]) #no need to change a thing
logit(pr.hat[i]) <- a[species[i]] + b1*x1 #x1 is individual mass you give it to model
e.y[i]<- ilogit(y[i] - logit(pr.hat[i])) #data-level errors, to estimate R2, need to be rescaled --inverse logit--
#or they give nonsensical results
}
b1 ~ dnorm (0, 0.0001)
CME[1:J] ~ dmnorm(a.hat[],tau_resid[,]) #multivariate normal distribution with correlated residuals
for (j in 1:J) { #J = total species
a[j] ~ dnorm (CME[j], tau.a) #scaled back to normal distribution as binomial yielded only negative b0s
a.hat[j] <- mu.a #no predictors
e.a[j] <- a[j] - a.hat[j] #group-level errors, to estimate R2
}
tau_resid[1:J,1:J] ~ dwish(invA[,],J) #where invA is vcv matrix based on phylogeny
mu.a ~ dnorm (0, 0.0001)
tau.a <- pow(sigma.a, -2)
sigma.a ~ dunif (0, 100)
}
#Assess Model Fit
#Fit discrepancy statistics
eval[x]<-mu[Bird[x],Plant[x],Time[x]]
E[x]<-pow((Yobs[x]-eval[x]),2)/(eval[x]+0.5)
ynew[x]~dnorm(mu[Bird[x],Plant[x],Time[x]],tau_obs)
E.new[x]<-pow((ynew[x]-eval[x]),2)/(eval[x]+0.5)
}
for (i in 1:Birds){
alpha[i] ~ dnorm(intercept,tau_alpha)
beta1[i] ~ dnorm(gamma1,tau_beta1)
beta2[i] ~ dnorm(gamma2,tau_beta2)
beta3[i] ~ dnorm(gamma3,tau_beta3)
}
#Hyperpriors
#Slope grouping
gamma1~dnorm(0,0.0001)
gamma2~dnorm(0,0.0001)
gamma3~dnorm(0,0.0001)
#Intercept grouping
intercept~dnorm(0,0.0001)
# Group intercept variance
tau_alpha ~ dgamma(0.0001,0.0001)
sigma_int<-pow(1/tau_alpha,0.5)
#Observation variance, turning precision to sd
tau_obs ~ dgamma(0.0001,0.0001)
sigma_obs<-pow(1/tau_obs,0.5)
#Slope variance, turning precision to sd
tau_beta1 ~ dgamma(0.0001,0.0001)
sigma_slope1<-pow(1/tau_beta1,0.5)
tau_beta2 ~ dgamma(0.0001,0.0001)
sigma_slope2<-pow(1/tau_beta2,0.5)
tau_beta3 ~ dgamma(0.0001,0.0001)
sigma_slope3<-pow(1/tau_beta3,0.5)
#derived posterior check
fit<-sum(E[]) #Discrepancy for the observed data
fitnew<-sum(E.new[])
}
",fill=TRUE)
sink()
|
d7f6414cb9c0406703cd58e0364a666eabb32347
|
dae5069a21b7c7d2077291a2325f244245be7f10
|
/modules/auth/logIn.R
|
771fe0a80e814d515fea32cd07c97aae5fb6465b
|
[] |
no_license
|
JulioMh/web-app-shiny
|
2b4777af13a40b08560d5a7f3db7a052b173fe9d
|
a0ff1f987921f0a9fc8aeb01c0dca5ff5e968e67
|
refs/heads/master
| 2022-05-30T19:51:42.134040
| 2020-05-03T14:33:35
| 2020-05-03T14:33:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,165
|
r
|
logIn.R
|
logInUI <- function(id) {
ns <- NS(id)
tagList(
div(
id = "login",
style = "width: 500px; max-width: 100%; margin: 0 auto; padding: 20px;",
wellPanel(
h2("LOG IN", class = "text-center", style = "padding-top: 0;color:#333; font-weight:600;"),
textInput(
ns("userName"),
placeholder = "Username",
label = tagList(icon("user"), "Username")
),
passwordInput(
ns("passwd"),
placeholder = "Password",
label = tagList(icon("unlock-alt"), "Password")
),
br(),
textOutput(ns("missing")),
br(),
div(
style = "text-align: center;",
actionButton(
ns("login"),
"LOG IN",
style = "color: white; background-color:#3c8dbc;
padding: 10px 15px; width: 150px; cursor: pointer;
font-size: 18px; font-weight: 600;"
),
br(),
actionLink(ns("signup"), "SIGN UP")
),
br(),
br(),
textOutput(ns("res")),
br()
)
)
)
}
logIn <- function(input, output, session) {
output$missing <- renderText(
validate(
need(input$userName != '', label= "User name"),
need(input$passwd != '', label= "Password")
)
)
observeEvent(input$signup, {
change_page("signup")
})
observeEvent(input$login, isolate({
req(input$userName)
req(input$passwd)
res <- performanceLogIn(input$userName, input$passwd)
output$res <- renderText(res)
}))
}
performanceLogIn <- function(userName, password){
db <-
dbConnect(
MySQL(),
dbname = databaseName,
host = options()$mysql$host,
port = options()$mysql$port,
user = options()$mysql$user,
password = options()$mysql$password
)
query <- sprintf(
"select * from User where userName= '%s' and password = '%s'",
userName,
password
)
response <- dbGetQuery(db,query)
if(nrow(response)==0){
res <- "Wrong user or password"
}else{
res <- "Welcome!"
}
dbDisconnect(db)
return(res)
}
|
0952c977fb59432b2035a38db484495bc30f797e
|
1b1d051d9bc90d26694a6fc76839ad1f128abede
|
/Machine Learning/course project/code/data_import.R
|
206cb9016d0101a435657eef06c5a507516da0ff
|
[] |
no_license
|
jessica-dyer/datasciencecoursera
|
a391a2fbd7f6355600ea754aa3e65f1c5037dc19
|
0ea3def980b5a1a1288ae216cefd202eb2a57d28
|
refs/heads/master
| 2023-02-28T16:33:12.037914
| 2021-02-09T20:42:20
| 2021-02-09T20:42:20
| 302,464,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 449
|
r
|
data_import.R
|
##################################################
## Project: Machine learning course project
## Script purpose: Data import
## Date: 2021-02-06
## Author: Jessica Dyer
##################################################
train_url <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
test_url <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
train <- read.csv(train_url)
# test <- read.csv(test_url)
|
bc563a92e3e7bce530e53d57448c07de2aec414c
|
b72fb283f3d8937d4ef56b1cd61b584910add460
|
/man/metropolis.Rd
|
b94e6e285be886819fda57ff15b7210837545873
|
[] |
no_license
|
IQSS/binb
|
cda8ebbf66cfad78ec9ba325ba1e0db839a8f2ef
|
25a1a71caad094c254b1e33d5cbd8b6316258bac
|
refs/heads/master
| 2020-03-28T21:32:14.534034
| 2018-09-16T13:28:08
| 2018-09-16T13:28:08
| 149,163,946
| 0
| 2
| null | 2018-09-17T17:39:57
| 2018-09-17T17:39:56
| null |
UTF-8
|
R
| false
| true
| 2,520
|
rd
|
metropolis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binb.R
\name{metropolis}
\alias{metropolis}
\alias{iqss}
\title{Binb is not Beamer - Metropolis-themed PDF Presentation}
\usage{
metropolis(toc = FALSE, slide_level = 2, incremental = FALSE,
fig_width = 10, fig_height = 7, fig_crop = TRUE,
fig_caption = TRUE, dev = "pdf", df_print = "default",
fonttheme = "default", highlight = "tango", keep_tex = FALSE,
latex_engine = "xelatex", citation_package = c("none", "natbib",
"biblatex"), includes = NULL, md_extensions = NULL,
pandoc_args = NULL)
iqss(toc = FALSE, slide_level = 3, incremental = FALSE,
fig_width = 10, fig_height = 7, fig_crop = TRUE,
fig_caption = TRUE, dev = "pdf", df_print = "default",
fonttheme = "default", highlight = "haddock", keep_tex = FALSE,
latex_engine = "xelatex", citation_package = c("none", "natbib",
"biblatex"), includes = NULL, md_extensions = NULL,
pandoc_args = NULL)
}
\arguments{
\item{toc}{A logical variable defaulting to \code{FALSE}.}
\item{slide_level}{A numeric variable defaulting to two.}
\item{incremental}{A logical variable defaulting to \code{FALSE}.}
\item{fig_width}{A numeric variable defaulting to ten.}
\item{fig_height}{A numeric variable defaulting to seven.}
\item{fig_crop}{A logical variable defaulting to \code{TRUE}.}
\item{fig_caption}{A logical variable defaulting to \code{TRUE}.}
\item{dev}{A character variable defaulting to \dQuote{pdf}.}
\item{df_print}{A character variable defaulting to \dQuote{default}.}
\item{fonttheme}{A character variable defaulting to \dQuote{default}.}
\item{highlight}{A character variable defaulting to \dQuote{tango}.}
\item{keep_tex}{A logical variable defaulting to \code{FALSE}.}
\item{latex_engine}{A character variable defaulting to \dQuote{xelatex}.}
\item{citation_package}{An optional character variable with possible value
\dQuote{none}, \dQuote{natbib} (the default), or \dQuote{biblatex}.}
\item{includes}{An optional character variable defaulting to \code{NULL}.}
\item{md_extensions}{An optional character variable defaulting to \code{NULL}.}
\item{pandoc_args}{An optional character variable defaulting to \code{NULL}.}
}
\value{
RMarkdown content processed for rendering.
}
\description{
A template for RMarkdown-based Beamer presentations in the \sQuote{Metropolis}
style by Matthias Vogelgesang and others.
}
\details{
Not all options and customizations available at the LaTeX level are implemented
yet.
}
\author{
Dirk Eddelbuettel
}
|
9ba40607cac06848d324bf89693fc1f64cedc459
|
b24e55de85c6b09921f212b6d1108647096f4d5f
|
/rebuttal1/get_info_rev2_point2.R
|
0d48ba19f69a0ff49efbdd333f1ffe9811adb822
|
[] |
no_license
|
gui11aume/REPLAY_TRIP
|
cdc8ab59aec590d7b49f73b8911dcbb56a564909
|
76128e7e9840bc14e8aeeca196c4bf1457829da1
|
refs/heads/master
| 2021-01-12T15:30:49.960925
| 2016-11-10T12:18:35
| 2016-11-10T12:18:35
| 71,796,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 262
|
r
|
get_info_rev2_point2.R
|
load("../Fig4/models.rda")
modenh = models[[5]]
# Start writing to file.
sink(file("info_rev2_point2.txt"))
cat("Predictive power H3K27ac/H3K4me1\n")
var(modenh$fitted.values) / (var(modenh$residuals + modenh$fitted.values))
# End writing to file.
sink(NULL)
|
a6964b7f5356262c3514969cf4849ec186f22980
|
f1f749fed0ff90367f5344644c61da8583036881
|
/man/rast_spec.Rd
|
81a167f5e64c5679ae7c4f3b8d032fae23c70d38
|
[] |
no_license
|
ozjimbob/raaqfs
|
72cc1ac9b869c66b7ba1ebf6d2ad8b4a4eaf78e0
|
2037327991484de997b82ace8fda6c8fc7c081b4
|
refs/heads/master
| 2020-04-06T07:09:31.808134
| 2016-09-01T01:42:08
| 2016-09-01T01:42:08
| 64,715,187
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 716
|
rd
|
rast_spec.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rast_spec.r
\name{rast_spec}
\alias{rast_spec}
\title{Return raster grid of variable for a time period}
\usage{
rast_spec(input, spec, hour)
}
\arguments{
\item{input}{An open ncdf4 object.}
\item{spec}{Full species name (4 chars) including spaces.}
\item{hour}{Hour (1-24) to return}
}
\value{
Raster grid of species concentration.
}
\description{
Return raster grid of variable for a time period
}
\examples{
library(ncdf4)
library(raster)
tfile <- system.file("extdata","aust_20140212_nat_0.800dg_morw.nc",package="raaqfs")
mydata <- nc_open(tfile)
tra <- rast_spec(mydata,"EC25",1)
plot(tra)
library(maps)
map("world",add=TRUE)
}
|
c77f55f687c449696091eeb59e8e8e152631a5fb
|
f55dabad5fc47799fd95e0761d6af9997fbabbed
|
/cachematrix.R
|
5e1e27282f0c5bd24785011d7dcb29312be6c25c
|
[] |
no_license
|
fsaavedraolmos/ProgrammingAssignment2
|
eb990ca7eaf2507e5f5b6149fcee5475df882ce4
|
abd4bb5e17b854e1415252f1b3235b33de0220bd
|
refs/heads/master
| 2021-01-10T11:06:25.275318
| 2016-01-25T01:34:10
| 2016-01-25T01:34:10
| 50,317,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,704
|
r
|
cachematrix.R
|
## This following function takes a square invertible matrix
## and return a list of functions to:
## 1.- Set the matrix.
## 2.- Get the matrix.
## 3.- Set the inverse of matrix.
## 4.- Get the inverse of matrix.
## Finally, this is used as input in cacheSolve function.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
## Set the matrix
set <- function(y){
x <<- y
m <<- NULL
}
## Get the matrix
get <- function() x
## Set the inverse of matrix.
setinverse <- function(solve) m <<- solve
## Get the inverse of matrix.
getinverse <- function() m
## Return the list of functions.
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## In cacheSolve function, we take the matrix and
## calculate the inverse.
## Finally we return it.
cacheSolve <- function(x, ...) {
## Get
m <- x$getinverse()
if(!is.null(m)) {
## If the value already exists, returns m.
message("getting cached data")
return(m)
}
## Else
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
return(m)
}
## Running Example
# > matriz <- matrix(runif(9,1,100),3,3)
# Show "matriz"
# > matriz
# [,1] [,2] [,3]
# [1,] 90.15773 26.488523 72.41762
# [2,] 60.19124 9.766337 91.25337
# [3,] 96.55614 47.140896 67.54101
# Generating the cache matrix
# > matrixExample <- makeCacheMatrix(matriz)
# Finally calculate or retrieve the value of
# Inverted matrix using cacheSolve:
# > cacheSolve(matrixExample)
# [,1] [,2] [,3]
# [1,] -0.162284059 -0.12840965 0.22513611
# [2,] 0.194560788 -0.05779459 -0.08093354
# [3,] 0.004915707 0.20579781 -0.04886617
|
6793746b8c38f353e74e7a71f036f678dd9aa94e
|
c6819f8a6b00e273bcdcae567a3c9be4ca3b98b3
|
/Table1Reviewed.R
|
fd2e763608c822b8da6ea062d12a08d4309e2d49
|
[] |
no_license
|
FelipeMonts/NAPTSoilsData
|
188f67f91e2ce64c3a7a11ae3bcd2c6c1b8f9753
|
bb910fe458dace64a355d29f4f1bad61aadc5f8d
|
refs/heads/master
| 2023-07-02T13:28:44.196217
| 2019-11-22T18:48:24
| 2019-11-22T18:48:24
| 129,754,077
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,057
|
r
|
Table1Reviewed.R
|
##############################################################################################################
#
#
# Program to Plot table 1 inthe laser diffraction paper, with all the information requested by the reviewwers
#
# Felipe Montes 2018 12 13
#
###############################################################################################################
# Tell the program where the package libraries are #####################
.libPaths("C:/Felipe/SotwareANDCoding/R_Library/library") ;
# Preliminaries
rm(list = ls())
# Set your working directory to some place you can find
setwd("C:/Felipe/LaserDifractionSoilTextureAnalysis/NAPTSoilsData") ;
# Load the data from (file='NAPTTexturePlot.RData') obtained from runing the NAPTTexturePlot.R
# Package for writing and reading excel files
library(XLConnect) ;
library(stringr) ;
load(file='NAPTTexturePlot.RData');
Table1.data<-readWorksheetFromFile("C:\\Felipe\\LaserDifractionSoilTextureAnalysis\\Manuscript\\Table120180927.xlsx", sheet="New",startCol= 1, endCol=6) ;
head(Table1.data)
str(Table1.data)
head(Paper.Samples)
str(Paper.Samples)
Table1Papersamples<-merge(Table1.data, Paper.Samples, by.x='Sample',by.y='SAMPLE', all.x=T) ;
writeWorksheetToFile("C:\\Felipe\\LaserDifractionSoilTextureAnalysis\\Manuscript\\Table120180927.xlsx",Table1Papersamples, sheet="Table1Papersamples") ;
head(NAPT.all)
str(NAPT.all)
# Get all the data from the NAPT.all files, screened for the selected samples
#first transform the sample name to a compatible name
NAPT.all$YEAR<-str_split(NAPT.all$SAMPLE, '-', simplify=T )[,1] ;
NAPT.all$SampleNo<-str_split(NAPT.all$SAMPLE, '-', simplify=T )[,2] ;
NAPT.all$Sample<-paste0(NAPT.all$YEAR,'-',NAPT.all$SampleNo) ;
head(NAPT.all)
Table1NAPTall<-unique(NAPT.all[which(NAPT.all$Sample %in% Table1.data$Sample),]) ;
head(Table1NAPTall)
# write it into an excell spreadsheet
writeWorksheetToFile("C:\\Felipe\\LaserDifractionSoilTextureAnalysis\\Manuscript\\Table120180927.xlsx",Table1NAPTall, sheet="Table1NAPTalls") ;
|
a2e55a2ae90b7ddd1474a8e04d4afadb3251851e
|
7c38caa385ff78efcc4c3628fc0c0552896ecb17
|
/plot1.R
|
e36b9c5c9ce5a7aca4a3583b603f76e3284a4905
|
[] |
no_license
|
AshitoshN/Exploratory_data_Analysis
|
5cbbf2c9bf408e755bfc441398b569fb098fd10d
|
7cee685d4c3a2e139bb239743b99639d6593138a
|
refs/heads/master
| 2022-10-15T23:41:21.208819
| 2020-06-11T12:24:15
| 2020-06-11T12:24:15
| 271,526,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
r
|
plot1.R
|
#Libraries
library(data.table)
#Read the data as data frame
data <- fread("C:/Users/LENOVO/Desktop/Data_Science/Exploratory_analysis/household_power_consumption.txt")
#Subsetting the data for date 1/2/2007 & 2/2/2007
data1 <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
#now we are ready with the data So need to plot the graphs
data1$Global_active_power <- as.numeric(data1$Global_active_power)
#plot 1
hist(data1$Global_active_power,col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
#save plot as png format
dev.copy(png, "plot1.png")
dev.off()
|
b4cb180aeeac55e3ec220f2d186876a9aa62058b
|
e161195a09e161f978e8610a345bd8320806a692
|
/man/simcub.Rd
|
94faf9721aba869e776adc6427c0e904874fcfd2
|
[] |
no_license
|
cran/CUB
|
835be9f64528f974025d8daaff7cc1f99f2eae1a
|
7c47f960512aa90db261ba9ed41006a191440c1a
|
refs/heads/master
| 2020-04-06T21:07:58.688216
| 2020-03-31T14:30:19
| 2020-03-31T14:30:19
| 48,078,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 652
|
rd
|
simcub.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simcub.R
\name{simcub}
\alias{simcub}
\title{Simulation routine for CUB models}
\usage{
simcub(n,m,pai,csi)
}
\arguments{
\item{n}{Number of simulated observations}
\item{m}{Number of ordinal categories}
\item{pai}{Uncertainty parameter}
\item{csi}{Feeling parameter}
}
\description{
Generate \eqn{n} pseudo-random observations following the given CUB distribution.
}
\examples{
n<-300
m<-9
pai<-0.4
csi<-0.7
simulation<-simcub(n,m,pai,csi)
plot(table(simulation),xlab="Ordinal categories",ylab="Frequencies")
}
\seealso{
\code{\link{probcub00}}
}
\keyword{distribution}
|
d88e741d77d8aaefd78aa4ae1e0c772c7137eca1
|
b1d604355be03002727270fe211b299f6436d124
|
/election prediction.R
|
9b423b1a778d1f943687ff39191a88c1b45ccf6b
|
[] |
no_license
|
ckeating/AdvAnalytics
|
3837f64aaec1a7101730f28c4a4376aad4c39a0c
|
599fa92600089d508bd1d7ae5b0659c253c21399
|
refs/heads/master
| 2021-01-11T18:58:06.762213
| 2016-12-17T15:30:36
| 2016-12-17T15:30:36
| 79,280,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,906
|
r
|
election prediction.R
|
##initialize session
#setwd("C:/Users/Chibot/Dropbox/edgedata")
setwd("C:/Users/Craig/Dropbox/edgedata")
polling=read.csv("pollingdata.csv")
library(caTools)
library(ROCR)
library(mice)
#randomly split data into training and test
pollingorig=polling;
str(polling)
summary(polling)
table(polling$Year)
#create data frame consisting only of the variables with missing values
simple=polling[c("Rasmussen","SurveyUSA","PropR","DiffCount")]
summary(simple)
set.seed(144)
imputed=complete(mice(simple))
summary(imputed)
#copy variables back into polling dataframe
polling$Rasmussen=imputed$Rasmussen
polling$SurveyUSA=imputed$SurveyUSA
summary(polling)
summary(pollingorig)
#train on data from the 2004 and 2008 elections
#test on 2012 election
# break into testing and training sets
Train=subset(polling,Year==2004 | Year==2008)
Test=subset(polling,Year==2012)
#first step is to understand our BASELINE model
#to do that we will look at the breakdown of the
#DEPENDENT variable in the training set
#this shows:
#in 47 of 100, the Democrat won the state, while 53 of 100 the Republican
#won the state
table(Train$Republican)
View(Train)
#this baseline model will have an accuracy of 53%
#on the
#smart baseline
#rows are TRUE outcome
#columns are smart baseline predictions
#42 observations where smart baseline predicted a
#democrat win, and a democrat one, 2 (zero column) where
#the results were inconclusive, and 3 where the prediction was
#Republican, but a Democrat won
# the baseline is much better as it only made 3 mistakes
#and 3 inconclusives
table(Train$Republican,sign(Train$Rasmussen))
#testing for multicollinearity
#run a correlation between ONLY the numeric independent
#variables
polling=read.csv("PollingData_Imputed.csv")
str(polling)
str(Train)
#choosing dependent variable
#should be the one that is most HIGHLY correlated to the dependent variable
#Republican, which is PropR at 94.84%
cor(Train[c("Rasmussen","SurveyUSA","PropR","DiffCount","Republican")])
#build model
#high coefficent for PropR
#the AIC measuring strength of the mode is 19.78
mod1=glm(Republican~PropR,data=Train,family = "binomial")
summary(mod1)
#compute predicted probabilites that Republican is going to win
#on the training set
pred1=predict(mod1,type="response")
table(Train$Republican,pred1>=0.5)
#see if we can improve this model by selecting
#another variable, choose the LEAST correlated independent
#variables - the choices would be Rasmussen and DiffCount
#OR SurveyUSA and DiffCount
cor(Train[c("Rasmussen","SurveyUSA","PropR","DiffCount","Republican")])
mod2=mod1=glm(Republican~SurveyUSA+DiffCount,data=Train,family = "binomial")
pred2=predict(mod2,type="response")
table(Train$Republican,pred2>=0.5)
#evaluate model on testing set
table(Test$Republican,sign(Test$Rasmussen))
TestPrediction=predict(mod2,newdata=Test,type="response")
table(Test$Republican,TestPrediction>=0.5)
|
b04115680ceba77a9e4e36a46c2a10e606d6ab8a
|
8b0d26cb31f02921899309f15e8f68639d74719d
|
/scripts/ewma_model_2018.R
|
d58a3f8448a0229beefb72a15512eeca8ea7fbae
|
[] |
no_license
|
algoquant/lecture_slides
|
49fcd0d1e2a3202580f0b23ced788fe7a80dcae0
|
5a7dbfa36a15b4a53bbbffa986b79550c5aa6e7e
|
refs/heads/master
| 2023-05-25T22:44:54.688380
| 2023-05-24T12:14:43
| 2023-05-24T12:14:43
| 13,590,208
| 29
| 17
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,257
|
r
|
ewma_model_2018.R
|
# Functions for simulating EWMA strategies
# library(HighFreq) # load package HighFreq
# simulate single EWMA model using historical ohlc data
simu_ewma <- function(ohlc, lambdav=0.01, wid_th=251, bid_offer=0.001, tre_nd=1) {
# calculate EWMA prices
weights <- exp(-lambdav*1:wid_th)
weights <- weights/sum(weights)
closep <- quantmod::Cl(ohlc)
ew_ma <- stats::filter(as.numeric(closep), filter=weights, sides=1)
ew_ma[1:(wid_th-1)] <- ew_ma[wid_th]
# determine dates right after EWMA has crossed prices
indic <- tre_nd*xts::xts(sign(as.numeric(closep) - ew_ma), order.by=index(ohlc))
trade_dates <- (rutils::diffit(indic) != 0)
trade_dates <- which(trade_dates) + 1
trade_dates <- trade_dates[trade_dates<NROW(ohlc)]
# calculate positions, either: -1, 0, or 1
position_s <- rep(NA_integer_, NROW(closep))
position_s[1] <- 0
position_s[trade_dates] <- rutils::lagxts(indic)[trade_dates]
position_s <- xts::xts(na.locf(position_s), order.by=index(ohlc))
openp <- quantmod::Op(ohlc)
prices_lag <- rutils::lagxts(closep)
position_lagged <- rutils::lagxts(position_s)
# calculate transaction costs
costs <- 0.0*position_s
costs[trade_dates] <- 0.5*bid_offer*abs(position_lagged[trade_dates] - position_s[trade_dates])*openp[trade_dates]
# calculate daily profits and losses
returns <- position_lagged*(closep - prices_lag)
returns[trade_dates] <- position_lagged[trade_dates] * (openp[trade_dates] - prices_lag[trade_dates]) + position_s[trade_dates] * (closep[trade_dates] - openp[trade_dates]) - costs
output <- cbind(position_s, returns)
colnames(output) <- c("positions", "returns")
output
} # end simu_ewma
# simulate two EWMA model using historical ohlc data
simu_ewma2 <- function(ohlc, lambda1=0.25, lambda2=0.05, wid_th=51) {
# calculate EWMA prices
weights1 <- exp(-lambda1*1:wid_th)
weights1 <- weights1/sum(weights1)
weights2 <- exp(-lambda2*1:wid_th)
weights2 <- weights2/sum(weights2)
# calculate open and close prices
openp <- Op(ohlc)
closep <- Cl(ohlc)
# adjust close price to start at zero
openp <- openp - as.numeric(closep[1, ])
closep <- closep - as.numeric(closep[1, ])
prices_lag <- rutils::lagxts(closep)
# filter the prices using weights
ewma1 <- filter(closep, filter=weights1, sides=1)
ewma1[1:(wid_th-1)] <- ewma1[wid_th]
ewma2 <- filter(closep, filter=weights2, sides=1)
ewma2[1:(wid_th-1)] <- ewma2[wid_th]
# determine dates right after EWMAs have crossed
indic <- xts(sign(ewma1 - ewma2), order.by=index(ohlc))
trade_dates <- (rutils::diffit(indic) != 0)
trade_dates <- which(trade_dates) + 1
trade_dates <- trade_dates[trade_dates<NROW(ohlc)]
# calculate positions, either: -1, 0, or 1
position_s <- rep(NA_integer_, NROW(closep))
position_s[1] <- 0
position_s[trade_dates] <- rutils::lagxts(indic)[trade_dates]
position_s <- xts(na.locf(position_s), order.by=index(ohlc))
position_lagged <- rutils::lagxts(position_s)
# calculate daily profits and losses
returns <- position_lagged*(closep - prices_lag)
returns[trade_dates] <-
position_lagged[trade_dates] *
(openp[trade_dates] - prices_lag[trade_dates]) +
position_s[trade_dates] *
(closep[trade_dates] - openp[trade_dates])
output <- cbind(ewma1, ewma2, position_s, returns)
colnames(output) <- c("ewma1", "ewma2", "positions", "returns")
output
} # end simu_ewma2
# define aggregation function
agg_regate <- function(ohlc, lambdavs, ...) {
sapply(lambdavs, function(lambdav) {
# simulate EWMA strategy and calculate Sharpe ratio
returns <- simu_ewma(ohlc=ohlc, lambdav=lambdav, ...)[, "returns"]
sqrt(260)*sum(returns)/sd(returns)/NROW(returns)
}) # end sapply
} # end agg_regate
# define functional for performing aggregations
roll_agg <- function(xtes, look_backs, FUN, ...) {
# perform lapply() loop over look_backs
agg_s <- lapply(look_backs,
function(look_back) {
FUN(xtes[look_back], ...)
}) # end lapply
# rbind list into single xts or matrix
agg_s <- rutils::do_call_rbind(agg_s)
if (!is.xts(agg_s))
agg_s <- xts(agg_s, order.by=
index(xtes[unlist(lapply(look_backs, last))]))
agg_s
} # end roll_agg
|
80b444788f4ac747102251e6ffa4f7422ecf422a
|
4d8c0ab2151fe6c9abbef558808faac46cf4271b
|
/nonspatial.r
|
2c916b387f2df5592b8fd480d11cf94b0ace2c39
|
[] |
no_license
|
ShilpaBatthineni/Road-Traffic-Injuries
|
ea428c7315946a1c19eb949eff8b7e35963ed2ec
|
c35804a4a6f61c55b3211a0fe5224fed61522b5c
|
refs/heads/master
| 2021-01-20T20:18:26.246725
| 2016-06-17T16:02:46
| 2016-06-17T16:02:46
| 60,674,294
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
nonspatial.r
|
y<-rtia$Accident_Severity
days<-rtia$Day_of_Week
wdays<-table(days,y)
chisq.test(wdays)
month<-rtia$Month
months<-table(month,y)
chisq.test(months)
year<-rtia$Year
Years<-table(year,y)
chisq.test(Years)
date<-rtia$Date
dates<-table(date,y)
chisq.test(dates)
time<-rtia$Time
time1<-table(time,y)
chisq.test(time1)
mode<-rtia$Mode
modes<-table(mode,y)
chisq.test(modes)
|
fc3f2f80707b4ccbc0f6ceeb3be97b3729227935
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/EstMix/man/calc_2d.Rd
|
6f7868c96920e832d0ad1fa12ad4235571607d8a
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,165
|
rd
|
calc_2d.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_2d.R
\name{calc_2d}
\alias{calc_2d}
\title{Return mixture estimation of a normal and 2 tumors
Takes BAF, LRR, chr, x, gt, seg_raw}
\usage{
calc_2d(BAF, LRR, chr, x, GT, seg_raw)
}
\arguments{
\item{BAF}{vector containing B allen frequency (BAF)}
\item{LRR}{vector}
\item{chr}{vector}
\item{x}{vector}
\item{GT}{vectors of factors containing genotype}
\item{seg_raw}{dataframe about segmentation}
}
\value{
\item{sol1}{a numeric vector of length 2. It provides the estimated percentages of normal and tumor from the best solution. The first number is the percentage of the estimated normal percentage. The second number-1 is the percentage of the estimated tumor 1 percentage} \item{sol2}{a numeric vector of length 2. It provides the estimated percentages of normal and tumor from the second best solution. The first number is the percentage of the estimated normal percentage. The second number-1 is the percentage of the estimated tumor 1 percentage}
}
\description{
Return mixture estimation of a normal and 2 tumors
Takes BAF, LRR, chr, x, gt, seg_raw
}
\keyword{internal}
|
731c53bccc0915eea53d95f955e4a55f68b4c8b1
|
a17cf22be2304c96d267fc1b68db7b7279c4a293
|
/R/fastaTools.R
|
1f7844da19bfaf0e69f8a5069f158d97261f0794
|
[] |
no_license
|
robertdouglasmorrison/DuffyTools
|
25fea20c17b4025e204f6adf56c29b5c0bcdf58f
|
35a16dfc3894f6bc69525f60647594c3028eaf93
|
refs/heads/master
| 2023-06-23T10:09:25.713117
| 2023-06-15T18:09:21
| 2023-06-15T18:09:21
| 156,292,164
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,386
|
r
|
fastaTools.R
|
# fastaTools.R -- collection of FASTA file manipulation routines
`loadFasta` <- function( file="file.fasta", mode=c("character","BStrings"), verbose=TRUE, short.desc=TRUE) {
require( Biostrings)
file <- allowCompressedFileName( file)
if (verbose) cat( "\nLoading Fasta file: ", file, "...")
fa <- readBStringSet( file)
# do we send back character strings or BStrings?
mode <- match.arg( mode)
nams <- names(fa)
if ( mode == "character") {
seqs <- as.character(fa, use.names=FALSE)
} else {
seqs <- fa
}
# for consistency with other tools, trim the descriptor after the first blank
if ( short.desc) {
nams <- sub( " .+", "", nams)
}
return( list( "desc"=nams, "seq"=seqs))
}
`readFasta` <- function( file="file.fasta", verbose=TRUE, short.desc=TRUE) {
return( loadFasta( file, verbose=verbose, short.desc=short.desc))
}
`as.Fasta.data.frame` <- function( fasta) {
return( data.frame( "desc"=fasta$desc, "seq"=fasta$seq, stringsAsFactors=FALSE))
}
`as.Fasta` <- function( desc, seq) {
if ( length( desc) != length( seq)) stop( "as.Fasta: unequal length arguments")
return( list( "desc"=desc, "seq"=seq))
}
`is.Fasta` <- function( x) return( is.list(x) && all( c("desc","seq") %in% names(x)))
# writeFasta -- write a Fasta object to disk in FASTA format via Biostrings,
# wrapping sequence lines at 'line.width' characters. 'file' is required.
`writeFasta` <- function( fasta, file=NULL, line.width=80) {
if ( is.null( file)) stop( "writeFasta: required 'file' argument is missing")
require( Biostrings)
# convert to a Biostrings object so writeXStringSet handles the formatting
bstring <- BStringSet( fasta$seq)
names( bstring) <- fasta$desc
writeXStringSet( bstring, filepath=file, width=line.width)
# older pure-R implementation, kept for reference:
#writeLines( as.text.Fasta( fasta, line.width=line.width), con=file, sep="\n")
}
# writeLongFasta -- write one FASTA record per element with no line wrapping:
# each sequence stays on a single (possibly very long) line. 'file' is required.
writeLongFasta <- function(desc, seq, file = NULL) {
  if (is.null(file)) {
    stop("writeLongFasta: required 'file' argument is missing")
  }
  records <- base::paste(">", desc, "\n", seq, sep = "")
  writeLines(records, con = file, sep = "\n")
}
# as.text.Fasta -- render a Fasta object as FASTA-formatted text, one element
# per record: ">desc\n" followed by the sequence wrapped at 'line.width'.
# Returns "" for anything that is not a non-empty Fasta list.
as.text.Fasta <- function(fasta, line.width = 80) {
  if (!is.list(fasta)) {
    return("")
  }
  n_rec <- length(fasta$desc)
  if (is.null(n_rec) || n_rec < 1) {
    return("")
  }
  bodies <- wrap.text(fasta$seq, line.width = line.width)
  base::paste(">", fasta$desc, "\n", bodies, sep = "")
}
# wrap.text -- hard-wrap every string in 'txt' at 'line.width' characters,
# joining the pieces with newlines. Empty strings stay "", and a NULL or
# zero-length input collapses to a single "".
wrap.text <- function(txt, line.width = 60) {
  if (is.null(txt) || length(txt) < 1) {
    return("")
  }
  vapply(seq_along(txt), function(i) {
    s <- txt[i]
    nch <- base::nchar(s)
    if (nch < 1) {
      return("")
    }
    # starting offset of each wrapped line
    starts <- seq.default(1, nch, by = line.width)
    pieces <- base::substring(s, starts, pmin(starts + line.width - 1, nch))
    base::paste(pieces, collapse = "\n")
  }, character(1))
}
# smart fasta file lookup...
# One-slot cache used by getFastaSeqFromFilePath(): remembers the name of the
# last FASTA file loaded and its parsed contents, so repeated lookups against
# the same file avoid re-reading it from disk.
FastaFilePathEnv <- new.env( parent=emptyenv())
assign( "currentFastaFile", "", envir=FastaFilePathEnv)
assign( "currentFastaObject", NULL, envir=FastaFilePathEnv)
# get one FASTA sequence, by filename and seqID. returns a Biostrings object.
# 'filePath' may be an explicit FASTA file OR a directory; for a directory the
# function searches for a file named "<seqID>.fa" (optionally with the current
# species prefix stripped), falling back to a grep over one level of
# subfolders. The loaded file is cached in FastaFilePathEnv so consecutive
# calls against the same file skip the disk read.
`getFastaSeqFromFilePath` <- function( filePath, seqID, verbose=FALSE) {
# see if we need to read a different file
alreadyLoaded <- ( filePath == get( "currentFastaFile", envir=FastaFilePathEnv))
if ( verbose) cat( "\nGetting FASTA seq for: ",seqID)
if ( ! alreadyLoaded) {
# we could be given an explicit filename OR a directory
info <- file.info( filePath)
if ( any( is.na( info$size))) stop( paste( "getFastaSeqFromFilePath: file not found: ", filePath))
isDirectory <- info$isdir
if( isDirectory) {
# pathRelative tracks whether 'files' holds bare names (TRUE) or full paths
pathRelative <- TRUE
files <- dir( filePath)
# try to find a file that has that seqID as part of its name
if (verbose) cat( " trying", length(files), "files in folder.")
curSpecies <- getCurrentSpecies()
tryFileName <- paste( seqID, ".fa", sep="")
hit <- pmatch( tryFileName, files, nomatch=0)
if ( hit == 0) {
# retry with the current species prefix removed from the candidate name
if (verbose) cat( " trying prepend of speciesID.")
tryFileName <- sub( paste( curSpecies,"_",sep=""), "", tryFileName, fixed=TRUE)
hit <- pmatch( tryFileName, files, nomatch=0)
}
if ( hit == 0) {
files <- dir( filePath, full.name=T)
# last chance: see if any subfolders have that file
myfolders <- files[ file.info( files)$isdir]
if ( length( myfolders) > 0) {
pathRelative <- FALSE
if (verbose) cat( " trying", length( myfolders), "subfolders.")
morefiles <- vector()
for( f in myfolders) morefiles <- append( morefiles, dir( f, full.name=T))
tryFileName <- paste( seqID, ".fa", sep="")
# note: grep (substring match) replaced pmatch here -- full paths never
# prefix-match a bare filename
#hit <- pmatch( tryFileName, morefiles, nomatch=0)
hit <- grep( tryFileName, morefiles)
hit <- if ( length(hit) > 0) hit[1] else 0
if ( hit == 0) {
if (verbose) cat( " trying prepend of speciesID.")
tryFileName <- sub( paste( curSpecies,"_",sep=""), "", tryFileName, fixed=TRUE)
#hit <- pmatch( tryFileName, morefiles, nomatch=0)
hit <- grep( tryFileName, morefiles)
hit <- if ( length(hit) > 0) hit[1] else 0
}
files <- morefiles
}
}
if ( hit == 0) stop( paste( "\nUnable to find FASTA file for: ", seqID, " in folder: ", filePath))
if (pathRelative) {
useFile <- file.path( filePath, files[ hit])
} else {
useFile <- files[ hit]
}
} else {
useFile <- filePath
}
# ok, we have a file, so load it (and remember it in the one-slot cache)
assign( "currentFastaObject", loadFasta( useFile), envir=FastaFilePathEnv)
assign( "currentFastaFile", useFile, envir=FastaFilePathEnv)
}
# now look for that seqID among the cached descriptors
fasta <- get( "currentFastaObject", envir=FastaFilePathEnv)
where <- match( seqID, fasta$desc, nomatch=0)
if ( where > 0) return( fasta$seq[ where])
# complain and quit if not found (returns "" rather than erroring)
warning( paste( "Fasta descriptor: ", seqID, " not found in Fasta file/path: ", filePath))
return( "")
}
# gene2Fasta -- extract the sequence for each gene in 'genes' from genomic
# FASTA files and return them as a Fasta object.
#   mode:            "gdna" = raw genomic DNA slice (reverse-complemented on
#                    the minus strand); "cdna" = coding DNA; "aa" = translated
#                    amino acids.
#   utr.tail.length: extra flanking bases on each side, "gdna" mode only.
# Relies on the current gene/seq maps and several helpers defined elsewhere
# (getCurrentGeneMap, shortGeneName, convertGenomicDNAtoCodingDNA, DNAtoAA,
# myReverseComplement).
`gene2Fasta` <- function( genes, genomicDNAfilePath, mode=c("gdna","cdna","aa"), utr.tail.length=0, verbose=FALSE) {
mode <- match.arg( mode)
if ( mode != "gdna" && utr.tail.length > 0) stop( "UTR tails only compatible with 'gdna' mode.")
gmap <- getCurrentGeneMap()
smap <- getCurrentSeqMap()
who <- match( genes, gmap$GENE_ID, nomatch=0)
if ( any( who == 0)) {
# fall back to matching on the short form of the gene names
who <- match( genes, shortGeneName(gmap$GENE_ID,keep=1), nomatch=0)
if ( all( who == 0)) stop( "No matching genes found in current gene map.")
if ( any( who == 0)) {
cat( "\nSome genes not found in current gene map.")
cat( "\nN=", sum( who == 0), genes[who == 0])
}
}
# subsetting with 0 indices silently drops the unmatched genes here
gmap <- gmap[ who, ]
outDesc <- outSeq <- vector()
# visit genes grouped by chromosome so the one-file FASTA cache is reused
visitOrder <- order( gmap$SEQ_ID)
for ( i in visitOrder) {
thisGene <- gmap$GENE_ID[i]
thisSeqID <- gmap$SEQ_ID[i]
gdna <- getFastaSeqFromFilePath( filePath=genomicDNAfilePath, seqID=thisSeqID )
if ( mode == "gdna") {
str <- substr( gdna, gmap$POSITION[i] , gmap$END[i])
if ( utr.tail.length > 0) {
# widen the slice by the UTR tail, clipped to the chromosome bounds
gPos <- max( 1, gmap$POSITION[i] - utr.tail.length)
gEnd <- min( smap$LENGTH[match(thisSeqID,smap$SEQ_ID)], gmap$END[i] + utr.tail.length)
str <- substr( gdna, gPos, gEnd)
}
if ( gmap$STRAND[i] == "-") str <- myReverseComplement(str)
} else {
str <- convertGenomicDNAtoCodingDNA( geneID=thisGene, genomicDNA=gdna)
if (mode == "aa") str <- DNAtoAA( str, readingFrame=1, clipAtStop=F)
}
outDesc[i] <- thisGene
outSeq[i] <- str
if (verbose) cat( "\r", i, " ", thisGene, " N_Ch=", nchar(str))
}
return( as.Fasta( outDesc, outSeq))
}
|
e65e67c66f6001a4a115a91e9c587780f70704ae
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googletranslatev2.auto/man/translate_googleAuthR.Rd
|
159024006902bb9f70c5c23ceca890d39e7292ca
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 556
|
rd
|
translate_googleAuthR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/translate_functions.R
\docType{package}
\name{translate_googleAuthR}
\alias{translate_googleAuthR}
\alias{translate_googleAuthR-package}
\title{Translate API
Translates text from one language to another.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2017-03-05 20:21:07
filename: /Users/mark/dev/R/autoGoogleAPI/googletranslatev2.auto/R/translate_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item
}
}
|
516c50695615751fb1cfa5f60f5ac44e2a5887b6
|
adf18f1a24e425b74f28badb5408bf9a5bd69789
|
/server.R
|
12810a6e9fe441bf115efb0229bf4fe899217cf8
|
[] |
no_license
|
ksmzn/HOXOM_card
|
c21de6e76003ea51571b7068ba7189da8f261d4e
|
a0b3424cab2418cfca104aab74ecb39146e77450
|
refs/heads/master
| 2021-08-23T10:22:50.290525
| 2017-12-04T14:30:12
| 2017-12-04T14:30:12
| 112,952,723
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,456
|
r
|
server.R
|
library(shiny)
library(dplyr)
library(magick)
library(glue)
library(jsonlite)
library(longurl)
# Shiny server for the card generator: composites a Twitter-card style image
# (background, uploaded/fetched icon, text annotations) with magick and serves
# it through output$card. The icon path lives in a reactiveValues holder so
# both the upload widget and the Twitter-insert message can update it.
server <- function(input, output, session) {
observe({
# hand the Twitter consumer key (from the environment) to the client JS
session$sendCustomMessage(type = 'tw_consumer_key',
message = list(key = Sys.getenv("TWITTER_CONSUMER_KEY")))
})
jp_font <- normalizePath("./www/font/migu-1m-regular.ttf")
size <- "717x433!"       # card canvas geometry ('!' forces exact size)
icon_size <- "200x200!"  # icon geometry
tw_logo <- image_read("./www/img/twitter.png")
# card
card <- image_read("./www/img/card.png") %>%
image_resize(size)
# icon
icon_path <- "./www/img/default.png"
tmp <- reactiveValues(icon_path = icon_path)
## When uploading new image
observeEvent(input$upload, {
if (length(input$upload$datapath)){
tmp$icon_path <<- input$upload$datapath
updateCheckboxGroupInput(session, "effects", selected = "")
}
})
## When inserting profile data fetched from Twitter (JSON payload from JS)
observeEvent(input$insert_by_twitter, {
payload <- jsonlite::fromJSON(input$insert_by_twitter)
tmp$icon_path <<- payload$icon_path
site_url <- payload$site_url
# expand t.co-style shortened URLs; keep the original on failure
df_expanded_url <- site_url %>%
longurl::expand_urls()
if(!is.null(site_url) && df_expanded_url$status_code[[1]]==200L){
site_url <- df_expanded_url %>%
dplyr::pull(expanded_url)
}
updateTextInput(session, "username", value = payload$username)
updateTextInput(session, "tw_account", value = payload$tw_account)
updateTextInput(session, "site_url", value = site_url)
updateTextAreaInput(session, "serif", value = payload$serif)
updateCheckboxGroupInput(session, "effects", selected = "")
})
# plot: rebuild the card image whenever any input changes
output$card <- renderImage({
# elements
bg_color <- input$bg_color
username <- input$username
post <- input$post
tw_account <- input$tw_account
site_url <- input$site_url
serif <- input$serif
other <- input$other
icon <- tmp$icon_path %>%
image_read() %>%
image_convert("png") %>%
image_scale(icon_size)
# Boolean operators: each checked effect is applied to the icon in turn
if("edge" %in% input$effects)
icon <- image_edge(icon)
if("charcoal" %in% input$effects)
icon <- image_charcoal(icon)
if("negate" %in% input$effects)
icon <- image_negate(icon)
if("flip" %in% input$effects)
icon <- image_flip(icon)
if("flop" %in% input$effects)
icon <- image_flop(icon)
icon <- icon %>%
image_implode(input$implode)
# composite background, logo, icon and all text layers onto the card
card <- card %>%
image_background(bg_color) %>% # OK
image_annotate(text = username, location = "+320+200", font = jp_font, size = 50) %>% # OK
image_annotate(text = post, location = "+320+170", font = jp_font, size = 20) %>% # OK
image_composite(tw_logo, offset = "+320+280") %>%
image_annotate(text = glue::glue(" @{tw_account}"), location = "+350+285", font = jp_font, size = 20) %>% # OK
image_annotate(text = glue::glue("URL: {site_url}"), location = "+320+320", font = jp_font, size = 20) %>% # OK
image_annotate(text = other, location = "+320+350", font = jp_font, size = 20) %>% # OK
image_composite(icon, offset = "+60+50") %>%
image_annotate(text = serif, location = "+80+300", font = jp_font, size = 20)
# Numeric operators
tmpfile <- card %>%
image_write(tempfile(fileext='png'), format = 'png')
# Return a list in the shape renderImage expects
list(src = tmpfile, contentType = "image/png")
})
}
|
8275031fecfa9a6e3929af7ed0f5f61777579a2a
|
cc0de06f786eb8ee1e8f1cef3c14a32f988d460b
|
/plot2.R
|
a23b61b4429201d0628b06c814562179dddf5a4d
|
[] |
no_license
|
amiedemmel/ExData_Plotting1
|
c4e511ddf851d4394722d34be8700c20a40db52f
|
1e24a310a8675156d912c019757439e0d378bc05
|
refs/heads/master
| 2021-01-20T21:48:23.905972
| 2014-06-07T02:07:40
| 2014-06-07T02:07:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,256
|
r
|
plot2.R
|
#Plot2.R
#Amie Demmel 6/6/14
#Reconstructs the second plot (Global Active Power over Feb 1-2, 2007)
#Read in data from the already-downloaded file; make sure your path matches
file <- "Coursera/exdata-data-household_power_consumption.zip"
#Read as a data.frame and discard rows that do not match Feb 1 2007 and Feb 2 2007
num <- rep("numeric",7)
classing <- c("character","character",num)
data <- read.table(unz(file, "household_power_consumption.txt"), header=T, na.strings = "?", colClasses = classing, quote="\"", sep=";")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
#match dates and only keep these rows
looker <- c("2007-02-01","2007-02-02")
ss <- (data$Date == looker[1])|(data$Date == looker[2])
s.data <- data[ss,]
rm(data)
#Set up datetime vector from the date and time columns of data.frame and choose y to plot with
x <- paste(s.data$Date,s.data$Time, sep = " ")
x <- strptime(x, format = "%Y-%m-%d %H:%M:%S")
y <- s.data$Global_active_power
#Open device and plot using lines to generate the line graph
png(file = "plot2.png", width = 480, height = 480, units = "px")
plot(x,y, xlab ="", ylab ="Global Active Power(kilowatts)", type="n")
lines(x,y)
dev.off()
#remove large data vectors/tables to clean workspace; comment out if data.frame is needed later
rm(s.data)
rm(x)
rm(y)
rm(ss)
|
15d952c3ab80361f99d8c6e0af019745a8dba6fd
|
4f9a9a5ca40d4f1c04ecd303b20fe2c116d3a579
|
/Chapter3_DataVisualization/Section3.9/Section3.9.R
|
a72e2ced2ee1f7cfb2bb61b55297e152f42fe1f4
|
[] |
no_license
|
BatmanNeuroGuy/R4DataScience
|
bae8edc5328376d16679b9e53560ea30aaff984e
|
373b05f967ca6088608cdeae8cad9977dacd0d64
|
refs/heads/master
| 2021-05-17T14:04:47.356576
| 2020-03-31T00:43:13
| 2020-03-31T00:43:13
| 250,811,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,036
|
r
|
Section3.9.R
|
# Section 3.9 (R for Data Science): coordinate systems -- coord_flip,
# coord_quickmap, coord_polar, coord_fixed -- plus the chapter exercises.
library(tidyverse)
# coord_flip() switches the x and y axes. This is useful (for example),
# if you want horizontal boxplots. It’s also useful for long labels:
# it’s hard to get them to fit without overlapping on the x-axis
ggplot(data = mpg, mapping = aes(x = class, y = hwy)) +
  geom_boxplot() # Labels don't fit on x axis
ggplot(data = mpg, mapping = aes(x = class, y = hwy)) +
  geom_boxplot() +
  coord_flip() # Solution is to flip the x and y axes
# coord_quickmap() sets the aspect ratio correctly for maps.
# This is very important if you’re plotting spatial data with ggplot2
# (which unfortunately we don’t have the space to cover in this book).
nz <- map_data("nz")
ggplot(nz, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", colour = "black")
ggplot(nz, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", colour = "black") +
  coord_quickmap() # sets aspect ratio correctly
# coord_polar() uses polar coordinates.
bar <- ggplot(data = diamonds) +
  geom_bar(
    mapping = aes(x = cut, fill = cut),
    show.legend = FALSE,
    width = 1
  ) +
  theme(aspect.ratio = 1) +
  labs(x = NULL, y = NULL)
bar + coord_flip()
bar + coord_polar() # Polar coordinates reveal an interesting connection between
# a bar chart and a Coxcomb chart
########################### Exercises #################################
# 1. Turn a stacked bar chart into a pie chart using coord_polar()
dmd_bar <- ggplot(data = diamonds) +
  geom_bar(mapping = aes(x = cut, fill = clarity))
dmd_bar + coord_polar() # clarity as a ratio of n within each cut
dmd_bar1 <- ggplot(data = diamonds) +
  geom_bar(mapping = aes(x = cut, fill = clarity), position = "fill")
dmd_bar1 + coord_polar() # clarity as ratio as a percent (all pies equal size)
# 2. What does labs() do? Read the documentation.
# labs() modifies the chart labels, can change variable and axes labels
# 3. What’s the difference between coord_quickmap() and coord_map()?
# As elucidated here: https://ggplot2.tidyverse.org/reference/coord_map.html
# "coord_map projects a portion of the earth, which is approximately spherical,
# onto a flat 2D plane using any projection defined by the mapproj package.
# Map projections do not, in general, preserve straight lines, so this
# requires considerable computation. coord_quickmap is a quick approximation
# that does preserve straight lines. It works best for smaller areas closer
# to the equator.
# 4. What does the plot below tell you about the relationship between city and highway mpg?
ggplot(data = mpg, mapping = aes(x = cty, y = hwy)) +
  geom_point() +
  geom_abline() +
  coord_fixed()
# Why is coord_fixed() important?
# this parameter ensures that the x and y axes are on the same scale,
# i.e., ensures that one unit on the x-axis is the same length as one
# unit on the y-axis
# What does geom_abline() do?
# adds a reference line, when y int and slope are not specified, defaults
# to y-int = 0, slope = 1
|
0200073fc22ff05d1b5f921608be88f9bf51a660
|
a1cd651e71990555e3180f335baa28accbf41ee4
|
/Homework 1.r
|
b4e7f96d86727183ab0bb78e7aac964690767f67
|
[] |
no_license
|
juliemattimoe/MSDS-413
|
c9d8fe4d0855e66c5f6487ecaad928cfb8173c89
|
46f7c07c91bec21225031838653ff0f5336f2286
|
refs/heads/master
| 2020-07-06T07:41:47.508790
| 2019-09-06T14:59:22
| 2019-09-06T14:59:22
| 202,943,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,690
|
r
|
Homework 1.r
|
# Homework 1 -- fpp2 time-series exercises.
# Part 1: exploratory plots, seasonal-naive forecasts for visnights,
# and simple benchmark forecasts for the Dow Jones and IBM series.
library(fpp2)
# ---- Exploratory plots for several built-in series ----
autoplot(hsales)
ggseasonplot(hsales)
ggsubseriesplot(hsales)
gglagplot(hsales)
ggAcf(hsales, lag.max = 400)
autoplot(usdeaths)
ggseasonplot(usdeaths)
ggsubseriesplot(usdeaths)
gglagplot(usdeaths)
ggAcf(usdeaths, lag.max = 60)
autoplot(bricksq)
ggseasonplot(bricksq)
ggsubseriesplot(bricksq)
gglagplot(bricksq)
ggAcf(bricksq, lag.max = 200)
autoplot(sunspotarea)
# sunspotarea is annual, so seasonal plots do not apply:
# ggseasonplot(sunspotarea)
# ggsubseriesplot(sunspotarea)
gglagplot(sunspotarea)
ggAcf(sunspotarea, lag.max = 50)
autoplot(gasoline)
ggseasonplot(gasoline)
# ggsubseriesplot(gasoline)
gglagplot(gasoline)
ggAcf(gasoline, lag.max = 1000)
# ---- Seasonal-naive forecasts on three rolling training windows ----
autoplot(visnights[,"QLDMetro"])
train1= window(visnights[,"QLDMetro"], end = c(2015,4))
train2= window(visnights[,"QLDMetro"], end = c(2014,4))
train3= window(visnights[,"QLDMetro"], end = c(2013,4))
fc1 = snaive(train1)
fc2 = snaive(train2)
fc3 = snaive(train3)
accuracy(fc1,window(visnights[,"QLDMetro"], start = 2016, end = c(2016,4)))
accuracy(fc2,window(visnights[,"QLDMetro"], start = 2015, end = c(2015,4)))
accuracy(fc3,window(visnights[,"QLDMetro"], start = 2014, end = c(2014,4)))
# ---- Dow Jones: raw vs log vs sqrt transformations ----
par(mfrow=c(2,2))
plot(dowjones, main="Fig 12: Dow Jones Index")
plot(log(dowjones), main = "Fig 13: Log DJI")
plot(sqrt(dowjones), main = "Fig 14: SQRT")
dowjones_drift <- rwf(dowjones , h=10, drift=TRUE)
dowjones_drift_log <- rwf(log(dowjones), h = 10, drift = TRUE)
dowjones_drift_sqrt <- rwf(sqrt(dowjones), h =10, drift = TRUE)
par(mfrow=c(2,2))
# BUG FIX: the legend labels on the first and third panels were swapped
# ("SQRT" on the drift plot and "Drift" on the sqrt plot).
plot(dowjones_drift,plot.conf=FALSE,main="Drift Method Dow Jones", ylab="Index",xlab="Year")
legend("topleft",lty=1, col=c(4),legend=c("Drift"))
plot(dowjones_drift_log,plot.conf=FALSE,main="Log Method Dow Jones", ylab="Index",xlab="Year")
legend("topleft",lty=1, col=c(4),legend=c("Log"))
plot(dowjones_drift_sqrt,plot.conf=FALSE,main="SQRT Method Dow Jones", ylab="Index",xlab="Year")
legend("topleft",lty=1, col=c(4),legend=c("SQRT"))
# ---- Mean / naive / drift benchmarks on a holdout ----
dj_first_last <- window(dowjones, start=1, end=66-.1)
dj_first_last_mean <- meanf(dj_first_last,h=12)
dj_first_last_1 <- rwf(dj_first_last,h=12)
dj_first_last_2 <- rwf(dj_first_last,h=12, drift = TRUE)
plot(dj_first_last_mean, plot.conf=FALSE, main="Dow Jones Index", xlim=c(1,78))
lines(dj_first_last_1$mean,col=2)
lines(dj_first_last_2$mean,col=3)
lines(dowjones)
# BUG FIX: legend typo "Drifit" -> "Drift"
legend("topleft", lty=1, col=c(4,2,3), legend=c("Mean ","Naive","Drift"))
par(mfrow=c(1,1))
dowjones_drift <- rwf(dowjones , h=24, drift=TRUE)
# NOTE(review): the mean/naive horizons (h=42) differ from the drift horizon
# (h=24) above -- possibly a 24/42 transposition; verify the intent.
dowjones_drift_mean <-meanf(dowjones, h=42)
dowjones_drift_naive <-naive(dowjones, h=42)
plot(dowjones_drift,plot.conf=FALSE,main="Drift Method Dow Jones", ylab="Index",xlab="Year")
lines(dowjones_drift_mean$mean, col=2)
lines(dowjones_drift_naive$mean, col=3)
legend("topleft",lty=1, col=c(4,2,3),legend=c("Mean Method","Naive Method","Drift"))
# ---- IBM close prices: benchmarks on a 300/54 train-test split ----
head(ibmclose)
summary(ibmclose)
par(mfrow=c(2,2))
plot(ibmclose)
qqnorm(ibmclose)
qqline(ibmclose)
plot(log(ibmclose))
plot(sqrt(ibmclose))
ibm_close_train <- window(ibmclose ,end=300)
ibm_close_test <- window(ibmclose ,start=301)
par(mfrow=c(1,1))
ibm_close_avg <- meanf(ibm_close_train,h=54)$mean
ibm_close_naive <- naive(ibm_close_train ,h=54)$mean
ibm_close_drift <- rwf(ibm_close_train ,drift=TRUE,h=54)$mean
plot(ibm_close_train,main="IBM Close Prices",xlab="Day",ylab="Price")
lines(ibm_close_naive,col=2)
lines(ibm_close_avg,col=4)
lines(ibm_close_drift,col=3)
lines(ibm_close_test,col=8)
legend("topleft",lty=1,col=c(4,2,3),
legend=c("Mean Method","Naive Method","Drift Method"))
# same plot, zoomed in on the forecast window
plot(ibm_close_train,main="IBM Close Prices", ylab="Price",xlab="Day", xlim=c(250,369), ylim=c(300,425))
lines(ibm_close_naive,col=2)
lines(ibm_close_avg,col=4)
lines(ibm_close_drift,col=3)
lines(ibm_close_test,col=8)
legend("topleft",lty=1,col=c(4,2,3),
legend=c("Mean Method","Naive Method","Drift Method"))
checkresiduals(ibm_close_drift)
# ---- hsales: calendar adjustment and benchmark comparison ----
head(hsales)
summary(hsales)
par(mfrow=c(2,2))
plot(hsales)
qqnorm(hsales)
qqline(hsales)
plot(log(hsales))
acf(hsales)
# days per month over the 23-year span, with leap-year Februaries set to 29
monthdays <- rep(c(31,28,31,30,31,30,31,31,30,31,30,31),23)
monthdays <- monthdays[-275]
monthdays[38 + (4*12)*(0:4)] <- 29
plot(hsales/monthdays, ylab="Sales", xlab="Years")
training <- window(hsales, start=1973, end=1994-1/12)
test <- window(hsales, start=1994)
plot(training, ylab="Sales", xlab="Year", xlim =c(1973, 1995))
fit1 <- meanf(training, h=23)
fit2 <- rwf(training, h=23)
fit3 <- snaive(training, h=23)
fit4 <- rwf(training, h=23, drift=TRUE)
lines(fit1$mean, col=2)
lines(fit2$mean, col=3)
lines(fit3$mean, col=4)
lines(fit4$mean, col=5)
accuracy(fit1, test)
accuracy(fit2, test)
accuracy(fit3, test)
accuracy(fit4, test)
lines(test)
checkresiduals(fit3)
# ---- Gasoline series: harmonic (Fourier) regression with tslm ----
gasoline_until_2004 <- window(gasoline, end = 2005)
autoplot(gasoline_until_2004, xlab = "Year") +
ggtitle("US finished motor gasoline product supplied") +
xlab("Year") + ylab("million barrels per day")
# Fit one harmonic regression per candidate number of Fourier pairs and plot
# each fit. Models are created via assign() as tslm_ft<K>_gasoline_until_2004.
for(num in c(1, 2, 3, 5, 10, 20)){
var_name <- paste("tslm_ft",
as.character(num),
"_gasoline_until_2004",
sep = "")
assign(var_name,
tslm(gasoline_until_2004 ~ trend + fourier(
gasoline_until_2004,
K = num
))
)
print(
autoplot(gasoline_until_2004) +
autolayer(get(var_name)$fitted.values,
series = as.character(num)) +
ggtitle(var_name) +
ylab("gasoline") +
guides(colour = guide_legend(title = "Number of Fourier Transform pairs")) +
theme(legend.position="bottom")
)
}
# Overlay all six fits on one plot.
# BUG FIX: the fitted objects and their 'series' labels were mismatched
# (ft5 labeled "2", ft10 labeled "3", ft10 repeated as "5", ft20 as "10");
# each layer now shows the model matching its label.
autoplot(gasoline_until_2004) +
autolayer(tslm_ft1_gasoline_until_2004$fitted.values, series = "1") +
autolayer(tslm_ft2_gasoline_until_2004$fitted.values, series = "2") +
autolayer(tslm_ft3_gasoline_until_2004$fitted.values, series = "3") +
autolayer(tslm_ft5_gasoline_until_2004$fitted.values, series = "5") +
autolayer(tslm_ft10_gasoline_until_2004$fitted.values, series = "10") +
autolayer(tslm_ft20_gasoline_until_2004$fitted.values, series = "20") +
guides(colour = guide_legend(title = "Fourier Transform pairs")) +
scale_color_discrete(breaks = c(1, 2, 3, 5, 10, 20)) +
theme(legend.position="bottom")
# Print cross-validation statistics for each fitted model.
for(i in c(1, 2, 3, 5, 10, 20)){
tslm_ft_gasoline_until_2004.name <- paste(
"tslm_ft", as.character(i), "_gasoline_until_2004",
sep = ""
)
writeLines(
paste(
"\n", tslm_ft_gasoline_until_2004.name, "\n"
)
)
print(CV(get(tslm_ft_gasoline_until_2004.name)))
}
# Search K = 1..26 for the number of pairs minimizing AICc and CV; stop once
# both criteria have passed their minima.
min_AICc <- Inf
min_K_by_AICc <- 0
min_CV <- Inf
min_K_by_CV <- 0
AICc_K <- 0
CV_K <- 0
for(num in 1:26){
AICc_K <- CV(
tslm(
gasoline_until_2004 ~ trend + fourier(gasoline_until_2004, K = num)
)
)[["AICc"]]
CV_K <- CV(
tslm(
gasoline_until_2004 ~ trend + fourier(gasoline_until_2004, K = num)
)
)[["CV"]]
if(num != 1){
if(AICc_K >= min_AICc & CV_K >= min_CV){
writeLines(
paste("The number of Fourier Transform pairs to minimize AICc:",
as.character(min_K_by_AICc)
)
)
writeLines(
paste("The number of Fourier Transform pairs to minimize CV:",
as.character(min_K_by_CV)
)
)
break
}
}
if(AICc_K < min_AICc){
min_AICc <- AICc_K
min_K_by_AICc <- num
}
if(CV_K < min_CV){
min_CV <- CV_K
min_K_by_CV <- num
}
}
# Refit the chosen model (K = 7), inspect residuals, and forecast 2005.
tslm_ft7_gasoline_until_2004 <- tslm(
gasoline_until_2004 ~ trend + fourier(
gasoline_until_2004,
K = 7
)
)
checkresiduals(tslm_ft7_gasoline_until_2004)
fc_gasoline_2005 <- forecast(
tslm_ft7_gasoline_until_2004,
newdata=data.frame(fourier(
gasoline_until_2004, K = 7, h = 52)
)
)
autoplot(fc_gasoline_2005) +
autolayer(window(
gasoline,
start = 2004,
end = 2006
)
) +
scale_x_continuous(limits = c(2004, 2006)) +
theme(legend.position="bottom")
# ---- bricksq: STL decomposition with fixed vs changing seasonality ----
stl_brick_fixed_st <- stl(bricksq,
s.window = "periodic",
robust = TRUE)
autoplot(stl_brick_fixed_st) +
ggtitle("STL with fixed seasonality")
stl_brick_changing_st <- stl(bricksq,
s.window = 5,
robust = TRUE)
autoplot(stl_brick_changing_st) +
ggtitle("STL with changing seasonality")
# overlay the data, trend-cycle and seasonally adjusted series (fixed)
autoplot(bricksq, series = "Data") +
autolayer(trendcycle(stl_brick_fixed_st),
series = "Trend-cycle") +
autolayer(seasadj(stl_brick_fixed_st),
series = "Seasonally Adjusted Data") +
ggtitle("brick production in Australia",
subtitle = "fixed seasonality") +
scale_color_manual(values = c("gray", "red", "blue"),
breaks = c("Data", "Trend-cycle", "Seasonally Adjusted")) +
theme(legend.position="bottom")
# same overlay for the changing-seasonality decomposition
autoplot(bricksq, series = "Data") +
autolayer(trendcycle(stl_brick_changing_st),
series = "Trend-cycle") +
autolayer(seasadj(stl_brick_changing_st),
series = "Seasonally Adjusted Data") +
ggtitle("brick production in Australia",
subtitle = "changing seasonality") +
scale_color_manual(values = c("gray", "red", "blue"),
breaks = c("Data", "Trend-cycle", "Seasonally Adjusted")) +
theme(legend.position="bottom")
# naive forecasts of the seasonally adjusted series
stl_brick_fixed_st %>% seasadj() %>% naive() %>% autoplot() +
ggtitle(label = "Naive forecast",
subtitle = "fixed seasonality") +
theme(legend.position="bottom")
stl_brick_changing_st %>% seasadj() %>% naive() %>% autoplot() +
ggtitle(label = "Naive forecast",
subtitle = "changing seasonality") +
theme(legend.position="bottom")
# stlf = STL decomposition + forecast of the seasonally adjusted component
stlf_brick <- stlf(bricksq)
autoplot(stlf_brick)
checkresiduals(stlf_brick)
stlf_brick_robust <- stlf(bricksq, robust = TRUE)
autoplot(stlf_brick_robust) +
theme(legend.position="bottom")
checkresiduals(stlf_brick_robust)
# compare stlf vs snaive on the last 8 quarters held out
trainset_brick <- subset(bricksq, end = length(bricksq) - 8)
testset_brick <- subset(bricksq, start = length(bricksq) - 7)
snaive_brick <- snaive(trainset_brick)
stlf_brick_part <- stlf(trainset_brick, robust = TRUE)
autoplot(bricksq, series = "Previous data") +
geom_line(size = 1) +
autolayer(stlf_brick_part, PI = FALSE, size = 1,
series = "stlf") +
autolayer(snaive_brick, PI = FALSE, size = 1,
series = "snaive") +
scale_color_manual(values = c("gray50", "blue", "red"),
breaks = c("Original data", "stlf", "snaive")) +
scale_x_continuous(limits = c(1990, 1994.5)) +
scale_y_continuous(limits = c(350, 550)) +
guides(colour = guide_legend(title = "Data")) +
ggtitle("stlf and snaive forecasts") +
annotate(
"rect",
xmin=1992.75,xmax=1994.5,ymin=-Inf,ymax=Inf,
fill="lightgreen",alpha = 0.3
) +
theme(legend.position="bottom")
# ---- visitors: compare five forecasting approaches on a 24-month holdout ----
str(visitors)
head(visitors)
autoplot(visitors)
ggseasonplot(visitors)
visitors_train <- subset(visitors,
end = length(visitors) - 24)
visitors_test <- subset(visitors,
start = length(visitors) - 23)
# 1) Holt-Winters multiplicative
hw_mul_visitors_train <- hw(visitors_train,
h = 24,
seasonal = "multiplicative")
autoplot(hw_mul_visitors_train)
# 2) automatic ETS
fc_ets_visitors_train <- forecast(ets(visitors_train), h = 24)
autoplot(fc_ets_visitors_train)
# 3) additive ETS on Box-Cox transformed data
fc_ets_add_BoxCox_visitors_train <- forecast(
ets(visitors_train,
lambda = BoxCox.lambda(visitors_train),
additive.only = TRUE),
h = 24
)
autoplot(fc_ets_add_BoxCox_visitors_train)
# 4) seasonal naive
fc_snaive_visitors_train <- snaive(visitors_train, h = 24)
autoplot(fc_snaive_visitors_train)
# 5) STL + ETS on Box-Cox transformed data
fc_BoxCox_stl_ets_visitors_train <- visitors_train %>%
stlm(
lambda = BoxCox.lambda(visitors_train),
s.window = 13,
robust = TRUE,
method = "ets"
) %>%
forecast(h = 24)
autoplot(fc_BoxCox_stl_ets_visitors_train)
accuracy(hw_mul_visitors_train, visitors_test)
accuracy(fc_ets_visitors_train, visitors_test)
accuracy(fc_ets_add_BoxCox_visitors_train, visitors_test)
accuracy(fc_snaive_visitors_train, visitors_test)
accuracy(fc_BoxCox_stl_ets_visitors_train, visitors_test)
checkresiduals(fc_snaive_visitors_train)
# forecast-function wrappers in the shape tsCV() expects: f(y, h)
fets_add_BoxCox <- function(y, h) {
forecast(ets(
y,
lambda = BoxCox.lambda(y),
additive.only = TRUE
),
h = h)
}
fstlm <- function(y, h) {
forecast(stlm(
y,
lambda = BoxCox.lambda(y),
s.window = frequency(y) + 1,
robust = TRUE,
method = "ets"
),
h = h)
}
fets <- function(y, h) {
forecast(ets(y),
h = h)
}
# one-step time-series cross-validation RMSE for each method
sqrt(mean(tsCV(visitors, snaive, h = 1)^2, na.rm = TRUE))
sqrt(mean(tsCV(visitors, fets_add_BoxCox, h = 1)^2,
na.rm = TRUE))
sqrt(mean(tsCV(visitors, fstlm, h = 1)^2,
na.rm = TRUE))
sqrt(mean(tsCV(visitors, fets, h = 1)^2, na.rm = TRUE))
sqrt(mean(tsCV(visitors, hw, h = 1,
seasonal = "multiplicative")^2,
na.rm = TRUE))
|
f3b2b50cb47f8075243fafc87517721fc5ed1da5
|
42886f7b175ea5f5f7c40c5c9cf1ee8d91625598
|
/Lab1/Question3.R
|
72e2bd93af47d00f7db1d57fe38d2add95f5635d
|
[] |
no_license
|
janish-parikh/CS-605-Data-Analytics-in-R
|
7d8655ea4a08b2f696c89a832c63b659010ff91a
|
c17be6edf9a1da9dae80fefeb07c85e4a1021942
|
refs/heads/master
| 2022-12-19T11:45:54.412299
| 2020-09-05T03:25:42
| 2020-09-05T03:25:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 850
|
r
|
Question3.R
|
#Question-03
# Three sample datasets of 30 observations each.
dataset1<-c(19, 24, 12, 19, 18, 24, 8, 5, 9, 20, 13, 11, 1, 12, 11, 10, 22, 21, 7, 16,
15, 15, 26, 16, 1, 13, 21, 21, 20, 19)
dataset2<-c(17, 24, 21, 22, 26, 22, 19, 21, 23, 11, 19, 14, 23, 25, 26, 15, 17, 26,
21, 18, 19, 21, 24, 18, 16, 20, 21, 20, 23, 33)
dataset3<-c(56, 52, 13, 34, 33, 18, 44, 41, 48, 75, 24, 19, 35, 27, 46, 62, 71, 24,
66, 94, 40, 18, 15, 39, 53, 23, 41, 78, 15, 35)
#function to plot histogram with a scaled density overlay
#(requires ggplot2 to be attached by the caller)
plot_histogram<-function(x){
p<-ggplot(data=data.frame(data=x),aes(x=data))+
geom_histogram(aes(y=stat(ndensity)),bins=10)+
geom_density(aes(y=stat(scaled)),fill = "red",alpha = 0.1)
plot(p)
}
# BUG FIX: the original called describe(dat1), but no object named 'dat1'
# exists in this script -- the data vectors are dataset1/2/3.
print(skew<-describe(dataset1)) # Uses psych package
# Return the sample mean and median of a numeric vector as a named vector
# with components "Mean" and "Median".
sample_mean_median <- function(x) {
  c("Mean" = mean(x), "Median" = median(x))
}
|
eb4c82b2cee2ef470332c3c38872d52bd3bdc9b2
|
0a79d856c32d7f63f4f5df88ff39ab22ebdfe26a
|
/Global Vaccines.R
|
73913d232f215031ad0618e7fa078306c3955c95
|
[] |
no_license
|
tiffanguyen/Preventable-Diseases
|
f4406e69c55496af8d70457b62032f0b3bd2554d
|
170d58832bf6732e67a0e62893052b32e8bb3089
|
refs/heads/master
| 2020-09-09T01:00:28.976347
| 2019-12-12T08:05:42
| 2019-12-12T08:05:42
| 221,296,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 873
|
r
|
Global Vaccines.R
|
# Global Vaccines.R -- bar chart of the share of people (by country) agreeing
# vaccines are important for children: the USA plus 9 randomly chosen others.
setwd("~/Desktop/R Scripts/Preventable-Diseases")
vacs <- read.csv("Vacs.csv")
USA <- vacs[153,]      # row 153 is the United States entry
vacs1 <- vacs[-153,]   # all countries except the USA
# BUG FIX: sample the 9 random countries from vacs1 (USA already removed);
# the original drew indices sized by nrow(vacs1) but applied them to 'vacs',
# so the USA row could appear twice in the final plot.
randomizer <- data.frame(vacs1[sample(nrow(vacs1), 9), ])
newdf <- rbind(randomizer, USA)
library(ggplot2)
v <- ggplot(newdf, aes(x= Entity, y = Share.of.people.who.agree.vaccines.are.important.for.children.to.have....))
v + geom_bar(stat = "identity", fill="#336B87") + theme_minimal() +
coord_cartesian(ylim = c(0, 100)) +
geom_text(label= paste(round(newdf$Share.of.people.who.agree.vaccines.are.important.for.children.to.have...., digits = 0)), vjust=1.6, color="white", size=3.5) +
#geom_text(label = paste(newdf$Share.of.people.who.agree.vaccines.are.important.for.children.to.have....), size = 3, position = "above") +
labs(title ="Global Opinions, 2018", x = "Countries", y = "People Who Agree Vaccines are Important for Children (%)")
|
f5a899778274bd97f4498ab185ce4efea13702b8
|
d45af762e445c3ed93a96f2c52e5750114e72446
|
/lecture6.R
|
43f13fed9161e2ec850e56c2d781d3500264ae25
|
[] |
no_license
|
doublezz10/relearning_stats
|
fb6c7dbbf53d0bc71d5bcfef75bacd983e9d4f12
|
21b8ef76b97af5c2d039161647c4e25f1eafaf58
|
refs/heads/master
| 2023-01-07T05:17:21.209504
| 2020-11-09T22:05:07
| 2020-11-09T22:05:07
| 295,513,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 314
|
r
|
lecture6.R
|
library(brms)
library(rstanarm)
data(radon)
formula = log_radon ~ log_uranium + (1 + floor||county)
fit <- brm(formula,radon,family="gaussian")
fit2 <- brm(formula,radon,family="gaussian",prior = c(prior(normal(0,1),class=sd)))
get_prior(formula,radon,family="gaussian",prior = c(prior(normal(0,1),class=sd)))
|
092434135f533b94428a6fa90447e1ea3ae700f0
|
a1137535644d2ed3ebeb3b80f5e7dfa7f3458d89
|
/inst/templates/udaf.R
|
429d5830a466bc878eee45e2fffd8669bc1994df
|
[
"MIT"
] |
permissive
|
clarkfitzg/RHive
|
451800add848136b6ee684519c3d2a34973065ce
|
c41f12040de70ef5d2bca7a0361909c8c73afe2c
|
refs/heads/master
| 2021-08-23T11:44:22.183865
| 2017-12-04T19:21:25
| 2017-12-04T19:21:25
| 111,032,531
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,252
|
r
|
udaf.R
|
#!/usr/bin/env Rscript
# {{{gen_time}}}
# Automatically generated from R by RHive version {{{RHive_version}}}
# These values are specific to the analysis
verbose = {{{verbose}}}
rows_per_chunk = {{{rows_per_chunk}}}
cluster_by = {{{cluster_by}}}
sep = {{{sep}}}
input_cols = {{{input_cols}}}
input_classes = {{{input_classes}}}
try = {{{try}}}
f = {{{f}}}
# Other code that the user wanted to include, such as supporting functions
# or variables:
############################################################
{{{include_script}}}
# The remainder of the script is a generic template
############################################################
# Logging to stderr() writes to the Hadoop logs where we can find them.
msg = function(..., log = verbose)
{
if(log) writeLines(paste(...), stderr())
}
multiple_groups = function(queue, g = cluster_by) length(unique(queue[, g])) > 1
process_group = function(grp, outfile, .try = try)
{
msg("Processing group", grp[1, cluster_by])
if(.try) {try({
# TODO: log these failures
out = f(grp)
write.table(out, outfile, col.names = FALSE, row.names = FALSE, sep = sep)
})} else {
out = f(grp)
write.table(out, outfile, col.names = FALSE, row.names = FALSE, sep = sep)
}
}
msg("BEGIN R SCRIPT")
############################################################
stream_in = file("stdin")
open(stream_in)
stream_out = stdout()
# Initialize the queue
# TODO: parameterize Hive's na.strings
queue = read.table(stream_in, nrows = rows_per_chunk, colClasses = input_classes
, col.names = input_cols, na.strings = "\\N")
while(TRUE) {
while(multiple_groups(queue)) {
# Pop the first group out of the queue
nextgrp = queue[, cluster_by] == queue[1, cluster_by]
working = queue[nextgrp, ]
queue = queue[!nextgrp, ]
process_group(working, stream_out)
}
# Fill up the queue
nextqueue = read.table(stream_in, nrows = rows_per_chunk
, colClasses = input_classes, col.names = input_cols, na.strings = "\\N")
if(nrow(nextqueue) == 0) {
msg("Last group")
try(process_group(queue, stream_out))
break
}
queue = rbind(queue, nextqueue)
}
msg("END R SCRIPT")
|
d226846324bf28c8752c38fdb6573e8785f58151
|
a151b7c4b21a25884a97039166c1b5f0f6aaf159
|
/R/messy.R
|
405d8dd99b120293bb6009e22682ff1a148177b6
|
[
"MIT"
] |
permissive
|
andrewheiss/faux
|
f6e7d5c77685d6fdaf25e0d53d093004b1f64ef8
|
4ee6b200d7a9456e3bb3e051e2d222c229208052
|
refs/heads/master
| 2021-01-26T03:37:17.251097
| 2020-02-26T15:41:40
| 2020-02-26T15:41:40
| 243,293,051
| 0
| 0
|
MIT
| 2020-02-26T15:12:48
| 2020-02-26T15:12:48
| null |
UTF-8
|
R
| false
| false
| 959
|
r
|
messy.R
|
#' Simulate missing data
#'
#' Insert NA or another replacement value for some proportion of specified
#' columns to simulate missing data.
#'
#' @param data the tbl
#' @param prop the proportion of data to mess up
#' @param ... the columns to mess up (as a vector of column names or numbers)
#' @param replace the replacement value (defaults to NA)
#'
#' @return the messed up table
#' @export
#'
#' @examples
#' messy(iris, 0.1, "Species", replace = "NO SPECIES")
#' messy(iris, 0.5, 1:4)
messy <- function(data, prop = 0, ..., replace = NA) {
n <- nrow(data)
size <- floor(n*prop)
col <- c(...)
for (i in 1:length(col)) {
thecol <- col[i]
if (data[[thecol]] %>% is.factor()) { # add replace value to factor levels
new_levels <- data[[thecol]] %>% levels() %>% c(replace)
levels(data[[thecol]]) <- new_levels
}
to_replace <- sample.int(n, size)
data[[thecol]][to_replace] <- replace
}
data
}
|
e778f262155d99b157af0b38f47458ac7079d565
|
fe91d12f264a0a993142bc4567f095caff38284f
|
/visualization.R
|
abb174a7b13bec3e3f022228f4c64092a9a7b69c
|
[] |
no_license
|
mcandar/Agents
|
ca74655dd25fb59890d9c47263e6740116a40569
|
566d7d5d1f96b7d0ae24763868f250afae6f1371
|
refs/heads/master
| 2020-05-21T23:38:10.796313
| 2017-07-27T11:59:09
| 2017-07-27T11:59:09
| 61,151,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 911
|
r
|
visualization.R
|
RV <- sample(10,100,replace = TRUE) # form a matrix full of random integers
RM <- matrix(0,100,10) # declare and initialize a matrix to fill later, 100 rows, 10 columns
for (i in 1:10){
RM[,i] <- RV*i*i # fill each column with random integers but increase by square at each step
}
init <- 100 # starting point on y axis
RM2 <- matrix(0,100,10) # declare and initialize a matrix to fill later, 100 rows, 10 columns
for (n in 1:10){
RV <- RV*(n/5)
for (m in 1:100){
RM2[m,n] <- init # set the starting point
init <- init + RV[m] # randomly increase at each step
}
init <- 100 # reset at the end of the column
}
png("mygraph1.png",width = 1366,height = 768) # turn on the image saver and create the image
persp(RM,theta = -60) # draw the graph
dev.off() # save it and turn the device off
png("mygraph2.png",width = 1366,height = 768)
persp(RM2,theta = -60)
dev.off()
|
e59c4193c6fc07b2f0ef54d3bbb6942ae0d271f4
|
8222131f45630e4bd8ebe9e7ed4d1a4ddc6e8eb5
|
/plot3.R
|
13a227880cda453b52db4ba53f169add0c87c13a
|
[] |
no_license
|
EmilyMazo/ExData_Plotting1
|
4b5e23df67f0f7541d7b00262b9046b50f7fd1b9
|
805546310d4ee4ce1f44d2f3bc0b34d46768e729
|
refs/heads/master
| 2021-01-17T12:33:19.203519
| 2014-08-10T20:51:33
| 2014-08-10T20:51:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 367
|
r
|
plot3.R
|
png(file="plot3.png")
with(d, plot(Date, Sub_metering_1, col="black", ylab="Energy sub metering", type="l"))
with(d, points(Date, Sub_metering_2, col="red", type="l"))
with(d, points(Date, Sub_metering_3, col="blue", type="l"))
legend("topright", lty=c(1, 1, 1), col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
e4921e427afeb814abae4e7b68f7d87240aabb86
|
1dfb6ad8ba481a0842df7acbc181b5c3936158f2
|
/man/my.shortterm.Rd
|
21613accef3b51b4d0deeb937118dec80a712c04
|
[] |
no_license
|
vivienroussez/autoTS
|
ca1ac409fafec42e62a1cad21bcdd5e6c54e595c
|
b457c022154753b1b8eeb531c2d8db46ae06cd1c
|
refs/heads/master
| 2021-08-05T20:22:15.779862
| 2020-06-05T12:31:14
| 2020-06-05T12:31:14
| 183,512,754
| 11
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,244
|
rd
|
my.shortterm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algos.R
\name{my.shortterm}
\alias{my.shortterm}
\title{Fit short term algorithm and make the prediction}
\usage{
my.shortterm(prepedTS, n_pred, smooth_window = 2)
}
\arguments{
\item{prepedTS}{A list created by the \code{prepare.ts()} function}
\item{n_pred}{Int number of periods to forecast forward (eg n_pred = 12 will lead to one year of prediction for monthly time series).
Note that this algorithm cannot predict further than one year}
\item{smooth_window}{Int specifying the number of periods to consider for computing the evolution rate that will be applied for the forecast}
}
\value{
A dataframe with 4 columns : date, average prediction, upper and lower 95% confidence interval bounds
}
\description{
Fit short term algorithm and make the prediction
}
\details{
this algorithm uses data of the last year and makes the prediction
taking into account the seasonality and the evolution of the previous periods' evolution
}
\examples{
library(lubridate)
library(dplyr)
dates <- seq(as_date("2000-01-01"),as_date("2010-12-31"),"quarter")
values <- rnorm(length(dates))
my.ts <- prepare.ts(dates,values,"quarter",complete = 0)
my.shortterm(my.ts,n_pred=4)
}
|
e414174d0c1fb6c10ab5417bef5f423a232ae1c8
|
14c2f47364f72cec737aed9a6294d2e6954ecb3e
|
/man/cpmFilter.Rd
|
aa091293f1d2eeda7648212bff1a0f8059948533
|
[] |
no_license
|
bedapub/ribiosNGS
|
ae7bac0e30eb0662c511cfe791e6d10b167969b0
|
a6e1b12a91068f4774a125c539ea2d5ae04b6d7d
|
refs/heads/master
| 2023-08-31T08:22:17.503110
| 2023-08-29T15:26:02
| 2023-08-29T15:26:02
| 253,536,346
| 2
| 3
| null | 2022-04-11T09:36:23
| 2020-04-06T15:18:41
|
R
|
UTF-8
|
R
| false
| true
| 288
|
rd
|
cpmFilter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R
\name{cpmFilter}
\alias{cpmFilter}
\title{Filter by counts per million (cpm)}
\usage{
cpmFilter(object)
}
\arguments{
\item{object}{An object}
}
\description{
Filter by counts per million (cpm)
}
|
184d3f010d2575374c32e8c35e93ab85d504e5ff
|
c3cbb5800875d19adffa57e14ba96f631ecc0103
|
/app.R
|
b0ebba0de70cde6af117efe6947febc54c695841
|
[] |
no_license
|
Zhu-Daniel/RDatabasePractice
|
58b95906838e48c7ad8249693ffde4da5e24b3ee
|
1bf4347e6ea03b0359c04ddc2fb4a13d0cdadb33
|
refs/heads/master
| 2021-07-16T05:34:52.837230
| 2021-01-21T19:44:05
| 2021-01-21T19:44:05
| 232,418,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,556
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dbplyr)
library(dplyr)
library(ggplot2)
library(gridExtra)
mtg <- DBI::dbConnect(
drv = RSQLite::SQLite(),
dbname = "AllPrintings.sqlite")
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("MTG Price Data"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("bins",
"Number of bins:",
min = 1,
max = 50,
value = 30),
sliderInput("price",
"Price Range:",
min = 1,
max = 10000,
value = c(20,30))
),
# Show a plot of the generated distribution
mainPanel(
fluidRow(
splitLayout(cellWidths = c("50%","50%"),
plotOutput("distPlot"),
plotOutput("boxPlot"))
)
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
plotInput <- reactive({
list(cardprice = (mtg %>% dbReadTable("prices")),
dat = subset(cardprice, price<input$price[2] & price>input$price[1])
)
})
output$distPlot <- renderPlot({
# bigcardprice <- with(cardprice, price[price<input$price[2] & price>input$price[1]])
selectprice <- plotInput()$dat$price
# generate bins based on input$bins from ui.R
bins <- seq(min(selectprice), max(selectprice), length.out = input$bins + 1)
# draw the histogram with the specified number of bins
hist(selectprice, breaks = bins, col = 'cornflowerblue', border = 'white',
xlab = "Price of Magic: The Gathering Cards (US Dollars)",
main = "Histogram of MTG card prices")
# ggplot(plotInput$dat, aes(type, price, col=type)) + geom_boxplot() + scale_y_log10()
})
output$boxPlot <- renderPlot({
origplot <- ggplot(plotInput()$dat, aes(type, price, col=type)) + geom_boxplot() + scale_y_log10()
newplot <- origplot + theme(axis.text = element_text(size = 10))
print(newplot)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
f4279179390790decbc132b01489b219ee42f2ed
|
e1fe373fd6e8b404ef826069861e12efa88f0eac
|
/R_code/simulation code version 1/functions/welfare_statistic.R
|
e4fe1d26648fd510f5ead7933550e4cdb81acc62
|
[] |
no_license
|
Nathan-Mather/Heterogeneous-Teacher-Value-Added
|
e7331c07fdb45d3e1174078ac01e05b0937051ba
|
e5be646cfe957f74caf4a23511bc0c1626a41a61
|
refs/heads/master
| 2023-03-31T23:01:37.889489
| 2023-03-21T19:38:05
| 2023-03-21T19:38:05
| 244,950,847
| 0
| 0
| null | 2020-07-01T00:45:02
| 2020-03-04T16:35:05
|
TeX
|
UTF-8
|
R
| false
| false
| 16,049
|
r
|
welfare_statistic.R
|
# =========================================================================== #
# ===================== Calculate the Welfare Statistic ===================== #
# =========================================================================== #
# - Purpose of code:
# - Calculate the welfare statistic for each of our methods.
# ========================================================================= #
# ========================= Roxygen documentation ========================= #
# ========================================================================= #
#'@param in_data The data table used for the estimation.
#'@param output The results from the estimation.
#'@param type The type of estimation used, one of "true", "standard", bin",
#'"quant", or "semi".
#'@param npoints Number of grid points to use.
#'@param weight_type The weights to use, one of "equal", "rawlsian", "linear",
#'"v", or "mr".
#'@param in_test_1 Vector containing all pre test data.
#'@param pctile The percentile to be used for the specified weight.
#'@param weight_below The weight below the specified percentile for rawlsian.
#'@param weight_above The weight above the specified percentile for rawlsian.
#'@param v_alpha The alpha parameter for v weights.
#'@param mrpctile The percentile value for mr weights.
#'@param mrdist The spread for mr weights.
#'@param impact_type This specifies the type of impact function that teachers
#'will have for their students. Must by one of "MLRN" (Monotone, Linear,
#'Rank Similar, No Heterogeneity), "MLR" (Monotone, Linear, Rank Similar),
#'"ML" (Monotone, Linear, Not Rank Similar), "MNoR" (Monotone, Non-linear,
#'Rank Similar), "MNo" (Monotone, Non-linear, Not Rank Similar), or "No" (Not
#'Monotone, Non-linear, Not Rank Similar).
#'@param impact_function Which function from the specified type we want for
#'the true teacher impact.
#'@details
#'@examples
# ========================================================================= #
# ============================ debug parameters =========================== #
# ========================================================================= #
# in_dt = in_dt
# output = qtile_res # NOTE ONLY USE ONE OF THESE
# output = output # NOTE ONLY USE ONE OF THESE
# type = 'bin'
# npoints = npoints
# weight_type = weight_type
# in_test_1 = in_dt$test_1
# pctile = pctile
# weight_below = weight_above
# weight_above = weight_below
# v_alpha = v_alpha
# mrpctile = mrpctile
# mrdist = mrdist
# # # parms from MC run
# in_dt = r_dt
# type = 'true'
# npoints = p_npoints
# weight_type = p_weight_type
# in_test_1 = r_dt$test_1
# pctile = p_pctile
# weight_below = p_weight_below
# weight_above = p_weight_above
# v_alpha = p_v_alpha
# mrpctile = p_mrpctile
# mrdist = p_mrdist
# impact_type = p_impact_type
# impact_function = p_impact_function
# ========================================================================= #
# ============================ Define Function ============================ #
# ========================================================================= #
# Start of the function.
welfare_statistic <- function(in_dt = NULL,
output = NULL,
type = NULL,
npoints = 1000,
weight_type = NULL,
in_test_1 = NULL,
lin_alpha = NULL,
pctile = NULL,
weight_below = NULL,
weight_above = NULL,
v_alpha = NULL,
mrpctile = NULL,
mrdist = NULL,
impact_type = NULL,
impact_function = NULL){
#======================#
# ==== Get weights ====
#======================#
# if we want the true stat we need to use laten parms and nothing from inputed test data
if (type == 'true') {
# print out a message about the assumptions here
print("The Assumption here is the true student poplation is nomral(0,1)")
# Generate a grid over which we can get the true welfare added.
grid <- unlist(lapply(rnorm(n = npoints), rep,
times =length(unique(in_dt$teacher_id))))
# Attach teacher ids, start by getting unique teahers
welfare <- unique(in_dt[, c('teacher_id', 'teacher_ability',
'teacher_center', 'teacher_max')])
# replicate each teacher npoints number of times so each can get a grid
welfare <- do.call('rbind', replicate(npoints, welfare, simplify=FALSE))
welfare[, stud_ability_1 := grid]
# Get the weights for each place in the grid.
welfare[, weight := ww_general_fun(weight_type = weight_type,
in_test_1 = grid,
lin_alpha = lin_alpha,
quant_val_l = qnorm(.1),
quant_val_h = qnorm(.9),
pctile = NULL, # entering known value instead
pctile_val = qnorm(pctile),
weight_below = weight_below,
weight_above = weight_above,
v_alpha = v_alpha,
median_va = 0,
mrpctile = NULL, # entering known vlaue instead
mrpctile_val = qnorm(mrpctile),
mrdist = mrdist,
min_score = qnorm(max(mrpctile - mrdist, 0)),
max_score = qnorm(min(mrpctile + mrdist, 100)))]
# if not looking for the truth
}else{
# check that sampe is bigger than n points
if(length(in_test_1) < npoints ){
warning("full sample smaller than npoints, Setting npoints to full sample size")
grid <- in_test_1
}else{
# Generate a random sample of test data
grid <- sample(in_test_1, size = npoints)
}
# Attach teacher ids.
welfare <- unique(in_dt[, c('teacher_id', 'teacher_ability',
'teacher_center', 'teacher_max')])
n_teacher <- nrow(welfare)
welfare <- do.call('rbind', replicate(length(grid), welfare, simplify=FALSE))
# sort this by teacher so I can add on a replicated grid
setorder(welfare, teacher_id)
# add student test grid
welfare[, test_1 := rep(grid, times = n_teacher) ]
welfare[, weight := ww_general_fun(weight_type = weight_type,
in_test_1 = welfare$test_1,
lin_alpha = lin_alpha,
quant_val_l = quantile(welfare$test_1, probs = 0.1),
quant_val_h = quantile(welfare$test_1, probs = 0.9),
pctile = NULL,
weight_below = weight_below,
weight_above = weight_above,
v_alpha = v_alpha,
median_va = median(welfare$test_1),
mrpctile = mrpctile,
mrdist = mrdist,
min_score = quantile(welfare$test_1, max(pctile - mrdist, 0)),
max_score = quantile(welfare$test_1, min(pctile + mrdist, 100)),
pctile_val = quantile(welfare$test_1, pctile))]
}
# Renormalize the weights. so each teacher's weight sums to 1
welfare[, tot_weight := sum(weight), teacher_id]
welfare[, weight := weight/tot_weight]
welfare[, tot_weight := NULL]
# =============================================================== #
# ============ Calculate the welfare statistic ================= #
# ============================================================= #
# Calculate the appropriate welfare statistic.
if (type == 'true') {
# Get the teacher impact for the grid.
welfare[, true_impact := teacher_impact(teacher_ability = teacher_ability,
teacher_center = teacher_center,
teacher_max = teacher_max,
stud_ability_1 = NULL, # don't need this because I know true mean and SD
studmean = 0,
studsd = 1,
other_data = stud_ability_1,
type = impact_type,
func_num = impact_function)]
# Calculate and return the true welfare.
return( welfare[, list(true_welfare = sum(true_impact*weight)), by='teacher_id'])
} else if (type == 'bin') {
# Get the numeric range for each category.
output <- copy(as.data.table(output))
output[, range_low := as.numeric(sub('\\(', '', sapply(strsplit(category, ','), '[', 1)))]
output[, range_high := as.numeric(sub('\\]', '', sapply(strsplit(category, ','), '[', 2)))]
# Make the overall minimum very low and the overall maximum very high to capture all.
output[category != '', temp1 := min(range_low), by='teacher_id']
output[category != '', temp2 := max(range_high), by='teacher_id']
output[range_low == temp1, range_low := -100]
output[range_high == temp2, range_high := 100]
# make cateogry xwalk
cat_xwalk <- unique(output[, c("range_low", "range_high")])
cat_xwalk[, bin := .I]
# loop through bin xwalk and fill out welfare data bins
#note more code, but faster than the way we had it before
for(i in 1:nrow(cat_xwalk)){
low_i <- cat_xwalk[i, range_low]
high_i <- cat_xwalk[i, range_high]
bin_num_i <- cat_xwalk[i, bin]
welfare[test_1 > low_i &
test_1 <= high_i, bin := bin_num_i ]
}
# now merge on estimates
output <- merge(output,cat_xwalk, c("range_low", "range_high"))
output <- output[, c("teacher_id", "bin", "estimate")]
welfare <- merge(welfare, output, c("teacher_id", "bin"))
# Calculate and return the estimated welfare.
# welfare[, estimate := as.numeric(estimate)]
alt_welfare <- welfare[, .(alternative_welfare = sum(estimate*weight)), by='teacher_id']
return(alt_welfare[])
} else if (type == 'quant') {
in_test_1 <- in_test_1
in_coefs <- output
# current dependencies
# the variable name "tau" , "qtile_est", "se", "teacher_id"
# first we need to fill in the quantiles with actual values
tau_xwalk <- data.table(tau = unique(in_coefs$tau))
tau_xwalk[, tau_val := quantile(in_test_1,
probs = tau)]
# Get the weights for each place in the grid.
tau_xwalk[, weight := ww_general_fun(weight_type = weight_type,
in_test_1 = tau_val,
lin_alpha = lin_alpha,
quant_val_l = quantile(in_test_1, probs = 0.1),
quant_val_h = quantile(in_test_1, probs = 0.9),
pctile = NULL,
weight_below = weight_below,
weight_above = weight_above,
v_alpha = v_alpha,
median_va = median(in_test_1),
mrpctile = mrpctile,
mrdist = mrdist,
min_score = quantile(in_test_1, max(pctile - mrdist, 0)),
max_score = quantile(in_test_1, min(pctile + mrdist, 100)),
pctile_val = quantile(in_test_1, pctile))]
# adjust weights for student population using estiamted parameters
stud_mean <- mean(in_test_1)
stud_sd <- sd(in_test_1)
tau_xwalk[, weight := weight*dnorm(tau_val,
mean = stud_mean,
sd = stud_sd)]
# Renormalize the weights. so each teacher's weight sums to 1
tau_xwalk[, tot_weight := sum(weight),]
tau_xwalk[, weight := weight/tot_weight]
tau_xwalk[, tot_weight := NULL]
# now we merge those on
qtile_constants <- in_coefs[teacher_id == 'test_1']
in_coefs <- in_coefs[teacher_id != 'test_1', ]
w_coefs_dt <- merge(in_coefs, tau_xwalk, "tau")
# now standardize the estimates by quantile
w_coefs_dt[, qtile_est := (qtile_est - mean(qtile_est))/(sd(qtile_est)), tau]
w_coefs_dt[, qtile_est := mapply((function(x, y) y + qtile_constants[tau == x, qtile_est]), tau, qtile_est)]
# aggregate estimates
tot_weight <- tau_xwalk[, sum(weight)]
ww_qtile_va <- w_coefs_dt[, list(alternative_welfare = sum(qtile_est*weight/tot_weight)),
teacher_id]
# return the aggregate estimates
return(ww_qtile_va)
}else if (type == 'np_hack') {
# Take output as a j by npoints matrix of fitted values
if (length(output$points)==npoints) {
# add reshaped fitted values to data (should operate column by column to match weights)
welfare[, fit := as.matrix(output$results[, 1:npoints,],ncol(1))]
# Approximate integration over weights
ww_np_hack_va <- welfare[, list(alternative_welfare = sum(weight*(fit))), teacher_id]
# Grab unique values for each teacher
# Standardize to mean zero var one
#ww_np_hack_va[, alternative_welfare := (WA_temp-mean(WA_temp))/sd(WA_temp)]
# return the estimates
return(ww_np_hack_va)
} else {
stop("dimensions of fitted values and weights inconsistant")
}
} else if (type == 'semi') {
}
} # End function.
|
cb786467a353574725377691714d7c7215358a41
|
57f1e348f411854e949936e2e4af3848be1e98c8
|
/Analysis/MATdiversity.r
|
d46d3507f661d6727deb0bc8d8dc58d83ffabbec
|
[] |
no_license
|
selmants/bacteria_MAT
|
36bfd324e5ef88fe11095b746fd530625f7dc54e
|
bdb222ec8f625ee7a03f6d3a06c447f5d41a49c0
|
refs/heads/master
| 2021-01-22T09:09:27.996495
| 2017-06-16T21:39:10
| 2017-06-16T21:39:10
| 40,689,220
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,395
|
r
|
MATdiversity.r
|
#Paul Selmants
#March 31, 2015
##MAT bacterial diversity indices##
#load required packages
library(tidyr)
library(ggplot2)
library(RColorBrewer)
library(dplyr)
#read data into R
obs <- read.delim('observed_species.txt')
chao1 <- read.delim('chao1.txt')
pd <- read.delim('PD_whole_tree.txt')
#Use tidyr to convert OTU richness df from 'wide' to 'long' format
#Use dplyr to filter columns by 2200 sequencing depth and add MAT column
observed <- obs %>%
gather(sampleID, OTUs, s934.8:s1024.3) %>%
separate(sampleID, into = c('plot', 'rep'), sep = '\\.') %>%
arrange(plot, rep) %>%
mutate(MAT = c(rep(13.8, 880), rep(13.0, 660), rep(16.7, 880), rep(16.1, 880),
rep(18.2, 880), rep(17.3, 880),rep(16.1, 880), rep(15.5, 880), rep(15.1, 770))) %>%
filter(sequences.per.sample == 2200) %>%
group_by(plot, MAT, rep) %>%
summarise(iterations = length(OTUs),
OTU.rich = mean(OTUs))
#Use tidyr to convert chao1 estimated OTU richness df from 'wide' to 'long' format
#Use dplyr to filter columns by 2200 sequencing depth and add MAT column
chao1.rich <- chao1 %>%
gather(sampleID, chao1, s934.8:s1024.3) %>%
separate(sampleID, into = c('plot', 'rep'), sep = '\\.') %>%
arrange(plot, rep) %>%
mutate(MAT = c(rep(13.8, 880), rep(13.0, 660), rep(16.7, 880), rep(16.1, 880),
rep(18.2, 880), rep(17.3, 880),rep(16.1, 880), rep(15.5, 880), rep(15.1, 770))) %>%
filter(sequences.per.sample == 2200) %>%
group_by(plot, MAT, rep) %>%
summarise(iterations = length(chao1),
chao1.rich = mean(chao1))
#join observed and estimated (chao1) OTU richness estimates
richness <- full_join(observed, chao1.rich)
#Use tidyr to convert Faith's phylogenetic diversity df from 'wide' to 'long' format
#Use dplyr to filter columns by 2200 sequencing depth and add MAT column
phyl.div <- pd %>%
gather(sampleID, pd, s934.8:s1024.3) %>%
separate(sampleID, into = c('plot', 'rep'), sep = '\\.') %>%
arrange(plot, rep) %>%
mutate(MAT = c(rep(13.8, 880), rep(13.0, 660), rep(16.7, 880), rep(16.1, 880),
rep(18.2, 880), rep(17.3, 880),rep(16.1, 880), rep(15.5, 880), rep(15.1, 770))) %>%
filter(sequences.per.sample == 2200) %>%
group_by(plot, MAT, rep) %>%
summarise(iterations = length(pd),
pd = mean(pd))
#join phylogenetic diversity with richness estimates
diversity <- full_join(richness, phyl.div) %>%
select(-iterations) %>%
arrange(MAT, plot)
#calculate tcrit value for df = 7
t<- qt(0.975, df = 7)
#summarize diversity estimates (mean and 95% CI)
div.summary <- diversity %>%
group_by(plot, MAT) %>%
summarise(N = length(OTU.rich),
obs = mean(OTU.rich),
obs.ci = t*(sd(OTU.rich)/sqrt(N)),
chao1 = mean(chao1.rich),
chao1.ci = t*(sd(chao1.rich)/sqrt(N)),
FaithPD = mean(pd),
FaithPD.ci = t*(sd(pd)/sqrt(N)))
#Linear regression of Faith's PD as a function of MAT
pd.mod <- lm(FaithPD ~ MAT, data = div.summary)
#Linear regression of estimated OTU richness (Chao1) as a function of MAT
chao.mod <- lm(chao1 ~ MAT, data = div.summary)
#Linear regression of observed OTU richness as a function of MAT
obs.mod <- lm(obs ~ MAT, data = div.summary)
#custom x-axis label for figures
my.xlab = expression(paste('Mean annual temperature (', degree, 'C)'))
#use ggplot2 to make fig of pd vs. MAT
pd.fig <- ggplot(div.summary, aes(MAT, FaithPD)) +
theme_classic() +
geom_point(size = 2.8, colour = 'blue') +
geom_errorbar(aes(ymax = FaithPD + FaithPD.ci, ymin = FaithPD - FaithPD.ci),
width = 0.04, size = 0.2, colour = 'blue') +
scale_y_continuous(limits = c(0,80), breaks = c(0,20,40,60,80)) +
xlab(my.xlab) +
ylab("Phylogenetic diversity") +
theme(axis.title.y = element_text(vjust = 1.25)) #axis title further from y-axis
#save figure as .pdf file
ggsave('FaithPD.pdf', width = 4, height = 3.5)
obs.fig <- ggplot(div.summary, aes(MAT, obs)) +
theme_classic() +
geom_point(size = 2.8, colour = 'blue') +
geom_errorbar(aes(ymax = obs + obs.ci, ymin = obs - obs.ci),
width = 0.04, size = 0.2, colour = 'blue') +
scale_y_continuous(limits = c(0, 1000), breaks = c(0,250,500,750,1000)) +
xlab(my.xlab) +
ylab('Observed OTU richness') +
theme(axis.title.y = element_text(vjust = 0.7))
#save figure as .pdf file
ggsave('ObsRich.pdf', width = 4, height = 3.5)
#save FaithPD and obsrich as two-panel figure in TIFF format
library(gridExtra)
tiff('PD_rich.tiff', width = 2500, height = 1050, res = 400)
grid.arrange(obs.fig, pd.fig, ncol = 2)
dev.off()
|
f0bc8f2ada31b77118e5b00f1a5191c49c316192
|
cb0a99127d3d2707700206b2a1e0c4cdd8cf871c
|
/PaternalTransmission/PaternalTransmission.R
|
81893d5ee70b257159e23b5c33c1a42aaa24dc0f
|
[] |
no_license
|
ijwilson/ijwilson.github.io
|
fa2544cd6c32dd75e27c46ecab6f9b0f9af209dc
|
650353cdf746e06edd12978140e44753de094c4a
|
refs/heads/master
| 2021-07-08T21:43:37.383006
| 2021-05-14T10:33:12
| 2021-05-14T10:33:12
| 79,342,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,850
|
r
|
PaternalTransmission.R
|
sperm_count <-
c(80.295, 77.191, 169.566, 19.369, 136.779, 12.938, 12.514, 44.975,
117.89, 22.329, 22.113, 146.105, 79.137, 95.176, 36.811, 61.604,
89.553, 88.86, 35.404, 99.811, 43.441, 77.926, 62.545, 41.976,
102.642, 151.866, 20.169, 25.997, 17.705, 24.782, 37.28, 89.019,
55.345, 241.77, 198.505, 21.919, 37.32, 60.079, 82.141, 93.074,
104.943, 162.588, 118.786)
oocyte_count <-
c(1249200L, 1393650L, 1031100L)
## Hypothesis Test
##
## This function tests the hypothesis that the frequency of the
## paternal haplotype in the child is at the same relative frequency
## as the relative numbers of mtDNA copies transmitted in the sperm
## and oocyte, against an alternative hypothesis that the frequency
## is lower.
##
## bootHT returns the p-value.
bootHT <- function(mother,child,nm,nc,n_boot=100000) {
mo <- mean(oocyte_count)
sdo <- sd(oocyte_count)
s.cn <- replicate(n_boot,mean(sample(sperm_count,43,replace=TRUE)))
o.cn <- replicate(n_boot,mean(rnorm(3,mo,sdo)))
ratio <- s.cn/o.cn
noise.m <- rpois(n_boot,nm*mother/nm)
noise.c <- rpois(n_boot,nc*mother/nm)
child.fromfather <- rbinom(n_boot,nc,ratio)
d <- (child.fromfather+noise.c)/nc - noise.m/nm
obs <- child/nc - mother/nm
return(rank(c(obs,d))[1]/n_boot)
}
PaternalTransmission0 <- function(mother,child,nm,nc,n_boot=100000) {
noise.m <- rpois(n_boot,nm*mother/nm)
noise.c <- rpois(n_boot,nc*mother/nm)
d <- noise.c/nc - noise.m/nm
obs <- child/nc - mother/nm
return(1- rank(c(obs,d))[1]/n_boot) ## upper tail needed
}
## Tests done in the manuscript. Each trio (A1..D4) is tested with both
## bootHT (paternal transmission at gamete-ratio frequency) and
## PaternalTransmission0 (zero paternal transmission). The hard-coded count
## pairs (e.g. 11,6) are presumably the observed mother/child haplotype
## counts from the manuscript -- confirm against TrioHaplogroups.csv.
if (FALSE) { ## do not run
## Extract data for A1
trioHaplogroups <- read.csv("TrioHaplogroups.csv")
A1 <- trioHaplogroups[trioHaplogroups$trio==1 & trioHaplogroups$motif=="A",]
## NOTE(review): `father` is assigned but never used below
father <- A1$haplotype[A1$individual=="father"]
bootHT(11,6,
nm= sum(A1$count[A1$individual=="mother"]),
nc=sum(A1$count[A1$individual=="child"]),1E6)
PaternalTransmission0(11,6,
nm= sum(A1$count[A1$individual=="mother"]),
nc=sum(A1$count[A1$individual=="child"]),1E6)
#B2
B2 <- trioHaplogroups[trioHaplogroups$trio==2 & trioHaplogroups$motif=="B",]
bootHT(25,16,
nm= sum(B2$count[B2$individual=="mother"]),
nc=sum(B2$count[B2$individual=="child"]),1E6)
PaternalTransmission0(25,16,
nm= sum(B2$count[B2$individual=="mother"]),
nc=sum(B2$count[B2$individual=="child"]),1E6)
#C3
C3 <- trioHaplogroups[trioHaplogroups$trio==3 & trioHaplogroups$motif=="C",]
bootHT(5,5,
nm= sum(C3$count[C3$individual=="mother"]),
nc=sum(C3$count[C3$individual=="child"]),1E6)
PaternalTransmission0(5,5,
nm= sum(C3$count[C3$individual=="mother"]),
nc=sum(C3$count[C3$individual=="child"]),1E6)
#D4
D4 <- trioHaplogroups[trioHaplogroups$trio==4 & trioHaplogroups$motif=="D",]
bootHT(2,4,
nm= sum(D4$count[D4$individual=="mother"]),
nc=sum(D4$count[D4$individual=="child"]),1E6)
PaternalTransmission0(2,4,
nm= sum(D4$count[D4$individual=="mother"]),
nc=sum(D4$count[D4$individual=="child"]),1E6)
}
## Estimate the power of a one-sided (lower-tail) test for a haplotype at
## frequency `freq` above a heteroplasmy baseline `het`, at a given
## sequencing `coverage` and significance level `p`. The difference of two
## Poisson counts follows a Skellam distribution (rskellam, from VGAM).
## Returns the estimated power (proportion of H1 simulations rejected).
##
## NOTE(review): here d_h0 is drawn at rate coverage*(het+freq) and labelled
## "null", the mirror image of powerFuncB below (where the het/het draw is
## the null). Together with the lower-tail critical value the two functions
## are complementary; confirm the labelling against the manuscript.
powerFunc <- function(coverage, het, freq, reps = 1e+05, p = 0.05, show_plot = FALSE) {
if (require(VGAM) == FALSE) {
stop("This function requires the R package VGAM.\n"
, "Install it using \n>install.packages(\"VGAM\")")
}
## need to find the probability that the different between a poisson
## with a mean coverage*(het+freq) and a poisson with mean coverage*het
## is in the bottom 5% of the difference between two poissons.
## do this by simulation We simulate two sets of Poisson differences.
## For a one-tailed test the critical value is the lower tail so we get
## the lower tail of H_0 and see what proportion of
d_h0 <- rskellam(reps, coverage * (het + freq), coverage * het) ## null
d_h1 <- rskellam(reps, coverage * het, coverage * het) ## alternative
critical_value <- quantile(d_h0, probs = c(p)) ## the critical value
if (show_plot) {
## overlay the two simulated densities and mark the critical value in red
plot(density(d_h0), xlim = range(density(c(d_h0, d_h1))$x))
lines(density(d_h1), col = "blue")
lines(c(critical_value, critical_value), c(0, max(density(d_h0)$y/3)),
col = "red")
}
## power estimate: fraction of d_h1 draws at or below the critical value
sum(d_h1 <= critical_value)/reps
}
##---------------------------------------------------------------------------------------------
## Power of the one-sided (upper-tail) test for an excess haplotype frequency
## `freq` above a heteroplasmy baseline `het`, at a given sequencing
## `coverage` and significance level `p`. Returns the estimated power.
powerFuncB <- function(coverage, het, freq, reps = 1e+05, p = 0.05, show_plot = FALSE) {
  if (require(VGAM) == FALSE) {
    stop("This function requires the R package VGAM.\n"
         , "Install it using \n>install.packages(\"VGAM\")")
  }
  ## The difference of two independent Poisson counts follows a Skellam
  ## distribution; simulate it under the null (no excess) and under the
  ## alternative (an excess of `freq` on top of the baseline).
  null.sims <- rskellam(reps, coverage * het, coverage * het)
  alt.sims <- rskellam(reps, coverage * (het + freq), coverage * het)
  ## critical value = upper p-quantile of the simulated null distribution
  cutoff <- quantile(null.sims, probs = c(1 - p))
  if (show_plot) {
    ## overlay the two densities and mark the critical value in red
    plot(density(null.sims), xlim = range(density(c(null.sims, alt.sims))$x))
    lines(density(alt.sims), col = "blue")
    lines(c(cutoff, cutoff), c(0, max(density(null.sims)$y/3)),
          col = "red")
  }
  ## power = proportion of alternative simulations beyond the critical value
  sum(alt.sims > cutoff)/reps
}
## Example usage of powerFunc / powerFuncB across a range of coverages.
if (FALSE) {
# example code
myCoverage <- c(10000, 20000, 50000, 1e+05, 2e+05, 5e+05, 1e+06)
pow <- sapply(myCoverage, powerFunc, het = 0.005, freq = 0.005)
# note that this is equivalent but longer winded
# (NOTE(review): the loop uses het = 0.001 / freq = 1e-04, different
# parameters from the sapply call above it)
pow <- numeric(length(myCoverage))
for (i in 1:length(myCoverage)) {
pow[i] <- powerFunc(myCoverage[i], het = 0.001, freq = 1e-04)
}
# power curves on a log coverage axis; powerFuncB overlaid in red
plot(myCoverage, pow, log = "x", ylim = c(0, 1))
powB <- sapply(myCoverage, powerFuncB, het = 0.005, freq = 0.005)
points(myCoverage, powB, col = "red")
}
|
373625f16fa77052104397ef3119f9a18ca0f859
|
e6a89fb6ae0056bfdc0400219be700fa4c2d419f
|
/man/Rbyte.Rd
|
60033c965df95f1fdab75eca73af0a3591449b93
|
[
"MIT"
] |
permissive
|
ramiromagno/c3po
|
b88c763c7af43897c10867799394dc2a31aea826
|
8f31de2734bf856ced4ce64d19655c01e218598c
|
refs/heads/master
| 2023-02-08T22:23:21.600000
| 2021-01-04T02:32:42
| 2021-01-04T02:32:42
| 259,738,174
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 419
|
rd
|
Rbyte.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rbyte.R
\name{Rbyte}
\alias{Rbyte}
\title{Rbyte}
\description{
\Sexpr[results=rd, stage=render]{c3po:::badge('typedef-dtype')}
Rbyte is an alias to an \code{unsigned char}.
}
\section{Declaration}{
\preformatted{typedef unsigned char Rbyte;
}
In \href{https://github.com/wch/r-source/blob/trunk/src/include/Rinternals.h}{Rinternals.h}.
}
|
3fbc210ca07c5e1e311abdb5c97f9cb28dfb1bdb
|
a1d75e1fb878f2fa43218e78b1361b4f1e125e2d
|
/Coral-Species-Cluster-PCA.R
|
a0a71209d8eb2ce8c5a1ed0d902534a3ee281ec1
|
[] |
no_license
|
jesslynne73/Machine-Learning
|
4075f5ade1887c12d49936071107d66734309ef6
|
ed7a6bd487967c73a653bd2d0cea6fd96725b745
|
refs/heads/main
| 2023-07-09T02:55:52.610753
| 2021-08-05T21:08:07
| 2021-08-05T21:08:07
| 329,099,558
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,813
|
r
|
Coral-Species-Cluster-PCA.R
|
# Coral Species Unsupervised Learning
# Author: Jess Strait

# Clear environment and load packages
# NOTE(review): rm(list = ls()) wipes the user's workspace; acceptable in a
# standalone analysis script, but avoid sourcing this file from other code.
rm(list = ls())
library(data.table)
library(Rtsne)
library(ggplot2)
library(caret)
library(ClusterR)
library(cluster)
library(mlr)

# Intake data
data <- fread("speciesdata.csv")

# Save IDs, then drop the column so it does not enter the analysis
id_vector <- data$id
data$id <- NULL

# Convert all 15 locus columns to factors so dummyVars() one-hot encodes
# them (replaces 15 copy-pasted as.factor() assignments)
locus_cols <- paste0("locus_", 1:15)
for (col in locus_cols) {
  data[[col]] <- as.factor(data[[col]])
}

# Create dummy (one-hot) variables for every factor column
dummies <- dummyVars(~ ., data = data)
numdummies <- predict(dummies, newdata = data)
# Run a principal component analysis on the one-hot encoded loci
pca <- prcomp(numdummies)
screeplot(pca)   # variance explained per component
summary(pca)
biplot(pca)
# Save principal component coordinates (observations in PC space)
pca_dt <- data.table(pca$x)
# Kmeans clustering with PC's - best performance is with all PC's
# (k = 3 clusters, 25 random restarts for stability)
kmean_sol <- kmeans(pca_dt[,], centers = 3, nstart = 25)
pca_dt$kmeanPred <- kmean_sol$cluster
# Save kmeans model
saveRDS(kmean_sol, "kmeans.model")
# Add back the ID values for Phase 1 submission
# (kept commented: Phase 1 hard-coded 0.66 probability for the assigned
# cluster and 0.33 for the others)
#submission <- data.table(pca_dt$kmeanPred)
#submission$id <- id_vector
# Generate submission file for Phase 1 submission
#submission$species1 <- 0.33
#submission$species1[grep('1', submission$V1)] <- 0.66
#submission$species2 <- 0.33
#submission$species2[grep('2', submission$V1)] <- 0.66
#submission$species3 <- 0.33
#submission$species3[grep('3', submission$V1)] <- 0.66
#submission$V1 <- NULL
# Phase 2 code start
# Remove kmeans information
pca_dt$kmeanPred <- NULL

# BUG FIX: Rtsne() takes `perplexity=`, not `perplexityvalue=`. The original
# calls passed an unknown named argument that was silently swallowed by
# `...`, so every run actually used the default perplexity (30) and the
# whole perplexity comparison below was meaningless.
set.seed(3)

# Helper: run tSNE at a given perplexity, plot the embedding, and return
# the 2-D coordinates as a data.table
run_tsne <- function(data, perplexity, max_iter = 1000) {
  tsne <- Rtsne(data, pca = FALSE, perplexity = perplexity,
                check_duplicates = FALSE, max_iter = max_iter)
  coords <- data.table(tsne$Y)
  print(ggplot(coords, aes(x = V1, y = V2)) + geom_point() +
          labs(title = paste("perplexity = ", perplexity)))
  coords
}

# Baseline and perplexity sweep: 10, 30, 50, 20, 15
tsne_dt <- run_tsne(pca_dt, 10)
tsne_dt <- run_tsne(pca_dt, 30)
tsne_dt <- run_tsne(pca_dt, 50)
tsne_dt <- run_tsne(pca_dt, 20)
tsne_dt <- run_tsne(pca_dt, 15)
# 30 was an improvement from 10. 50 was not an improvement from 30. 20 was
# an improvement from 30. 15 was worse than 20. Proceed with 20.

# 1000 iterations did not beat the benchmark. Try more iterations.
tsne_dt <- run_tsne(pca_dt, 20, max_iter = 10000)
# More iterations made the model worse. Try fewer iterations (final run).
tsne_dt <- run_tsne(pca_dt, 20, max_iter = 300)
# We know from the competition that the optimal number of clusters is k=3
gmm_data <- GMM(tsne_dt[,.(V1,V2)], 3)
# Convert log-likelihood into probability as shown in lecture (remember that likelihood and probability are different)
# NOTE(review): raising the log-likelihoods to the 10th power (rather than
# exponentiating) follows the course lecture; confirm this is the intended
# likelihood-to-probability transform.
l_clust <- gmm_data$Log_likelihood^10
l_clust <- data.table(l_clust)
# Normalise the reciprocals row-wise so the three values sum to 1
net_lh <- apply(l_clust,1,FUN=function(x){sum(1/x)})
cluster_prob <- 1/l_clust/net_lh
# Observe cluster 1 probabilities
tsne_dt$Cluster_1_prob <- cluster_prob$V1
ggplot(tsne_dt,aes(x=V1,y=V2,col=Cluster_1_prob)) + geom_point()
# Observe cluster 2 probabilities
tsne_dt$Cluster_2_prob <- cluster_prob$V2
ggplot(tsne_dt,aes(x=V1,y=V2,col=Cluster_2_prob)) + geom_point()
# Observe cluster 3 probabilities
tsne_dt$Cluster_3_prob <- cluster_prob$V3
ggplot(tsne_dt,aes(x=V1,y=V2,col=Cluster_3_prob)) + geom_point()
# Assign each point the label of its most probable cluster
# (ties.method = "random" makes tied assignments nondeterministic)
tsne_dt$gmm_labels <- max.col(cluster_prob, ties.method = "random")
ggplot(tsne_dt,aes(x=V1,y=V2,col=gmm_labels)) + geom_point()
# Create submission file: drop coordinates/labels, map cluster probabilities
# onto the three species columns (cluster 3 -> species1, etc.)
tsne_dt$id <- id_vector
submissionfinal <- tsne_dt
submissionfinal$V1 <- NULL
submissionfinal$V2 <- NULL
submissionfinal$species1 <- submissionfinal$Cluster_3_prob
submissionfinal$species2 <- submissionfinal$Cluster_2_prob
submissionfinal$species3 <- submissionfinal$Cluster_1_prob
submissionfinal$Cluster_3_prob <- NULL
submissionfinal$Cluster_2_prob <- NULL
submissionfinal$Cluster_1_prob <- NULL
submissionfinal$gmm_labels <- NULL
fwrite(submissionfinal, "tsne_submission_20_less_iters.csv")
|
7b8c0b38928fb6a5d48745403fbf1d4f364a9a5a
|
dd90b0c9d116be4983a732bbcd46bb11c15101e3
|
/R/hello.R
|
e71de7d5e9190505b7a681de9f6c83cc142cdab9
|
[] |
no_license
|
gaurav6351/R-Testing
|
ad34f086495779ed98dc5d258f73903d743af857
|
ab8309ea7d2decbb0e199ab2e5db36a5cc7d0e91
|
refs/heads/master
| 2020-05-25T12:48:52.042791
| 2017-03-01T17:48:35
| 2017-03-01T17:48:35
| 83,583,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,732
|
r
|
hello.R
|
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
# Print a friendly greeting; the printed string is returned invisibly
# (print() returns its argument).
hello <- function() {
  greeting <- "Hello, world!"
  print(greeting)
}
# --- Basic data structures -------------------------------------------------
vec <- c(1, 2, 3)
vec
mat <- matrix(vec, 3, 3)
mat
arr <- array(c(1, 2), dim = c(3, 3, 2))
arr

# A small demo data frame
BMI <- data.frame(
  gender = c("Male", "Male", "Female"),
  height = c(152, 171.5, 165),
  weight = c(81, 93, 78),
  Age = c(42, 38, 26)
)
print(BMI)

# Lists can hold heterogeneous objects
foo <- list(vec, mat)
foo

# Factors encode categorical data
apple <- c('green', 'red', 'yelllow', 'red')
factor_apple <- factor(apple)
factor_apple

# Print squares of the numbers 1..a.
# seq_len() (rather than 1:a) correctly yields an empty loop when a == 0.
new.function <- function(a) {
  for (i in seq_len(a)) {
    b <- i^2
    print(b)
  }
}
new.function(6)

# Create vector objects.
city <- c("Tampa", "Seattle", "Hartford", "Denver")
state <- c("FL", "WA", "CT", "CO")
# BUG FIX: ZIP codes must be character, not numeric -- the numeric literal
# 06161 silently loses its leading zero and becomes 6161.
zipcode <- c("33602", "98104", "06161", "80294")

# Combine the three vectors into one character matrix
# (rbind already coerced everything to character in the original)
addresses <- rbind(city, state, zipcode)
addresses
print(class(addresses))

# NOTE(review): hard-coded absolute path; adjust for your machine
data <- read.csv("/home/gaurav/work/titanic/test.csv")
print(data)
print(class(data))
retval <- subset(data, Sex == "male" & Pclass == 3)
print(retval)
# --- Pie chart written to a jpg device (dev.off() is never called here;
# NOTE(review): the device stays open until the script ends) ---------------
x <- c(21, 62, 10, 53)
labels <- c("London", "New York", "Singapore", "Mumbai")
png(file = "/home/gaurav/work/titanic/city.jpg")
pie(x, labels, main = "City pie chart", col = rainbow(length(x)))
# --- Pie chart with percentage labels -------------------------------------
slices <- c(10, 12, 4, 16, 8)
lbls <- c("US", "UK", "india", "Germany", "France")
pct <- round(slices/sum(slices)*100)
lbls <- paste(lbls, pct) # add percents to labels
lbls
lbls <- paste(lbls,"%",sep="") # ad % to labels
pie(slices,labels = lbls, col=rainbow(length(lbls)),
main="Pie Chart of Countries")
# --- 3-D pie chart (requires the plotrix package) -------------------------
library(plotrix)
slices <- c(10, 12, 4, 16, 8)
lbls <- c("US", "UK", "Australia", "Germany", "France")
pie3D(slices,labels=lbls,explode=0.1,
main="Pie Chart of Countries ")
# --- Simple bar chart ------------------------------------------------------
H <- c(7,12,28,3,41)
M <- c("Mar","Apr","May","Jun","Jul")
barplot(H,names.arg = M,xlab = "Month",ylab = "Revenue",col = "blue",
main = "Revenue chart",border = "red")
# --- Stacked bar chart: 3 regions x 5 months -------------------------------
colors <- c("green","orange","brown")
months <- c("Mar","Apr","May","Jun","Jul")
regions <- c("East","West","North")
# Create the matrix of the values (rows = regions, columns = months).
Values <- matrix(c(2,9,3,11,9,4,8,7,3,12,5,2,8,10,11),nrow = 3,ncol = 5,byrow = TRUE)
# Create the bar chart.
barplot(Values,main = "total revenue",names.arg = months,xlab = "month",ylab = "revenue",
col = colors)
# Add the legend to the chart.
legend("topleft", regions, cex = 1.3, fill = colors)
# --- Box plot and line chart (built-in mtcars) -----------------------------
input <- mtcars[,c('mpg','cyl')]
print(head(input))
boxplot(mpg ~ cyl, data = mtcars, xlab = "Number of Cylinders",
ylab = "Miles Per Gallon", main = "Mileage Data")
# Create the data for the chart.
v <- c(7,12,28,3,41)
# Plot the bar chart.
# (NOTE(review): type = "o" draws a line chart, not a bar chart)
plot(v,type = "o", col = "red", xlab = "Month", ylab = "Rain fall",
main = "Rain fall chart")
# --- Scatterplot matrix of four mtcars variables ---------------------------
pairs(~wt+mpg+disp+cyl,data = mtcars,
main = "Scatterplot Matrix")
# --- Descriptive statistics ------------------------------------------------
x <- c(12, 7, 3, 4.2, 18, 2, 54, -21, 8, -5)
# Trimmed mean: drops the top and bottom 30% of values before averaging
z <- mean(x, trim = 0.3, na.rm = TRUE)
z
y <- median(x, na.rm = TRUE)
y

# --- Simple linear regression: predict weight from height ------------------
x <- c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
y <- c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)

# Fit the model weight ~ height.
relation <- lm(y ~ x)
print(summary(relation))

# Predicted weight of a person with height 170.
a <- data.frame(x = 170)
result <- predict(relation, a)
print(result)

# Scatter plot with the regression line
# (note: the abline() call is evaluated while plot() draws, as in the original)
plot(y, x, col = "blue", main = "Height & Weight Regression",
     abline(lm(x ~ y)), cex = 1.3, pch = 16,
     xlab = "Weight in Kg", ylab = "Height in cm")

# --- Multiple regression: mpg from disp, hp, wt (built-in mtcars) ----------
input <- mtcars[, c("mpg", "disp", "hp", "wt")]

# Fit the model.
model <- lm(mpg ~ disp + hp + wt, data = input)
print(model)

# Extract the intercept and the three slope coefficients.
a <- coef(model)[1]
print(a)
Xdisp <- coef(model)[2]
Xhp <- coef(model)[3]
Xwt <- coef(model)[4]
print(Xdisp)
print(Xhp)
print(Xwt)
# --- Logistic regression: transmission type (am) from car characteristics --
input <- mtcars[, c("am", "cyl", "hp", "wt")]
am.data <- glm(formula = am ~ cyl + hp + wt, data = input, family = binomial)
print(summary(am.data))

# --- Decision tree (party package) -----------------------------------------
library(party)
# Create the input data frame.
input.dat <- readingSkills[c(1:105), ]
# Create the tree.
out <- ctree(
  nativeSpeaker ~ age + shoeSize + score,
  data = input.dat)
# Plot the tree.
plot(out)

# --- Random forest ---------------------------------------------------------
library(party)
library(randomForest)
# Create the forest.
output.forest <- randomForest(nativeSpeaker ~ age + shoeSize + score,
                              data = readingSkills)
# View the forest results.
print(output.forest)
# BUG FIX: the original printed importance(fit, ...) but no object named
# `fit` exists in this script; the fitted model is `output.forest`.
print(importance(output.forest, type = 2))

# NOTE(review): `diamonds` lives in ggplot2, which is not loaded in this
# script -- this line errors unless ggplot2 is attached elsewhere.
diamonds
View(iris)

library(dplyr)
library(EDAWR)
library(nycflights13)
library(tidyr)
select(storms)
|
1d4acaed1afdb59a0554553c2767c87bbac37b29
|
570d4141186786df5179cc4346dd3808c1c41f26
|
/plots/pres/2018-08-20/protcod.R
|
5de88dfbccb575d8f47d7089e1becfc3787a16d4
|
[
"MIT"
] |
permissive
|
ArtemSokolov/amp-ad
|
552fee92c0ec30539386745210f5ed2292931144
|
dd5038f2497698b56a09471c89bb710329d3ef42
|
refs/heads/master
| 2021-06-21T21:04:44.368314
| 2019-09-10T17:40:48
| 2019-09-10T17:40:48
| 114,150,614
| 0
| 4
|
MIT
| 2019-09-10T17:40:49
| 2017-12-13T17:39:02
|
HTML
|
UTF-8
|
R
| false
| false
| 1,656
|
r
|
protcod.R
|
## Plots showing the effect of reducing to protein-coding regions
##
## by Artem Sokolov
source( "api.R" )
synapseLogin()
## Identifies the IDs of all relevant
## Synapse IDs of the background AUC result files, one row per
## (dataset, classification task) pair.
idsBK <- function()
{
    ids <- data.frame(
        Dataset = c("ROSMAPpc", "ROSMAPpc", "ROSMAPpc", "ROSMAP", "ROSMAP", "ROSMAP"),
        Task = c("AB", "AC", "BC", "AB", "AC", "BC"),
        BKid = c("syn15589822", "syn15589816", "syn15589810",
                 "syn15661345", "syn15661346", "syn15661344"),
        stringsAsFactors = FALSE
    )
    ## tag as a tibble for pretty printing / tidyverse semantics
    class(ids) <- c("tbl_df", "tbl", "data.frame")
    ids
}
## Download the per-task AUC tables from Synapse, reshape them, and save a
## boxplot + smoothed-curve comparison of the full 28.4k gene set vs the
## protein-coding-only set, faceted by task. Writes plots/protcod.png.
mainPlot <- function()
{
## Load all the relevant entities (one AUC table per Synapse ID)
X <- idsBK() %>% mutate( AUCs = map(BKid, ~read_csv(syn(.x), col_types=cols()) ) )
## Reshape everything into a single data frame (one row per Size/AUC pair)
XX <- X %>% mutate( AUCs = map( AUCs, gather, Size, AUC ) ) %>% unnest %>%
mutate_at( "Size", as.integer ) %>% select( -BKid )
## Tweak the names by hand: map dataset IDs to display labels
RR <- XX %>% mutate( `Gene Set` = c("ROSMAP" = "28.4k", "ROSMAPpc" = "ProtCod")[Dataset] )
## Compute summary distributions (boxplots) at key gene-set sizes
SS <- RR %>% filter( Size %in% c( 100, 300, 500, 700, 900 ) )
## Plot the results
gg <- ggplot( RR, aes( x=Size, y=AUC, color=`Gene Set`) ) + theme_bw() +
geom_boxplot( aes(group=interaction(Size, `Gene Set`)), data=SS ) +
geom_smooth(se = FALSE) + facet_wrap( ~Task ) + bold_theme() +
scale_color_manual( values=c("28.4k"="tomato", "ProtCod"="steelblue") ) +
theme( legend.position="bottom" )
ggsave( "plots/protcod.png", gg, width=9, height=4 )
}
|
4eff3ee8450f139a96a5d97bebf2786fe265561e
|
794863d2e9e26424a04079a91c3a23063bdb4f8e
|
/man/ElasticNetVAR.Rd
|
93b7006e36bb5ed3a69a7accad539c83ccc7d81b
|
[] |
no_license
|
GabauerDavid/ConnectednessApproach
|
ef768e64e0bc458ad180bac6b667b3fe5662f01d
|
0ca4799a2f5aa68fdd2c4a3e8a2e0e687d0a9b17
|
refs/heads/main
| 2023-08-09T07:23:45.002713
| 2023-07-27T22:57:04
| 2023-07-27T22:57:04
| 474,462,772
| 47
| 20
| null | 2023-03-12T04:22:26
| 2022-03-26T20:47:15
|
R
|
UTF-8
|
R
| false
| true
| 1,882
|
rd
|
ElasticNetVAR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ElasticNetVAR.R
\name{ElasticNetVAR}
\alias{ElasticNetVAR}
\title{Elastic Net vector autoregression}
\usage{
ElasticNetVAR(
x,
configuration = list(nlag = 1, nfolds = 10, loss = "mae", alpha = NULL, n_alpha = 10)
)
}
\arguments{
\item{x}{zoo data matrix}
\item{configuration}{Model configuration}
\item{nlag}{Lag length}
\item{nfolds}{N-fold cross validation}
\item{loss}{Loss function}
\item{alpha}{LASSO is alpha equal 1 and Ridge if alpha equal 0}
\item{n_alpha}{Creates n-equidistant alpha values}
}
\value{
Estimate VAR model
}
\description{
Estimation of a VAR using equation-by-equation LASSO, Ridge or Elastic Net regressions.
}
\examples{
\donttest{
data(dy2012)
fit = ElasticNetVAR(dy2012, configuration=list(nlag=1, alpha=1, nfolds=10, loss="mae"))
}
}
\references{
Tibshirani, R., Bien, J., Friedman, J., Hastie, T., Simon, N., Taylor, J., & Tibshirani, R. J. (2012). Strong rules for discarding predictors in lasso‐type problems. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 74(2), 245-266.
Hoerl, A. E., & Kennard, R. W. (1970). Ridge regression: Biased estimation for nonorthogonal problems. Technometrics, 12(1), 55-67.
Zou, H., & Hastie, T. (2005). Regularization and variable selection via the elastic net. Journal of the royal statistical society: series B (statistical methodology), 67(2), 301-320.
Demirer, M., Diebold, F. X., Liu, L., & Yilmaz, K. (2018). Estimating global bank network connectedness. Journal of Applied Econometrics, 33(1), 1-15.
Gabauer, D., Gupta, R., Marfatia, H., & Miller, S. M. (2020). Estimating US Housing Price Network Connectedness: Evidence from Dynamic Elastic Net, Lasso, and Ridge Vector Autoregressive Models. Lasso, and Ridge Vector Autoregressive Models (July 26, 2020).
}
\author{
David Gabauer
}
|
1a3b6698e6ce16938e9e11a90e92c5fa767e5546
|
9fba17b8a66b625e5bb609e78e65a631706d43bc
|
/testing-section/Classification-testing/part9-knn/knn.R
|
aff547397608402f84c9334d34e1eca756ba7abf
|
[] |
no_license
|
irekizea/machine-learning-learning
|
70df74e25b19be82eae5f3f5c0ab5337ba698bd3
|
0ac486b562df445f3dc5bc0bfe12b4dce4db199e
|
refs/heads/master
| 2020-12-18T10:16:23.076138
| 2020-01-21T07:43:00
| 2020-01-21T07:43:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,662
|
r
|
knn.R
|
# Get the dataset (Social Network Ads: Age, EstimatedSalary -> Purchased)
dataset <- read.csv('/home/felipe/Documentos/Machine Learning A-Z/Part 3 - Classification/Section 14 - Logistic Regression/Social_Network_Ads.csv')
dataset <- dataset[3:ncol(dataset)]   # drop the User ID and Gender columns
# Feature scaling: centre/scale every predictor column (all but the target).
# NOTE(review): scaling is fit on the full dataset before the train/test
# split, so test-set statistics leak into the scaler -- fine for a tutorial,
# but in practice fit the scaler on the training set only.
dataset[, 1:(ncol(dataset) - 1)] <- scale(apply(dataset[, 1:(ncol(dataset) - 1)], 2, as.numeric))
# Split the dataset into test set and train set (75% train, stratified on
# the target by sample.split)
library(caTools)
datasplit <- sample.split(dataset$Purchased, SplitRatio = 0.75)
train_set <- subset(dataset, datasplit)
test_set <- subset(dataset, !datasplit)
# Visualize the training data: purchasers (blue) vs non-purchasers (red).
# NOTE(review): after scaling, min()*1.25 expands a negative limit downward,
# so the axis limits are looser than the literal "125%" intent.
library(ggplot2)
ggplot() +
geom_point(aes(
x = subset(train_set, train_set$Purchased == TRUE)$Age,
y = subset(train_set, train_set$Purchased == TRUE)$EstimatedSalary),
color = 'blue') +
geom_point(aes(
x = subset(train_set, train_set$Purchased == FALSE)$Age,
y = subset(train_set, train_set$Purchased == FALSE)$EstimatedSalary),
color = 'red') +
xlab('Age') +
ylab('Estimated Salary') +
ggtitle('Customers Scatter Plot') +
xlim(min(train_set$Age) * 1.25, max(train_set$Age) * 1.25) +
ylim(min(train_set$EstimatedSalary) * 1.25, max(train_set$EstimatedSalary) * 1.25)
# K-NN is a lazy algorithm, there's no explicit model. Then, the training
# step and prediction step are the same: class::knn() classifies the test
# rows directly from the training rows (k = 5 nearest neighbours).
library(class)
prediction <- knn(
train = train_set[, -ncol(train_set)],
test = test_set[, -ncol(test_set)],
cl = train_set$Purchased,
k = 5)
# Confusion matrix (rows = actual, columns = predicted)
conf_mat <- table(test_set$Purchased, prediction)
conf_mat
# K-NN with prob = TRUE also attaches the winning-vote proportion as the
# "prob" attribute of the result
prediction_prob <- knn(train = train_set[, -ncol(train_set)],
test = test_set[, -ncol(test_set)],
cl = train_set$Purchased,
k = 5,
prob = TRUE)
|
87708e706c38907efdaa1fd18460f2c41699f508
|
8a97255cb66455dbef0cf01864a3b334cf20a66b
|
/karma_ML_Ensemble/SavingDBDump.R
|
7d32865732d41c474007a3dff6e1fac586abf14a
|
[] |
no_license
|
AshutoshAgrahari/R_Practice
|
c56bbb3c0893e101305f150c0b74045f24cf5a44
|
4c31ce94f130b363f894177a1505ccac290547e0
|
refs/heads/master
| 2020-03-19T17:51:05.826260
| 2020-01-25T10:34:55
| 2020-01-25T10:34:55
| 136,781,266
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 508
|
r
|
SavingDBDump.R
|
# Save a user-chosen country/category "cut" file as an .RData dump inside
# the project's DBDataDump folder.

# load the library
library(data.table)

# Create the DBDataDump folder if it does not exist in the project dir
if (!dir.exists("DBDataDump")) {
  dir.create("DBDataDump")
}

# Read the selected cut file as MFF (a plain data.frame, not a data.table)
MFF <- fread(file.choose(), header = TRUE, stringsAsFactors = FALSE, data.table = FALSE)

# Save as <Country>_<Category>.RData; the object is stored under the name
# "MFF", so load() will restore it as `MFF`.
save(MFF, file = file.path(getwd(), "DBDataDump",
                           paste0(unique(MFF$Country), "_", unique(MFF$Category), ".RData")))

# Remove MFF from the environment
rm(MFF)
|
54f299dbf6f97a3d541638633045daaaad78ee1d
|
689fbe653cd7315d760976f4bf69ab3a8820dc3b
|
/man/bigtps.Rd
|
35c2491c0775f999afa1688444ceedb95d4e68d0
|
[] |
no_license
|
cran/bigsplines
|
deb579728270353a375dc589bede1dfbd75cfb98
|
9ddb95e9af0852fa80c6b5a670b702acb1859e01
|
refs/heads/master
| 2020-12-24T07:42:06.129006
| 2018-05-25T05:47:54
| 2018-05-25T05:47:54
| 18,805,082
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,886
|
rd
|
bigtps.Rd
|
\name{bigtps}
\alias{bigtps}
\title{
Fits Cubic Thin-Plate Splines
}
\description{
Given a real-valued response vector \eqn{\mathbf{y}=\{y_{i}\}_{n\times1}}, a thin-plate spline model has the form \deqn{y_{i}=\eta(\mathbf{x}_{i})+e_{i}} where \eqn{y_{i}} is the \eqn{i}-th observation's response, \eqn{\mathbf{x}_{i}=(x_{i1},\ldots,x_{id})} is the \eqn{i}-th observation's nonparametric predictor vector, \eqn{\eta} is an unknown smooth function relating the response and predictor, and \eqn{e_{i}\sim\mathrm{N}(0,\sigma^{2})} is iid Gaussian error. Function only fits interaction models.
}
\usage{
bigtps(x,y,nknots=NULL,nvec=NULL,rparm=NA,
alpha=1,lambdas=NULL,se.fit=FALSE,
rseed=1234,knotcheck=TRUE)
}
\arguments{
\item{x}{
Predictor vector or matrix with three or less columns.
}
\item{y}{
Response vector. Must be same length as \code{x} has rows.
}
\item{nknots}{
Two possible options: (a) scalar giving total number of random knots to sample, or (b) vector indexing which rows of \code{x} to use as knots.
}
\item{nvec}{
Number of eigenvectors (and eigenvalues) to use in approximation. Must be less than or equal to the number of knots and greater than or equal to \code{ncol(x)+2}. Default sets \code{nvec<-nknots}. Can also input \code{0<nvec<1} to retain \code{nvec} percentage of eigenbasis variation.
}
\item{rparm}{
Rounding parameter(s) for \code{x}. Use \code{rparm=NA} to fit unrounded solution. Can provide one (positive) rounding parameter for each column of \code{x}.
}
\item{alpha}{
Manual tuning parameter for GCV score. Using \code{alpha=1} gives an unbiased estimate. Using a larger alpha enforces a smoother estimate.
}
\item{lambdas}{
Vector of global smoothing parameters to try. Default estimates smoothing parameter that minimizes GCV score.
}
\item{se.fit}{
Logical indicating if the standard errors of fitted values should be estimated.
}
\item{rseed}{
Random seed for knot sampling. Input is ignored if \code{nknots} is an input vector of knot indices. Set \code{rseed=NULL} to obtain a different knot sample each time, or set \code{rseed} to any positive integer to use a different seed than the default.
}
\item{knotcheck}{
If \code{TRUE}, only unique knots are used (for stability).
}
}
\details{
To estimate \eqn{\eta} I minimize the penalized least-squares functional \deqn{\frac{1}{n}\sum_{i=1}^{n}(y_{i}-\eta(\mathbf{x}_{i}))^{2}+\lambda J(\eta)} where \eqn{J(\eta)} is the thin-plate penalty (see Helwig and Ma) and \eqn{\lambda\geq0} is a smoothing parameter that controls the trade-off between fitting and smoothing the data. Default use of the function estimates \eqn{\lambda} by minimizing the GCV score (see \code{\link{bigspline}}).
Using the rounding parameter input \code{rparm} can greatly speed-up and stabilize the fitting for large samples. When \code{rparm} is used, the spline is fit to a set of unique data points after rounding; the unique points are determined using the efficient algorithm described in Helwig (2013). Rounding parameter should be on the raw data scale.
}
\value{
\item{fitted.values}{Vector of fitted values corresponding to the original data points in \code{x} (if \code{rparm=NA}) or the rounded data points in \code{xunique} (if \code{rparm} is used).}
\item{se.fit}{Vector of standard errors of \code{fitted.values} (if input \code{se.fit=TRUE)}.}
\item{x}{Predictor vector (same as input).}
\item{y}{Response vector (same as input).}
\item{xunique}{Unique elements of \code{x} after rounding (if \code{rparm} is used).}
\item{yunique}{Mean of \code{y} for unique elements of \code{x} after rounding (if \code{rparm} is used).}
\item{funique}{Vector giving frequency of each element of \code{xunique} (if \code{rparm} is used).}
\item{sigma}{Estimated error standard deviation, i.e., \eqn{\hat{\sigma}}.}
\item{ndf}{Data frame with two elements: \code{n} is total sample size, and \code{df} is effective degrees of freedom of fit model (trace of smoothing matrix).}
\item{info}{Model fit information: vector containing the GCV, multiple R-squared, AIC, and BIC of fit model (assuming Gaussian error).}
\item{myknots}{Spline knots used for fit.}
\item{nvec}{Number of eigenvectors used for solution.}
\item{rparm}{Rounding parameter for \code{x} (same as input).}
\item{lambda}{Optimal smoothing parameter.}
\item{coef}{Spline basis function coefficients.}
\item{coef.csqrt}{Matrix square-root of covariace matrix of \code{coef}. Use \code{tcrossprod(coef.csqrt)} to get covariance matrix of \code{coef}.}
}
\references{
Gu, C. (2013). \emph{Smoothing spline ANOVA models, 2nd edition}. New York: Springer.
Helwig, N. E. (2017). \href{http://dx.doi.org/10.3389/fams.2017.00015}{Regression with ordered predictors via ordinal smoothing splines}. Frontiers in Applied Mathematics and Statistics, 3(15), 1-13.
Helwig, N. E. and Ma, P. (2015). Fast and stable multiple smoothing parameter selection in smoothing spline analysis of variance models with large samples. \emph{Journal of Computational and Graphical Statistics, 24}, 715-732.
Helwig, N. E. and Ma, P. (2016). Smoothing spline ANOVA for super-large samples: Scalable computation via rounding parameters. \emph{Statistics and Its Interface, 9}, 433-444.
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\note{
The spline is estimated using penalized least-squares, which does not require the Gaussian error assumption. However, the spline inference information (e.g., standard errors and fit information) requires the Gaussian error assumption.
}
\section{Warnings }{
Input \code{nvec} must be greater than \code{ncol(x)+1}.
When using rounding parameters, output \code{fitted.values} corresponds to unique rounded predictor scores in output \code{xunique}. Use \code{\link{predict.bigtps}} function to get fitted values for full \code{y} vector.
}
\section{Computational Details }{
According to thin-plate spline theory, the function \eqn{\eta} can be approximated as \deqn{\eta(x) = \sum_{k=1}^{M}d_{k}\phi_{k}(\mathbf{x}) + \sum_{h=1}^{q}c_{h}\xi(\mathbf{x},\mathbf{x}_{h}^{*})} where the \eqn{\{\phi_{k}\}_{k=1}^{M}} are linear functions, \eqn{\xi} is the thin-plate spline semi-kernel, \eqn{\{\mathbf{x}_{h}^{*}\}_{h=1}^{q}} are the knots, and the \eqn{c_{h}} coefficients are constrained to be orthogonal to the \eqn{\{\phi_{k}\}_{k=1}^{M}} functions.
This implies that the penalized least-squares functional can be rewritten as \deqn{ \|\mathbf{y} - \mathbf{K}\mathbf{d} - \mathbf{J}\mathbf{c}\|^{2} + n\lambda\mathbf{c}'\mathbf{Q}\mathbf{c} }
where \eqn{\mathbf{K}=\{\phi(\mathbf{x}_{i})\}_{n \times M}} is the null space basis function matrix, \eqn{\mathbf{J}=\{\xi(\mathbf{x}_{i},\mathbf{x}_{h}^{*})\}_{n \times q}} is the contrast space basis function matrix, \eqn{\mathbf{Q}=\{\xi(\mathbf{x}_{g}^{*},\mathbf{x}_{h}^{*})\}_{q \times q}} is the penalty matrix, and \eqn{\mathbf{d}=(d_{0},\ldots,d_{M})'} and \eqn{\mathbf{c}=(c_{1},\ldots,c_{q})'} are the unknown basis function coefficients, where \eqn{\mathbf{c}} are constrained to be orthogonal to the \eqn{\{\phi_{k}\}_{k=1}^{M}} functions.
See Helwig and Ma for specifics about how the constrained estimation is handled.
}
\examples{
########## EXAMPLE 1 ##########
# define relatively smooth function
set.seed(773)
myfun <- function(x){ sin(2*pi*x) }
x <- runif(500)
y <- myfun(x) + rnorm(500)
# fit thin-plate spline (default 1 dim: 30 knots)
tpsmod <- bigtps(x,y)
tpsmod
########## EXAMPLE 2 ##########
# define more jagged function
set.seed(773)
myfun <- function(x){ 2*x+cos(2*pi*x) }
x <- runif(500)*4
y <- myfun(x) + rnorm(500)
# try different numbers of knots
r1mod <- bigtps(x,y,nknots=20,rparm=0.01)
crossprod( myfun(r1mod$xunique) - r1mod$fitted )/length(r1mod$fitted)
r2mod <- bigtps(x,y,nknots=35,rparm=0.01)
crossprod( myfun(r2mod$xunique) - r2mod$fitted )/length(r2mod$fitted)
r3mod <- bigtps(x,y,nknots=50,rparm=0.01)
crossprod( myfun(r3mod$xunique) - r3mod$fitted )/length(r3mod$fitted)
########## EXAMPLE 3 ##########
# function with two continuous predictors
set.seed(773)
myfun <- function(x1v,x2v){
sin(2*pi*x1v) + log(x2v+.1) + cos(pi*(x1v-x2v))
}
x <- cbind(runif(500),runif(500))
y <- myfun(x[,1],x[,2]) + rnorm(500)
# fit thin-plate spline with 50 knots (default 2 dim: 100 knots)
tpsmod <- bigtps(x,y,nknots=50)
tpsmod
crossprod( myfun(x[,1],x[,2]) - tpsmod$fitted.values )/500
########## EXAMPLE 4 ##########
# function with three continuous predictors
set.seed(773)
myfun <- function(x1v,x2v,x3v){
sin(2*pi*x1v) + log(x2v+.1) + cos(pi*x3v)
}
x <- cbind(runif(500),runif(500),runif(500))
y <- myfun(x[,1],x[,2],x[,3]) + rnorm(500)
# fit thin-plate spline with 50 knots (default 3 dim: 200 knots)
tpsmod <- bigtps(x,y,nknots=50)
tpsmod
crossprod( myfun(x[,1],x[,2],x[,3]) - tpsmod$fitted.values )/500
}
|
8fe263c57642bce779a81e82608cf0b5803e4a65
|
8b92eaf5c51a4d2bedff8ec9a13408f32d883fff
|
/loaddata.R
|
2c3caac44a7617dfbd1107f575428a6b1673a892
|
[] |
no_license
|
rajpradhan/ExData_Plotting1
|
1c515b3325ae9ebfeb1d313e42e09a489124066e
|
34956aa564903f9c0804a50f030a66634a9d758c
|
refs/heads/master
| 2020-12-11T03:19:41.452166
| 2014-10-12T14:32:51
| 2014-10-12T14:32:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 768
|
r
|
loaddata.R
|
# Download (if needed) and load the UCI household power consumption data,
# returning only the observations for 2007-02-01 and 2007-02-02.
#
# Returns a data.frame whose Date column has class "Date" and whose Time
# column is a POSIXct date-time of the observation; "?" values are read
# as NA.
loaddata <- function() {
  txtfile <- "household_power_consumption.txt"
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  zipfile <- "exdata-data-household_power_consumption.zip"
  if(!file.exists(zipfile))
    download.file(url, zipfile)
  if(!file.exists(txtfile))
    unzip(zipfile, overwrite=TRUE)
  data <- read.table(txtfile, header=TRUE, sep=";", na="?")
  # Filter for the two target dates FIRST (on the raw date strings) so the
  # expensive date-time parsing below only runs on ~2 days of data instead
  # of the full two-million-row file.
  data <- subset(data, Date %in% c("01/02/2007", "02/02/2007"))
  # Convert date and time variables to Date/Time classes.
  # as.POSIXct: POSIXlt columns stored in data.frames are fragile; POSIXct
  # is the recommended storage class and plots identically.
  data$Time <- as.POSIXct(strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S"))
  data$Date <- as.Date(data$Date, "%d/%m/%Y")
  return(data)
}
|
73263e68e324768f822a6563e0c7488c4db119d1
|
683301d27b28e6ec33d8a2a5b077cd0dbe955a86
|
/R/pdftool.R
|
eff1f81af97339334e739456e4f6368a31f429d2
|
[] |
no_license
|
ekanshbari/R-programs
|
5b0e1ded02f49c108f5ef61b78c06c1c9ecfab96
|
7c267cde520ae07ded2a51e9f82c0fda557b5d5e
|
refs/heads/master
| 2020-12-12T12:10:03.872677
| 2020-01-15T17:01:48
| 2020-01-15T17:01:48
| 234,125,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,327
|
r
|
pdftool.R
|
# Text mining with pdftools + tm: read a PDF, clean its text, and visualise
# the most frequent words as a wordcloud.
#
# Example source document:
# https://www.supremecourt.gov/opnions/slipopnion/14

# Load the required libraries
library(pdftools)
library(wordcloud)
library(tm)   # text mining: cleaning, stopwords, document-term matrix

# Pick the PDF interactively and extract its text (one string per page)
loc <- file.choose()
txt <- pdf_text(loc)
cat(txt[15])   # inspect page 15 of the PDF

# --- Cleaning --------------------------------------------------------------
txt_corpus <- Corpus(VectorSource(txt))   # one document per PDF page
# content_transformer() is needed to apply a non-tm function like tolower
txt_corpus <- tm_map(txt_corpus, content_transformer(tolower))
# BUG FIX: the original called removePuntuation (misspelled), which is not
# a function and would stop the script with "object not found".
txt_corpus <- tm_map(txt_corpus, removePunctuation)
txt_corpus <- tm_map(txt_corpus, stripWhitespace)

head(stopwords("en"))   # preview common English stopwords
stopwords("en")         # full English stopword list
# Remove the stopwords (extra words to drop can be appended to the vector)
txt_corpus <- tm_map(txt_corpus, removeWords, stopwords("en"))
txt_corpus           # corpus metadata
txt_corpus$content   # inspect the cleaned content

# --- Word frequencies ------------------------------------------------------
dtm <- DocumentTermMatrix(txt_corpus)   # documents x terms count matrix
dtm <- as.matrix(dtm)
View(dtm)
dtm <- t(dtm)                 # transpose to terms x documents
occu <- rowSums(dtm)          # total occurrences of each term
occu
no_occu <- sort(occu, decreasing = TRUE)
head(no_occu)                 # the six most frequent terms

# Wordcloud of the 30 most frequent words
wordcloud(head(names(no_occu), 30), head(no_occu, 30), scale = c(2, 1))
|
443ac9a606bcdba879cd273959746a91e212d118
|
8474e5591c6e2564895bde0522424f7cb60c90d1
|
/data-raw/create_package.R
|
6ea41bdc7b752faa428ccdaf595990b13f18dc95
|
[] |
no_license
|
ajpatel2007/methylSig
|
398504ffe01d51c806098ee9da2751e09d260f65
|
cb469678e2e4b5c3569d0927675d698dbe0f8f01
|
refs/heads/master
| 2022-04-14T04:20:20.587995
| 2020-03-25T18:38:33
| 2020-03-25T18:38:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,780
|
r
|
create_package.R
|
# One-off scaffolding script that generated the methylSig package skeleton
# using devtools/usethis helpers. Intended to be run interactively inside the
# Bioconductor docker image:
# docker run --interactive --tty --rm --volume /Users/rcavalca/Projects:/Projects rcavalcante/bioconductor_docker:RELEASE_3_10
library(devtools)
# Description fields (written into DESCRIPTION by create_package below)
description = list(
  Title = 'MethylSig: Differential Methylation Testing for WGBS and RRBS Data',
  Version = '0.99.0',
  Date = '2020-02-28',
  `Authors@R` = 'c(
    person(given = "Yongseok",
      family = "Park",
      role = c("aut"),
      email = "yongpark@pitt.edu"),
    person(given = "Raymond G.",
      family = "Cavalcante",
      role = c("aut", "cre"),
      email = "rcavalca@umich.edu"))',
  Description = 'MethylSig is a package for testing for differentially methylated
    cytosines (DMCs) or regions (DMRs) in whole-genome bisulfite sequencing
    (WGBS) or reduced representation bisulfite sequencing (RRBS) experiments.
    MethylSig uses a beta binomial model to test for significant differences
    between groups of samples. Several options exist for either site-specific
    or sliding window tests, and variance estimation.',
  BugReports = 'https://github.com/sartorlab/methylSig/issues',
  biocViews = 'DNAMethylation, DifferentialMethylation, Epigenetics, Regression, MethylSeq',
  License = 'GPL-3',
  Depends = 'R (>= 3.6)'
)
# Create the package directory and make it the active project
path = '/Projects/methylSig'
create_package(path, fields = description)
activate_project(path)
# use_description(fields = description) # For updating an existing DESCRIPTION
# Build ignore: keep development-only files out of the built tarball
build_ignore_files = c('README.md', '.travis.yml', '.git', '.gitignore')
use_build_ignore(files = build_ignore_files)
# Data preparation scripts (data-raw/)
use_data_raw(name = '01-create_cov_files')
use_data_raw(name = '02-create_bsseq_rda')
use_data_raw(name = '03-create_internal_rda')
# Documentation skeletons
use_readme_md()
use_news_md()
use_package_doc()
use_vignette(name = 'using-methylSig', title = 'Using methylSig')
use_vignette(name = 'updating-methylSig-code', title = 'Updating methylSig code')
# Travis CI configuration
use_travis()
use_travis_badge(ext = 'org')
# Coverage reporting
use_coverage(type = 'coveralls')
# Testing infrastructure
use_testthat()
# Package dependencies
use_package('bsseq', type = 'Imports')
# R files and test files: one source file plus one matching test per function
use_r('filter_loci_by_coverage')
use_r('filter_loci_by_location')
use_r('filter_loci_by_snps')
use_r('tile_by_windows')
use_r('tile_by_regions')
use_r('filter_loci_by_group_coverage')
use_r('diff_binomial')
use_r('diff_methylsig')
use_r('diff_dss_fit')
use_r('diff_dss_test')
use_test('filter_loci_by_coverage')
use_test('filter_loci_by_location')
use_test('filter_loci_by_snps')
use_test('tile_by_windows')
use_test('tile_by_regions')
use_test('filter_loci_by_group_coverage')
use_test('diff_binomial')
use_test('diff_methylsig')
use_test('diff_dss_fit')
use_test('diff_dss_test')
|
728df9ac33bce503c171f5b0ecc1015ee8d26e68
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/agridat/examples/sinclair.clover.Rd.R
|
6162bc751bd35ddcdbaa6d9e13e328cec0e8396c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,403
|
r
|
sinclair.clover.Rd.R
|
# Example script extracted from the agridat::sinclair.clover help page:
# plots clover yield against phosphorus by sulfur level, then fits Dodds'
# two-dimensional Mitscherlich-like response-surface model with nls().
library(agridat)
### Name: sinclair.clover
### Title: Clover yields in a factorial fertilizer experiment
### Aliases: sinclair.clover
### Keywords: datasets
### ** Examples
data(sinclair.clover)
dat <- sinclair.clover
require(lattice)
# One panel per sulfur level; yield as a function of phosphorus.
xyplot(yield~P|factor(S), dat, layout=c(5,1),
       main="sinclair.clover - Yield by sulfur levels",
       xlab="Phosphorous")
# Dodds fits a two-dimensional Mitscherlich-like model:
# z = a*(1+b*{(s+t*x)/(x+1)}^y) * (1+d*{(th+r*y)/(y+1)}^x)
# First, re-scale the problem to a more stable part of the parameter space
dat <- transform(dat, x=P/10, y=S/10)
# Response value for (x=0, y=maximal), (x=maximal, y=0), (x=max, y=max)
# These anchor the starting values for alpha, beta, del below.
z0m <- 5
zm0 <- 5
zmm <- 10.5
# The parameters are somewhat sensitive to starting values.
# I had to try a couple different initial values to match the paper by Dodds
m1 <- nls(yield ~ alpha*(1 + beta*{(sig+tau*x)/(x+1)}^y) * (1 + del*{(th+rho*y)/(y+1)}^x),
          data=dat, # trace=TRUE,
          start=list(alpha=zmm, beta=(zm0/zmm)-1, del=(z0m/zmm)-1,
                     sig=.51, tau=.6, th=.5, rho=.7))
summary(m1) # Match Dodds Table 2
## Parameters:
##        Estimate Std. Error t value Pr(>|t|)
## alpha 11.15148    0.66484  16.773 1.96e-12 ***
## beta  -0.61223    0.03759 -16.286 3.23e-12 ***
## del   -0.48781    0.04046 -12.057 4.68e-10 ***
## sig    0.26783    0.16985   1.577  0.13224
## tau    0.68030    0.06333  10.741 2.94e-09 ***
## th     0.59656    0.16716   3.569  0.00219 **
## rho    0.83273    0.06204  13.421 8.16e-11 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Residual standard error: 0.5298 on 18 degrees of freedom
## Not run:
##D
##D pred <- expand.grid(x=0:17, y=0:9)
##D pred$z <- predict(m1, pred)
##D
##D # 3D plot of data with fitted surface. Matches Dodds figure 2.
##D require(rgl)
##D bg3d(color = "white")
##D clear3d()
##D spheres3d(dat$x, dat$y, dat$yield,
##D           radius=.2, col = rep("navy", nrow(dat)))
##D surface3d(seq(0, 17, by = 1), seq(0, 9, by = 1), pred$z,
##D           alpha=0.9, col="wheat",
##D           front="fill", back="fill")
##D axes3d()
##D title3d("sinclair.clover - yield","", xlab="Phosphorous/10",
##D         ylab="Sulfur/10", zlab="", line=3, cex=1.5)
##D view3d(userMatrix=matrix(c(.7,.2,-.7,0, -.7,.2,-.6,0, 0,.9,.3,0, 0,0,0,1),ncol=4))
##D # snapshot3d(file, "png")
##D rgl.close()
## End(Not run)
|
cd38298ddcb84c4fe75f1947733091359b23d1fc
|
22ffb3e36696096af9e785ee169f78d0e09b0cdb
|
/server_side/Rscripts/addIndexAttributeToNetwork.R
|
59cd1ed8defa5f3ac71750b29f7abfe135d1c1a2
|
[
"MIT"
] |
permissive
|
ggirelli/tema
|
49c6ec0dd63e7dcfd6ac2d3be252d2869346c431
|
beeb3aff3ad47a8027ab5b4a425875702b3a7c0f
|
refs/heads/master
| 2021-01-19T07:36:20.821428
| 2015-06-25T15:25:50
| 2015-06-25T15:25:50
| 27,998,620
| 1
| 0
| null | 2015-02-26T10:37:35
| 2014-12-14T15:34:37
|
PHP
|
UTF-8
|
R
| false
| false
| 1,528
|
r
|
addIndexAttributeToNetwork.R
|
#!/usr/bin/env Rscript
# Add a centrality index (degree/indegree/outdegree/betweenness/closeness)
# as a vertex attribute to a JSON-serialized session graph, then write the
# updated graph back to the same file.
options(echo=TRUE)
args <- commandArgs(trailingOnly = TRUE)

# Check parameters: session_id, graph_name, attr_name, attr_index
if(length(args) != 4) stop('./addIndexAttributeToNetwork.R session_id graph_name attr_name attr_index')

# Load requirements
library(igraph)
library(rjson)
source('./Graph_Manager.class.R')
nm <- GraphManager()

# Start (silently does nothing when the session directory is missing)
if(file.exists(paste0('../session/', args[1], '/'))) {
	setwd(paste0('../session/', args[1], '/'))

	cat('> Read JSON file\n')
	s <- read.delim(paste0(args[2], '.json'), header = F, as.is=T, quote = "")[1,1]
	l <- fromJSON(s)
	g <- nm$graph.list.to.graph(l)

	cat('> Add attribute\n')
	if( 'degree' == args[4] ) {
		ind <- degree(g, V(g))
	} else if( 'indegree' == args[4] ) {
		ind <- degree(g, V(g), mode='in')
	} else if( 'outdegree' == args[4] ) {
		ind <- degree(g, V(g), mode='out')
	} else if ( 'betweenness' == args[4] ) {
		ind <- betweenness(g, V(g))
	} else if ( 'closeness' == args[4] ) {
		ind <- closeness(g, V(g))
	} else {
		# Fail with a clear message instead of the original behavior of
		# crashing later with "object 'ind' not found".
		stop(paste0('Unknown attr_index: ', args[4]))
	}

	# Assign the vertex attribute by (dynamic) name; replaces the fragile
	# eval(parse(text = ...)) construction.
	g <- set_vertex_attr(g, args[3], value = ind)

	graph.list <- nm$graph.to.attr.table(g)
	graph.list$nodes <- nm$update.row.ids(graph.list$nodes)
	graph.list$nodes <- nm$add.prefix.to.col(graph.list$nodes, 'id', 'n')
	graph.list$edges <- nm$convert.extremities.to.v.id.based.on.table(graph.list$edges,
		graph.list$nodes, 'name')
	graph.list$edges <- nm$update.row.ids(graph.list$edges)
	graph.list$edges <- nm$add.prefix.to.col(graph.list$edges, 'id', 'e')
	write(toJSON(nm$attr.tables.to.list(graph.list$nodes, graph.list$edges)),
		paste0(args[2], '.json'))
}
|
91e0f5293cc08ae161ec3a3c5733b4811d00d199
|
b313ba13c1156ccb088c4de6327a794117adc4cc
|
/AlanAnalysis/DefunctScripts/AB_phasing/makeHarpPrior.R
|
ef72c7ae07e5d589b14951ee5fcdd3e27af969e6
|
[] |
no_license
|
kbkubow/DaphniaPulex20162017Sequencing
|
50c921f3c3e8f077d49ccb3417daa76fb4dde698
|
c662b0900cc87a64ec43e765246c750be0830f77
|
refs/heads/master
| 2021-08-20T10:09:59.483110
| 2021-06-10T20:04:31
| 2021-06-10T20:04:31
| 182,109,481
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,649
|
r
|
makeHarpPrior.R
|
#ijob -c1 -p standard -A berglandlab
#module load gcc/7.1.0 openmpi/3.1.4 R/3.6.0; R
# Build per-chromosome HARP haplotype prior CSVs from trio-phased consensus
# genotypes, then write a job-id table pairing each chromosome with each
# pool BAM for downstream array jobs.
library(data.table)
library(foreach)
dat <- fread("/scratch/aob2x/daphnia_hwe_sims/trioPhase/testTrio.consensus.header.phase.csv")
setnames(dat, c(1,2), c("chr", "pos"))
# Drop sites where both parents (A, B) are homozygous for the same allele:
# uninformative for distinguishing haplotypes.
dat <- dat[!(A=="1/1" & B=="1/1")][!(A=="0/0" & B=="0/0")]
# Translate "g/g" genotype strings into allele letters per haplotype.
# NOTE(review): c(ALT, REF)[code + 1] maps code 0 -> ALT and code 1 -> REF,
# which looks inverted relative to the VCF convention (0 = REF) -- confirm.
# Also note substr(A, 0, 1) behaves the same as substr(A, 1, 1).
dat.p <- dat[,list(Ref=REF,
                   A1=c(ALT, REF)[as.numeric(substr(A, 0, 1))+1],
                   A2=c(ALT, REF)[as.numeric(substr(A, 3, 3))+1],
                   B1=c(ALT, REF)[as.numeric(substr(B, 0, 1))+1],
                   B2=c(ALT, REF)[as.numeric(substr(B, 3, 3))+1],
                   Coverage=4),
              list(chr, pos)]
# Keep only chromosomes with more than 1000 informative sites.
dat.ag <- dat[,list(.N, max=max(pos)), chr]
dat.ag <- dat.ag[N>1000]
# NOTE(review): hard-codes that exactly 12 chromosomes pass the filter;
# errors if fewer/more survive.
dat.ag[,id:=1:12]
setkey(dat.p, chr)
# One prior CSV per chromosome; sed appends a trailing comma to every line
# and the result is gzipped next to the plain CSV.
foreach(i= dat.ag$chr)%do%{
  print(i)
  # i <- dat.ag[N>1000]$chr[1]
  tmp <- dat.p[J(i)][,-"chr",with=F]
  setnames(tmp, "pos", i)
  write.table(tmp, file=paste("/scratch/aob2x/daphnia_hwe_sims/harp_pools/priors/", i, ".csv", sep=""), quote=F, row.names=F, sep=",")
  system(paste("sed 's/$/,/g' /scratch/aob2x/daphnia_hwe_sims/harp_pools/priors/", i, ".csv | gzip -c - > /scratch/aob2x/daphnia_hwe_sims/harp_pools/priors/", i, ".csv.gz", sep=""))
}
### make job id file: one row per (chromosome, pool BAM) combination
pools <- system("ls /scratch/aob2x/daphnia_hwe_sims/harp_pools/bams/*bam", intern=T)
jobs <- foreach(p=pools, .combine="rbind")%do%{
  tmp <- dat.ag[,c("chr", "max"),with=F]
  tmp[,bam:=p]
}
jobs[,id:=c(1:dim(jobs)[1])]
jobs <- jobs[,c("id", "chr", "max", "bam"),with=F]
write.table(jobs, file="/scratch/aob2x/daphnia_hwe_sims/harp_pools/jobId", quote=F, row.names=F, col.names=T)
|
75c4c826286f9be9289fc2e7ce3f430898c2e4ec
|
b30004a400b47aa21fb202a894c9a1365def53fb
|
/tests/testthat/test-predict-model.R
|
b11987371a84821366e7fe658c7b15e4d57ab34d
|
[] |
no_license
|
cran/disaggregation
|
cfa317ff8d46c14cde349279ce9a26129d5a7cfb
|
173a822b0ae14c718fec55791b6ddad168b40523
|
refs/heads/master
| 2023-05-11T08:30:27.362030
| 2023-04-28T20:10:12
| 2023-04-28T20:10:12
| 236,584,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,885
|
r
|
test-predict-model.R
|
context("Predict model")
# --- Shared fixtures --------------------------------------------------------
# Build a 10 x 10 grid of square polygons (2 x 2 units each) with a random
# response per polygon, plus a two-layer covariate raster at twice the
# polygon resolution. Every test below reuses these objects.
polygons <- list()
n_polygon_per_side <- 10
n_polygons <- n_polygon_per_side * n_polygon_per_side
n_pixels_per_side <- n_polygon_per_side * 2
for(i in 1:n_polygons) {
  row <- ceiling(i/n_polygon_per_side)
  col <- ifelse(i %% n_polygon_per_side != 0, i %% n_polygon_per_side, n_polygon_per_side)
  xmin = 2*(col - 1); xmax = 2*col; ymin = 2*(row - 1); ymax = 2*row
  # Corner coordinates of one square polygon.
  polygons[[i]] <- rbind(c(xmin, ymax), c(xmax,ymax), c(xmax, ymin), c(xmin,ymin))
}
polys <- do.call(raster::spPolygons, polygons)
response_df <- data.frame(area_id = 1:n_polygons, response = runif(n_polygons, min = 0, max = 1000))
spdf <- sp::SpatialPolygonsDataFrame(polys, response_df)
# Create raster stack: layer r varies with column index, r2 with row index.
r <- raster::raster(ncol=n_pixels_per_side, nrow=n_pixels_per_side)
r <- raster::setExtent(r, raster::extent(spdf))
r[] <- sapply(1:raster::ncell(r), function(x) rnorm(1, ifelse(x %% n_pixels_per_side != 0, x %% n_pixels_per_side, n_pixels_per_side), 3))
r2 <- raster::raster(ncol=n_pixels_per_side, nrow=n_pixels_per_side)
r2 <- raster::setExtent(r2, raster::extent(spdf))
r2[] <- sapply(1:raster::ncell(r), function(x) rnorm(1, ceiling(x/n_pixels_per_side), 3))
cov_stack <- raster::stack(r, r2)
# Mesh construction is slow and needs INLA, so it is skipped unless the
# full (non-CRAN) test suite is being run.
if(identical(Sys.getenv("NOT_CRAN"), "true")) {
  test_data <- prepare_data(polygon_shapefile = spdf,
                            covariate_rasters = cov_stack)
} else {
  test_data <- prepare_data(polygon_shapefile = spdf,
                            covariate_rasters = cov_stack,
                            makeMesh = FALSE)
}
# Shape checks on predict() output under three model configurations:
# default (field only), with iid effect, and with neither field nor iid.
test_that("Check predict.disag_model function works as expected", {
  skip_if_not_installed('INLA')
  skip_on_cran()
  result <- disag_model(test_data, iterations = 2)
  # Default prediction: field present, iid absent, 100 realisations.
  pred2 <- predict(result)
  expect_is(pred2, 'disag_prediction')
  expect_equal(length(pred2), 2)
  expect_equal(names(pred2), c('mean_prediction', 'uncertainty_prediction'))
  expect_is(pred2$mean_prediction, 'list')
  expect_equal(length(pred2$mean_prediction), 4)
  expect_is(pred2$mean_prediction$prediction, 'Raster')
  expect_is(pred2$mean_prediction$field, 'Raster')
  expect_true(is.null(pred2$mean_prediction$iid))
  expect_is(pred2$mean_prediction$covariates, 'Raster')
  expect_is(pred2$uncertainty_prediction, 'list')
  expect_equal(length(pred2$uncertainty_prediction), 2)
  expect_equal(names(pred2$uncertainty_prediction), c('realisations', 'predictions_ci'))
  expect_is(pred2$uncertainty_prediction$realisations, 'RasterStack')
  expect_is(pred2$uncertainty_prediction$predictions_ci, 'RasterBrick')
  expect_equal(raster::nlayers(pred2$uncertainty_prediction$realisations), 100)
  expect_equal(raster::nlayers(pred2$uncertainty_prediction$predictions_ci), 2)
  # With iid prediction enabled and a reduced number of realisations (N = 10).
  pred2 <- predict(result, predict_iid = TRUE, N = 10)
  expect_is(pred2, 'disag_prediction')
  expect_equal(length(pred2), 2)
  expect_equal(names(pred2), c('mean_prediction', 'uncertainty_prediction'))
  expect_is(pred2$mean_prediction, 'list')
  expect_equal(length(pred2$mean_prediction), 4)
  expect_equal(names(pred2$mean_prediction), c('prediction', 'field', 'iid', 'covariates'))
  expect_is(pred2$mean_prediction$prediction, 'Raster')
  expect_is(pred2$mean_prediction$field, 'Raster')
  expect_is(pred2$mean_prediction$iid, 'Raster')
  expect_is(pred2$mean_prediction$covariates, 'Raster')
  expect_is(pred2$uncertainty_prediction, 'list')
  expect_equal(length(pred2$uncertainty_prediction), 2)
  expect_equal(names(pred2$uncertainty_prediction), c('realisations', 'predictions_ci'))
  expect_is(pred2$uncertainty_prediction$realisations, 'RasterStack')
  expect_is(pred2$uncertainty_prediction$predictions_ci, 'RasterBrick')
  expect_equal(raster::nlayers(pred2$uncertainty_prediction$realisations), 10)
  expect_equal(raster::nlayers(pred2$uncertainty_prediction$predictions_ci), 2)
  # For a model with no field or iid
  result <- disag_model(test_data, iterations = 2, field = FALSE, iid = FALSE)
  pred2 <- predict(result)
  expect_is(pred2, 'disag_prediction')
  expect_equal(length(pred2), 2)
  expect_equal(names(pred2), c('mean_prediction', 'uncertainty_prediction'))
  expect_is(pred2$mean_prediction, 'list')
  expect_equal(length(pred2$mean_prediction), 4)
  expect_is(pred2$mean_prediction$prediction, 'Raster')
  expect_true(is.null(pred2$mean_prediction$field))
  expect_true(is.null(pred2$mean_prediction$iid))
  expect_is(pred2$mean_prediction$covariates, 'Raster')
  expect_is(pred2$uncertainty_prediction, 'list')
  expect_equal(length(pred2$uncertainty_prediction), 2)
  expect_equal(names(pred2$uncertainty_prediction), c('realisations', 'predictions_ci'))
  expect_is(pred2$uncertainty_prediction$realisations, 'RasterStack')
  expect_is(pred2$uncertainty_prediction$predictions_ci, 'RasterBrick')
  expect_equal(raster::nlayers(pred2$uncertainty_prediction$realisations), 100)
  expect_equal(raster::nlayers(pred2$uncertainty_prediction$predictions_ci), 2)
})
# predict() over a newdata raster cropped to a sub-extent: output shapes
# hold and the predicted extent differs from the default prediction's.
test_that("Check predict.disag_model function works with newdata", {
  skip_if_not_installed('INLA')
  skip_on_cran()
  result <- disag_model(test_data, field = FALSE, iid = TRUE, iterations = 2)
  # Covariates restricted to the lower-left quarter of the study area.
  newdata <- raster::crop(raster::stack(r, r2), c(0, 10, 0, 10))
  pred1 <- predict(result)
  pred2 <- predict(result, newdata, predict_iid = TRUE, N = 5)
  expect_is(pred2, 'disag_prediction')
  expect_equal(length(pred2), 2)
  expect_equal(names(pred2), c('mean_prediction', 'uncertainty_prediction'))
  expect_is(pred2$mean_prediction, 'list')
  expect_equal(length(pred2$mean_prediction), 4)
  expect_equal(names(pred2$mean_prediction), c('prediction', 'field', 'iid', 'covariates'))
  expect_is(pred2$mean_prediction$prediction, 'Raster')
  expect_true(is.null(pred2$mean_prediction$field))
  expect_is(pred2$mean_prediction$iid, 'Raster')
  expect_is(pred2$mean_prediction$covariates, 'Raster')
  expect_is(pred2$uncertainty_prediction, 'list')
  expect_equal(length(pred2$uncertainty_prediction), 2)
  expect_equal(names(pred2$uncertainty_prediction), c('realisations', 'predictions_ci'))
  expect_is(pred2$uncertainty_prediction$realisations, 'RasterStack')
  expect_is(pred2$uncertainty_prediction$predictions_ci, 'RasterBrick')
  expect_equal(raster::nlayers(pred2$uncertainty_prediction$realisations), 5)
  expect_equal(raster::nlayers(pred2$uncertainty_prediction$predictions_ci), 2)
  # The cropped newdata must yield a different extent than the full prediction.
  expect_false(identical(raster::extent(pred1$mean_prediction$prediction), raster::extent(pred2$mean_prediction$prediction)))
  expect_false(identical(raster::extent(pred1$uncertainty_prediction$realisations), raster::extent(pred2$uncertainty_prediction$realisations)))
})
# check_newdata() accepts valid rasters (extra layers allowed) and rejects
# rasters missing model covariates or non-raster inputs.
test_that('Check that check_newdata works', {
  skip_if_not_installed('INLA')
  skip_on_cran()
  result <- disag_model(test_data, field = FALSE, iterations = 2)
  newdata <- raster::crop(raster::stack(r, r2), c(0, 10, 0, 10))
  nd1 <- check_newdata(newdata, result)
  expect_is(nd1, 'RasterBrick')
  # An extra, unused layer should be tolerated (no error expected).
  nn <- newdata[[1]]
  names(nn) <- 'extra_uneeded'
  newdata2 <- raster::stack(newdata, nn)
  expect_error(check_newdata(newdata2, result), NA)
  # Missing one of the model covariates -> error.
  newdata3 <- newdata[[1]]
  expect_error(check_newdata(newdata3, result), 'All covariates')
  # Non-raster newdata (a data.frame) -> error.
  newdata4 <- result$data$covariate_data
  expect_error(check_newdata(newdata4, result), 'newdata should be NULL or')
})
# setup_objects() returns the covariates/field/iid bundle, honoring the
# newdata and predict_iid arguments.
test_that('Check that setup_objects works', {
  skip_if_not_installed('INLA')
  skip_on_cran()
  result <- disag_model(test_data, iterations = 2)
  # Default: field objects present, iid objects absent.
  objects <- setup_objects(result)
  expect_is(objects, 'list')
  expect_equal(length(objects), 3)
  expect_equal(names(objects), c('covariates', 'field_objects', 'iid_objects'))
  expect_is(objects$field_objects, 'list')
  expect_true(is.null(objects$iid_objects))
  # With replacement covariate rasters (newdata).
  newdata <- raster::crop(raster::stack(r, r2), c(0, 180, -90, 90))
  objects2 <- setup_objects(result, newdata)
  expect_is(objects2, 'list')
  expect_equal(length(objects2), 3)
  expect_equal(names(objects2), c('covariates', 'field_objects', 'iid_objects'))
  expect_is(objects2$field_objects, 'list')
  expect_true(is.null(objects$iid_objects))
  # With predict_iid = TRUE the iid objects are populated as well.
  objects3 <- setup_objects(result, predict_iid = TRUE)
  expect_is(objects3, 'list')
  expect_equal(length(objects3), 3)
  expect_equal(names(objects3), c('covariates', 'field_objects', 'iid_objects'))
  expect_is(objects3$field_objects, 'list')
  expect_is(objects3$iid_objects, 'list')
})
# predict_single_raster() builds one prediction raster set from a single
# parameter vector, with and without the iid component.
test_that('Check that predict_single_raster works', {
  skip_if_not_installed('INLA')
  skip_on_cran()
  result <- disag_model(test_data, iterations = 2)
  objects <- setup_objects(result)
  # Best-fit parameters from the TMB object, split into a named list.
  pars <- result$obj$env$last.par.best
  pars <- split(pars, names(pars))
  pred2 <- predict_single_raster(pars,
                                 objects = objects,
                                 link_function = result$model_setup$link)
  expect_is(pred2, 'list')
  expect_equal(length(pred2), 4)
  expect_equal(names(pred2), c('prediction', 'field', 'iid', 'covariates'))
  expect_is(pred2$prediction, 'Raster')
  expect_is(pred2$field, 'Raster')
  expect_true(is.null(pred2$iid))
  expect_is(pred2$covariates, 'Raster')
  # Same call with iid objects available -> iid raster is returned too.
  objects2 <- setup_objects(result, predict_iid = TRUE)
  pred2 <- predict_single_raster(pars,
                                 objects = objects2,
                                 link_function = result$model_setup$link)
  expect_is(pred2, 'list')
  expect_equal(length(pred2), 4)
  expect_equal(names(pred2), c('prediction', 'field', 'iid', 'covariates'))
  expect_is(pred2$prediction, 'Raster')
  expect_is(pred2$field, 'Raster')
  expect_is(pred2$iid, 'Raster')
  expect_is(pred2$covariates, 'Raster')
})
|
fe291b015bb94aeef07206a7fe100dd55abe3bf4
|
b4f6e5965646758d264a9702734ea7929c8e009b
|
/R/geom_image.R
|
591407685b60cde7d16cb2f8c642b37d7ab4076f
|
[] |
no_license
|
tonyelhabr/tonythemes
|
ec1fbc38007685fff66c95977c69cf192f632fa8
|
ae7e190f7cecdcfe51c413d980c206ac86bd5fac
|
refs/heads/master
| 2023-05-31T22:09:11.351206
| 2021-06-13T21:16:41
| 2021-06-13T21:16:41
| 372,654,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
geom_image.R
|
#' @importFrom ggimage geom_image
#' @importFrom ggplot2 annotate
annotate_img <- function(..., .name) {
  # Resolve the PNG bundled in the package's extdata directory;
  # mustWork = TRUE fails fast when the image file is missing.
  img_path <- system.file(
    'extdata', paste0(.name, '.png'),
    package = 'tonythemes', mustWork = TRUE
  )
  # Return the layer wrapped in a list so it composes with `+` on a ggplot.
  list(ggimage::geom_image(image = img_path, ...))
}
#' Annotate Nick Wan
#'
#' Annotate Nick Wan on ggplot
#'
#' @param ... Arguments passed on to `ggimage::geom_image()` (e.g. `x`, `y`, `size`).
#' @return A list containing a ggplot2 image layer.
#' @export
annotate_nickwan <- function(...) {
  annotate_img(..., .name = 'nick-wan')
}
#' Annotate Meg Risdal
#'
#' Annotate Meg Risdal on ggplot
#'
#' @param ... Arguments passed on to `ggimage::geom_image()` (e.g. `x`, `y`, `size`).
#' @return A list containing a ggplot2 image layer.
#' @export
annotate_megrisdal <- function(...) {
  annotate_img(..., .name = 'meg-risdal')
}
|
8aa721fdb97907ea731252e0f707c74c307b1bc4
|
fde40765438f8e1e70d8623a4ed0eb7fee6f7e8b
|
/R/corpus_toolbox.R
|
37b91d505ad1530e75606a601debca81cd29d3c1
|
[
"MIT"
] |
permissive
|
yjunechoe/junebug
|
381ed27647f0d215d01fa002a6a745c6eddb85d6
|
cb4db89df273fc689d98db2327e6478361ff3e32
|
refs/heads/master
| 2023-06-14T21:40:21.937869
| 2021-07-08T16:56:34
| 2021-07-08T16:56:34
| 312,421,966
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 445
|
r
|
corpus_toolbox.R
|
#' Reconstruct utterances from observations of tokens in tidy format
#'
#' Collapses the tokens within each group back into a single
#' whitespace-separated utterance string.
#'
#' @param data A data frame of tokens
#' @param token_col Name of column of tokens (unquoted; tidy-eval)
#' @param ... Grouping variables
#'
#' @return An ungrouped data frame of the grouping variables plus an
#'   `utterance` column
#' @export
tokens_flatten <- function(data, token_col, ...) {
  grouped <- dplyr::group_by(data, ...)
  dplyr::summarize(
    grouped,
    utterance = paste({{token_col}}, collapse = " "),
    .groups = 'drop'
  )
}
|
755d4800b2d132b9a42b775555b5824a03dd08d2
|
33efeec39033156d7b598f8989f82fcf810db812
|
/man/query_pa_dist.Rd
|
8cff4bc6464e24ec2aadf81e1bcd0c2eb1357dbd
|
[] |
no_license
|
johnchower/oneD7
|
76b4712de0bb89fa70246880b69d7c9a1d90a7fa
|
0ffcf86db58ddbe80330ac5185a7fc14c355545e
|
refs/heads/master
| 2021-01-11T20:39:07.943924
| 2017-03-08T23:11:30
| 2017-03-08T23:11:30
| 79,161,189
| 0
| 0
| null | 2017-02-23T19:02:14
| 2017-01-16T21:28:54
|
R
|
UTF-8
|
R
| false
| true
| 373
|
rd
|
query_pa_dist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_doc.r
\docType{data}
\name{query_pa_dist}
\alias{query_pa_dist}
\title{A string containing the platform action distribution query}
\format{A length-one character vector.}
\usage{
query_pa_dist
}
\description{
A string containing the platform action distribution query
}
\keyword{datasets}
|
6a1e150ab0db78eaaa0e25027b90f83f43ec699a
|
e55d1f014e98ad5bab2aad1a8d1322d66a1a93d4
|
/cachematrix.R
|
0781f66fe688f80ac116a59d959e42906c1f1cab
|
[] |
no_license
|
biffster/ProgrammingAssignment2
|
c7d175b83323ede7a5fb8e565a9d0aa4e6c38b29
|
cba1676c6a8a7bc0b5f631630f059e02ee6f620b
|
refs/heads/master
| 2021-01-18T11:08:08.485668
| 2015-08-23T17:50:31
| 2015-08-23T17:50:31
| 41,227,161
| 0
| 0
| null | 2015-08-22T22:47:43
| 2015-08-22T22:47:43
| null |
UTF-8
|
R
| false
| false
| 1,713
|
r
|
cachematrix.R
|
## Programming Assignment 2
## Michael Fierro
## August 23, 2015
## This assignment solution contains major sections of code from the example
## given in the assignment description examples at:
## https://class.coursera.org/rprog-031/human_grading/view/courses/975105/assessments/3/submissions
## The following two functions work in conjunction to compute the inverse of a
## passed matrix. A cache is consulted on each run: if a value already exists in
## the cache for the passed matrix, that value is returned instead of being
## re-computed.
## makeCacheMatrix creates a special "matrix" object that caches the inverse
## computed by cachematrix(). It exposes four accessors: set/get for the
## matrix itself and setmatrix/getmatrix for the cached inverse. Setting a
## new matrix invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse (NULL = not computed yet)
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() x
  # Parameter renamed from `mean` (misleading leftover from the makeVector
  # example; it also shadowed base::mean) to `inverse`.
  setmatrix <- function(inverse) m <<- inverse
  getmatrix <- function() m
  list(set = set, get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## cachematrix returns the inverse of the special "matrix" x (as built by
## makeCacheMatrix). When a cached inverse exists it is returned directly;
## otherwise the inverse is computed with solve(), stored in the cache, and
## returned. Extra arguments are forwarded to solve().
cachematrix <- function(x = matrix(), ...) {
    cached <- x$getmatrix()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    inv <- solve(x$get(), ...)
    x$setmatrix(inv)
    inv
}
|
088a139aad29c92c3fec367121e3180a9bccd465
|
271abfff6e1066408334e5e5f633620c50ee6a81
|
/plot3.R
|
9ebeb97494f5da33c838fa9f71cc81c547c1b509
|
[] |
no_license
|
lzyempire/ExData_Plotting1
|
abf160709b261388ed3786d00cb637a9483f16e1
|
f6d7aaa06f2eabd35d85eeb03b23a98297a58e97
|
refs/heads/master
| 2020-03-25T18:15:55.426533
| 2018-08-20T14:06:28
| 2018-08-20T14:06:28
| 144,021,176
| 0
| 0
| null | 2018-08-08T13:56:09
| 2018-08-08T13:56:08
| null |
UTF-8
|
R
| false
| false
| 947
|
r
|
plot3.R
|
# Plot the three energy sub-metering series for 2007-02-01 and 2007-02-02
# and save the figure to plot3.png. Assumes household_power_consumption.txt
# is already in the working directory.
hpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Keep only the two days of interest (Date is still a d/m/Y character here).
hpc_day <- subset(hpc, hpc$Date == "2/2/2007" | hpc$Date == "1/2/2007")
# Build full timestamps from the separate Date and Time columns.
hpc_time_string <- paste(hpc_day$Date, hpc_day$Time)
hpc_time <- strptime(hpc_time_string, format = "%d/%m/%Y %H:%M:%S")
# Open the PNG device directly. (Fixed: the original called dev.copy(png)
# BEFORE any plot existed, which fails from the null device.)
png("plot3.png")
plot(hpc_time, hpc_day$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
lines(hpc_time, hpc_day$Sub_metering_2, col = "red")
lines(hpc_time, hpc_day$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
c567128c6f29311978e846d995cef449c017d20e
|
60d40635d000c7a7ef0b8774da34ab3c29d6502e
|
/misc/visu/draw_histogram_wait_fromshell.R
|
7728dbc9bb74dcbe18ec696ed76cc30ab91bd904
|
[] |
no_license
|
obps/obps
|
8d6ce068ab5b802937ad6b8105367703105e4ed5
|
01df6619cc3d96fe821a6650979fa9f8031e9bdb
|
refs/heads/master
| 2020-12-31T06:47:01.030245
| 2017-03-31T07:57:56
| 2017-03-31T07:57:56
| 86,603,881
| 1
| 0
| null | 2017-03-29T16:16:07
| 2017-03-29T16:16:07
| null |
UTF-8
|
R
| false
| false
| 1,539
|
r
|
draw_histogram_wait_fromshell.R
|
#!/usr/bin/env Rscript
# Draw histograms (with density overlays and per-file mean markers) of the
# mean and max waiting times recorded in one or more experiment files, and
# write them to the two PDF paths given on the command line.
library(docopt)
library(ggplot2)
'usage: tool.R <input> ... [--mean=<output1>] [--max=<output2>]
tool.R -h | --help
options:
<input> The input data.
--mean=<output> Output file [Default: tmp1.pdf]
--max=<output> Output file [Default: tmp2.pdf]
' -> doc
args <- docopt(doc)
# Stack all input files into one frame (df) and collect per-file means (dfm).
df <- data.frame()
dfm <- data.frame()
for (filename in args$input) {
  data <- read.csv(filename, sep = ' ', header = FALSE)
  names(data) <- c('meanwait', 'maxwait', 'name')
  data$type <- filename
  df <- rbind(df, data)
  dfm <- rbind(dfm, data.frame(meanmax = mean(data$maxwait),
                               meanmean = mean(data$meanwait),
                               type = filename))
}
# 30 bins across the observed range.
bwmean <- (max(df$meanwait) - min(df$meanwait)) / 30
pmeanwait <- ggplot(df, aes(x = meanwait, fill = type, color = type)) +
  geom_histogram(aes(y = (..density..)), position = "dodge", binwidth = bwmean) +
  geom_point(data = dfm, aes(y = -0.0001, x = meanmean, fill = type)) +
  geom_vline(data = dfm, aes(xintercept = meanmean, color = type, fill = type)) +
  geom_density(alpha = 0.1, aes(color = type, fill = type)) +
  xlab("Average Waiting Time") +
  ylab("Proportion of experiments")
bwmax <- (max(df$maxwait) - min(df$maxwait)) / 30
pmaxwait <- ggplot(df, aes(x = maxwait, fill = type)) +
  geom_histogram(aes(y = (..density..)), position = "dodge", binwidth = bwmax) +
  geom_point(data = dfm, aes(y = -0.0001, x = meanmax, fill = type)) +
  geom_vline(data = dfm, aes(xintercept = meanmax, color = type, fill = type)) +
  xlab("Max Waiting Time") +
  ylab("Proportion of experiments")
# Fixed: explicitly print each plot and close each device. The original never
# called dev.off(), leaving both PDF files unfinalized (often unreadable),
# and relied on top-level auto-printing, which breaks under source().
pdf(file = args$mean, width = 20, height = 7)
print(pmeanwait)
dev.off()
pdf(file = args$max, width = 20, height = 7)
print(pmaxwait)
dev.off()
|
202e069ad531f9e15e92ef46438e2f061a6666cc
|
686388fce3a84c98f52d5b934a2309aa93b9e0fb
|
/cachematrix.R
|
c186a3098b1a48e335e68f2c36ae4a9412e1d44f
|
[] |
no_license
|
jwu125/ProgrammingAssignment2
|
861c5e4d8e3a20dc67d577518e665649a164a134
|
22da0c246df1942de5a994aedfab4f40282e813f
|
refs/heads/master
| 2023-08-31T18:48:55.616253
| 2021-09-20T12:06:36
| 2021-09-20T12:06:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 945
|
r
|
cachematrix.R
|
## These functions are designed to cache potentially time-consuming computations.
## They will use cache to give already calculated data
## rather then recalculating it.
## This function creates a special matrix object that can cache its inverse.
## It returns a list of accessors: set/get for the matrix itself and
## setinv/getinv for the cached inverse. Setting a new matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL  # cached inverse (NULL = not computed yet)
    set <- function(y) {
        x <<- y
        inv <<- NULL  # invalidate the cache when the matrix changes
    }
    get <- function() x  # fixed typo: was "funtion", a syntax error
    setinv <- function(Inv) inv <<- Inv
    getinv <- function() inv
    list(set = set, get = get,
         setinv = setinv,
         getinv = getinv)
}
## This function calculates the inverse of the matrix held by a
## makeCacheMatrix object. If the inverse is already cached it is retrieved
## from the cache instead of being recomputed. Extra args go to solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    inv <- x$getinv()  # fixed: was `x&getinv()` -- `&` is logical AND, not the accessor
    if(!is.null(inv)) {
        message("getting cached data")
        return(inv)
    }
    data <- x$get()
    inv <- solve(data, ...)
    x$setinv(inv)
    inv
}
|
32b38e77c604797e4d2863876ccca2a7b35fdec1
|
50c137bee0fa6a4d6a1172eee9a03352f86b823f
|
/powers-master/man/boxcox.Rd
|
ffb74e7c5e4877bd7c7a5138f06f5514546ec727
|
[] |
no_license
|
STAT545-UBC-hw-2018-19/hw07-divita95
|
80ba6f5a45bc11eb08642d0187fbfd1eae2458a4
|
c74adba4f1639fa8d72218a60a0201173e53b47b
|
refs/heads/master
| 2020-04-05T22:09:13.535294
| 2018-11-15T20:54:35
| 2018-11-15T20:54:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 483
|
rd
|
boxcox.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/newfunc.R
\name{boxcox}
\alias{boxcox}
\title{Apply Box-Cox Power Transformations}
\usage{
boxcox(x, p)
}
\arguments{
\item{x}{numeric vector to transform; if \code{p} is a vector then a matrix of transformed values with columns labelled by powers will be returned.}

\item{p}{power to apply (0 = log).}
}
\value{
a vector or matrix of transformed values.
}
\description{
Compute the Box-Cox power transformation of a variable.
}
|
8d2ff39b21bc3d4f0ecabb043c96a5aea2700b9a
|
5e65f58f231b331ba0cddb512398e39cda3a9a67
|
/mathematical_programming_research_methods/Assignment2/codes/Exercise1/choose_centroids.R
|
07c2a3cf328b69bd0b7a118b2d42be3c5fcc7668
|
[] |
no_license
|
cwkprojects/myprojects
|
719644297fbf8c9269f9e3e440be9988a859df57
|
0bed4cd790cf4e4fa18d4683afadfee400ab7b33
|
refs/heads/master
| 2021-06-20T20:57:39.324961
| 2017-08-02T18:56:32
| 2017-08-02T18:56:32
| 98,444,604
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 922
|
r
|
choose_centroids.R
|
# Return n_centroids random starting centroids for the data frame X.
# Each centroid coordinate is drawn uniformly within the observed range of
# the corresponding column of X, so every centroid lies inside the data's
# bounding box. Returns an n_centroids x ncol(X) numeric matrix.
# (Cleaned up: removed the dead commented-out sampling code and the
# 1:n loop in favor of seq_len.)
randCentroids <- function(X, n_centroids){
  centroids <- matrix(0, n_centroids, ncol(X))
  for (j in seq_len(ncol(X))) {
    min_j <- min(X[, j])
    range_j <- max(X[, j]) - min_j
    # One uniform draw per centroid for this coordinate.
    centroids[, j] <- min_j + range_j * runif(n_centroids)
  }
  centroids
}
|
09579c9588771105307da11753b40bb6f7c35c04
|
93912bf1f51b016e68d29d436d60c3d91314a3fc
|
/rProgrammingWeek2.R
|
4ec558898e5fe6319d078265cda217876ea50a07
|
[] |
no_license
|
dslab123/coursera_datasciencebasic
|
8d52e6232b5f112c1197c1acdf909207293ad100
|
0d45eea1ba44c6e5062d6a346b4d05cf0ca2000a
|
refs/heads/master
| 2021-01-22T08:27:46.430858
| 2016-08-18T22:27:25
| 2016-08-18T22:27:25
| 92,615,417
| 0
| 0
| null | 2017-05-27T18:02:47
| 2017-05-27T18:02:47
| null |
UTF-8
|
R
| false
| false
| 2,170
|
r
|
rProgrammingWeek2.R
|
#directory = /Users/mooncalf/Dropbox/skb/coursera/datasciencecoursera/specdata/
pollutantmean <- function(directory = '/Users/mooncalf/Dropbox/skb/coursera/datasciencecoursera/specdata/', pollutant = 'nitrate', id = 1:332){
pollutantVector = c()
for (i in id){
if (i < 10){
padding <- "00"
}else if (i >=10 && i < 100){
padding <- "0"
}else{
padding <- ""
}
filename <- paste(directory, padding, i, ".csv", sep="")
rawdata <- read.csv(filename)
pollutantdata <- rawdata[complete.cases(rawdata),pollutant]
pollutantVector <- c(pollutantVector, pollutantdata)
}
mean(pollutantVector)
}
#directory = /Users/mooncalf/Dropbox/skb/coursera/datasciencecoursera/specdata/
complete <- function(directory = '/Users/mooncalf/Dropbox/skb/coursera/datasciencecoursera/specdata/', id = 1:332){
nobsdf <- data.frame(matrix(ncol=2, nrow=length(id)))
names(nobsdf) = c("id", "nobs")
for (i in id){
if (i < 10){
padding <- "00"
}else if (i >=10 && i < 100){
padding <- "0"
}else{
padding <- ""
}
filename <- paste(directory, padding, i, ".csv", sep="")
rawdata <- read.csv(filename)
cleandata <- rawdata[complete.cases(rawdata), ]
this_nobs <- nrow(cleandata)
nobsdf$id[i] = i
nobsdf$nobs[i] = this_nobs
}
nobsdf
}
#directory = /Users/mooncalf/Dropbox/skb/coursera/datasciencecoursera/specdata/
corr <- function(directory = '/Users/mooncalf/Dropbox/skb/coursera/datasciencecoursera/specdata/', threshold=114){
corr_vector <- c()
#nobsdf <- data.frame(matrix(ncol=2, nrow=length(id)))
#names(nobsdf) = c("id", "nobs")
id <- 1:332
for (i in id){
if (i < 10){
padding <- "00"
}else if (i >=10 && i < 100){
padding <- "0"
}else{
padding <- ""
}
filename <- paste(directory, padding, i, ".csv", sep="")
rawdata <- read.csv(filename)
cleandata <- rawdata[complete.cases(rawdata), ]
this_nobs <- nrow(cleandata)
if (this_nobs >= threshold){
corr_vector <- c(corr_vector,cor(cleandata$nitrate, cleandata$sulfate))
}
}
corr_vector
}
|
ebf6ad1857fdb2abc9ab555a81c38982a23a6385
|
5910d75f4cc3255195bfa5b3edb4cdbbcd982ddb
|
/tests/testthat/test-plots_APCsurface.R
|
3fa61bcc293b560010b7af5301509c6303b987be
|
[
"MIT"
] |
permissive
|
bauer-alex/APCtools
|
fddbdb15f20c20af07161c6c660fcf807937b4d3
|
f0a1b188007a45dbe3aaba16fae61df2b6faf311
|
refs/heads/main
| 2023-09-01T15:49:33.593873
| 2023-08-29T07:57:07
| 2023-08-29T07:57:07
| 430,766,605
| 20
| 3
|
MIT
| 2022-04-26T15:02:38
| 2021-11-22T15:40:42
|
R
|
UTF-8
|
R
| false
| false
| 3,278
|
r
|
test-plots_APCsurface.R
|
test_that("plot_APCheatmap", {
testthat::skip_if_not_installed("mgcv")
data(drug_deaths)
# plot hexamap of observed data
gg1 <- plot_APCheatmap(dat = drug_deaths, y_var = "mortality_rate")
gg2 <- plot_APCheatmap(dat = drug_deaths, y_var = "mortality_rate",
bin_heatmap = FALSE,
apc_range = list("cohort" = 1980:2010))
gg3 <- plot_APCheatmap(dat = drug_deaths, y_var = "mortality_rate",
markLines_list = list("age" = c(20,70),
"period" = c(1990,2010),
"cohort" = c(1985,1993)),
apc_range = list("cohort" = 1980:2010))
gg4 <- plot_APCheatmap(dat = drug_deaths, y_var = "mortality_rate",
markLines_list = list("age" = c(20,70),
"period" = c(1990,2010),
"cohort" = c(1985,1993)),
apc_range = list("cohort" = 1980:2010),
plot_CI = FALSE)
expect_s3_class(gg1, class = c("gg","ggplot"))
expect_s3_class(gg2, class = c("gg","ggplot"))
expect_s3_class(gg3, class = c("gg","ggplot"))
expect_s3_class(gg4, class = c("gg","ggplot"))
# plot heatmap of smoothed structure
model <- gam(mortality_rate ~ te(period, age), data = drug_deaths)
drug_deaths$mortality_rate <- drug_deaths$mortality_rate + 1
model_logLink <- bam(mortality_rate ~ te(period, age),
family = Gamma(link = "log"), data = drug_deaths)
gg1 <- plot_APCheatmap(dat = drug_deaths, model = model)
gg2 <- plot_APCheatmap(dat = drug_deaths, model = model_logLink)
gg3 <- plot_APCheatmap(dat = drug_deaths, model = model_logLink,
method_expTransform = "delta")
expect_s3_class(gg1, class = c("gg","ggplot"))
expect_s3_class(gg2, class = c("gg","ggplot"))
expect_s3_class(gg3, class = c("gg","ggplot"))
})
test_that("plot_APChexamap", {
testthat::skip_if_not_installed("mgcv")
data(travel)
data(drug_deaths)
# helper functions
expect_identical(round(compute_xCoordinate(period_vec = c(1980,1999)), 2),
c(1714.73,1731.18))
expect_identical(compute_yCoordinate(period_vec = c(1990, 1999), age_vec = c(20,50)),
c(-975.0, -949.5))
# plot hexamap of observed data
expect_null(plot_APChexamap(dat = drug_deaths, y_var = "mortality_rate"))
expect_null(plot_APChexamap(dat = drug_deaths, y_var = "mortality_rate",
y_var_logScale = TRUE, color_range = c(1,50),
apc_range = list("cohort" = 1980:2010)))
expect_null(plot_APChexamap(dat = travel, y_var = "mainTrip_distance",
y_var_logScale = TRUE))
# error when 0 values are logarithmized
expect_error(plot_APChexamap(dat = drug_deaths, y_var = "mortality_rate",
y_var_logScale = TRUE, color_range = c(0,50)))
# plot hexamap of smoothed structure
model <- gam(mortality_rate ~ te(period, age), data = drug_deaths)
expect_null(plot_APChexamap(dat = drug_deaths, model = model))
})
|
82689b6c13f7e9e27c073cb432d59e2bda3169e7
|
b9cd4adc1809f1c34fb96598746ab68eb2411459
|
/R/mcmc-MCMC Builder and Step Sampler.R
|
89648040a8e0cd1f8066d19e29464437c3b360ad
|
[] |
no_license
|
GBarnsley/BinomialEpidemicsNimble
|
693c46597639147f9b4062f334ed5e65532cb194
|
2eac5b36e43a910ec6485f23808e3cec5bb3d5a6
|
refs/heads/master
| 2023-04-02T17:29:45.959734
| 2021-01-05T14:34:14
| 2021-01-05T14:34:14
| 292,693,073
| 0
| 0
| null | 2021-01-05T14:34:15
| 2020-09-03T22:28:19
|
R
|
UTF-8
|
R
| false
| false
| 697
|
r
|
mcmc-MCMC Builder and Step Sampler.R
|
#' Generic function that calls the model specific MCMC set-up methods.
#' The naive version of the NpmDelta Algorithm is generated here.
#' @param epiModel An object of the class one of the specific epidemic model
#' @param hyperParameters A list of lists of the hyper-parameters for the epidemic model and MCMC
#' @return An object of the given epidemic class with a compiled MCMC
#' @export
buildMCMCInternal <- function(epiModel, hyperParameters, showCompilerOutput){
sampler <- nimbleFunction(
contains = sampler_BASE,
setup = stepSampler_setup,
run = stepSampler_run,
methods = list(
reset = function() {}
)
)
UseMethod("buildMCMCInternal")
}
|
00dd8e180db23b00b00fc05c12e657d9a3a66d2f
|
80f01d7fa984d7bf1150846f4282aa036f56129b
|
/run_analysis.R
|
a3a4363acdf0e2096cefc2450d3fc08995a71503
|
[] |
no_license
|
SP10000/Getting_and_Cleaning_Data
|
e2ce2d43885d2255d4fa555b9b19f1e4b2ec95e7
|
fd713abfd6aa53bc56e3c0eec7fe72b210630e91
|
refs/heads/master
| 2020-03-18T16:46:17.825892
| 2018-05-27T09:43:42
| 2018-05-27T09:43:42
| 134,985,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,982
|
r
|
run_analysis.R
|
### Activity 1: Merge the training and the test sets to create one data set ###
### First the datafiles should be downloaded ###
fileDirectory <- "C://Users//648700//Desktop//Coursera"
setwd(fileDirectory)
if(!file.exists("./datapacks")){dir.create("./datapacks")}
FileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(FileUrl,destfile="./datapacks/Datapack.zip")
###Now unzip the datapack to the right folder ###
unzip(zipfile="./datapacks/Datapack.zip",exdir="./datapacks")
###Load 'data.table' package###
library(data.table)
### Read the data into R ###
### First read Testing tables ###
subject_test <- read.table("./datapacks//UCI HAR Dataset//test//subject_test.txt")
x_test <- read.table("./datapacks//UCI HAR Dataset//test//X_test.txt")
y_test <- read.table("./datapacks//UCI HAR Dataset//test//y_test.txt")
### Secondly read the Training tables ###
subject_train <- read.table("./datapacks//UCI HAR Dataset//train//subject_train.txt")
x_train <- read.table("./datapacks//UCI HAR Dataset//train//X_train.txt")
y_train <- read.table("./datapacks//UCI HAR Dataset//train//y_train.txt")
### Now read activity table ###
activity_labels = read.table("./datapacks/UCI HAR Dataset/activity_labels.txt")
### Finally, read the features table ###
features <- read.table("./datapacks/UCI HAR Dataset/features.txt")
### Giving the tables the right columnnames ###
### Since all data is now read, its should be made a more combined and tidy dataset ###
### First make the variables of x_test and x_train more concrete, by combining with the features table ###
colnames(x_test) <- features[,2]
colnames(x_train) <- features[,2]
### Secondly, the y_test and y_train should have a column name to show that it is representing an ID ###
colnames(y_test) <- "ActivityIDcode"
colnames(y_train) <- "ActivityIDcode"
colnames(subject_test) <- "SubjectIDcode"
colnames(subject_train) <- "SubjectIDcode"
### Finally, the activitylabels table should have column names ###
colnames(activity_labels) <- c("ActivityIDcode", "ActivityDescription")
### Now the seperated tables should be combined to one table for train and test, after which they can be completely combined###
total_test <- cbind(y_test, subject_test, x_test)
total_train <- cbind(y_train, subject_train, x_train)
### Making it one final table ###
Complete_table <- rbind(total_train, total_test)
### Activity 2: Extract only the measurements on the mean and standard deviation for each measurement.###
### First each measurment should be defined ###
Measurements <- colnames(Complete_table)
### We only want the mean and standard deviation ###
Mean_and_standard_deviation <- (grepl("ActivityIDcode", Measurements) |
grepl("SubjectIDcode", Measurements) |
grepl("Mean", Measurements) |
grepl("Standard_Deviation", Measurements)
)
### Storing the result in a collection variable ###
Mean_and_std_collection <- Complete_table[ , Mean_and_standard_deviation == TRUE]
### Activity 4: Using descriptive activity names to name the activities in the data set ###
Total_Set_With_Activity_Label <- merge(Mean_and_std_collection, activity_labels,
by='ActivityIDcode',
all.x=TRUE)
### Activity 5: Creating a second, independent tidy data set with the average of each variable for each activity and each subject ###
New_data_set <- aggregate(. ~SubjectIDcode + ActivityIDcode, Total_Set_With_Activity_Label, mean)
New_data_set_sorted <- New_data_set[order(New_data_set$SubjectIDcode, New_data_set$ActivityIDcode),]
### Finally, write the new data set ###
write.table(New_data_set_sorted, "New_data_set_sorted.txt", row.names= FALSE)
|
cd0ddd35956ccc41e45c6b6b044d4ebfa2eab05b
|
da0221ddcae8b085bd8a39ff50842510049d21ed
|
/R/coverage.r
|
2e7219e5f7369b7887ac8c281accebe132fa414b
|
[] |
no_license
|
psmits/cosmo_prov
|
806538919d62ef10e2e06ed2360eed9a5ed39d4f
|
fd2cd5344a4ca8d6d22eae630acfb6896e2aa531
|
refs/heads/master
| 2020-04-11T08:05:33.360722
| 2016-08-15T18:31:11
| 2016-08-15T18:31:11
| 12,221,765
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
coverage.r
|
#' Good's estimate of frequency coverage
#'
#' Coverage is an estimator of the amount of observed diversity of some
#' set of categorical variables. For example, given a distribution of
#' taxonomic abundances, it is possible to determine how much of the possible
#' set has been sampled.
#'
#' @param ab table of total observed abundances
#' @return
#' @export
#' @keywords
#' @author
#' @references
#' @examples
coverage <- function(ab) {
oo <- sum(ab)
ss <- sum(ab == 1)
if (ss == oo) ss = oo - 1
uu <- 1 - ss / oo
uu
}
|
becc301a1152c5d463b64830cbaac8c74be2509e
|
7505da6d4b338f172cac1af24d692302d42be6bc
|
/man/NLWrapper.Run.Rd
|
5e0b7044ea4087f79964140f15cb543e54959efc
|
[
"MIT"
] |
permissive
|
antonio-pgarcia/evoper
|
367da295fd704bbde96370c990b8be56d70879b5
|
5337eb8917ed851ffb5f916023d08de12bf281d1
|
refs/heads/master
| 2021-01-19T04:18:37.948801
| 2020-08-30T10:25:53
| 2020-08-30T10:25:53
| 61,146,979
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 418
|
rd
|
NLWrapper.Run.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/netlogo-helper.R
\name{NLWrapper.Run}
\alias{NLWrapper.Run}
\title{NLWrapper.Run}
\usage{
NLWrapper.Run(obj, r = 1, seed = c())
}
\arguments{
\item{obj}{The object retuned by \link{NLWrapper.Model}}
\item{r}{The number of replications}
\item{seed}{The collection of random seeds}
}
\description{
Executes a NetLogo Model using rNetLogo
}
|
b9c02fd14b8485e9cfecdd91bd048bd6a16c18f7
|
adb379ba53e72cffe0efcd091a9e06e729717bc7
|
/tests/testthat/test_track_interpolation.R
|
748a01991a8424b4d73148cf472b3f72b8b28aad
|
[] |
no_license
|
geanders/stormwindmodel
|
e7edaedeee9ecbafb7695a89e899ac423e71dea0
|
0b0a2906e729d448f3da17ef16612ea461206c0a
|
refs/heads/master
| 2022-10-08T17:52:09.271107
| 2022-09-20T17:50:05
| 2022-09-20T17:50:05
| 56,884,201
| 22
| 13
| null | 2021-05-17T20:06:44
| 2016-04-22T20:45:21
|
R
|
UTF-8
|
R
| false
| false
| 5,995
|
r
|
test_track_interpolation.R
|
library(tidyverse)
test_that("Interpolation works with North Atlantic storm", {
# Floyd
interp_track_floyd <- create_full_track(stormwindmodel::floyd_tracks[34:40, ],
tint = 3)
# Expectations are from 3-hourly IBTrACS data (expect long -77.49, which
# IBTRaCS interpolates to -77.65)
expected_interp_lats <- c(32.10, 32.95, 33.70, 34.53, 35.70, 36.81, 38.00,
39.36, 40.60, 41.45, 42.10, 42.74, 43.30)
expected_interp_longs <- c(-78.70, -78.27, -78.00, -77.49, -76.80, -76.08,
-75.30, -74.40, -73.50, -72.77, -72.10, -71.37,
-70.60)
expect_equal(round(interp_track_floyd$tclat), round(expected_interp_lats))
expect_equal(round(interp_track_floyd$tclon), round(expected_interp_longs))
# Katrina
interp_track_katrina <- create_full_track(stormwindmodel::katrina_tracks[22:26, ],
tint = 3)
# Expectations are from 3-hourly IBTrACS data
expected_interp_lats <- c(27.2, 27.67, 28.20, 28.81, 29.50, 30.27,
31.10, 31.87, 32.60)
expected_interp_longs <- c(-89.20, -89.45, -89.60, -89.62, -89.60, -89.60,
-89.60, -89.40, -89.10)
expect_equal(round(interp_track_katrina$tclat), round(expected_interp_lats))
expect_equal(round(interp_track_katrina$tclon), round(expected_interp_longs))
})
test_that("Interpolation works for Southern Atlantic storm", {
sample_track_1 <- tribble(
~ date, ~ latitude, ~ longitude, ~ wind,
"200403270000", -29.10, -44.90, 70,
"200403270600", -29.30, -45.60, 75,
"200403271200", -29.50, -46.60, 75,
"200403271800", -29.60, -47.40, 75,
"200403280000", -29.30, -48.40, 75
)
interp_track_1 <- create_full_track(hurr_track = sample_track_1, tint = 3)
# Expectations are from 3-hourly IBTrACS data (except -46.40 in
# IBTRaCS replaced with -46.60)
expected_interp_lats <- c(-29.10, -29.20, -29.30, -29.41, -29.50,
-29.59, -29.60, -29.48, -29.30)
expected_interp_longs <- c(-44.90, -45.24, -45.60, -45.98, -46.60,
-46.88, -47.40, -47.89, -48.40)
expect_equal(round(interp_track_1$tclat), round(expected_interp_lats))
expect_equal(round(interp_track_1$tclon), round(expected_interp_longs))
})
test_that("Interpolation works for Western Pacific storm", {
sample_track_1 <- tribble(
~ date, ~ latitude, ~ longitude, ~ wind,
"202008311200", 22.90, 145.80, 25,
"202008311800", 22.10, 145.31, 35,
"202009010000", 21.80, 144.50, 35,
"202009010600", 20.90, 144.40, 39,
"202009011200", 20.50, 144.10, 39
)
interp_track_1 <- create_full_track(hurr_track = sample_track_1, tint = 3)
# Expectations are from 3-hourly IBTrACS data (except 145.31 in
# IBTRaCS replaced with 145.51)
expected_interp_lats <- c(22.90, 22.44, 22.10, 21.96, 21.80,
21.36, 20.90, 20.65, 20.50)
expected_interp_longs <- c(145.80, 145.51, 144.90, 144.64, 144.50,
144.44, 144.40, 144.31, 144.10)
expected_interp_vmax <- c(25, 30, 35, 35, 35, 37, 39, 39, 39) %>%
weathermetrics::knots_to_speed(unit = "mps", round = 1)
expect_equal(round(interp_track_1$tclat), round(expected_interp_lats))
expect_equal(round(interp_track_1$tclon), round(expected_interp_longs))
expect_equal(round(interp_track_1$vmax), round(expected_interp_vmax))
})
# Harold crossed the international dateline. Try with both IBTrACs conventions
# (goes above 180) and other (resets at 180 to -180).
test_that("Interpolation works with IBTrACS convention across international dateline", {
sample_track_1 <- tribble(
~ date, ~ latitude, ~ longitude, ~ wind,
"202004071200", -17.40, 174.00, 109,
"202004071800", -18.30, 175.80, 109,
"202004080000", -18.90, 177.70, 119,
"202004080600", -19.90, 179.70, 119,
"202004081200", -20.60, 181.90, 115,
"202004081800", -21.90, 184.40, 109,
"202004090000", -23.10, 186.50, 109,
"202004090600", -24.60, 189.30, 93,
"202004091200", -25.90, 192.30, 80
)
interp_track_1 <- create_full_track(hurr_track = sample_track_1, tint = 3)
sample_track_2 <- tribble(
~ date, ~ latitude, ~ longitude, ~ wind,
"202004071200", -17.40, 174.00, 109,
"202004071800", -18.30, 175.80, 109,
"202004080000", -18.90, 177.70, 119,
"202004080600", -19.90, 179.70, 119,
"202004081200", -20.60, -178.1, 115,
"202004081800", -21.90, -175.6, 109,
"202004090000", -23.10, -173.5, 109,
"202004090600", -24.60, -170.7, 93,
"202004091200", -25.90, -167.7, 80
)
interp_track_2 <- create_full_track(hurr_track = sample_track_2, tint = 3)
# Expectations are from 3-hourly IBTrACS data (with conversion to make all
# longitudes between -180 and 180)
expected_interp_lats <- c(-17.40, -17.83, -18.30, -18.61, -18.90, -19.28,
-19.90, -20.11, -20.60, -21.23, -21.90, -22.48,
-23.10, -23.84, -24.60, -25.23, -25.90)
expected_interp_longs <- c(174.00, 174.86, 175.80, 176.74, 177.70,
178.68, 179.70, -179.24, -178.10, -176.84,
-175.60, -174.57, -173.50, -172.17, -170.70,
-169.23, -167.70)
expected_interp_vmax <- c(109, 109, 109, 114, 119, 119, 119, 117, 115, 112,
109, 109, 109, 101, 93, 86, 80) %>%
weathermetrics::knots_to_speed(unit = "mps", round = 1)
expect_equal(round(interp_track_1$tclat), round(expected_interp_lats))
expect_equal(round(interp_track_1$tclon), round(expected_interp_longs))
expect_equal(round(interp_track_1$vmax), round(expected_interp_vmax))
expect_equal(round(interp_track_2$tclat), round(expected_interp_lats))
expect_equal(round(interp_track_2$tclon), round(expected_interp_longs))
expect_equal(round(interp_track_2$vmax), round(expected_interp_vmax))
})
|
27dd65fc9cfbba76eac8ac14cd332aa6cad2da44
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sppmix/examples/plot_MPP_probs.Rd.R
|
343e0252d57fe4e6f70c52a1cacde917a60c71ba
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 413
|
r
|
plot_MPP_probs.Rd.R
|
library(sppmix)
### Name: plot_MPP_probs
### Title: Plot the mark probabilities of a marked point pattern
### Aliases: plot_MPP_probs
### ** Examples
## No test:
newMPP=rMIPPP_cond_loc(gammas=c(.1,.2,.5))
plot(newMPP$surf,main="True IPPP intensity surface for the locations")
genMPP=newMPP$genMPP
newMPP$r
mpp_est <- est_MIPPP_cond_loc(genMPP,newMPP$r, hyper=0.2)
plot_MPP_probs(mpp_est)
## End(No test)
|
4919ca116de28c3dacfca699e98993083a100e5a
|
811399b99a474b2247fe31a29daee5a50d221077
|
/general_r/parallel_ADA.R
|
ea62de1071c3aeb150d73e1d66f597c5ebc2d122
|
[] |
no_license
|
peterwu19881230/R_Utility
|
b528d152f12a81a8a7d6baa3418a8e0d22b8b45b
|
1ea244ad3c711ddf71f65bb2fdb168996b68c470
|
refs/heads/master
| 2020-08-15T16:08:09.833905
| 2019-10-15T18:30:53
| 2019-10-15T18:30:53
| 215,368,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,944
|
r
|
parallel_ADA.R
|
#Parallel programming tested on ADA
#ssh peterwu19881230@ada.tamu.edu
#module load R/3.5.0-iomkl-2017b-recommended-mt
##Somthing below cause error on ADA (and will run for a very long time)
##if(!require(pacman)){
## install.packages("pacman")
## library(pacman)
##}
##pacman::p_load(tidyverse,xlsx,factoextra,pheatmap,ComplexHeatmap) #Note ggplot2 is loaded when factoextra is loaded
#Test 1
set.seed(0)
largeData=matrix(rnorm(10^4*10^2*10^2),nrow=10^2)
start.time=Sys.time()
nonpar=apply(largeData,MARGIN=1,FUN=median)
end.time=Sys.time()
end.time-start.time
#Tutorial on parallel apply family: http://gforge.se/2015/02/how-to-go-parallel-in-r-basics-tips/
library(parallel)
no_cores <- detectCores()-1
cl <- makeCluster(no_cores)
start.time=Sys.time()
par=parApply(cl=cl,X=largeData,MARGIN=1,FUN=median)
end.time=Sys.time()
end.time-start.time
#Test 2
set.seed(0)
largeData=rnorm(7914231)
n=c(3:22,27,28,31,41,47,48)
start.time = Sys.time()
set.seed(101)
randomPCC=list()
i=1
for(num in n){
randomPCC[[i]]=sapply(1:5000,FUN=function(iteration){
mean(abs(sample(largeData,num)))
})
i=i+1
}
end.time = Sys.time()
end.time - start.time #Time difference of 9.82577 mins (When I run R command line on ADA)
library(parallel)
no_cores <- detectCores()-1
cl <- makeCluster(no_cores)
##Must use clusterExort or specify within the anonymous function:
##Ref: https://stackoverflow.com/questions/10095956/parsapply-not-finding-objects-in-global-environment
clusterExport(cl,"largeData") #If multiple objs need to be recognized, concatnate the input to be: c("Obj1","Obj2","Obj3"...)
start.time = Sys.time()
set.seed(101)
randomPCC=list()
i=1
for(num in n){
clusterExport(cl,"num")
randomPCC[[i]]=parSapply(cl=cl,X=1:5000,FUN=function(iteration){
mean(abs(sample(largeData,num)))
})
i=i+1
}
end.time = Sys.time()
end.time - start.time #Time difference of 1.44726 mins (When I run R command line on ADA)
|
57bc4b87e99316a7eb13d9d89b877a16bbe3d329
|
e0c17401fbfb1f581e3eaab2a4cb27c6283c2dff
|
/plot1.R
|
26885aea74e3ceb7c3cca3400b1c072fa1403e0d
|
[] |
no_license
|
christopherskyi/ExData_Plotting1
|
b756eb85be5973e6564d096fac13bf01e4657ab3
|
a501028c7446d8edbad31db1adc2a1541e3e0134
|
refs/heads/master
| 2021-01-16T20:49:09.690163
| 2015-08-09T19:34:16
| 2015-08-09T19:34:16
| 40,267,850
| 0
| 0
| null | 2015-08-05T20:40:36
| 2015-08-05T20:40:36
| null |
UTF-8
|
R
| false
| false
| 2,926
|
r
|
plot1.R
|
# to avoid bugs when using both plyr and dplyr you should load plyr before dplyr.
library(lubridate)
library(dplyr)
library(data.table)
#############################################################################
# How to use this script:
#
# 1st) Create a folder called 'Data' in the same folder as this R script.
# 2nd) place the riginal data set, household_power_consumption.txt, in 'Data'
# 3rd) run the script -- but note: we're creating and saving a much smaller
# data set, and we'll be working with that
#############################################################################
# Ouch! household_power_consumption.txt a 2,000,000+ row dataset.
# However, we only need rows where the date is "2007-02-01" or "2007-02-02"
# Unfortunately, read.table() isn't designed to extract specific rows based on the value of a variable,
# so we're forced to read in the whole thing:
epc_full <- as.data.table(read.table('Data/household_power_consumption.txt', header = TRUE, sep = ";", stringsAsFactors = TRUE) )
#############################################################################
# Reduce epc_full to a data set containing rows where date = 2007-02-01" or "2007-02-02"
#############################################################################
# convert the date column into a Date class
epc_full$Date <- as.Date(epc_full$Date, format = "%d/%m/%Y")
# Reduce epc_full! Extract only dates 2007-02-01 and 2007-02-02
epc <- filter(epc_full,Date == "2007-02-01" | Date == "2007-02-02")
# Get this big dataset out of memory
rm(epc_full)
# Export this reduced set
write.table(epc, 'Data/epc.txt',row.name=FALSE, sep=';')
#############################################################################
# Create a histogram of the freq of Global Active Power
#############################################################################
# Use the reduced dataset
epc <- as.data.table(read.table('Data/epc.txt', header = TRUE, sep = ";", stringsAsFactors = TRUE) )
# create more readable variable names
epc.var.names <- colnames(epc)
epc.var.names[3] <- "Global Active Power (kilowatts)"
epc.var.names[4] <- "Global Reactive Power (kilowatts)"
setnames(epc,colnames(epc),epc.var.names)
# convert the data values from a Factor to a Date class
epc$Date <- as.Date(epc$Date)
# get all the weekdays
epc.weekdays <- weekdays(epc$Date)
# add weekdays to data set
epc <- mutate(epc, Weekday = epc.weekdays)
# this call includes 'epc$' in the x-axis label
# hist(epc$`Global Active Power (kilowatts)`, col = "red")
with(epc, hist(`Global Active Power (kilowatts)`, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red"))
# write histo to png file, in the same folder as this script
png(file="plot1.png",width=480,height=480)
with(epc, hist(`Global Active Power (kilowatts)`, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red"))
dev.off()
|
4a23d64638cc7dd76a78ae7dc2078a0ecc293712
|
63e5fc70d2e6233457fc9ad407d7e4984bfc8997
|
/man/get_carbon_increment.Rd
|
5763edcac4d31f60cb134928f291cb4867114dd7
|
[] |
no_license
|
Boffiro/hisafer
|
32f648f6aca222d01006d25da1b237846b13113e
|
8773fe3d5d2aa6d307af0088a6f6e79cc9a087d0
|
refs/heads/master
| 2023-05-06T17:53:22.060974
| 2020-10-16T09:48:48
| 2020-10-16T09:48:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 475
|
rd
|
get_carbon_increment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cycles.R
\name{get_carbon_increment}
\alias{get_carbon_increment}
\title{Get tree carbon increment from a hop object}
\usage{
get_carbon_increment(hop)
}
\arguments{
\item{hop}{An object of class hop or face.}
}
\value{
A tibble with extracted and calculated carbon increments.
}
\description{
Gets tree carbon increment from a hop object.
Used within hisafe cycle functions.
}
\keyword{internal}
|
6fd234d01e4d765eb30dff63e0569e5e5ff0fb3c
|
cff7a73825a6405ecb2b667beb4c607ed3358508
|
/thesis/sketchbook.R
|
e01298a791887d3d4e0a89211b4749fb75de202e
|
[] |
no_license
|
kmatusz/mgr
|
cc308a362d19bf1855bd7b346f161ac9c486dec1
|
40fa62b1834ae9228e5919b953e30899dc43fad5
|
refs/heads/master
| 2023-07-07T01:44:45.197192
| 2021-08-12T21:07:40
| 2021-08-12T21:07:40
| 246,847,687
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,517
|
r
|
sketchbook.R
|
plot_map <- function(variable, title){
print(1)
tm_shape(brazil_map) +
tm_borders()+
tm_shape(stats_per_microregion3) +
tm_polygons(col = variable,border.alpha = 0) +
tm_shape(brasil_cities_coords %>% arrange(-population) %>% head(10)) +
tm_symbols(size = 0.2,
col = "black",
border.lwd = NA,
alpha = 0.8) +
tm_text(text='city', just='top',size = 0.8) +
tm_layout(title= title, title.size = 0.9)
}
w1 <- plot_map('no_customers_per_10000_pop', 'No. customers per 10 thousand inhabitants')
tm_shape(brazil_map) +
tm_borders()+
tm_shape(stats_per_microregion3) +
tm_polygons(col = variable,border.alpha = 0,title=' ') +
tm_shape(brasil_cities_coords %>% arrange(-population) %>% head(10)) +
tm_symbols(size = 0.2,
col = "black",
border.lwd = NA,
alpha = 0.8) +
tm_text(text='city', just='top',size = 0.8) +
tm_layout(title= title, title.size = 0.9)
### DUMP ----
# jak zrobić grid
no_cells = 20
grid_geom=st_make_grid(df_map, n=c(no_cells,no_cells)) #Final number of cells
df_map_grid_0 = st_sf(grid_id=1:length(grid_geom), grid_geom)
# plot(df_map_grid_0)
intersection <- st_intersection(y = df_map_grid_0 %>% select(grid_id) , x = df_map)
intersection %>%
group_by(grid_id) %>%
count() %>%
rename(no_customers = n) %>%
mutate(no_customers = coalesce(log(no_customers), 0)) %>%
st_set_geometry(NULL) -> grid_points_counts
|
6a31526bac28acb10269fcbcb50b430dbbfede7b
|
61c091c21d06b7c61f35a24d4fe3d8882e9fb254
|
/man/wh_plot_proportion.Rd
|
3bb28e0ccdf9fcd9acbbf28f8c4a05bca131c2a9
|
[] |
no_license
|
pfmc-assessments/nwfscSurvey
|
b3be76b410bdc5dae168e84d2ee1a2c64c98e098
|
423800ecb91137cba1587ac19226a3ebb8d50c2d
|
refs/heads/main
| 2023-07-28T08:35:55.810331
| 2023-07-20T17:10:25
| 2023-07-20T18:17:33
| 26,344,817
| 4
| 2
| null | 2023-07-20T17:31:58
| 2014-11-08T00:38:17
|
R
|
UTF-8
|
R
| false
| true
| 1,839
|
rd
|
wh_plot_proportion.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_proportion.R
\name{wh_plot_proportion}
\alias{wh_plot_proportion}
\title{Save figures of proportions by depth and latitude using warehouse data}
\usage{
wh_plot_proportion(
data_catch,
data_bio,
dir = file.path(getwd(), "plots"),
bar_width = c("n", "equal")
)
}
\arguments{
\item{data_catch, data_bio}{Data frames returned from \code{\link[=pull_catch]{pull_catch()}} and
[pull_bio())], respectively. At least one of the arguments must be passed.
[pull_bio())]: R:pull_bio())}
\item{dir}{The directory where you would like the \code{.png} files to be saved.
The default is a directory called \code{"plots"} in your current working
directory.}
\item{bar_width}{A string of \code{"n"} or \code{"equal"}, where the former leads to
bar widths based on the number of observations contained in that group and
the latter leads to equally-sized bars for all bars with data. For groups
without any data, the width of the placeholder on the x axis will be
smaller than the width of the bars for groups with data regardless of which
option you choose. The default is to have bars of variable widths, i.e.,
\code{"n"}.}
}
\value{
Strings of the saved files.
}
\description{
Four figures in total are created and saved to the disk if both catch and
biological data, pulled from the data warehouse, are passed to \code{data_catch} and
\code{data_bio}, respectively. This function will only work with data that has the
standard column names of data pulled from the warehouse.
}
\examples{
\dontrun{
test <- wh_plot_proportion(catch_nwfsc_combo, bio_nwfsc_combo)
}
}
\seealso{
\itemize{
\item \code{\link[=plot_proportion]{plot_proportion()}}
\item \code{\link[purrr:map]{purrr::map()}}
}
}
\author{
Chantel R. Wetzel and Kelli F. Johnson
}
\concept{warehouse}
|
dd58774e37a5f69ff7fbaf5e4ec449744e3894d7
|
962859409041bdfede9dcb1db6baa281ba6021c1
|
/functions_for_solving.R
|
9555d97acac3b7235f9baef7da343c2c1bb503a9
|
[] |
no_license
|
bryla121/Efficiency-of-methods-of-solving-Rubik-s-cube-implemented-in-R
|
aeb7306025b61b381c3403949775f2258821f7aa
|
2f346c1f438405930ff8b3dec3bde7e3774b428a
|
refs/heads/main
| 2023-02-10T04:16:45.121501
| 2021-01-13T00:46:33
| 2021-01-13T00:46:33
| 326,741,932
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 140,548
|
r
|
functions_for_solving.R
|
library("cubing", lib.loc="~/R/win-library/3.6")
# Solve the white-red edge (edge piece 5) of the white cross.
#
# x: cubing-package cube object (edge slots in x$ep, flips in x$eo);
# c: running count of face turns applied so far.
# The branch taken is determined by the slot currently holding piece 5
# and its flip; a fixed move sequence then brings it to the UR slot
# correctly oriented, and the sequence length is added to c.
# Returns list(cube = updated cube, counter = updated move count).
WR_cross <- function(x,c) ###WHITE-RED EDGE
{
  if ( x$ep["UR"]== 5) #1
  {
    if (x$eo["UR"]==0)
    {
      x <- x
      c <- c
      # correct slot and correct orientation
    }
    else
    {
      x <- move(x, moves = "R'UF'U'")
      c = c+4
      # correct slot, wrong orientation
    }
  }
  else
  {
    if (x$ep["FR"]==5) #2
    {
      if (x$eo["FR"]==0)
      {
        x <- move(x, moves = "R")
        c <- c+1
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "UF'U'")
        c <- c+3
        # orientation wrong
      }
    }
    else if (x$ep["FL"]==5) #3
    {
      if (x$eo["FL"]==0)
      {
        x <- move(x, moves = "F2RF2")
        c <- c+3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "UFU'")
        c <- c+3
      }
    }
    else if (x$ep["BL"]==5) #4
    {
      if (x$eo["BL"]==0)
      {
        x <- move(x, moves = "B2R'B2")
        c <- c+3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "U'B'U")
        c <- c+3
      }
    }
    else if (x$ep["BR"]==5) #5
    {
      if (x$eo["BR"]==0)
      {
        x <- move(x, moves = "R'")
        c <- c+1
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "U'BU")
        c <- c+3
      }
    }
    else if (x$ep["UF"]==5) #6
    {
      if (x$eo["UF"]==0)
      {
        x <- move(x, moves = "F'UFU'")
        c <- c+4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "FR")
        c <- c+2
      }
    }
    else if (x$ep["UL"]==5) #7
    {
      if (x$eo["UL"]==0)
      {
        x <- move(x, moves = "LU2L'U2")
        c <- c+4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "LUFU'")
        c <- c+4
      }
    }
    else if (x$ep["UB"]==5) #8
    {
      if (x$eo["UB"]==0)
      {
        x <- move(x, moves = "BU'B'U")
        c <- c+4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "B'R'")
        c <- c+2
      }
    }
    else if (x$ep["DR"]==5) #9
    {
      if (x$eo["DR"]==0)
      {
        x <- move(x, moves = "R2")
        c <- c+1
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "D'F'RF")
        c <- c+4
      }
    }
    else if (x$ep["DF"]==5) #10
    {
      if (x$eo["DF"]==0)
      {
        x <- move(x, moves = "DR2'")
        c <- c+2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "F'RF")
        c <- c+3
      }
    }
    else if (x$ep["DL"]==5) #11
    {
      if (x$eo["DL"]==0)
      {
        x <- move(x, moves = "D2R2")
        c <- c+2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "DF'RF")
        c <- c+4
      }
    }
    else if (x$ep["DB"]==5) #12
    {
      if (x$eo["DB"]==0)
      {
        x <- move(x, moves = "D'R2")
        c <- c+2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "BR'B'")
        c <- c+3
      }
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Solve the white-green edge (edge piece 6) of the white cross.
#
# x: cubing-package cube object (edge slots in x$ep, flips in x$eo);
# c: running move counter.  Each branch matches the slot currently
# holding piece 6 and applies a fixed sequence bringing it to the UF
# slot correctly oriented; c is advanced by the sequence length.
# Returns list(cube = updated cube, counter = updated move count).
WG_cross <- function(x,c) ###WHITE-GREEN EDGE
{
  if ( x$ep["UF"]== 6) #1
  {
    if (x$eo["UF"]==0)
    {
      x <- x
      c <- c
      # correct slot and correct orientation
    }
    else
    {
      x <- move(x, moves = "FU'RU")
      c <- c + 4
      # correct slot, wrong orientation
    }
  }
  else
  {
    if (x$ep["UR"]==6) #2
    {
      if (x$eo["UR"]==0)
      {
        x <- move(x, moves = "RU'R'U")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "R'F'")
        c <- c + 2
        # orientation wrong
      }
    }
    else if (x$ep["UL"]==6) #3
    {
      if (x$eo["UL"]==0)
      {
        x <- move(x, moves = "L'ULU'")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "LF")
        c <- c + 2
      }
    }
    else if (x$ep["UB"]==6) #4
    {
      if (x$eo["UB"]==0)
      {
        x <- move(x, moves = "BU2B'U2")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "BULU'")
        c <- c + 4
      }
    }
    else if (x$ep["FR"]==6) #5
    {
      if (x$eo["FR"]==0)
      {
        x <- move(x, moves = "U'RU")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "F'")
        c <- c + 1
      }
    }
    else if (x$ep["FL"]==6) #6
    {
      if (x$eo["FL"]==0)
      {
        x <- move(x, moves = "UL'U'")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "F")
        c <- c + 1
      }
    }
    else if (x$ep["BL"]==6) #7
    {
      if (x$eo["BL"]==0)
      {
        x <- move(x, moves = "ULU'")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "U2B'U2")
        c <- c + 3
      }
    }
    else if (x$ep["BR"]==6) #8
    {
      if (x$eo["BR"]==0)
      {
        x <- move(x, moves = "U'R'U")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "U2BU2")
        c <- c + 3
      }
    }
    else if (x$ep["DR"]==6) #9
    {
      if (x$eo["DR"]==0)
      {
        x <- move(x, moves = "D'F2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "RF'R'")
        c <- c + 3
      }
    }
    else if (x$ep["DF"]==6) #10
    {
      if (x$eo["DF"]==0)
      {
        x <- move(x, moves = "F2")
        c <- c + 1
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "DRF'R'")
        c <- c + 4
      }
    }
    else if (x$ep["DL"]==6) #11
    {
      if (x$eo["DL"]==0)
      {
        x <- move(x, moves = "DF2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "L'FL")
        c <- c + 3
      }
    }
    else if (x$ep["DB"]==6) #12
    {
      if (x$eo["DB"]==0)
      {
        x <- move(x, moves = "D2F2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "DL'FL")
        c <- c + 4
      }
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Solve the white-orange edge (edge piece 7) of the white cross.
#
# x: cubing-package cube object (edge slots in x$ep, flips in x$eo);
# c: running move counter.  Each branch matches the slot currently
# holding piece 7 and applies a fixed sequence bringing it to the UL
# slot correctly oriented; c is advanced by the sequence length.
# Returns list(cube = updated cube, counter = updated move count).
WO_cross <- function(x,c) ###WHITE-ORANGE EDGE
{
  if ( x$ep["UL"]== 7) #1
  {
    if (x$eo["UL"]==0)
    {
      x <- x
      c <- c
      # correct slot and correct orientation
    }
    else
    {
      x <- move(x, moves = "LU'FU")
      c <- c + 4
      # correct slot, wrong orientation
    }
  }
  else
  {
    if (x$ep["UR"]==7) #2
    {
      if (x$eo["UR"]==0)
      {
        x <- move(x, moves = "U2LU2L'")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "UF'U'L'")
        c <- c + 4
        # orientation wrong
      }
    }
    else if (x$ep["UF"]==7) #3
    {
      if (x$eo["UF"]==0)
      {
        x <- move(x, moves = "FU'F'U")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "F'L'")
        c <- c + 2
      }
    }
    else if (x$ep["UB"]==7) #4
    {
      if (x$eo["UB"]==0)
      {
        x <- move(x, moves = "U'LUL'")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "BL")
        c <- c + 2
      }
    }
    else if (x$ep["FR"]==7) #5
    {
      if (x$eo["FR"]==0)
      {
        x <- move(x, moves = "U2RU2")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "U'F'U")
        c <- c + 3
      }
    }
    else if (x$ep["FL"]==7) #6
    {
      if (x$eo["FL"]==0)
      {
        x <- move(x, moves = "L'")
        c <- c + 1
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "U'FU")
        c <- c + 3
      }
    }
    else if (x$ep["BL"]==7) #7
    {
      if (x$eo["BL"]==0)
      {
        x <- move(x, moves = "L")
        c <- c + 1
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "UB'U'")
        c <- c + 3
      }
    }
    else if (x$ep["BR"]==7) #8
    {
      if (x$eo["BR"]==0)
      {
        x <- move(x, moves = "U2R'U2")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "UBU'")
        c <- c + 3
      }
    }
    else if (x$ep["DR"]==7) #9
    {
      if (x$eo["DR"]==0)
      {
        x <- move(x, moves = "D2L2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "D'FL'F'")
        c <- c + 4
      }
    }
    else if (x$ep["DF"]==7) #10
    {
      if (x$eo["DF"]==0)
      {
        x <- move(x, moves = "D'L2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "FL'F'")
        c <- c + 3
      }
    }
    else if (x$ep["DL"]==7) #11
    {
      if (x$eo["DL"]==0)
      {
        x <- move(x, moves = "L2")
        c <- c + 1
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "L'U'FU")
        c <- c + 4
      }
    }
    else if (x$ep["DB"]==7) #12
    {
      if (x$eo["DB"]==0)
      {
        x <- move(x, moves = "DL2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "B'LB")
        c <- c + 3
      }
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Solve the white-blue edge (edge piece 8) of the white cross.
#
# x: cubing-package cube object (edge slots in x$ep, flips in x$eo);
# c: running move counter.  Each branch matches the slot currently
# holding piece 8 and applies a fixed sequence bringing it to the UB
# slot correctly oriented; c is advanced by the sequence length.
# Returns list(cube = updated cube, counter = updated move count).
WB_cross <- function(x,c) ###WHITE-BLUE EDGE
{
  if ( x$ep["UB"]== 8) #1
  {
    if (x$eo["UB"]==0)
    {
      x <- x
      c <- c
      # correct slot and correct orientation
    }
    else
    {
      x <- move(x, moves = "BU'LU")
      c <- c + 4
      # correct slot, wrong orientation
    }
  }
  else
  {
    if (x$ep["UL"]==8) #2
    {
      if (x$eo["UL"]==0)
      {
        x <- move(x, moves = "LU'L'U")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "L'B'")
        c <- c + 2
        # orientation wrong
      }
    }
    else if (x$ep["UF"]==8) #3
    {
      if (x$eo["UF"]==0)
      {
        x <- move(x, moves = "FU2F'U2")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "FURU'")
        c <- c + 4
      }
    }
    else if (x$ep["UR"]==8) #4
    {
      if (x$eo["UR"]==0)
      {
        x <- move(x, moves = "R'URU'")
        c <- c + 4
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "RB")
        c <- c + 2
      }
    }
    else if (x$ep["FR"]==8) #5
    {
      if (x$eo["FR"]==0)
      {
        x <- move(x, moves = "URU'")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "U2F'U2")
        c <- c + 3
      }
    }
    else if (x$ep["FL"]==8) #6
    {
      if (x$eo["FL"]==0)
      {
        x <- move(x, moves = "U'L'U")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "U2FU2")
        c <- c + 3
      }
    }
    else if (x$ep["BL"]==8) #7
    {
      if (x$eo["BL"]==0)
      {
        x <- move(x, moves = "U'LU")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "B'")
        c <- c + 1
      }
    }
    else if (x$ep["BR"]==8) #8
    {
      if (x$eo["BR"]==0)
      {
        x <- move(x, moves = "UR'U'")
        c <- c + 3
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "B")
        c <- c + 1
      }
    }
    else if (x$ep["DR"]==8) #9
    {
      if (x$eo["DR"]==0)
      {
        x <- move(x, moves = "DB2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "R'BR")
        c <- c + 3
      }
    }
    else if (x$ep["DF"]==8) #10
    {
      if (x$eo["DF"]==0)
      {
        x <- move(x, moves = "D2B2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "DR'BR")
        c <- c + 4
      }
    }
    else if (x$ep["DL"]==8) #11
    {
      if (x$eo["DL"]==0)
      {
        x <- move(x, moves = "D'B2")
        c <- c + 2
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "LB'L'")
        c <- c + 3
      }
    }
    else if (x$ep["DB"]==8) #12
    {
      if (x$eo["DB"]==0)
      {
        x <- move(x, moves = "B2")
        c <- c + 1
        # orientation correct
      }
      else
      {
        x <- move(x, moves = "DLB'L'")
        c <- c + 4
      }
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Build the full white cross by solving the four white edges in
# WO, WR, WG, WB order.
#
# x: cubing-package cube object; c: running move counter.
# Each edge solver is invoked exactly once and both of its outputs
# (cube and counter) are kept; the original version called every
# solver twice -- once for $counter and again for $cube -- doubling
# the work and the intermediate plots.
# Returns list(cube = cube with the white cross done, counter = moves).
CROSS <- function(x,c) ###WHITE CROSS FUNCTION
{
  res <- WO_cross(x, c)
  res <- WR_cross(res$cube, res$counter)
  res <- WG_cross(res$cube, res$counter)
  res <- WB_cross(res$cube, res$counter)
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Place the white-green-red corner (corner piece 1) into the URF slot.
#
# x: cubing-package cube object (corner slots in x$cp, twists in x$co);
# c: running move counter.  The branch matches the slot currently
# holding piece 1 and its twist (0/1/2) and applies a fixed sequence
# that inserts it into URF correctly oriented; c grows by the
# sequence length.
# Returns list(cube = updated cube, counter = updated move count).
URF_corner <- function(x,c) ###WHITE-GREEN-RED CORNER
{
  if (x$cp["URF"]==1) #1
  {
    if (x$co["URF"]==0)
    {
      # correct slot and correct twist: nothing to do
      x <- x
      c <- c
    }
    else if (x$co["URF"]==1)
    {
      x <- move(x, moves = "R'D'RDR'D'R")
      c <- c + 7
    }
    else if (x$co["URF"]==2)
    {
      x <- move(x, moves = "FDF'D'FDF'")
      c <- c + 7
    }
  }
  else if (x$cp["UFL"]==1) #2
  {
    if (x$co["UFL"]==0)
    {
      x <- move(x, moves = "F'D'FR'D2R")
      c <- c + 6
    }
    else if (x$co["UFL"]==1)
    {
      x <- move(x, moves = "F'D'F2D2F'")
      c <- c + 5
    }
    else if (x$co["UFL"]==2)
    {
      x <- move(x, moves = "LR'DRL'")
      c <- c + 5
      #MIDDLE
    }
  }
  else if (x$cp["ULB"]==1) #3
  {
    if (x$co["ULB"]==0)
    {
      x <- move(x, moves = "L'R'D2LR")
      c <- c + 5
    }
    else if (x$co["ULB"]==1)
    {
      x <- move(x, moves = "L'FD2F'L")
      c <- c + 5
    }
    else if (x$co["ULB"]==2)
    {
      x <- move(x, moves = "BR'D2RB'")
      c <- c + 5
    }
  }
  else if (x$cp["UBR"]==1) #4
  {
    if (x$co["UBR"]==0)
    {
      x <- move(x, moves = "RDR'FD2F'")
      c <- c + 6
    }
    else if (x$co["UBR"]==1)
    {
      x <- move(x, moves = "FB'D'F'B")
      c <- c + 5
    }
    else if (x$co["UBR"]==2)
    {
      x <- move(x, moves = "RDR2D2R")
      c <- c + 5
    }
  }
  else if (x$cp["DFR"]==1) #5
  {
    if (x$co["DFR"]==0)
    {
      x <- move(x, moves = "R'DRFD2F'")
      c <- c + 6
    }
    else if (x$co["DFR"]==1)
    {
      x <- move(x, moves = "FDF'")
      c <- c + 3
    }
    else if (x$co["DFR"]==2)
    {
      x <- move(x, moves = "R'D'R")
      c <- c + 3
    }
  }
  else if (x$cp["DLF"]==1) #6
  {
    if (x$co["DLF"]==0)
    {
      x <- move(x, moves = "DR'DRFD2F'")
      c <- c + 7
    }
    else if (x$co["DLF"]==1)
    {
      x <- move(x, moves = "R'DR")
      c <- c + 3
    }
    else if (x$co["DLF"]==2)
    {
      x <- move(x, moves = "D'FD2F'")
      c <- c + 4
    }
  }
  else if (x$cp["DBL"]==1) #7
  {
    if (x$co["DBL"]==0)
    {
      x <- move(x, moves = "D2R'DRFD2F'")
      c <- c + 7
    }
    else if (x$co["DBL"]==1)
    {
      x <- move(x, moves = "R'D2R")
      c <- c + 3
    }
    else if (x$co["DBL"]==2)
    {
      x <- move(x, moves = "FD2F'")
      c <- c + 3
    }
  }
  else if (x$cp["DRB"]==1) #8
  {
    if (x$co["DRB"]==0)
    {
      x <- move(x, moves = "D'R'DRFD2F'")
      c <- c + 7
    }
    else if (x$co["DRB"]==1)
    {
      x <- move(x, moves = "DR'D2R")
      c <- c + 4
    }
    else if (x$co["DRB"]==2)
    {
      x <- move(x, moves = "FD'F'")
      c <- c + 3
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Place the white-green-orange corner (corner piece 2) into the UFL slot.
#
# x: cubing-package cube object (corner slots in x$cp, twists in x$co);
# c: running move counter.  The branch matches the slot currently
# holding piece 2 and its twist (0/1/2) and applies a fixed sequence
# that inserts it into UFL correctly oriented; c grows by the
# sequence length.
# Returns list(cube = updated cube, counter = updated move count).
UFL_corner <- function(x,c) ###WHITE-GREEN-ORANGE CORNER
{
  if (x$cp["UFL"]==2) #1
  {
    if (x$co["UFL"]==0)
    {
      # correct slot and correct twist: nothing to do
      x <- x
      c <- c
    }
    else if (x$co["UFL"]==1)
    {
      x <- move(x, moves = "F'D'FDF'D'F")
      c <- c + 7
    }
    else if (x$co["UFL"]==2)
    {
      x <- move(x, moves = "LDL'D'LDL'")
      c <- c + 7
    }
  }
  else if (x$cp["ULB"]==2) #2
  {
    if (x$co["ULB"]==0)
    {
      x <- move(x, moves = "L'D'LF'D2F")
      c <- c + 6
    }
    else if (x$co["ULB"]==1)
    {
      x <- move(x, moves = "L'D'L2D2L'")
      c <- c + 5
    }
    else if (x$co["ULB"]==2)
    {
      x <- move(x, moves = "BF'DFB'")
      c <- c + 5
      #MIDDLE
    }
  }
  else if (x$cp["UBR"]==2) #3
  {
    if (x$co["UBR"]==0)
    {
      x <- move(x, moves = "B'F'D2BF")
      c <- c + 5
    }
    else if (x$co["UBR"]==1)
    {
      x <- move(x, moves = "B'LD2L'B")
      c <- c + 5
    }
    else if (x$co["UBR"]==2)
    {
      x <- move(x, moves = "RF'D2FR'")
      c <- c + 5
    }
  }
  else if (x$cp["URF"]==2) #4
  {
    if (x$co["URF"]==0)
    {
      x <- move(x, moves = "FDF'LD2L'")
      c <- c + 6
    }
    else if (x$co["URF"]==1)
    {
      x <- move(x, moves = "LR'D'L'R")
      c <- c + 5
    }
    else if (x$co["URF"]==2)
    {
      x <- move(x, moves = "FDF2D2F")
      c <- c + 5
    }
  }
  else if (x$cp["DLF"]==2) #5
  {
    if (x$co["DLF"]==0)
    {
      x <- move(x, moves = "F'DFLD2L'")
      c <- c + 6
    }
    else if (x$co["DLF"]==1)
    {
      x <- move(x, moves = "LDL'")
      c <- c + 3
    }
    else if (x$co["DLF"]==2)
    {
      x <- move(x, moves = "F'D'F")
      c <- c + 3
    }
  }
  else if (x$cp["DBL"]==2) #6
  {
    if (x$co["DBL"]==0)
    {
      x <- move(x, moves = "DF'DFLD2L'")
      c <- c + 7
    }
    else if (x$co["DBL"]==1)
    {
      x <- move(x, moves = "F'DF")
      c <- c + 3
    }
    else if (x$co["DBL"]==2)
    {
      x <- move(x, moves = "D'LD2L'")
      c <- c + 4
    }
  }
  else if (x$cp["DRB"]==2) #7
  {
    if (x$co["DRB"]==0)
    {
      x <- move(x, moves = "D2F'DFLD2L'")
      c <- c + 7
    }
    else if (x$co["DRB"]==1)
    {
      x <- move(x, moves = "F'D2F")
      c <- c + 3
    }
    else if (x$co["DRB"]==2)
    {
      x <- move(x, moves = "LD2L'")
      c <- c + 3
    }
  }
  else if (x$cp["DFR"]==2) #8
  {
    if (x$co["DFR"]==0)
    {
      x <- move(x, moves = "D'F'DFLD2L'")
      c <- c + 7
    }
    else if (x$co["DFR"]==1)
    {
      x <- move(x, moves = "DF'D2F")
      c <- c + 4
    }
    else if (x$co["DFR"]==2)
    {
      x <- move(x, moves = "LD'L'")
      c <- c + 3
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Place the white-blue-orange corner (corner piece 3) into the ULB slot.
#
# x: cubing-package cube object (corner slots in x$cp, twists in x$co);
# c: running move counter.  The branch matches the slot currently
# holding piece 3 and its twist (0/1/2) and applies a fixed sequence
# that inserts it into ULB correctly oriented; c grows by the
# sequence length.
# Returns list(cube = updated cube, counter = updated move count).
ULB_corner <- function(x,c) ###WHITE-BLUE-ORANGE CORNER
{
  if (x$cp["ULB"]==3) #1
  {
    if (x$co["ULB"]==0)
    {
      # correct slot and correct twist: nothing to do
      x <- x
      c <- c
    }
    else if (x$co["ULB"]==1)
    {
      x <- move(x, moves = "L'D'LDL'D'L")
      c <- c + 7
    }
    else if (x$co["ULB"]==2)
    {
      x <- move(x, moves = "BDB'D'BDB'")
      c <- c + 7
    }
  }
  else if (x$cp["UBR"]==3) #2
  {
    if (x$co["UBR"]==0)
    {
      x <- move(x, moves = "B'D'BL'D2L")
      c <- c + 6
    }
    else if (x$co["UBR"]==1)
    {
      x <- move(x, moves = "B'D'B2D2B'")
      c <- c + 5
    }
    else if (x$co["UBR"]==2)
    {
      x <- move(x, moves = "RL'DLR'")
      c <- c + 5
      #MIDDLE
    }
  }
  else if (x$cp["URF"]==3) #3
  {
    if (x$co["URF"]==0)
    {
      x <- move(x, moves = "R'L'D2RL")
      c <- c + 5
    }
    else if (x$co["URF"]==1)
    {
      x <- move(x, moves = "R'BD2B'R")
      c <- c + 5
    }
    else if (x$co["URF"]==2)
    {
      x <- move(x, moves = "FL'D2LF'")
      c <- c + 5
    }
  }
  else if (x$cp["UFL"]==3) #4
  {
    if (x$co["UFL"]==0)
    {
      x <- move(x, moves = "LDL'BD2B'")
      c <- c + 6
    }
    else if (x$co["UFL"]==1)
    {
      x <- move(x, moves = "BF'D'B'F")
      c <- c + 5
    }
    else if (x$co["UFL"]==2)
    {
      x <- move(x, moves = "LDL2D2L")
      c <- c + 5
    }
  }
  else if (x$cp["DBL"]==3) #5
  {
    if (x$co["DBL"]==0)
    {
      x <- move(x, moves = "L'DLBD2B'")
      c <- c + 6
    }
    else if (x$co["DBL"]==1)
    {
      x <- move(x, moves = "BDB'")
      c <- c + 3
    }
    else if (x$co["DBL"]==2)
    {
      x <- move(x, moves = "L'D'L")
      c <- c + 3
    }
  }
  else if (x$cp["DRB"]==3) #6
  {
    if (x$co["DRB"]==0)
    {
      x <- move(x, moves = "DL'DLBD2B'")
      c <- c + 7
    }
    else if (x$co["DRB"]==1)
    {
      x <- move(x, moves = "L'DL")
      c <- c + 3
    }
    else if (x$co["DRB"]==2)
    {
      x <- move(x, moves = "D'BD2B'")
      c <- c + 4
    }
  }
  else if (x$cp["DFR"]==3) #7
  {
    if (x$co["DFR"]==0)
    {
      x <- move(x, moves = "D2L'DLBD2B'")
      c <- c + 7
    }
    else if (x$co["DFR"]==1)
    {
      x <- move(x, moves = "L'D2L")
      c <- c + 3
    }
    else if (x$co["DFR"]==2)
    {
      x <- move(x, moves = "BD2B'")
      c <- c + 3
    }
  }
  else if (x$cp["DLF"]==3) #8
  {
    if (x$co["DLF"]==0)
    {
      x <- move(x, moves = "D'L'DLBD2B'")
      c <- c + 7
    }
    else if (x$co["DLF"]==1)
    {
      x <- move(x, moves = "DL'D2L")
      c <- c + 4
    }
    else if (x$co["DLF"]==2)
    {
      x <- move(x, moves = "BD'B'")
      c <- c + 3
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Place the white-blue-red corner (corner piece 4) into the UBR slot.
#
# x: cubing-package cube object (corner slots in x$cp, twists in x$co);
# c: running move counter.  The branch matches the slot currently
# holding piece 4 and its twist (0/1/2) and applies a fixed sequence
# that inserts it into UBR correctly oriented; c grows by the
# sequence length.
# Returns list(cube = updated cube, counter = updated move count).
UBR_corner <- function(x,c) ###WHITE-BLUE-RED CORNER
{
  if (x$cp["UBR"]==4) #1
  {
    if (x$co["UBR"]==0)
    {
      # correct slot and correct twist: nothing to do
      x <- x
      c <- c
    }
    else if (x$co["UBR"]==1)
    {
      x <- move(x, moves = "B'D'BDB'D'B")
      c <- c + 7
    }
    else if (x$co["UBR"]==2)
    {
      x <- move(x, moves = "RDR'D'RDR'")
      c <- c + 7
    }
  }
  else if (x$cp["URF"]==4) #2
  {
    if (x$co["URF"]==0)
    {
      x <- move(x, moves = "R'D'RB'D2B")
      c <- c + 6
    }
    else if (x$co["URF"]==1)
    {
      x <- move(x, moves = "R'D'R2D2R'")
      c <- c + 5
    }
    else if (x$co["URF"]==2)
    {
      x <- move(x, moves = "FB'DBF'")
      c <- c + 5
      #MIDDLE
    }
  }
  else if (x$cp["UFL"]==4) #3
  {
    if (x$co["UFL"]==0)
    {
      x <- move(x, moves = "F'B'D2FB")
      c <- c + 5
    }
    else if (x$co["UFL"]==1)
    {
      x <- move(x, moves = "F'RD2R'F")
      c <- c + 5
    }
    else if (x$co["UFL"]==2)
    {
      x <- move(x, moves = "LB'D2BL'")
      c <- c + 5
    }
  }
  else if (x$cp["ULB"]==4) #4
  {
    if (x$co["ULB"]==0)
    {
      x <- move(x, moves = "BDB'RD2R'")
      c <- c + 6
    }
    else if (x$co["ULB"]==1)
    {
      x <- move(x, moves = "RL'D'R'L")
      c <- c + 5
    }
    else if (x$co["ULB"]==2)
    {
      x <- move(x, moves = "BDB2D2B")
      c <- c + 5
    }
  }
  else if (x$cp["DRB"]==4) #5
  {
    if (x$co["DRB"]==0)
    {
      x <- move(x, moves = "B'DBRD2R'")
      c <- c + 6
    }
    else if (x$co["DRB"]==1)
    {
      x <- move(x, moves = "RDR'")
      c <- c + 3
    }
    else if (x$co["DRB"]==2)
    {
      x <- move(x, moves = "B'D'B")
      c <- c + 3
    }
  }
  else if (x$cp["DFR"]==4) #6
  {
    if (x$co["DFR"]==0)
    {
      x <- move(x, moves = "DB'DBRD2R'")
      c <- c + 7
    }
    else if (x$co["DFR"]==1)
    {
      x <- move(x, moves = "B'DB")
      c <- c + 3
    }
    else if (x$co["DFR"]==2)
    {
      x <- move(x, moves = "D'RD2R'")
      c <- c + 4
    }
  }
  else if (x$cp["DLF"]==4) #7
  {
    if (x$co["DLF"]==0)
    {
      x <- move(x, moves = "D2B'DBRD2R'")
      c <- c + 7
    }
    else if (x$co["DLF"]==1)
    {
      x <- move(x, moves = "B'D2B")
      c <- c + 3
    }
    else if (x$co["DLF"]==2)
    {
      x <- move(x, moves = "RD2R'")
      c <- c + 3
    }
  }
  else if (x$cp["DBL"]==4) #8
  {
    if (x$co["DBL"]==0)
    {
      x <- move(x, moves = "D'B'DBRD2R'")
      c <- c + 7
    }
    else if (x$co["DBL"]==1)
    {
      x <- move(x, moves = "DB'D2B")
      c <- c + 4
    }
    else if (x$co["DBL"]==2)
    {
      x <- move(x, moves = "RD'R'")
      c <- c + 3
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Solve the four white corners in URF, UFL, ULB, UBR order.
#
# x: cubing-package cube object; c: running move counter.
# Each corner solver is invoked exactly once and both of its outputs
# (cube and counter) are kept; the original version called every
# solver twice (once for $counter, once for $cube), doubling the work
# and the intermediate plots.
# Returns list(cube = updated cube, counter = updated move count).
white_corners <- function(x,c) ###WHITE-CORNERS
{
  res <- URF_corner(x, c)
  res <- UFL_corner(res$cube, res$counter)
  res <- ULB_corner(res$cube, res$counter)
  res <- UBR_corner(res$cube, res$counter)
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Solve the whole first (white) layer: white cross, then white corners.
#
# x: cubing-package cube object; c: running move counter.
# Each stage is invoked exactly once and both outputs kept; the
# original called each stage twice (once for $counter, once for
# $cube), duplicating work and plots.
# Returns list(cube = updated cube, counter = updated move count).
first_layer <- function(x,c) ###WHITE FACE
{
  res <- CROSS(x, c)
  res <- white_corners(res$cube, res$counter)
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Apply the fixed 7-move insertion sequence for the red-green edge
# and advance the move counter by 7.
# Returns list(cube = updated cube, counter = c + 7).
put_RED_GREEN <- function(x, c) {
  list(cube = move(x, moves = "FD'F'D'R'DR"), counter = c + 7)
}
# Apply the fixed 7-move insertion sequence for the green-red edge
# and advance the move counter by 7.
# Returns list(cube = updated cube, counter = c + 7).
put_GREEN_RED <- function(x, c) {
  list(cube = move(x, moves = "R'DRDFD'F'"), counter = c + 7)
}
# Apply the fixed 7-move insertion sequence for the red-blue edge
# and advance the move counter by 7.
# Returns list(cube = updated cube, counter = c + 7).
put_RED_BLUE <- function(x, c) {
  list(cube = move(x, moves = "B'DBDRD'R'"), counter = c + 7)
}
# Apply the fixed 7-move insertion sequence for the blue-red edge
# and advance the move counter by 7.
# Returns list(cube = updated cube, counter = c + 7).
put_BLUE_RED <- function(x, c) {
  list(cube = move(x, moves = "RD'R'D'B'DB"), counter = c + 7)
}
# Apply the fixed 7-move insertion sequence for the orange-green edge
# and advance the move counter by 7.
# Returns list(cube = updated cube, counter = c + 7).
put_ORANGE_GREEN <- function(x, c) {
  list(cube = move(x, moves = "F'DFDLD'L'"), counter = c + 7)
}
# Apply the fixed 7-move insertion sequence for the green-orange edge
# and advance the move counter by 7.
# Returns list(cube = updated cube, counter = c + 7).
put_GREEN_ORANGE <- function(x, c) {
  list(cube = move(x, moves = "LD'L'D'F'DF"), counter = c + 7)
}
# Apply the fixed 7-move insertion sequence for the orange-blue edge
# and advance the move counter by 7.
# Returns list(cube = updated cube, counter = c + 7).
put_ORANGE_BLUE <- function(x, c) {
  list(cube = move(x, moves = "BD'B'D'L'DL"), counter = c + 7)
}
# Apply the fixed 7-move insertion sequence for the blue-orange edge
# and advance the move counter by 7.
# Returns list(cube = updated cube, counter = c + 7).
put_BLUE_ORANGE <- function(x, c) {
  list(cube = move(x, moves = "L'DLDBD'B'"), counter = c + 7)
}
# Insert the red-green edge (edge piece 1) into its second-layer slot (FR).
#
# x: cubing-package cube object (edge slots in x$ep, flips in x$eo);
# c: running move counter.  The branch is chosen by the slot currently
# holding piece 1.  A bottom-layer edge is first aligned with a D-face
# setup turn (counted as one move), then dropped in with a put_* helper;
# an edge sitting flipped or in a wrong second-layer slot is first
# ejected by one insertion and then re-inserted correctly.
# Fixes relative to the original version:
#   * the DB/flipped case forgot to count its "D" setup turn (+1);
#   * every put_* helper was evaluated twice (once for $counter, once
#     for $cube); each helper now runs exactly once.
# Returns list(cube = updated cube, counter = updated move count).
RED_GREEN_EDGE <- function(x,c) #SECOND LAYER RED-GREEN EDGE
{
  # Apply an optional D-face setup turn (counted as 1 move), then run
  # the given insertion helper once.
  step <- function(x, c, put, setup = NULL) {
    if (!is.null(setup)) {
      x <- move(x, moves = setup)
      c <- c + 1
    }
    put(x, c)
  }
  res <- list(cube = x, counter = c)
  if (x$ep["DR"]==1) #1
  {
    if (x$eo["DR"]==0) {
      res <- step(x, c, put_RED_GREEN, "D")
    } else if (x$eo["DR"]==1) {
      res <- step(x, c, put_GREEN_RED, "D2")
    }
  }
  else if (x$ep["DF"]==1) #2
  {
    if (x$eo["DF"]==0) {
      res <- step(x, c, put_RED_GREEN, "D2")
    } else if (x$eo["DF"]==1) {
      res <- step(x, c, put_GREEN_RED, "D'")
    }
  }
  else if (x$ep["DL"]==1) #3
  {
    if (x$eo["DL"]==0) {
      res <- step(x, c, put_RED_GREEN, "D'")
    } else if (x$eo["DL"]==1) {
      res <- put_GREEN_RED(x, c)
    }
  }
  else if (x$ep["DB"]==1) #4
  {
    if (x$eo["DB"]==0) {
      res <- put_RED_GREEN(x, c)
    } else if (x$eo["DB"]==1) {
      # the setup turn is now counted (the original omitted the +1 here)
      res <- step(x, c, put_GREEN_RED, "D")
    }
  }
  else if (x$ep["FR"]==1) #5
  {
    # eo == 0: already solved, nothing to do
    if (x$eo["FR"]==1) {
      # flipped in place: eject, realign, re-insert
      res <- put_RED_GREEN(x, c)
      res <- step(res$cube, res$counter, put_RED_GREEN, "D'")
    }
  }
  else if (x$ep["FL"]==1) #6
  {
    if (x$eo["FL"]==0) {
      res <- put_GREEN_ORANGE(x, c)
      res <- put_RED_GREEN(res$cube, res$counter)
    } else if (x$eo["FL"]==1) {
      res <- put_GREEN_ORANGE(x, c)
      res <- step(res$cube, res$counter, put_GREEN_RED, "D")
    }
  }
  else if (x$ep["BL"]==1) #7
  {
    if (x$eo["BL"]==0) {
      res <- put_ORANGE_BLUE(x, c)
      res <- step(res$cube, res$counter, put_GREEN_RED, "D2")
    } else if (x$eo["BL"]==1) {
      res <- put_BLUE_ORANGE(x, c)
      res <- step(res$cube, res$counter, put_GREEN_RED, "D'")
    }
  }
  else if (x$ep["BR"]==1) #8
  {
    if (x$eo["BR"]==0) {
      res <- put_RED_BLUE(x, c)
      res <- put_GREEN_RED(res$cube, res$counter)
    } else if (x$eo["BR"]==1) {
      res <- put_BLUE_RED(x, c)
      res <- step(res$cube, res$counter, put_GREEN_RED, "D'")
    }
  }
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Insert the green-orange edge (edge piece 2) into its second-layer
# slot (FL).
#
# x: cubing-package cube object (edge slots in x$ep, flips in x$eo);
# c: running move counter.  Bottom-layer edges are aligned with a
# D-face setup turn (counted as one move) before a put_* insertion;
# an edge stuck in a wrong/flipped second-layer slot is first ejected
# by one insertion, then re-inserted.
# NOTE(review): each put_* helper is evaluated twice (once for $counter,
# once for $cube); the final state is still correct because the counter
# increment does not depend on the cube, but work and plots are doubled.
# Returns list(cube = updated cube, counter = updated move count).
GREEN_ORANGE_EDGE <- function(x, c) #SECOND LAYER GREEN-ORANGE EDGE
{
  if (x$ep["FL"]==2) #1
  {
    if (x$eo["FL"]==0)
    {
      # already solved
      x <- x
      c <- c
    }
    else if (x$eo["FL"]==1)
    {
      # flipped in place: eject, realign with D', re-insert
      c <- put_GREEN_ORANGE(x,c)$counter + 1
      x <- put_GREEN_ORANGE(x,c)$cube
      x <- move(x, moves = "D'")
      c <- put_GREEN_ORANGE(x,c)$counter
      x <- put_GREEN_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["BL"]==2) #2
  {
    if (x$eo["BL"]==0)
    {
      c <- put_ORANGE_BLUE(x,c)$counter
      x <- put_ORANGE_BLUE(x,c)$cube
      c <- put_GREEN_ORANGE(x,c)$counter
      x <- put_GREEN_ORANGE(x,c)$cube
    }
    else if (x$eo["BL"]==1)
    {
      c <- put_BLUE_ORANGE(x,c)$counter + 1
      x <- put_BLUE_ORANGE(x,c)$cube
      x <- move(x, moves = "D")
      c <- put_GREEN_ORANGE(x,c)$counter
      x <- put_GREEN_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["BR"]==2) #3
  {
    if (x$eo["BR"]==0)
    {
      c <- put_BLUE_RED(x,c)$counter + 1
      x <- put_BLUE_RED(x,c)$cube
      x <- move(x, moves = "D2")
      c <- put_ORANGE_GREEN(x,c)$counter
      x <- put_ORANGE_GREEN(x,c)$cube
    }
    else if (x$eo["BR"]==1)
    {
      c <- put_BLUE_RED(x,c)$counter + 1
      x <- put_BLUE_RED(x,c)$cube
      x <- move(x, moves = "D")
      c <- put_GREEN_ORANGE(x,c)$counter
      x <- put_GREEN_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["FR"]==2) #4
  {
    if (x$eo["FR"]==0)
    {
      c <- put_GREEN_RED(x,c)$counter
      x <- put_GREEN_RED(x,c)$cube
      c <- put_ORANGE_GREEN(x,c)$counter
      x <- put_ORANGE_GREEN(x,c)$cube
    }
    else if (x$eo["FR"]==1)
    {
      c <- put_RED_GREEN(x,c)$counter + 1
      x <- put_RED_GREEN(x,c)$cube
      x <- move(x, moves = "D'")
      c <- put_ORANGE_GREEN(x,c)$counter
      x <- put_ORANGE_GREEN(x,c)$cube
    }
  }
  else if (x$ep["DR"]==2) #5
  {
    if (x$eo["DR"]==0)
    {
      # counter computed before the setup turn; put_* counters do not
      # depend on the cube state, so the total is still correct
      c <- put_ORANGE_GREEN(x,c)$counter + 1
      x <- move(x, moves = "D")
      x <- put_ORANGE_GREEN(x,c)$cube
    }
    else if (x$eo["DR"]==1)
    {
      c <- put_GREEN_ORANGE(x,c)$counter
      x <- put_GREEN_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["DF"]==2) #6
  {
    if (x$eo["DF"]==0)
    {
      c <- put_ORANGE_GREEN(x,c)$counter + 1
      x <- move(x, moves = "D2")
      x <- put_ORANGE_GREEN(x,c)$cube
    }
    else if (x$eo["DF"]==1)
    {
      c <- put_GREEN_ORANGE(x,c)$counter + 1
      x <- move(x, moves = "D")
      x <- put_GREEN_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["DL"]==2) #7
  {
    if (x$eo["DL"]==0)
    {
      c <- put_ORANGE_GREEN(x,c)$counter + 1
      x <- move(x, moves = "D'")
      x <- put_ORANGE_GREEN(x,c)$cube
    }
    else if (x$eo["DL"]==1)
    {
      c <- put_GREEN_ORANGE(x,c)$counter + 1
      x <- move(x, moves = "D2")
      x <- put_GREEN_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["DB"]==2) #8
  {
    if (x$eo["DB"]==0)
    {
      c <- put_ORANGE_GREEN(x,c)$counter
      x <- put_ORANGE_GREEN(x,c)$cube
    }
    else if (x$eo["DB"]==1)
    {
      c <- put_GREEN_ORANGE(x,c)$counter + 1
      x <- move(x, moves = "D'")
      x <- put_GREEN_ORANGE(x,c)$cube
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Insert the orange-blue edge (edge piece 3) into its second-layer
# slot (BL).
#
# x: cubing-package cube object (edge slots in x$ep, flips in x$eo);
# c: running move counter.  Bottom-layer edges are aligned with a
# D-face setup turn (counted as one move) before a put_* insertion;
# an edge stuck in a wrong/flipped second-layer slot is first ejected
# by one insertion, then re-inserted.
# NOTE(review): each put_* helper is evaluated twice (once for $counter,
# once for $cube); the final state is still correct because the counter
# increment does not depend on the cube, but work and plots are doubled.
# Returns list(cube = updated cube, counter = updated move count).
ORANGE_BLUE_EDGE <- function(x,c) #SECOND LAYER ORANGE-BLUE EDGE
{
  if (x$ep["BL"]==3) #1
  {
    if (x$eo["BL"]==0)
    {
      # already solved
      x <- x
      c <- c
    }
    else if (x$eo["BL"]==1)
    {
      # flipped in place: eject, realign with D', re-insert
      c <- put_ORANGE_BLUE(x,c)$counter + 1
      x <- put_ORANGE_BLUE(x,c)$cube
      x <- move(x, moves = "D'")
      c <- put_ORANGE_BLUE(x,c)$counter
      x <- put_ORANGE_BLUE(x,c)$cube
    }
  }
  else if (x$ep["BR"]==3) #2
  {
    if (x$eo["BR"]==0)
    {
      c <- put_BLUE_RED(x,c)$counter
      x <- put_BLUE_RED(x,c)$cube
      c <- put_ORANGE_BLUE(x,c)$counter
      x <- put_ORANGE_BLUE(x,c)$cube
    }
    else if (x$eo["BR"]==1)
    {
      c <- put_RED_BLUE(x,c)$counter + 1
      x <- put_RED_BLUE(x,c)$cube
      x <- move(x, moves = "D")
      c <- put_ORANGE_BLUE(x,c)$counter
      x <- put_ORANGE_BLUE(x,c)$cube
    }
  }
  else if (x$ep["FR"]==3) #3
  {
    if (x$eo["FR"]==0)
    {
      c <- put_RED_GREEN(x,c)$counter + 1
      x <- put_RED_GREEN(x,c)$cube
      x <- move(x, moves = "D2")
      c <- put_BLUE_ORANGE(x,c)$counter
      x <- put_BLUE_ORANGE(x,c)$cube
    }
    else if (x$eo["FR"]==1)
    {
      c <- put_RED_GREEN(x,c)$counter + 1
      x <- put_RED_GREEN(x,c)$cube
      x <- move(x, moves = "D")
      c <- put_ORANGE_BLUE(x,c)$counter
      x <- put_ORANGE_BLUE(x,c)$cube
    }
  }
  else if (x$ep["FL"]==3) #4
  {
    if (x$eo["FL"]==0)
    {
      c <- put_ORANGE_GREEN(x,c)$counter
      x <- put_ORANGE_GREEN(x,c)$cube
      c <- put_BLUE_ORANGE(x,c)$counter
      x <- put_BLUE_ORANGE(x,c)$cube
    }
    else if (x$eo["FL"]==1)
    {
      c <- put_GREEN_ORANGE(x,c)$counter + 1
      x <- put_GREEN_ORANGE(x,c)$cube
      x <- move(x, moves = "D'")
      c <- put_BLUE_ORANGE(x,c)$counter
      x <- put_BLUE_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["DR"]==3) #5
  {
    if (x$eo["DR"]==0)
    {
      # counter computed before the setup turn; put_* counters do not
      # depend on the cube state, so the total is still correct
      c <- put_ORANGE_BLUE(x,c)$counter + 1
      x <- move(x, moves = "D'")
      x <- put_ORANGE_BLUE(x,c)$cube
    }
    else if (x$eo["DR"]==1)
    {
      c <- put_BLUE_ORANGE(x,c)$counter
      x <- put_BLUE_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["DF"]==3) #6
  {
    if (x$eo["DF"]==0)
    {
      c <- put_ORANGE_BLUE(x,c)$counter
      x <- put_ORANGE_BLUE(x,c)$cube
    }
    else if (x$eo["DF"]==1)
    {
      c <- put_BLUE_ORANGE(x,c)$counter + 1
      x <- move(x, moves = "D")
      x <- put_BLUE_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["DL"]==3) #7
  {
    if (x$eo["DL"]==0)
    {
      c <- put_ORANGE_BLUE(x,c)$counter + 1
      x <- move(x, moves = "D")
      x <- put_ORANGE_BLUE(x,c)$cube
    }
    else if (x$eo["DL"]==1)
    {
      c <- put_BLUE_ORANGE(x,c)$counter + 1
      x <- move(x, moves = "D2")
      x <- put_BLUE_ORANGE(x,c)$cube
    }
  }
  else if (x$ep["DB"]==3) #8
  {
    if (x$eo["DB"]==0)
    {
      c <- put_ORANGE_BLUE(x,c)$counter + 1
      x <- move(x, moves = "D2")
      x <- put_ORANGE_BLUE(x,c)$cube
    }
    else if (x$eo["DB"]==1)
    {
      c <- put_BLUE_ORANGE(x,c)$counter + 1
      x <- move(x, moves = "D'")
      x <- put_BLUE_ORANGE(x,c)$cube
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Insert the blue-red edge (edge piece 4) into its second-layer
# slot (BR).
#
# x: cubing-package cube object (edge slots in x$ep, flips in x$eo);
# c: running move counter.  Bottom-layer edges are aligned with a
# D-face setup turn (counted as one move) before a put_* insertion;
# an edge stuck in a wrong/flipped second-layer slot is first ejected
# by one insertion, then re-inserted.
# NOTE(review): each put_* helper is evaluated twice (once for $counter,
# once for $cube); the final state is still correct because the counter
# increment does not depend on the cube, but work and plots are doubled.
# Returns list(cube = updated cube, counter = updated move count).
BLUE_RED_EDGE <- function(x,c) #SECOND LAYER BLUE-RED EDGE
{
  if (x$ep["BR"]==4) #1
  {
    if (x$eo["BR"]==0)
    {
      # already solved
      x <- x
      c <- c
    }
    else if (x$eo["BR"]==1)
    {
      # flipped in place: eject, realign with D', re-insert
      c <- put_BLUE_RED(x,c)$counter + 1
      x <- put_BLUE_RED(x,c)$cube
      x <- move(x, moves = "D'")
      c <- put_BLUE_RED(x,c)$counter
      x <- put_BLUE_RED(x,c)$cube
    }
  }
  else if (x$ep["FR"]==4) #2
  {
    if (x$eo["FR"]==0)
    {
      c <- put_RED_GREEN(x,c)$counter
      x <- put_RED_GREEN(x,c)$cube
      c <- put_BLUE_RED(x,c)$counter
      x <- put_BLUE_RED(x,c)$cube
    }
    else if (x$eo["FR"]==1)
    {
      c <- put_RED_GREEN(x,c)$counter + 1
      x <- put_RED_GREEN(x,c)$cube
      x <- move(x, moves = "D")
      c <- put_RED_BLUE(x,c)$counter
      x <- put_RED_BLUE(x,c)$cube
    }
  }
  else if (x$ep["FL"]==4) #3
  {
    if (x$eo["FL"]==0)
    {
      c <- put_ORANGE_GREEN(x,c)$counter + 1
      x <- put_ORANGE_GREEN(x,c)$cube
      x <- move(x, moves = "D2")
      c <- put_BLUE_RED(x,c)$counter
      x <- put_BLUE_RED(x,c)$cube
    }
    else if (x$eo["FL"]==1)
    {
      c <- put_ORANGE_GREEN(x,c)$counter + 1
      x <- put_ORANGE_GREEN(x,c)$cube
      x <- move(x, moves = "D'")
      c <- put_RED_BLUE(x,c)$counter
      x <- put_RED_BLUE(x,c)$cube
    }
  }
  else if (x$ep["BL"]==4) #4
  {
    if (x$eo["BL"]==0)
    {
      c <- put_BLUE_ORANGE(x,c)$counter
      x <- put_BLUE_ORANGE(x,c)$cube
      c <- put_RED_BLUE(x,c)$counter
      x <- put_RED_BLUE(x,c)$cube
    }
    else if (x$eo["BL"]==1)
    {
      c <- put_BLUE_ORANGE(x,c)$counter + 1
      x <- put_BLUE_ORANGE(x,c)$cube
      x <- move(x, moves = "D'")
      c <- put_BLUE_RED(x,c)$counter
      x <- put_BLUE_RED(x,c)$cube
    }
  }
  else if (x$ep["DR"]==4) #5
  {
    if (x$eo["DR"]==0)
    {
      # counter computed before the setup turn; put_* counters do not
      # depend on the cube state, so the total is still correct
      c <- put_RED_BLUE(x,c)$counter + 1
      x <- move(x, moves = "D'")
      x <- put_RED_BLUE(x,c)$cube
    }
    else if (x$eo["DR"]==1)
    {
      c <- put_BLUE_RED(x,c)$counter + 1
      x <- move(x, moves = "D2")
      x <- put_BLUE_RED(x,c)$cube
    }
  }
  else if (x$ep["DF"]==4) #6
  {
    if (x$eo["DF"]==0)
    {
      c <- put_RED_BLUE(x,c)$counter
      x <- put_RED_BLUE(x,c)$cube
    }
    else if (x$eo["DF"]==1)
    {
      c <- put_BLUE_RED(x,c)$counter + 1
      x <- move(x, moves = "D'")
      x <- put_BLUE_RED(x,c)$cube
    }
  }
  else if (x$ep["DL"]==4) #7
  {
    if (x$eo["DL"]==0)
    {
      c <- put_RED_BLUE(x,c)$counter + 1
      x <- move(x, moves = "D")
      x <- put_RED_BLUE(x,c)$cube
    }
    else if (x$eo["DL"]==1)
    {
      c <- put_BLUE_RED(x,c)$counter
      x <- put_BLUE_RED(x,c)$cube
    }
  }
  else if (x$ep["DB"]==4) #8
  {
    if (x$eo["DB"]==0)
    {
      c <- put_RED_BLUE(x,c)$counter + 1
      x <- move(x, moves = "D2")
      x <- put_RED_BLUE(x,c)$cube
    }
    else if (x$eo["DB"]==1)
    {
      c <- put_BLUE_RED(x,c)$counter + 1
      x <- move(x, moves = "D")
      x <- put_BLUE_RED(x,c)$cube
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Solve the four second-layer edges in red-green, green-orange,
# orange-blue, blue-red order.
#
# x: cubing-package cube object; c: running move counter.
# Each edge solver is invoked exactly once and both of its outputs
# kept; the original called every solver twice (once for $counter,
# once for $cube), doubling the work and the intermediate plots.
# Returns list(cube = updated cube, counter = updated move count).
second_layer <- function(x,c)
{
  res <- RED_GREEN_EDGE(x, c)
  res <- GREEN_ORANGE_EDGE(res$cube, res$counter)
  res <- ORANGE_BLUE_EDGE(res$cube, res$counter)
  res <- BLUE_RED_EDGE(res$cube, res$counter)
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Solve the first two layers: the white layer, then the middle layer.
#
# x: cubing-package cube object; c: running move counter.
# Each stage is invoked exactly once and both outputs kept; the
# original called each stage twice (once for $counter, once for
# $cube), duplicating work and plots.
# Returns list(cube = updated cube, counter = updated move count).
first_two_layers <- function(x,c)
{
  res <- first_layer(x, c)
  res <- second_layer(res$cube, res$counter)
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Orient the four yellow (bottom-layer) edges so the yellow cross
# pattern appears.
#
# x: cubing-package cube object (edge flips in x$eo); c: running move
# counter.  The branch matches the flip pattern of the DR/DF/DL/DB
# edges (dot, line, or L-shape cases) and applies the corresponding
# fixed sequence; c grows by the sequence length.
# Returns list(cube = updated cube, counter = updated move count).
yellow_edges_orientation <- function(x,c) ###ORIENTATION OF YELLOW CROSS
{
  if (x$eo["DR"]==0 && x$eo["DF"]==0 && x$eo["DL"]==0 && x$eo["DB"]==0) #1
  {
    # all four edges already oriented
    x <- x
    c <- c
  }
  else if (x$eo["DR"]==1 && x$eo["DF"]==1 && x$eo["DL"]==1 && x$eo["DB"]==1) #2
  {
    x <- move(x, moves = "FLDL'D'F'BDRD'R'B'")
    c <- c + 12
  }
  else if (x$eo["DR"]==1 && x$eo["DF"]==1 && x$eo["DL"]==0 && x$eo["DB"]==0) #3
  {
    x <- move(x, moves = "RDFD'F'R'")
    c <- c + 6
  }
  else if (x$eo["DR"]==1 && x$eo["DF"]==0 && x$eo["DL"]==1 && x$eo["DB"]==0) #4
  {
    x <- move(x, moves = "RFDF'D'R'")
    c <- c + 6
  }
  else if (x$eo["DR"]==1 && x$eo["DF"]==0 && x$eo["DL"]==0 && x$eo["DB"]==1) #5
  {
    x <- move(x, moves = "BDRD'R'B'")
    c <- c + 6
  }
  else if (x$eo["DR"]==0 && x$eo["DF"]==0 && x$eo["DL"]==1 && x$eo["DB"]==1) #6
  {
    x <- move(x, moves = "LDBD'B'L'")
    c <- c + 6
  }
  else if (x$eo["DR"]==0 && x$eo["DF"]==1 && x$eo["DL"]==0 && x$eo["DB"]==1) #7
  {
    x <- move(x, moves = "FLDL'D'F'")
    c <- c + 6
  }
  else if (x$eo["DR"]==0 && x$eo["DF"]==1 && x$eo["DL"]==1 && x$eo["DB"]==0) #8
  {
    x <- move(x, moves = "FDLD'L'F'")
    c <- c + 6
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
# Apply the fixed 8-move bottom-layer edge-permutation sequence used by
# yellow_edges_permutation (callers account for the 8 moves themselves).
change_right_back <- function(x) {
  move(x, moves = "FDF'DFD2F'D")
}
# Apply the fixed 8-move bottom-layer edge-permutation sequence used by
# yellow_edges_permutation (callers account for the 8 moves themselves).
change_left_back <- function(x) {
  move(x, moves = "RDR'DRD2R'D")
}
# Apply the fixed 8-move bottom-layer edge-permutation sequence used by
# yellow_edges_permutation (callers account for the 8 moves themselves).
change_right_front <- function(x) {
  move(x, moves = "LDL'DLD2L'D")
}
# Apply the fixed 8-move bottom-layer edge-permutation sequence used by
# yellow_edges_permutation (callers account for the 8 moves themselves).
change_left_front <- function(x) {
  move(x, moves = "BDB'DBD2B'D")
}
# Permute the four yellow edges into their home slots
# (DR = 9, DF = 10, DL = 11, DB = 12).
#
# x: cubing-package cube object (edge slots in x$ep); c: running move
# counter.  All 24 arrangements of the four bottom edges are
# enumerated; each case is resolved with D-face turns and/or the
# change_* helper sequences (8 moves each), and c is advanced by the
# number of face turns applied (a D2 counts as one turn).
# Returns list(cube = updated cube, counter = updated move count).
yellow_edges_permutation <- function(x,c)
{
  if (x$ep["DR"]==9 && x$ep["DF"]==10 && x$ep["DL"]==11 && x$ep["DB"]==12) #1
  {
    # already in place
    x <- x
    c <- c
  }
  else if (x$ep["DR"]==12 && x$ep["DF"]==9 && x$ep["DL"]==10 && x$ep["DB"]==11) #2
  {
    x <- move(x, moves = "D")
    c <- c + 1
  }
  else if (x$ep["DR"]==11 && x$ep["DF"]==12 && x$ep["DL"]==9 && x$ep["DB"]==10) #3
  {
    x <- move(x, moves = "D2")
    c <- c + 1
  }
  else if (x$ep["DR"]==10 && x$ep["DF"]==11 && x$ep["DL"]==12 && x$ep["DB"]==9) #4
  {
    x <- move(x, moves = "D'")
    c <- c + 1
  }
  else if (x$ep["DR"]==9 && x$ep["DF"]==10 && x$ep["DL"]==12 && x$ep["DB"]==11) #5
  {
    x <- change_left_back(x)
    c <- c + 8
  }
  else if (x$ep["DR"]==11 && x$ep["DF"]==9 && x$ep["DL"]==10 && x$ep["DB"]==12) #6
  {
    x <- move(x, moves = "D")
    x <- change_left_back(x)
    c <- c + 9
  }
  else if (x$ep["DR"]==12 && x$ep["DF"]==11 && x$ep["DL"]==9 && x$ep["DB"]==10) #7
  {
    x <- move(x, moves = "D2")
    x <- change_left_back(x)
    c <- c + 9
  }
  else if (x$ep["DR"]==10 && x$ep["DF"]==12 && x$ep["DL"]==11 && x$ep["DB"]==9) #8
  {
    x <- move(x, moves = "D'")
    x <- change_left_back(x)
    c <- c + 9
  }
  else if (x$ep["DR"]==12 && x$ep["DF"]==10 && x$ep["DL"]==11 && x$ep["DB"]==9) #9
  {
    x <- change_right_back(x)
    c <- c + 8
  }
  else if (x$ep["DR"]==9 && x$ep["DF"]==12 && x$ep["DL"]==10 && x$ep["DB"]==11) #10
  {
    x <- move(x, moves = "D")
    x <- change_right_back(x)
    c <- c + 9
  }
  else if (x$ep["DR"]==11 && x$ep["DF"]==9 && x$ep["DL"]==12 && x$ep["DB"]==10) #11
  {
    x <- move(x, moves = "D2")
    x <- change_right_back(x)
    c <- c + 9
  }
  else if (x$ep["DR"]==10 && x$ep["DF"]==11 && x$ep["DL"]==9 && x$ep["DB"]==12) #12
  {
    x <- move(x, moves = "D'")
    x <- change_right_back(x)
    c <- c + 9
  }
  else if (x$ep["DR"]==11 && x$ep["DF"]==10 && x$ep["DL"]==9 && x$ep["DB"]==12) #13
  {
    x <- change_left_front(x)
    x <- change_right_back(x)
    x <- move(x, moves = "D")
    c <- c + 17
  }
  else if (x$ep["DR"]==12 && x$ep["DF"]==11 && x$ep["DL"]==10 && x$ep["DB"]==9) #14
  {
    x <- change_left_back(x)
    x <- change_right_front(x)
    x <- move(x, moves = "D2")
    c <- c + 17
  }
  else if (x$ep["DR"]==9 && x$ep["DF"]==12 && x$ep["DL"]==11 && x$ep["DB"]==10) #15
  {
    x <- change_right_back(x)
    x <- change_left_front(x)
    x <- move(x, moves = "D'")
    c <- c + 17
  }
  else if (x$ep["DR"]==10 && x$ep["DF"]==9 && x$ep["DL"]==12 && x$ep["DB"]==11) #16
  {
    x <- change_right_front(x)
    x <- change_left_back(x)
    c <- c + 16
  }
  else if (x$ep["DR"]==11 && x$ep["DF"]==10 && x$ep["DL"]==12 && x$ep["DB"]==9) #17
  {
    x <- change_right_front(x)
    x <- move(x, moves = "D'")
    c <- c + 9
  }
  else if (x$ep["DR"]==9 && x$ep["DF"]==11 && x$ep["DL"]==10 && x$ep["DB"]==12) #18
  {
    x <- change_left_front(x)
    c <- c + 8
  }
  else if (x$ep["DR"]==12 && x$ep["DF"]==9 && x$ep["DL"]==11 && x$ep["DB"]==10) #19
  {
    x <- change_left_back(x)
    x <- move(x, moves = "D")
    c <- c + 9
  }
  else if (x$ep["DR"]==10 && x$ep["DF"]==12 && x$ep["DL"]==9 && x$ep["DB"]==11) #20
  {
    x <- change_right_back(x)
    x <- move(x, moves = "D2")
    c <- c + 9
  }
  else if (x$ep["DR"]==12 && x$ep["DF"]==10 && x$ep["DL"]==9 && x$ep["DB"]==11) #21
  {
    x <- change_left_front(x)
    x <- move(x, moves = "D")
    c <- c + 9
  }
  else if (x$ep["DR"]==11 && x$ep["DF"]==12 && x$ep["DL"]==10 && x$ep["DB"]==9) #22
  {
    x <- change_left_back(x)
    x <- move(x, moves = "D2")
    c <- c + 9
  }
  else if (x$ep["DR"]==9 && x$ep["DF"]==11 && x$ep["DL"]==12 && x$ep["DB"]==10) #23
  {
    x <- change_right_back(x)
    x <- move(x, moves = "D'")
    c <- c + 9
  }
  else if (x$ep["DR"]==10 && x$ep["DF"]==9 && x$ep["DL"]==11 && x$ep["DB"]==12) #24
  {
    x <- change_right_front(x)
    c <- c + 8
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
yellow_cross <- function(x, c)
{
  # Solves the yellow (down-face) edge cross: first orients the down-layer
  # edges, then permutes them into their correct slots.
  #
  # Fix: the original called each helper twice -- once to read $counter and
  # once to read $cube -- repeating the whole step (including the plotting
  # the helpers do internally).  The helpers' cube result does not depend on
  # the counter argument, so a single call per step is equivalent and halves
  # the work.
  #
  # x: cube state (list carrying ep/eo/cp/co vectors); c: running move count.
  # Returns list("cube" = updated cube, "counter" = updated counter).
  res <- yellow_edges_orientation(x, c)
  res <- yellow_edges_permutation(res$cube, res$counter)
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube" = x, "counter" = c))
}
# Permutes the four down-layer ("yellow") corners into their solved slots.
#
# x: cube state list with cp (corner permutation), co, ep, eo vectors.
# c: running count of face turns applied so far.
#
# The down-layer corners are numbered 5-8; the solved permutation is
# DFR=5, DLF=6, DBL=7, DRB=8.  Each branch (#1-#12) matches one of the
# corner permutations reachable at this stage and applies a fixed algorithm
# (presumably conjugated corner cycles -- verify against an algorithm
# reference) that restores the solved permutation, crediting the counter
# with the number of face turns in the string.
#
# Returns list("cube" = updated cube, "counter" = updated counter).
yellow_corners_permutations <- function(x,c)
{
if (x$cp["DFR"]==5 && x$cp["DLF"]==6 && x$cp["DBL"]==7 && x$cp["DRB"]==8) #1
{
# Corners already permuted correctly: no moves needed (self-assignments
# are deliberate no-ops so every branch binds both x and c).
x <- x
c <- c
}
else if (x$cp["DFR"]==5 && x$cp["DLF"]==8 && x$cp["DBL"]==6 && x$cp["DRB"]==7) #2
{
x <- move(x, moves = "D'R'DLD'RDL'")
c <- c + 8
}
else if (x$cp["DFR"]==5 && x$cp["DLF"]==7 && x$cp["DBL"]==8 && x$cp["DRB"]==6) #3
{
x <- move(x, moves = "DFD'B'DF'D'B")
c <- c + 8
}
else if (x$cp["DFR"]==8 && x$cp["DLF"]==6 && x$cp["DBL"]==5 && x$cp["DRB"]==7) #4
{
x <- move(x, moves = "D'F'DBD'FDB'")
c <- c + 8
}
else if (x$cp["DFR"]==7 && x$cp["DLF"]==6 && x$cp["DBL"]==8 && x$cp["DRB"]==5) #5
{
x <- move(x, moves = "DLD'R'DL'D'R")
c <- c + 8
}
else if (x$cp["DFR"]==8 && x$cp["DLF"]==5 && x$cp["DBL"]==7 && x$cp["DRB"]==6) #6
{
x <- move(x, moves = "D'L'DRD'LDR'")
c <- c + 8
}
else if (x$cp["DFR"]==6 && x$cp["DLF"]==8 && x$cp["DBL"]==7 && x$cp["DRB"]==5) #7
{
x <- move(x, moves = "DBD'F'DB'D'F")
c <- c + 8
}
else if (x$cp["DFR"]==6 && x$cp["DLF"]==7 && x$cp["DBL"]==5 && x$cp["DRB"]==8) #8
{
x <- move(x, moves = "DRD'L'DR'D'L")
c <- c + 8
}
else if (x$cp["DFR"]==7 && x$cp["DLF"]==5 && x$cp["DBL"]==6 && x$cp["DRB"]==8) #9
{
x <- move(x, moves = "D'B'DFD'BDF'")
c <- c + 8
}
# Cases #10-#12 need two chained 8-move algorithms (16 turns total).
else if (x$cp["DFR"]==6 && x$cp["DLF"]==5 && x$cp["DBL"]==8 && x$cp["DRB"]==7) #10
{
x <- move(x, moves = "D'B'DFD'BDF'D'R'DLD'RDL'")
c <- c + 16
}
else if (x$cp["DFR"]==7 && x$cp["DLF"]==8 && x$cp["DBL"]==5 && x$cp["DRB"]==6) #11
{
x <- move(x, moves = "D'B'DFD'BDF'D'L'DRD'LDR'")
c <- c + 16
}
else if (x$cp["DFR"]==8 && x$cp["DLF"]==7 && x$cp["DBL"]==6 && x$cp["DRB"]==5) #12
{
x <- move(x, moves = "D'B'DFD'BDF'DLD'R'DL'D'R")
c <- c + 16
}
# Display the cube after this stage (plot dispatches on the cube class).
plot(x)
#Sys.sleep(0.1)
return(list("cube"=x,"counter"=c))
}
sexy_move <- function(x, c)
{
  # Applies the "sexy move" trigger twice (R U R' U' R U R' U') and credits
  # the eight face turns to the running move counter.
  # Returns list("cube" = turned cube, "counter" = c + 8).
  turned <- move(x, moves = "RUR'U'RUR'U'")
  return(list("cube" = turned, "counter" = c + 8))
}
yellow_corners_orientations <- function(x, c)
{
  # Orients the four yellow (down-layer) corners.  For each corner brought
  # into the DFR slot, the double sexy move (sexy_move) is applied up to
  # three times until x$co["DFR"] == 0, then a D turn moves the next corner
  # into DFR.  The c - 1 / c + 1 adjustments reproduce the original counter
  # bookkeeping exactly (presumably compensating the D-turn accounting --
  # TODO confirm against the counting convention used elsewhere).
  #
  # Fixes vs. original:
  #   * the identical three-step pattern was unrolled verbatim four times;
  #     it is now a seq_len(4) loop, one iteration per corner;
  #   * each application called sexy_move twice (once for $counter, once
  #     for $cube), doubling the work -- the cube result does not depend on
  #     the counter, so a single call is equivalent.
  #
  # Returns list("cube" = updated cube, "counter" = updated counter).
  for (i in seq_len(4)) {
    if (x$co["DFR"] != 0) {
      res <- sexy_move(x, c)
      x <- res$cube
      c <- res$counter
    } else {
      c <- c - 1
    }
    if (x$co["DFR"] != 0) {
      res <- sexy_move(x, c)
      x <- res$cube
      c <- res$counter
    }
    if (x$co["DFR"] != 0) {
      res <- sexy_move(x, c)
      x <- res$cube
      c <- res$counter
    } else {
      x <- move(x, moves = "D")
      c <- c + 1
    }
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube" = x, "counter" = c))
}
yellow_face <- function(x, c)
{
  # Completes the last (yellow) layer: build the yellow cross, then permute
  # and finally orient the yellow corners.
  #
  # Fix: the original invoked each stage twice (once to read $counter, once
  # to read $cube), redoing every stage and its internal plotting.  The
  # stages' cube result does not depend on the counter argument, so one
  # call per stage is equivalent and halves the work.
  #
  # x: cube state; c: running move counter.
  # Returns list("cube" = updated cube, "counter" = updated counter).
  res <- yellow_cross(x, c)
  res <- yellow_corners_permutations(res$cube, res$counter)
  res <- yellow_corners_orientations(res$cube, res$counter)
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube" = x, "counter" = c))
}
LBL <- function(x, c)
{
  # Full Layer-By-Layer solve: white cross, white corners, second layer,
  # then the yellow layer (edge orientation, edge permutation, corner
  # permutation, corner orientation).
  #
  # Fix: the original called every one of the seven stages twice (once for
  # $counter, once for $cube), repeating each full stage and its plotting.
  # The stages' cube result does not depend on the counter argument, so a
  # single call per stage is equivalent and halves the work.
  #
  # x: scrambled cube state; c: initial move counter (typically 0 --
  # TODO confirm against callers).
  # Returns list("cube" = solved cube, "counter" = total move count).
  res <- CROSS(x, c)
  res <- white_corners(res$cube, res$counter)
  res <- second_layer(res$cube, res$counter)
  res <- yellow_edges_orientation(res$cube, res$counter)
  res <- yellow_edges_permutation(res$cube, res$counter)
  res <- yellow_corners_permutations(res$cube, res$counter)
  res <- yellow_corners_orientations(res$cube, res$counter)
  x <- res$cube
  c <- res$counter
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube" = x, "counter" = c))
}
##################################################################################### CFOP
green_red_block <- function(x,c)
{
if (x$cp["URF"]==1 && x$co["URF"]==0) #1st corner
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #1.1a
{
x <- x
c <- c
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #1.1b
{
x <- move(x, moves = "R'DRD'FD2F'D'FD2F'")
c <- c + 11
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #1.2a
{
x <- move(x, moves = "F2D'F2DF2")
c <- c + 5
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #1.2b
{
x <- move(x, moves = "UF'D'FU'R'D2R")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #1.3a
{
x <- move(x, moves = "L2F2D'F2DF2L2")
c <- c + 7
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #1.3b
{
x <- move(x, moves = "BDB'U2R'D2RU2")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #1.4a
{
x <- move(x, moves = "R2DR2D'R2")
c <- c + 5
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #1.4b
{
x <- move(x, moves = "U'RDR'UFD2F'")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #1.5a
{
x <- move(x, moves = "D")
c <- put_RED_GREEN(x,c)$counter + 1
x <- put_RED_GREEN(x,c)$cube
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #1.5b
{
x <- move(x, moves = "D2")
c <- put_GREEN_RED(x,c)$counter + 1
x <- put_GREEN_RED(x,c)$cube
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #1.6a
{
x <- move(x, moves = "D2")
c <- put_RED_GREEN(x,c)$counter + 1
x <- put_RED_GREEN(x,c)$cube
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #1.6b
{
x <- move(x, moves = "D'")
c <- put_GREEN_RED(x,c)$counter + 1
x <- put_GREEN_RED(x,c)$cube
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #1.7a
{
x <- move(x, moves = "D'")
c <- put_RED_GREEN(x,c)$counter + 1
x <- put_RED_GREEN(x,c)$cube
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #1.7b
{
c <- put_GREEN_RED(x,c)$counter
x <- put_GREEN_RED(x,c)$cube
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #1.8a
{
c <- put_RED_GREEN(x,c)$counter
x <- put_RED_GREEN(x,c)$cube
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #1.8b
{
x <- move(x, moves = "D")
c <- put_GREEN_RED(x,c)$counter + 1
x <- put_GREEN_RED(x,c)$cube
}
}
else if (x$cp["URF"]==1 && x$co["URF"]==1) #1st corner turned clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #1.1a
{
x <- move(x, moves = "FD'F'D'FDF'D'FD2F'")
c <- c + 11
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #1.1b
{
x <- move(x, moves = "FD'F'D'FD'F'DR'D'R")
c <- c + 11
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #1.2a
{
x <- move(x, moves = "RF'R2D2RF2D2F'")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #1.2b
{
x <- move(x, moves = "LDL'R'D2RFD2F'")
c <- c + 9
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #1.3a
{
x <- move(x, moves = "BD2B'R'D2RFD2F'")
c <- c + 9
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #1.3b
{
x <- move(x, moves = "L'DLR'D2RFD2F'")
c <- c + 9
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #1.4a
{
x <- move(x, moves = "B'D2BR'D2RFD2F'")
c <- c + 9
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #1.4b
{
x <- move(x, moves = "RD'R2D2RFD2F'")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #1.5a
{
x <- move(x, moves = "D2FD'F'R'D'R")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #1.5b
{
x <- move(x, moves = "D'R'D2RFD2F'")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #1.6a
{
x <- move(x, moves = "D'FD'F'R'D'R")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #1.6b
{
x <- move(x, moves = "R'D2RFD2F'")
c <- c + 6
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #1.7a
{
x <- move(x, moves = "FD'F'R'D'R")
c <- c + 6
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #1.7b
{
x <- move(x, moves = "DR'D2RFD2F'")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #1.8a
{
x <- move(x, moves = "DFD'F'R'D'R")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #1.8b
{
x <- move(x, moves = "D2R'D2RFD2F'")
c <- c + 7
}
}
else if (x$cp["URF"]==1 && x$co["URF"]==2) #1st corner turned anti-clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #1.1a
{
x <- move(x, moves = "R'DRDR'D'RDR'D2R")
c <- c + 11
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #1.1b
{
x <- move(x, moves = "R'DRD'FDF'DFDF'")
c <- c + 11
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #1.2a
{
x <- move(x, moves = "LD2L'FD2F'R'D2R")
c <- c + 9
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #1.2b
{
x <- move(x, moves = "F'DF2D2F'R'D2R")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #1.3a
{
x <- move(x, moves = "L'D2LFD2F'R'D2R")
c <- c + 9
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #1.3b
{
x <- move(x, moves = "BD'B'FD2F'R'D2R")
c <- c + 9
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #1.4a
{
x <- move(x, moves = "F'RF2D2F'R2D2R")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #1.4b
{
x <- move(x, moves = "B'D'BFD2F'R'D2R")
c <- c + 9
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #1.5a
{
x <- move(x, moves = "FD2F'R'D2R")
c <- c + 6
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #1.5b
{
x <- move(x, moves = "DR'DRFDF'")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #1.6a
{
x <- move(x, moves = "DFD2F'R'D2R")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #1.6b
{
x <- move(x, moves = "D2R'DRFDF'")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #1.7a
{
x <- move(x, moves = "D2FD2F'R'D2R")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #1.7b
{
x <- move(x, moves = "D'R'DRFDF'")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #1.8a
{
x <- move(x, moves = "D'FD2F'R'D2R")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #1.8b
{
x <- move(x, moves = "R'DRFDF'")
c <- c + 6
}
}
if (x$cp["UFL"]==1 && x$co["UFL"]==0) #2nd corner
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #2.1a
{
x <- move(x, moves = "U'R'D'RUR'DR")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #2.1b
{
x <- move(x, moves = "F2DF2D'FD'FR'D2R")
c <- c + 10
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #2.2a
{
x <- move(x, moves = "F'DF2D2F'D'FD2F'")
c <- c + 9
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #2.2b
{
x <- move(x, moves = "F'D'FR'D2R")
c <- c + 6
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #2.3a
{
x <- move(x, moves = "UL'D'LU'DR'D2R")
c <- c + 8
#middle
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #2.3b
{
x <- move(x, moves = "L2D'L2DL'D2L'FD'F'")
c <- c + 10
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #2.4a
{
x <- move(x, moves = "R2U'R'D'RURDR")
c <- c + 9
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #2.4b
{
x <- move(x, moves = "U2B'D2BU2R'DR")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #2.5a
{
x <- move(x, moves = "LDL'R'D'R")
c <- c + 6
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #2.5b
{
x <- move(x, moves = "D'F'D2FD'FDF'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #2.6a
{
x <- move(x, moves = "D'LD2L'D'R'D'R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #2.6b
{
x <- move(x, moves = "F'D2FD'FDF'")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #2.7a
{
x <- move(x, moves = "LD2L'D'R'D'R")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #2.7b
{
x <- move(x, moves = "DF'D2FD'FDF'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #2.8a
{
x <- move(x, moves = "DLD2L'D'R'D'R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #2.8b
{
x <- move(x, moves = "F'DFD'FD2F'")
c <- c + 7
}
}
else if (x$cp["UFL"]==1 && x$co["UFL"]==1) #2nd corner turned clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #2.1a
{
x <- move(x, moves = "FD'F2D'F2D2F'")
c <- c + 7
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #2.1b
{
x <- move(x, moves = "R'D2RF'D'F2D2F'")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #2.2a
{
x <- move(x, moves = "FDF'LD'L'D'FDF'")
c <- c + 10
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #2.2b
{
x <- move(x, moves = "LD'L'FDF'D'FD2F'")
c <- c + 10
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #2.3a
{
x <- move(x, moves = "BDB'F'D'F2D2F'")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #2.3b
{
x <- move(x, moves = "FL'F2D'F2D2F'L")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #2.4a
{
x <- move(x, moves = "B'DBF'D'F2D2F'")
c <- c + 7
#middle
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #2.4b
{
x <- move(x, moves = "RD2R'F'D'F2D2F'")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #2.5a
{
x <- move(x, moves = "DLD'L'DR'D'R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #2.5b
{
x <- move(x, moves = "D2F'D'F2D2F'")
c <- c + 6
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #2.6a
{
x <- move(x, moves = "F'D'FD2R'D'R")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #2.6b
{
x <- move(x, moves = "D'F'D'F2D2F'")
c <- c + 6
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #2.7a
{
x <- move(x, moves = "D'LD'L'DR'D'R")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #2.7b
{
x <- move(x, moves = "F'D'F2D2F'")
c <- c + 5
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #2.8a
{
x <- move(x, moves = "LD'L'DR'D'R")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #2.8b
{
x <- move(x, moves = "DF'D'F2D2F'")
c <- c + 6
}
}
else if (x$cp["UFL"]==1 && x$co["UFL"]==2) #2nd corner turned anti-clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #2.1a
{
x <- move(x, moves = "R'D'LDRL'")
c <- c + 5
#middle
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #2.1b
{
x <- move(x, moves = "FDF'D'LR'DL'R")
c <- c + 7
#middle
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #2.2a
{
x <- move(x, moves = "F'DFDF'DF2DF'")
c <- c + 9
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #2.2b
{
x <- move(x, moves = "F'DFDF'D'FD'R'DR")
c <- c + 11
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #2.3a
{
x <- move(x, moves = "L'DL2R'DRL'")
c <- c + 6
#middle
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #2.3b
{
x <- move(x, moves = "BD2B'LR'DRL'")
c <- c + 6
#middle
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #2.4a
{
x <- move(x, moves = "RD'R2LDRL'")
c <- c + 6
#middle
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #2.4b
{
x <- move(x, moves = "B'D2BLR'DRL'")
c <- c + 6
#middle
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #2.5a
{
x <- move(x, moves = "D'LR'DRL'")
c <- c + 4
#middle
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #2.5b
{
x <- move(x, moves = "F'DFDFDF'")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #2.6a
{
x <- move(x, moves = "LR'DL'R")
c <- c + 3
#middle
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #2.6b
{
x <- move(x, moves = "DF'DFDFDF'")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #2.7a
{
x <- move(x, moves = "DLR'DRL'")
c <- c + 4
#middle
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #2.7b
{
x <- move(x, moves = "D2F'DFDFDF'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #2.8a
{
x <- move(x, moves = "D2LR'DRL'")
c <- c + 4
#middle
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #2.8b
{
x <- move(x, moves = "D'F'DFDFDF'")
c <- c + 8
}
}
if (x$cp["ULB"]==1 && x$co["ULB"]==0) #3rd corner
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #3.1a
{
x <- move(x, moves = "U2R'D'RU2R'DR")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #3.1b
{
x <- move(x, moves = "L'DLFD'F'D'R'D'R")
c <- c + 10
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #3.2a
{
x <- move(x, moves = "L2DL2D'LR'D2RL")
c <- c + 8
#middle
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #3.2b
{
x <- move(x, moves = "U'F'D'FUR'D2R")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #3.3a
{
x <- move(x, moves = "BFD2B'F'")
c <- c + 5
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #3.3b
{
x <- move(x, moves = "L'DLD'BD2B'FD'F'")
c <- c + 10
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #3.4a
{
x <- move(x, moves = "B2D'B2DB'FD2F'B'")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #3.4b
{
x <- move(x, moves = "URDR'U'FD2F'")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #3.5a
{
x <- move(x, moves = "D'BD'B'R'D2R")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #3.5b
{
x <- move(x, moves = "L'DLFD2F'")
c <- c + 6
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #3.6a
{
x <- move(x, moves = "BD'B'R'D2R")
c <- c + 6
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #3.6b
{
x <- move(x, moves = "DL'DLFD2F'")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #3.7a
{
x <- move(x, moves = "DBD'B'R'D2R")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #3.7b
{
x <- move(x, moves = "D2L'DLFD2F'")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #3.8a
{
x <- move(x, moves = "D2BD'B'R'D2R")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #3.8b
{
x <- move(x, moves = "D'L'DLFD2F'")
c <- c + 7
}
}
else if (x$cp["ULB"]==1 && x$co["ULB"]==1) #3rd corner turned clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #3.1a
{
x <- move(x, moves = "FD2L'D2LF'")
c <- c + 6
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #3.1b
{
x <- move(x, moves = "R'DRL'FD2F'L")
c <- c + 7
#middle
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #3.2a
{
x <- move(x, moves = "F'D2F2L'D2LF'")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #3.2b
{
x <- move(x, moves = "LD'L2FD2F'L")
c <- c + 7
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #3.3a
{
x <- move(x, moves = "BD'B'DFDF'D'FD2F'")
c <- c + 11
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #3.3b
{
x <- move(x, moves = "BD'B'DFD'F'DR'D'R")
c <- c + 11
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #3.4a
{
x <- move(x, moves = "LB'L2FD2F'LB")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #3.4b
{
x <- move(x, moves = "RDR'L'FD2F'L")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #3.5a
{
x <- move(x, moves = "BD'B'D2R'D'R")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #3.5b
{
x <- move(x, moves = "DL'FD2F'L")
c <- c + 6
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #3.6a
{
x <- move(x, moves = "DBD'B'D2R'D'R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #3.6b
{
x <- move(x, moves = "D2L'FD2F'L")
c <- c + 6
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #3.7a
{
x <- move(x, moves = "D2BD'B'D2R'D'R")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #3.7b
{
x <- move(x, moves = "D'L'FD2F'L")
c <- c + 6
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #3.8a
{
x <- move(x, moves = "D'BD'B'D2R'D'R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #3.8b
{
x <- move(x, moves = "L'FD2F'L")
c <- c + 5
}
}
else if (x$cp["ULB"]==1 && x$co["ULB"]==2) #3rd corner turned anti-clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #3.1a
{
x <- move(x, moves = "R'D2RBR'D2RB'")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #3.1b
{
x <- move(x, moves = "FD'F'BR'D2RB'")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #3.2a
{
x <- move(x, moves = "B'LB2R'D2RB'L'")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #3.2b
{
x <- move(x, moves = "F'D'FBR'D2RB'")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #3.3a
{
x <- move(x, moves = "L'DLD'R'D'RDR'D2R")
c <- c + 11
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #3.3b
{
x <- move(x, moves = "L'DLDFDF'DFDF'")
c <- c + 11
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #3.4a
{
x <- move(x, moves = "L'D'LRD'R2DR")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #3.4b
{
x <- move(x, moves = "B'DB2R'D2RB'")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #3.5a
{
x <- move(x, moves = "D2BR'D2RB'")
c <- c + 6
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #3.5b
{
x <- move(x, moves = "D'L'DLD2FDF'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #3.6a
{
x <- move(x, moves = "D'BR'D2RB'")
c <- c + 6
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #3.6b
{
x <- move(x, moves = "L'DLD2FDF'")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #3.7a
{
x <- move(x, moves = "BR'D2RB'")
c <- c + 5
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #3.7b
{
x <- move(x, moves = "DL'DLD2FDF'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #3.8a
{
x <- move(x, moves = "DBR'D2RB'")
c <- c + 6
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #3.8b
{
x <- move(x, moves = "D2L'DLD2FDF'")
c <- c + 8
}
}
if (x$cp["UBR"]==1 && x$co["UBR"]==0) #4th corner
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #4.1a
{
x <- move(x, moves = "UR'D'RU'R'DR")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #4.1b
{
x <- move(x, moves = "R2D'R2DR'DR'FD2F'")
c <- c + 10
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #4.2a
{
x <- move(x, moves = "F2UFDF2D'FU'F2")
c <- c + 9
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #4.2b
{
x <- move(x, moves = "U2F'D'FU2R'D2R")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #4.3a
{
x <- move(x, moves = "U'L'D'LUDR'D2R")
c <- c + 9
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #4.3b
{
x <- move(x, moves = "B2DB2D'BD2BR'DR")
c <- c + 10
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #4.4a
{
x <- move(x, moves = "RD'R2D2RDR'D2R")
c <- c + 9
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #4.4b
{
x <- move(x, moves = "B'D2BR'DR")
c <- c + 6
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #4.5a
{
x <- move(x, moves = "D2RD'R'DR'D2R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #4.5b
{
x <- move(x, moves = "D'B'DBFD'F'")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #4.6a
{
x <- move(x, moves = "D'RD'R'DR'D2R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #4.6b
{
x <- move(x, moves = "B'DBFD'F'")
c <- c + 6
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #4.7a
{
x <- move(x, moves = "RD'R'DR'D2R")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #4.7b
{
x <- move(x, moves = "DB'DBFD'F'")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #4.8a
{
x <- move(x, moves = "DRD'R'DR'D2R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #4.8b
{
x <- move(x, moves = "D2B'DBFD'F'")
c <- c + 7
}
}
else if (x$cp["UBR"]==1 && x$co["UBR"]==1) #4th corner turned clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #4.1a
{
x <- move(x, moves = "FDB'D'F'B")
c <- c + 5
#middle
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #4.1b
{
x <- move(x, moves = "BR'B2D2BRD2FD'F'")
c <- c + 10
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #4.2a
{
x <- move(x, moves = "F'DF2B'D'F'B")
c <- c + 6
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #4.2b
{
x <- move(x, moves = "LD2L'FB'D'F'B")
c <- c + 6
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #4.3a
{
x <- move(x, moves = "BD'B2FD'F'B")
c <- c + 6
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #4.3b
{
x <- move(x, moves = "L'D2LFB'D'F'B")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #4.4a
{
x <- move(x, moves = "RD'R2D'RD'R'D'R")
c <- c + 9
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #4.4b
{
x <- move(x, moves = "RD'R'D'RDR'DFD'F'")
c <- c + 11
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #4.5a
{
x <- move(x, moves = "D'RD'R'D'R'D'R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #4.5b
{
x <- move(x, moves = "FB'D'F'B")
c <- c + 3
#middle
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #4.6a
{
x <- move(x, moves = "RD'R'D'R'D'R")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #4.6b
{
x <- move(x, moves = "DFB'D'F'B")
c <- c + 4
#middle
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #4.7a
{
x <- move(x, moves = "DRD'R'D'R'D'R")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #4.7b
{
x <- move(x, moves = "D2FB'D'F'B")
c <- c + 4
#middle
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #4.8a
{
x <- move(x, moves = "D2RD'R'D'R'D'R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #4.8b
{
x <- move(x, moves = "D'FB'D'F'B")
c <- c + 4
#middle
}
}
else if (x$cp["UBR"]==1 && x$co["UBR"]==2) #4th corner turned anti-clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #4.1a
{
x <- move(x, moves = "R'DR2DR2D2R")
c <- c + 7
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #4.1b
{
x <- move(x, moves = "FD2F'RDR2D2R")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #4.2a
{
x <- move(x, moves = "LD'L'RDR2D2R")
c <- c + 7
#middle
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #4.2b
{
x <- move(x, moves = "F'D2FRDR2D2R")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #4.3a
{
x <- move(x, moves = "L'D'LRDR2D2R")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #4.3b
{
x <- move(x, moves = "R'BR2D2R2B'DR")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #4.4a
{
x <- move(x, moves = "B'DBD'RDR'FDF'")
c <- c + 10
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #4.4b
{
x <- move(x, moves = "B'DBR'D'RDR'D2R")
c <- c + 10
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #4.5a
{
x <- move(x, moves = "DRDR2D2R")
c <- c + 6
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #4.5b
{
x <- move(x, moves = "D2B'DBD'FDF'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #4.6a
{
x <- move(x, moves = "D2RDR2D2R")
c <- c + 6
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #4.6b
{
x <- move(x, moves = "D'B'DBD'FDF'")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #4.7a
{
x <- move(x, moves = "D'RDR2D2R")
c <- c + 6
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #4.7b
{
x <- move(x, moves = "B'DBD'FDF'")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #4.8a
{
x <- move(x, moves = "RDR2D2R")
c <- c + 5
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #4.8b
{
x <- move(x, moves = "DB'DBD'FDF'")
c <- c + 8
}
}
if (x$cp["DFR"]==1 && x$co["DFR"]==0) #5th corner
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #5.1a
{
x <- move(x, moves = "R'D2RFD2F'DR'D'R")
c <- c + 10
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #5.1b
{
x <- move(x, moves = "'FD'F'R'D2R")
c <- c + 6
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #5.2a
{
x <- move(x, moves = "D'F'DF2D'F'")
c <- c + 6
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #5.2b
{
x <- move(x, moves = "DF2D'FR'DR2F2R'")
c <- c + 9
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #5.3a
{
x <- move(x, moves = "FL'DLD'F'R'D2R")
c <- c + 9
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #5.3b
{
x <- move(x, moves = "D2FL'DLF'")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #5.4a
{
x <- move(x, moves = "DRD'R2DR")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #5.4b
{
x <- move(x, moves = "FD'B'DBF'R'D2R")
c <- c + 9
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #5.5a
{
x <- move(x, moves = "R'D2RDR'D'R")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #5.5b
{
x <- move(x, moves = "R'D'RD'R'D2RFD2F'")
c <- c + 10
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #5.6a
{
x <- move(x, moves = "FD2F'R'D2RDR'D2R")
c <- c + 10
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #5.6b
{
x <- move(x, moves = "FD2F'D'FDF'")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #5.7a
{
x <- move(x, moves = "R'DRD2R'D'R")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #5.7b
{
x <- move(x, moves = "DFD2F'DFD'F'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #5.8a
{
x <- move(x, moves = "D'R'D2RD'R'DR")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #5.8b
{
x <- move(x, moves = "FD'F'D2FDF'")
c <- c + 7
}
}
else if (x$cp["DFR"]==1 && x$co["DFR"]==1) #5th corner turned clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #5.1a
{
x <- move(x, moves = "DR'DRD2R'DR")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #5.1b
{
x <- move(x, moves = "D2R'DRDFDF'")
c <- c + 9
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #5.2a
{
x <- move(x, moves = "R'D2RF'DF2D'F'")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #5.2b
{
x <- move(x, moves = "F'DFD'R'DR")
c <- c + 7
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #5.3a
{
x <- move(x, moves = "D'L'DLR'DR")
c <- c + 6
#middle
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #5.3b
{
x <- move(x, moves = "R'D'BD'B'R")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #5.4a
{
x <- move(x, moves = "D'B'DBFDF'")
c <- c + 7
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #5.4b
{
x <- move(x, moves = "RD2R'FDF'")
c <- c + 6
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #5.5a
{
x <- move(x, moves = "D'R'DR")
c <- c + 4
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #5.5b
{
x <- move(x, moves = "DR'D2RD'FDF'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #5.6a
{
x <- move(x, moves = "FD'F'D2R'D'R")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #5.6b
{
x <- move(x, moves = "D'FD'F'DFDF'")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #5.7a
{
x <- move(x, moves = "DR'D2RDR'D2R")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #5.7b
{
x <- move(x, moves = "FDF'")
c <- c + 3
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #5.8a
{
x <- move(x, moves = "DR'D'RDR'D2R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #5.8b
{
x <- move(x, moves = "D'FDF'DFDF'")
c <- c + 8
}
}
else if (x$cp["DFR"]==1 && x$co["DFR"]==2) #5th corner turned anti-clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #5.1a
{
x <- move(x, moves = "D'FD'F'D'FD2F'")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #5.1b
{
x <- move(x, moves = "D2FD'F'D'R'D'R")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #5.2a
{
x <- move(x, moves = "DLD'L'R'D'R")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #5.2b
{
x <- move(x, moves = "D2LD'L'FD2F'")
c <- c + 7
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #5.3a
{
x <- move(x, moves = "DBD'B'FD'F'")
c <- c + 6
#middle
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #5.3b
{
x <- move(x, moves = "BR'D'RB'")
c <- c + 5
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #5.4a
{
x <- move(x, moves = "D'RD'R'D2R'D'R")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #5.4b
{
x <- move(x, moves = "RD'R'DFD'F'")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #5.5a
{
x <- move(x, moves = "DR'DRD'R'D'R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #5.5b
{
x <- move(x, moves = "R'DRD2FDF'")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #5.6a
{
x <- move(x, moves = "D'FD2F'DR'D'R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #5.6b
{
x <- move(x, moves = "DFD'F'")
c <- c + 4
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #5.7a
{
x <- move(x, moves = "D'FD'F'DR'D'R")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #5.7b
{
x <- move(x, moves = "D'FDF'D'FD2F'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #5.8a
{
x <- move(x, moves = "R'D'R")
c <- c + 3
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #5.8b
{
x <- move(x, moves = "D'FD2F'D'FD2F'")
c <- c + 8
}
}
if (x$cp["DLF"]==1 && x$co["DLF"]==0) #6th corner
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #6.1a
{
x <- move(x, moves = "DR'D2RFD2F'DR'D'R")
c <- c + 11
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #6.1b
{
x <- move(x, moves = "DR'DRFD2F'")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #6.2a
{
x <- move(x, moves = "F'DF2D'F'")
c <- c + 5
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #6.2b
{
x <- move(x, moves = "DR'DRF'D'FD'R'D'R")
c <- c + 11
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #6.3a
{
x <- move(x, moves = "DFL'DLD'F'R'D2R")
c <- c + 10
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #6.3b
{
x <- move(x, moves = "D'FL'DLF'")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #6.4a
{
x <- move(x, moves = "DRD'R2DR")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #6.4b
{
x <- move(x, moves = "DRDR'FD'F'D2FDF'")
c <- c + 11
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #6.5a
{
x <- move(x, moves = "R'D2RD'R'DR")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #6.5b
{
x <- move(x, moves = "DFD'F'D2FDF'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #6.6a
{
x <- move(x, moves = "DR'D2RDR'D'R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #6.6b
{
x <- move(x, moves = "DR'D'RD'R'D2RFD2F'")
c <- c + 11
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #6.7a
{
x <- move(x, moves = "DFDF'DFD2F'R'D2R")
c <- c + 11
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #6.7b
{
x <- move(x, moves = "DFD2F'D'FDF'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #6.8a
{
x <- move(x, moves = "DR'DRD2R'D'R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #6.8b
{
x <- move(x, moves = "D2FD2F'DFD'F'")
c <- c + 8
}
}
else if (x$cp["DLF"]==1 && x$co["DLF"]==1) #6th corner turned clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #6.1a
{
x <- move(x, moves = "D2R'DRDR'D2R")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #6.1b
{
x <- move(x, moves = "D'R'DRDFDF'")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #6.2a
{
x <- move(x, moves = "DR'D2RF'DF2D'F'")
c <- c + 9
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #6.2b
{
x <- move(x, moves = "DF'DFD'R'DR")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #6.3a
{
x <- move(x, moves = "L'DLR'DR")
c <- c + 5
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #6.3b
{
x <- move(x, moves = "DR'DBD'B'R")
c <- c + 7
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #6.4a
{
x <- move(x, moves = "DR'DRD'RD'R2DR")
c <- c + 10
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #6.4b
{
x <- move(x, moves = "D'B'DBR'D2R")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #6.5a
{
x <- move(x, moves = "D2R'D'RDR'D2R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #6.5b
{
x <- move(x, moves = "FDF'DFDF'")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #6.6a
{
x <- move(x, moves = "R'DR")
c <- c + 3
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #6.6b
{
x <- move(x, moves = "D2R'D2RD'FDF'")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #6.7a
{
x <- move(x, moves = "DFD'F'D2R'D'R")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #6.7b
{
x <- move(x, moves = "FD'F'DFDF'")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #6.8a
{
x <- move(x, moves = "D2R'D2RDR'D2R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #6.8b
{
x <- move(x, moves = "DFDF'")
c <- c + 4
}
}
else if (x$cp["DLF"]==1 && x$co["DLF"]==2) #6th corner turned anti-clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #6.1a
{
x <- move(x, moves = "FD'F'D'FD2F'")
c <- c + 7
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #6.1b
{
x <- move(x, moves = "D'FD'F'D'R'D'R")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #6.2a
{
x <- move(x, moves = "LD'L'R'D'R")
c <- c + 6
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #6.2b
{
x <- move(x, moves = "D'LD'L'FD2F'")
c <- c + 7
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #6.3a
{
x <- move(x, moves = "D2BD'B'FD'F'")
c <- c + 6
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #6.3b
{
x <- move(x, moves = "DFD'L'DLF'")
c <- c + 7
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #6.4a
{
x <- move(x, moves = "DFD2F'RD'R2DR")
c <- c + 9
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #6.4b
{
x <- move(x, moves = "DRD'R'DFD'F'")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #6.5a
{
x <- move(x, moves = "DR'D'R")
c <- c + 4
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #6.5b
{
x <- move(x, moves = "FD2F'D'FD2F'")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #6.6a
{
x <- move(x, moves = "D2R'DRD'R'D'R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #6.6b
{
x <- move(x, moves = "DR'DRD2FDF'")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #6.7a
{
x <- move(x, moves = "FD2F'DR'D'R")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #6.7b
{
x <- move(x, moves = "D'FD2F'")
c <- c + 4
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #6.8a
{
x <- move(x, moves = "FD'F'DR'D'R")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #6.8b
{
x <- move(x, moves = "FDF'D'FD2F'")
c <- c + 7
}
}
if (x$cp["DBL"]==1 && x$co["DBL"]==0) #7th corner
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #7.1a
{
x <- move(x, moves = "FD'F'DFDF'DFD'F'")
c <- c + 11
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #7.1b
{
x <- move(x, moves = "D2R'DRFD2F'")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #7.2a
{
x <- move(x, moves = "DF'DF2D'F'")
c <- c + 6
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #7.2b
{
x <- move(x, moves = "LDL'FDF'DFD'F'")
c <- c + 10
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #7.3a
{
x <- move(x, moves = "L'D2LBD2B'D'R'D'R")
c <- c + 10
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #7.3b
{
x <- move(x, moves = "FL'DLF'")
c <- c + 5
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #7.4a
{
x <- move(x, moves = "D'RD'R2DR")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #7.4b
{
x <- move(x, moves = "D2FD'F'B'DBR'D2R")
c <- c + 10
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #7.5a
{
x <- move(x, moves = "R'D'RD'R'DR")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #7.5b
{
x <- move(x, moves = "D'FD2F'DFD'F'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #7.6a
{
x <- move(x, moves = "DR'D2RD'R'DR")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #7.6b
{
x <- move(x, moves = "FDF'DFD'F'")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #7.7a
{
x <- move(x, moves = "D2R'D2RDR'D'R")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #7.7b
{
x <- move(x, moves = "D2R'D'RD'R'D2RFD2F'")
c <- c + 11
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #7.8a
{
x <- move(x, moves = "D2FDF'DFD2F'R'D2R")
c <- c + 11
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #7.8b
{
x <- move(x, moves = "D2FD2F'D'FDF'")
c <- c + 8
}
}
else if (x$cp["DBL"]==1 && x$co["DBL"]==1) #7th corner turned clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #7.1a
{
x <- move(x, moves = "D'R'DRDR'D2R")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #7.1b
{
x <- move(x, moves = "R'DRDFDF'")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #7.2a
{
x <- move(x, moves = "D'F'DFD2FDF'")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #7.2b
{
x <- move(x, moves = "D2F'DFD'R'DR")
c <- c + 8
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #7.3a
{
x <- move(x, moves = "DL'DLR'DR")
c <- c + 6
#middle
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #7.3b
{
x <- move(x, moves = "D2L'FDF'L")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #7.4a
{
x <- move(x, moves = "DB'DBFDF'")
c <- c + 7
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #7.4b
{
x <- move(x, moves = "B'DBR'D2R")
c <- c + 6
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #7.5a
{
x <- move(x, moves = "D'R'D2RDR'D2R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #7.5b
{
x <- move(x, moves = "D2FDF'")
c <- c + 4
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #7.6a
{
x <- move(x, moves = "D'R'D'RDR'D2R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #7.6b
{
x <- move(x, moves = "D'R'DRD'FDF'")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #7.7a
{
x <- move(x, moves = "R'D2R")
c <- c + 3
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #7.7b
{
x <- move(x, moves = "D'R'D2RD'FDF'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #7.8a
{
x <- move(x, moves = "D2FD'F'D2R'D'R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #7.8b
{
x <- move(x, moves = "DFD'F'DFDF'")
c <- c + 8
}
}
else if (x$cp["DBL"]==1 && x$co["DBL"]==2) #7th corner turned anti-clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #7.1a
{
x <- move(x, moves = "DFD'F'D'FD2F'")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #7.1b
{
x <- move(x, moves = "FD'F'D'R'D'R")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #7.2a
{
x <- move(x, moves = "D'LD'L'R'D'R")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #7.2b
{
x <- move(x, moves = "LD'L'FD2F'")
c <- c + 6
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #7.3a
{
x <- move(x, moves = "D'BD'B'FD'F'")
c <- c + 6
#middle
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #7.3b
{
x <- move(x, moves = "D2BR'D'RB'")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #7.4a
{
x <- move(x, moves = "DRD'R'D2R'D'R")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #7.4b
{
x <- move(x, moves = "D2RD'R'DFD'F'")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #7.5a
{
x <- move(x, moves = "DFD'F'DR'D'R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #7.5b
{
x <- move(x, moves = "DFDF'D'FD2F'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #7.6a
{
x <- move(x, moves = "D2R'D'R")
c <- c + 4
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #7.6b
{
x <- move(x, moves = "DFD2F'D'FD2F'")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #7.7a
{
x <- move(x, moves = "D'R'DRD'R'D'R")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #7.7b
{
x <- move(x, moves = "D2R'DRD2FDF'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #7.8a
{
x <- move(x, moves = "DFD2F'DR'D'R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #7.8b
{
x <- move(x, moves = "FD2F'")
c <- c + 3
}
}
if (x$cp["DRB"]==1 && x$co["DRB"]==0) #8th corner
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #8.1a
{
x <- move(x, moves = "D'R'D2RFD2F'DR'D'R")
c <- c + 11
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #8.1b
{
x <- move(x, moves = "D'FD'F'R'D2R")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #8.2a
{
x <- move(x, moves = "D2F'DF2D'F'")
c <- c + 6
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #8.2b
{
x <- move(x, moves = "D'F'D'FR'DRD2R'D'R")
c <- c + 11
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #8.3a
{
x <- move(x, moves = "D'R'BD'B'DRFD2F'")
c <- c + 10
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #8.3b
{
x <- move(x, moves = "DR'BD'B'R")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #8.4a
{
x <- move(x, moves = "RD'R2DR")
c <- c + 5
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #8.4b
{
x <- move(x, moves = "D'FD'F'B'DBR'D2R")
c <- c + 10
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #8.5a
{
x <- move(x, moves = "D'FDF'DFD2F'R'D2R")
c <- c + 11
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #8.5b
{
x <- move(x, moves = "D'FD2F'D'FDF'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #8.6a
{
x <- move(x, moves = "D'R'DRD2R'D'R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #8.6b
{
x <- move(x, moves = "FD2F'DFD'F'")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #8.7a
{
x <- move(x, moves = "D2R'D2RD'R'DR")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #8.7b
{
x <- move(x, moves = "D'FD'F'D2FDF'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #8.8a
{
x <- move(x, moves = "D'R'D2RDR'D'R")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #8.8b
{
x <- move(x, moves = "D'R'D'RD'R'D2RFD2F'")
c <- c + 11
}
}
else if (x$cp["DRB"]==1 && x$co["DRB"]==1) #8th corner turned clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #8.1a
{
x <- move(x, moves = "R'DRDR'D2R")
c <- c + 7
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #8.1b
{
x <- move(x, moves = "DR'DRDFDF'")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #8.2a
{
x <- move(x, moves = "F'DFD2FDF'")
c <- c + 7
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #8.2b
{
x <- move(x, moves = "DR'D2F'DFR")
c <- c + 7
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #8.3a
{
x <- move(x, moves = "D2L'DLR'DR")
c <- c + 6
#middle
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #8.3b
{
x <- move(x, moves = "D'L'FDF'L")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #8.4a
{
x <- move(x, moves = "D2B'DFDBF'")
c <- c + 6
#middle
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #8.4b
{
x <- move(x, moves = "DB'DBR'D2R")
c <- c + 7
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #8.5a
{
x <- move(x, moves = "D'FD'F'D2R'D'R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #8.5b
{
x <- move(x, moves = "D2FD'F'DFDF'")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #8.6a
{
x <- move(x, moves = "R'D2RDR'D2R")
c <- c + 7
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #8.6b
{
x <- move(x, moves = "D'FDF'")
c <- c + 4
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #8.7a
{
x <- move(x, moves = "R'D'RDR'D2R")
c <- c + 7
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #8.7b
{
x <- move(x, moves = "R'DRD'FDF'")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #8.8a
{
x <- move(x, moves = "DR'D2R")
c <- c + 4
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #8.8b
{
x <- move(x, moves = "R'D2RD'FDF'")
c <- c + 7
}
}
else if (x$cp["DRB"]==1 && x$co["DRB"]==2) #8th corner turned anti-clockwise
{
if (x$ep["FR"]==1 && x$eo["FR"]==0) #8.1a
{
x <- move(x, moves = "D2FD'F'D'FD2F'")
c <- c + 8
}
else if (x$ep["FR"]==1 && x$eo["FR"]==1) #8.1b
{
x <- move(x, moves = "DFD'F'D'R'D'R")
c <- c + 8
}
else if (x$ep["FL"]==1 && x$eo["FL"]==0) #8.2a
{
x <- move(x, moves = "LD'L'R'D'R")
c <- c + 6
}
else if (x$ep["FL"]==1 && x$eo["FL"]==1) #8.2b
{
x <- move(x, moves = "DLD'L'FD2F'")
c <- c + 7
}
else if (x$ep["BL"]==1 && x$eo["BL"]==0) #8.3a
{
x <- move(x, moves = "BD'B'FD'F'")
c <- c + 5
#middle
}
else if (x$ep["BL"]==1 && x$eo["BL"]==1) #8.3b
{
x <- move(x, moves = "D'BR'D'RB'")
c <- c + 6
}
else if (x$ep["BR"]==1 && x$eo["BR"]==0) #8.4a
{
x <- move(x, moves = "D2RD'R'D2R'D'R")
c <- c + 8
}
else if (x$ep["BR"]==1 && x$eo["BR"]==1) #8.4b
{
x <- move(x, moves = "D'RD'R'DFD'F'")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==0) #8.5a
{
x <- move(x, moves = "D2FD2F'DR'D'R")
c <- c + 8
}
else if (x$ep["DR"]==1 && x$eo["DR"]==1) #8.5b
{
x <- move(x, moves = "FD'F'")
c <- c + 3
}
else if (x$ep["DF"]==1 && x$eo["DF"]==0) #8.6a
{
x <- move(x, moves = "D2FD'F'DR'D'R")
c <- c + 8
}
else if (x$ep["DF"]==1 && x$eo["DF"]==1) #8.6b
{
x <- move(x, moves = "D2FDF'D'FD2F'")
c <- c + 8
}
else if (x$ep["DL"]==1 && x$eo["DL"]==0) #8.7a
{
x <- move(x, moves = "D'R'D'R")
c <- c + 4
}
else if (x$ep["DL"]==1 && x$eo["DL"]==1) #8.7b
{
x <- move(x, moves = "D2FD2F'D'FD2F'")
c <- c + 8
}
else if (x$ep["DB"]==1 && x$eo["DB"]==0) #8.8a
{
x <- move(x, moves = "R'DRD'R'D'R")
c <- c + 7
}
else if (x$ep["DB"]==1 && x$eo["DB"]==1) #8.8b
{
x <- move(x, moves = "D'R'DRD2FDF'")
c <- c + 8
}
}
plot(x)
#Sys.sleep(0.1)
return(list("cube"=x,"counter"=c))
}
red_blue_block <- function(x, c)
{
  # Solve the red/blue F2L pair by rotating it into the slot handled by
  # green_red_block (whole-cube y turn), then rotating back.
  # Fix: the original called green_red_block twice (once for $counter,
  # once for $cube), recomputing the solve and plotting twice; a single
  # call is equivalent because the function is deterministic in x.
  x <- move(x, moves = "y")
  res <- green_red_block(x, c)
  x <- res$cube
  c <- res$counter
  x <- move(x, moves = "y'")
  plot(x)
  return(list("cube" = x, "counter" = c))
}
blue_orange_block <- function(x, c)
{
  # Solve the blue/orange F2L pair: rotate it into the green/red slot
  # (y2), reuse green_red_block, rotate back (y2).
  # Fix: single call to green_red_block instead of the original's two
  # redundant calls (same cube result; avoids recomputation/double plot).
  x <- move(x, moves = "y2")
  res <- green_red_block(x, c)
  x <- res$cube
  c <- res$counter
  x <- move(x, moves = "y2")
  plot(x)
  return(list("cube" = x, "counter" = c))
}
orange_green_block <- function(x, c)
{
  # Solve the orange/green F2L pair: rotate it into the green/red slot
  # (y'), reuse green_red_block, rotate back (y).
  # Fix: single call to green_red_block instead of the original's two
  # redundant calls (same cube result; avoids recomputation/double plot).
  x <- move(x, moves = "y'")
  res <- green_red_block(x, c)
  x <- res$cube
  c <- res$counter
  x <- move(x, moves = "y")
  plot(x)
  return(list("cube" = x, "counter" = c))
}
F2L <- function(x, c)
{
  # First two layers: solve each of the four corner/edge pairs in turn.
  # Fix: each pair solver was called twice (once for $counter, once for
  # $cube), doubling the work and the plotting; call each solver once.
  for (solver in list(green_red_block, red_blue_block,
                      blue_orange_block, orange_green_block))
  {
    res <- solver(x, c)
    x <- res$cube
    c <- res$counter
  }
  plot(x)
  return(list("cube" = x, "counter" = c))
}
OLL_perms <- function(x, c)
{
  # Orient-last-layer (OLL) dispatcher: matches the current upper-face
  # edge-orientation (eo) and corner-orientation (co) pattern and applies
  # the corresponding OLL algorithm.  `c` accumulates the move count;
  # by the file's convention each face or slice turn (including half
  # turns like U2 and slices like M) counts 1, whole-cube rotations
  # (x/y/z) count 0.  Returns list(cube = ..., counter = ...).
  #
  # Fixes vs. original: stray '(' removed from the move strings of cases
  # #49 and #2 (not a legal move token); move-counter increments corrected
  # for cases #12 (12 -> 11), #13 (9 -> 10) and #56 (7 -> 11) to match the
  # counting convention used by every other entry.
  if (x$eo["UR"]==1 && x$eo["UF"]==1 && x$eo["UL"]==1 && x$eo["UB"]==1) #4 EDGES ORIENTED INCORRECTLY
  {
    if (x$co["URF"]==1 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==2) #8
    {
      x <- move(x, moves = "RU2R2FRF'U2R'FRF'")
      c <- c + 11
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==1) #9
    {
      x <- move(x, moves = "FRUR'U'F'UL'U'LFRUR'F'")
      c <- c + 15
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==1 && x$co["ULB"]==0 && x$co["UBR"]==2) #10
    {
      x <- move(x, moves = "LF'L'FU2FU'RU'R'F'")
      c <- c + 11
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==0) #11
    {
      x <- move(x, moves = "RU2R2FRF'U2M'URU'L'x'")
      c <- c + 12
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==2 && x$co["ULB"]==0 && x$co["UBR"]==0) #12
    {
      x <- move(x, moves = "R'U2FRUR'U'F2U2FR")
      c <- c + 11 # 11 turns in the string (was miscounted as 12)
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==2) #13
    {
      x <- move(x, moves = "MUR'F2RUL'ULM'")
      c <- c + 10 # 10 turns in the string (was miscounted as 9)
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==1 && x$co["ULB"]==1 && x$co["UBR"]==1) #14
    {
      x <- move(x, moves = "MU'LF2L'U'RU'R'M'")
      c <- c + 10
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==0 && x$co["ULB"]==0 && x$co["UBR"]==0) #15
    {
      x <- move(x, moves = "MURUR'U'M2URU'L'x'")
      c <- c + 11
    }
  }
  else if (x$eo["UR"]==0 && x$eo["UF"]==1 && x$eo["UL"]==1 && x$eo["UB"]==0) #2 EDGES ORIENTED INCORRECTLY "L-shape"
  {
    if (x$co["URF"]==0 && x$co["UFL"]==1 && x$co["ULB"]==2 && x$co["UBR"]==0) #16
    {
      x <- move(x, moves = "R'U'FURU'R'F'R")
      c <- c + 9
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==0) #19
    {
      x <- move(x, moves = "F'U'L'ULF")
      c <- c + 6
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==2 && x$co["ULB"]==0 && x$co["UBR"]==1) #20
    {
      x <- move(x, moves = "L'U'LU'L'ULULF'L'F")
      c <- c + 12
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==2) #23
    {
      x <- move(x, moves = "LFR'FRF'R'FRF2L'")
      c <- c + 11
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==1 && x$co["ULB"]==2 && x$co["UBR"]==2) #24
    {
      x <- move(x, moves = "F'L'U'LUL'U'LUF")
      c <- c + 10
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==1 && x$co["ULB"]==1 && x$co["UBR"]==0) #39
    {
      x <- move(x, moves = "LF2R'F'RF'L'")
      c <- c + 7
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==0) #40
    {
      x <- move(x, moves = "x'R'F2R2U'R'UR'F2Rx")
      c <- c + 9
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==1 && x$co["ULB"]==1 && x$co["UBR"]==1) #43
    {
      x <- move(x, moves = "R'F'LF'L'F2R")
      c <- c + 7
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==1 && x$co["ULB"]==0 && x$co["UBR"]==1) #45
    {
      x <- move(x, moves = "LR2F'RF'R'F2RF'Mx")
      c <- c + 10
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==0 && x$co["ULB"]==1 && x$co["UBR"]==2) #46
    {
      x <- move(x, moves = "F'LF'L2ULUL'U'LF2")
      c <- c + 11
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==0 && x$co["ULB"]==0 && x$co["UBR"]==2) #49
    {
      # stray '(' removed from the original move string
      x <- move(x, moves = "LF'L'FLF'L'FL'U'LUL'U'L")
      c <- c + 15
    }
  }
  else if (x$eo["UR"]==1 && x$eo["UF"]==1 && x$eo["UL"]==0 && x$eo["UB"]==0) #2 EDGES ORIENTED INCORRECTLY "L-shape"
  {
    if (x$co["URF"]==2 && x$co["UFL"]==0 && x$co["ULB"]==0 && x$co["UBR"]==1) #17
    {
      x <- move(x, moves = "LUF'U'L'ULFL'")
      c <- c + 9
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==0 && x$co["ULB"]==0 && x$co["UBR"]==2) #18
    {
      x <- move(x, moves = "FURU'R'F'")
      c <- c + 6
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==0) #21
    {
      x <- move(x, moves = "RUR'URU'R'U'R'FRF'")
      c <- c + 12
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==2) #22
    {
      x <- move(x, moves = "R'F'LF'L'FLF'L'F2R")
      c <- c + 11
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==1) #25
    {
      x <- move(x, moves = "FRUR'U'RUR'U'F'")
      c <- c + 10
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==2 && x$co["ULB"]==0 && x$co["UBR"]==2) #38
    {
      x <- move(x, moves = "R'F2LFL'FR")
      c <- c + 7
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==1 && x$co["ULB"]==0 && x$co["UBR"]==2) #41
    {
      x <- move(x, moves = "FR'F'RURU'R'")
      c <- c + 8
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==2) #42
    {
      x <- move(x, moves = "LFR'FRF2L'")
      c <- c + 7
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==2 && x$co["ULB"]==2 && x$co["UBR"]==0) #44
    {
      x <- move(x, moves = "R'L2FL'FLF2L'FMx")
      c <- c + 10
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==0 && x$co["ULB"]==1 && x$co["UBR"]==2) #47
    {
      x <- move(x, moves = "FR'FR2U'R'U'RUR'F2")
      c <- c + 11
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==0) #48
    {
      x <- move(x, moves = "R'FRF'R'FRF'RUR'U'RUR'")
      c <- c + 15
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==0 && x$co["ULB"]==0 && x$co["UBR"]==0) #56
    {
      x <- move(x, moves = "RL'BLR'U2RL'BLR'")
      c <- c + 11 # 11 turns in the string (was miscounted as 7)
    }
  }
  else if (x$eo["UR"]==0 && x$eo["UF"]==0 && x$eo["UL"]==1 && x$eo["UB"]==1) #2 EDGES ORIENTED INCORRECTLY "L-shape"
  {
    if (x$co["URF"]==2 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==1) #26
    {
      x <- move(x, moves = "L'BL2F'L2B'L2FL'")
      c <- c + 9
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==1 && x$co["ULB"]==0 && x$co["UBR"]==1) #28
    {
      x <- move(x, moves = "L'U'Ly'LF'L'ULFL'y")
      c <- c + 10
    }
  }
  else if (x$eo["UR"]==1 && x$eo["UF"]==0 && x$eo["UL"]==0 && x$eo["UB"]==1) #2 EDGES ORIENTED INCORRECTLY "L-shape"
  {
    if (x$co["URF"]==1 && x$co["UFL"]==1 && x$co["ULB"]==2 && x$co["UBR"]==2) #27
    {
      x <- move(x, moves = "RB'R2FR2BR2F'R")
      c <- c + 9
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==2 && x$co["ULB"]==2 && x$co["UBR"]==0) #29
    {
      x <- move(x, moves = "RUR'yR'FRU'R'F'Ry'")
      c <- c + 10
    }
  }
  else if (x$eo["UR"]==0 && x$eo["UF"]==1 && x$eo["UL"]==0 && x$eo["UB"]==1) #2 EDGES ORIENTED INCORRECTLY HORIZONTAL "LINE"
  {
    if (x$co["URF"]==1 && x$co["UFL"]==2 && x$co["ULB"]==0 && x$co["UBR"]==0) #30
    {
      x <- move(x, moves = "R'U'RUFx'RU'R'Eyx")
      c <- c + 9
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==1 && x$co["ULB"]==2 && x$co["UBR"]==0) #32
    {
      x <- move(x, moves = "RUR'U'R'FRF'")
      c <- c + 8
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==0) #33
    {
      x <- move(x, moves = "FRUR'U'F'")
      c <- c + 6
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==1 && x$co["ULB"]==2 && x$co["UBR"]==2) #35
    {
      x <- move(x, moves = "FURU'R'URU'R'F'")
      c <- c + 10
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==2) #36
    {
      x <- move(x, moves = "L'B'LU'R'URU'R'URL'BL")
      c <- c + 14
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==0) #50
    {
      x <- move(x, moves = "LF'L'U'LUFU'L'")
      c <- c + 9
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==2 && x$co["ULB"]==0 && x$co["UBR"]==1) #51
    {
      x <- move(x, moves = "R'FRUR'U'F'UR")
      c <- c + 9
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==2) #52
    {
      x <- move(x, moves = "LF'L'U'LFL'F'UF")
      c <- c + 10
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==1 && x$co["ULB"]==1 && x$co["UBR"]==1) #53
    {
      x <- move(x, moves = "R'FRUR'F'RFU'F'")
      c <- c + 10
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==2 && x$co["ULB"]==2 && x$co["UBR"]==2) #54
    {
      x <- move(x, moves = "L'B'LR'U'RUL'BL")
      c <- c + 10
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==0 && x$co["ULB"]==1 && x$co["UBR"]==1) #55
    {
      x <- move(x, moves = "RBR'LUL'U'RB'R'")
      c <- c + 10
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==0 && x$co["ULB"]==0 && x$co["UBR"]==0) #57
    {
      x <- move(x, moves = "RUR'U'LR'FRF'L'")
      c <- c + 10
    }
  }
  else if (x$eo["UR"]==1 && x$eo["UF"]==0 && x$eo["UL"]==1 && x$eo["UB"]==0) #2 EDGES ORIENTED INCORRECTLY VERTICAL "LINE"
  {
    if (x$co["URF"]==1 && x$co["UFL"]==0 && x$co["ULB"]==0 && x$co["UBR"]==2) #31
    {
      x <- move(x, moves = "R'U'R'FRF'UR")
      c <- c + 8
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==2) #34
    {
      x <- move(x, moves = "RU2R2U'RU'R'U2FRF'")
      c <- c + 11
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==1 && x$co["ULB"]==2 && x$co["UBR"]==2) #37
    {
      x <- move(x, moves = "R'U'RU'R'UF'UFR")
      c <- c + 10
    }
  }
  else if (x$eo["UR"]==0 && x$eo["UF"]==0 && x$eo["UL"]==0 && x$eo["UB"]==0) #4 EDGES ORIENTED CORRECTLY
  {
    if (x$co["URF"]==0 && x$co["UFL"]==0 && x$co["ULB"]==0 && x$co["UBR"]==0) #0
    {
      # already oriented: nothing to do
      x <- x
      c <- c
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==1 && x$co["ULB"]==2 && x$co["UBR"]==1) #1
    {
      x <- move(x, moves = "RU2R'U'RUR'U'RU'R'")
      c <- c + 11
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==2 && x$co["ULB"]==1 && x$co["UBR"]==1) #2
    {
      # stray '(' removed from the original move string
      x <- move(x, moves = "RU2R2U'R2U'R2U2R")
      c <- c + 9
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==0) #3
    {
      x <- move(x, moves = "x'RU'R'DRUR'D'x")
      c <- c + 8
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==1) #4
    {
      x <- move(x, moves = "R2D'RU2R'DRU2R")
      c <- c + 9
    }
    else if (x$co["URF"]==1 && x$co["UFL"]==2 && x$co["ULB"]==0 && x$co["UBR"]==0) #5
    {
      x <- move(x, moves = "x'RUR'DRU'R'D'x")
      c <- c + 8
    }
    else if (x$co["URF"]==0 && x$co["UFL"]==1 && x$co["ULB"]==1 && x$co["UBR"]==1) #6
    {
      x <- move(x, moves = "L'U'LU'L'U2L")
      c <- c + 7
    }
    else if (x$co["URF"]==2 && x$co["UFL"]==0 && x$co["ULB"]==2 && x$co["UBR"]==2) #7
    {
      x <- move(x, moves = "RUR'URU2R'")
      c <- c + 7
    }
  }
  else
  {
    # pattern not recognised from this orientation; caller rotates (y) and retries
    x <- x
    c <- c
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c))
}
OLL <- function(x, c)
{
  # Orient the last layer: flip the cube (z2) so the remaining face is up,
  # then try the OLL pattern matcher from each of the four y-orientations.
  # Fix: OLL_perms was called twice per iteration (once for $counter, once
  # for $cube); a single call is equivalent and halves the work/plotting.
  x <- move(x, moves = "z2")
  plot(x)
  for (i in 1:4)
  {
    res <- OLL_perms(x, c)
    c <- res$counter
    x <- res$cube
    x <- move(x, moves = "y")
  }
  plot(x)
  return(list("cube" = x, "counter" = c))
}
A_perm <- function(x, c) {
  # Apply the PLL A-perm algorithm; 9 counted turns (x rotations excluded).
  permuted <- move(x, moves = "xR'UR'D2RU'R'D2R2x'")
  plot(permuted)
  list(cube = permuted, counter = c + 9)
}
A_perm_mirror <- function(x, c) {
  # Apply the mirrored PLL A-perm algorithm; 9 counted turns.
  permuted <- move(x, moves = "xLU'LD2L'ULD2L2x'")
  plot(permuted)
  list(cube = permuted, counter = c + 9)
}
E_perm <- function(x, c) {
  # Apply the PLL E-perm algorithm; 16 counted turns (x rotations excluded).
  permuted <- move(x, moves = "x'RU'R'DRUR'D'RUR'DRU'R'D'x")
  plot(permuted)
  list(cube = permuted, counter = c + 16)
}
F_perm <- function(x, c) {
  # Apply the PLL F-perm algorithm; 18 counted turns.
  permuted <- move(x, moves = "LUFL'U'LULF'L2ULUL'U'LU'L'")
  plot(permuted)
  list(cube = permuted, counter = c + 18)
}
G1_perm <- function(x, c) {
  # Apply the PLL G1-perm algorithm; 12 counted turns.
  permuted <- move(x, moves = "R'U'RB2DL'ULU'LD'B2")
  plot(permuted)
  list(cube = permuted, counter = c + 12)
}
G1_perm_mirror <- function(x, c) {
  # Apply the mirrored PLL G1-perm algorithm; 12 counted turns.
  permuted <- move(x, moves = "LUL'B2D'RU'R'UR'DB2")
  plot(permuted)
  list(cube = permuted, counter = c + 12)
}
G2_perm <- function(x, c) {
  # Apply the PLL G2-perm algorithm; 12 counted turns (z rotations excluded).
  permuted <- move(x, moves = "z'U2RB'LB'L'BR'U2F'LFz")
  plot(permuted)
  list(cube = permuted, counter = c + 12)
}
G2_perm_mirror <- function(x, c) {
  # Apply the mirrored PLL G2-perm algorithm; 12 counted turns.
  permuted <- move(x, moves = "zU2L'BR'BRB'LU2FR'F'z'")
  plot(permuted)
  list(cube = permuted, counter = c + 12)
}
H_perm <- function(x, c) {
  # Apply the PLL H-perm algorithm (opposite edge swaps); 7 counted turns.
  permuted <- move(x, moves = "M2U'M2U2M2U'M2")
  plot(permuted)
  list(cube = permuted, counter = c + 7)
}
J_perm <- function(x, c) {
  # Apply the PLL J-perm algorithm; 14 counted turns.
  permuted <- move(x, moves = "RUR'F'RUR'U'R'FR2U'R'U'")
  plot(permuted)
  list(cube = permuted, counter = c + 14)
}
J_perm_mirror <- function(x, c) {
  # Apply the mirrored PLL J-perm algorithm; 14 counted turns.
  permuted <- move(x, moves = "L'U'LFL'U'LULF'L2ULU")
  plot(permuted)
  list(cube = permuted, counter = c + 14)
}
N_perm <- function(x, c) {
  # Apply the PLL N-perm algorithm; 15 counted turns.
  permuted <- move(x, moves = "R'UL'U2RU'LR'UL'U2RU'LU")
  plot(permuted)
  list(cube = permuted, counter = c + 15)
}
N_perm_mirror <- function(x, c) {
  # Apply the mirrored PLL N-perm algorithm; 15 counted turns.
  permuted <- move(x, moves = "LU'RU2L'UR'LU'RU2L'UR'U'")
  plot(permuted)
  list(cube = permuted, counter = c + 15)
}
R_perm <- function(x, c) {
  # Apply the PLL R-perm algorithm; 14 counted turns.
  permuted <- move(x, moves = "R'U2RU2R'FRUR'U'R'F'R2U'")
  plot(permuted)
  list(cube = permuted, counter = c + 14)
}
R_perm_mirror <- function(x, c) {
  # Apply the mirrored PLL R-perm algorithm; 14 counted turns.
  permuted <- move(x, moves = "LU2L'U2LF'L'U'LULFL2U")
  plot(permuted)
  list(cube = permuted, counter = c + 14)
}
T_perm <- function(x, c) {
  # Apply the PLL T-perm algorithm; 14 counted turns.
  permuted <- move(x, moves = "RUR'U'R'FR2U'R'U'RUR'F'")
  plot(permuted)
  list(cube = permuted, counter = c + 14)
}
U_perm <- function(x, c) {
  # Apply the PLL U-perm algorithm (edge 3-cycle); 11 counted turns.
  permuted <- move(x, moves = "R'UR'U'R'U'R'URUR2")
  plot(permuted)
  list(cube = permuted, counter = c + 11)
}
U_perm_mirror <- function(x, c) {
  # Apply the mirrored PLL U-perm algorithm; 11 counted turns.
  permuted <- move(x, moves = "R2U'R'U'RURURU'R")
  plot(permuted)
  list(cube = permuted, counter = c + 11)
}
V_perm <- function(x, c) {
  # Apply the PLL V-perm algorithm; 14 counted turns (y rotations excluded).
  permuted <- move(x, moves = "L'UL'U'y'R'F'R2U'R'UR'FRFy")
  plot(permuted)
  list(cube = permuted, counter = c + 14)
}
Y_perm <- function(x, c) {
  # Apply the PLL Y-perm algorithm; 17 counted turns.
  permuted <- move(x, moves = "FRU'R'U'RUR'F'RUR'U'R'FRF'")
  plot(permuted)
  list(cube = permuted, counter = c + 17)
}
Z_perm <- function(x, c) {
  # Apply the PLL Z-perm algorithm (adjacent edge swaps); 17 counted turns.
  permuted <- move(x, moves = "UR'U'RU'RURU'R'URUR2U'R'U")
  plot(permuted)
  list(cube = permuted, counter = c + 17)
}
PLL_perms <- function(x, c, test_check)
{
  # Permute-last-layer (PLL) dispatcher: matches the current edge (ep)
  # and corner (cp) permutation of the upper face and applies the
  # corresponding PLL algorithm.  Sets test_check to 1 when no case
  # matches (the caller then rotates U and retries).  The incoming
  # test_check value is deliberately reset to 0 on every call.
  # Fix vs. original: each branch called its perm function twice (once
  # for $counter, once for $cube), recomputing the solve and plotting
  # twice; each is now called exactly once via a local result list.
  test_check <- 0
  if (x$ep["UR"]==5 && x$ep["UF"]==6 && x$ep["UL"]==7 && x$ep["UB"]==8 && !(x$cp["URF"]==3 && x$cp["UFL"]==4 && x$cp["ULB"]==1 && x$cp["UBR"]==2)) # edges correctly permuted
  {
    if (x$cp["URF"]==1 && x$cp["UFL"]==2 && x$cp["ULB"]==3 && x$cp["UBR"]==4)
    {
      # fully permuted: nothing to do
      x <- x
      c <- c
    }
    else if (x$cp["URF"]==3 && x$cp["UFL"]==2 && x$cp["ULB"]==4 && x$cp["UBR"]==1)
    {
      res <- A_perm(x, c); x <- res$cube; c <- res$counter
    }
    else if (x$cp["URF"]==1 && x$cp["UFL"]==4 && x$cp["ULB"]==2 && x$cp["UBR"]==3)
    {
      res <- A_perm_mirror(x, c); x <- res$cube; c <- res$counter
    }
    else if (x$cp["URF"]==4 && x$cp["UFL"]==3 && x$cp["ULB"]==2 && x$cp["UBR"]==1)
    {
      res <- E_perm(x, c); x <- res$cube; c <- res$counter
    }
  }
  else if (x$cp["URF"]==1 && x$cp["UFL"]==2 && x$cp["ULB"]==3 && x$cp["UBR"]==4) # corners correctly permuted
  {
    if (x$ep["UR"]==7 && x$ep["UF"]==8 && x$ep["UL"]==5 && x$ep["UB"]==6)
    {
      res <- H_perm(x, c); x <- res$cube; c <- res$counter
    }
    else if (x$ep["UR"]==7 && x$ep["UF"]==6 && x$ep["UL"]==8 && x$ep["UB"]==5)
    {
      res <- U_perm(x, c); x <- res$cube; c <- res$counter
    }
    else if (x$ep["UR"]==8 && x$ep["UF"]==6 && x$ep["UL"]==5 && x$ep["UB"]==7)
    {
      res <- U_perm_mirror(x, c); x <- res$cube; c <- res$counter
    }
    else if (x$ep["UR"]==6 && x$ep["UF"]==5 && x$ep["UL"]==8 && x$ep["UB"]==7)
    {
      res <- Z_perm(x, c); x <- res$cube; c <- res$counter
    }
  }
  else if (x$ep["UR"]==5 && x$ep["UF"]==8 && x$ep["UL"]==7 && x$ep["UB"]==6 && x$cp["URF"]==1 && x$cp["UFL"]==3 && x$cp["ULB"]==2 && x$cp["UBR"]==4)
  {
    res <- F_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==6 && x$ep["UF"]==5 && x$ep["UL"]==7 && x$ep["UB"]==8 && x$cp["URF"]==4 && x$cp["UFL"]==2 && x$cp["ULB"]==3 && x$cp["UBR"]==1)
  {
    res <- J_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==5 && x$ep["UF"]==7 && x$ep["UL"]==6 && x$ep["UB"]==8 && x$cp["URF"]==1 && x$cp["UFL"]==3 && x$cp["ULB"]==2 && x$cp["UBR"]==4)
  {
    res <- J_perm_mirror(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==5 && x$ep["UF"]==8 && x$ep["UL"]==7 && x$ep["UB"]==6 && x$cp["URF"]==1 && x$cp["UFL"]==4 && x$cp["ULB"]==3 && x$cp["UBR"]==2)
  {
    res <- N_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==5 && x$ep["UF"]==8 && x$ep["UL"]==7 && x$ep["UB"]==6 && x$cp["URF"]==3 && x$cp["UFL"]==2 && x$cp["ULB"]==1 && x$cp["UBR"]==4)
  {
    res <- N_perm_mirror(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==6 && x$ep["UF"]==5 && x$ep["UL"]==7 && x$ep["UB"]==8 && x$cp["URF"]==1 && x$cp["UFL"]==2 && x$cp["ULB"]==4 && x$cp["UBR"]==3)
  {
    res <- R_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==5 && x$ep["UF"]==7 && x$ep["UL"]==6 && x$ep["UB"]==8 && x$cp["URF"]==1 && x$cp["UFL"]==2 && x$cp["ULB"]==4 && x$cp["UBR"]==3)
  {
    res <- R_perm_mirror(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==7 && x$ep["UF"]==6 && x$ep["UL"]==5 && x$ep["UB"]==8 && x$cp["URF"]==4 && x$cp["UFL"]==2 && x$cp["ULB"]==3 && x$cp["UBR"]==1)
  {
    res <- T_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==5 && x$ep["UF"]==7 && x$ep["UL"]==6 && x$ep["UB"]==8 && x$cp["URF"]==3 && x$cp["UFL"]==2 && x$cp["ULB"]==1 && x$cp["UBR"]==4)
  {
    res <- V_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==5 && x$ep["UF"]==6 && x$ep["UL"]==8 && x$ep["UB"]==7 && x$cp["URF"]==3 && x$cp["UFL"]==2 && x$cp["ULB"]==1 && x$cp["UBR"]==4)
  {
    res <- Y_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==5 && x$ep["UF"]==7 && x$ep["UL"]==8 && x$ep["UB"]==6 && x$cp["URF"]==3 && x$cp["UFL"]==1 && x$cp["ULB"]==2 && x$cp["UBR"]==4)
  {
    res <- G1_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==8 && x$ep["UF"]==5 && x$ep["UL"]==7 && x$ep["UB"]==6 && x$cp["URF"]==2 && x$cp["UFL"]==4 && x$cp["ULB"]==3 && x$cp["UBR"]==1)
  {
    res <- G1_perm_mirror(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==8 && x$ep["UF"]==6 && x$ep["UL"]==5 && x$ep["UB"]==7 && x$cp["URF"]==1 && x$cp["UFL"]==3 && x$cp["ULB"]==4 && x$cp["UBR"]==2)
  {
    res <- G2_perm(x, c); x <- res$cube; c <- res$counter
  }
  else if (x$ep["UR"]==7 && x$ep["UF"]==6 && x$ep["UL"]==8 && x$ep["UB"]==5 && x$cp["URF"]==4 && x$cp["UFL"]==2 && x$cp["ULB"]==1 && x$cp["UBR"]==3)
  {
    res <- G2_perm_mirror(x, c); x <- res$cube; c <- res$counter
  }
  else
  {
    # no case recognised from this orientation; caller rotates and retries
    test_check <- 1
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube"=x,"counter"=c, "test_check"=test_check))
}
PLL_perms_repeated <- function(x,c,test_check)
{
  # Try the PLL pattern branches from each of the four viewing angles of the
  # cube: after every attempt the whole cube is rotated ("y") so all
  # orientations of the last layer get inspected.
  #
  # Args:
  #   x          cube state object
  #   c          accumulated move counter
  #   test_check 0/1 flag; PLL_perms sets it to 1 when no case matched
  # Returns: list(cube, counter, test_check).
  #
  # FIX: the original invoked PLL_perms() three times per iteration (once per
  # extracted list element), redundantly re-running the whole branch scan and
  # re-plotting each time. A single call per iteration produces the same
  # cube, counter and test_check values.
  for (i in 1:4)
  {
    res <- PLL_perms(x, c, test_check)
    test_check <- res$test_check
    c <- res$counter
    x <- res$cube
    x <- move(x, moves = "y")   # rotate whole cube to the next angle
  }
  plot(x)
  #Sys.sleep(0.1)   # optional animation throttle, kept disabled as in original
  return(list("cube" = x, "counter" = c, "test_check" = test_check))
}
PLL <- function(x,c,test_check = 0)
{
  # Permute the last layer (PLL, final CFOP stage). If no known permutation
  # case matched (test_check == 1), rotate the top face ("U") and retry, up
  # to three more times, so every U-face alignment is tried.
  #
  # FIX: each stage previously invoked PLL_perms_repeated() three times just
  # to pull out the three list elements; a single call per stage returns the
  # identical result without redundantly re-scanning and re-plotting.
  res <- PLL_perms_repeated(x, c, test_check)
  test_check <- res$test_check
  c <- res$counter
  x <- res$cube
  if (test_check == 1)
  {
    x <- move(x, moves = "U")
    res <- PLL_perms_repeated(x, c, test_check)
    test_check <- res$test_check
    c <- res$counter + 1   # count the extra "U" adjustment move
    x <- res$cube
  }
  if (test_check == 1)
  {
    x <- move(x, moves = "U")
    # NOTE(review): unlike the first retry, the original does NOT add +1 to
    # the counter here or in the third retry; preserved as-is — confirm
    # whether the "U" moves were meant to be counted.
    res <- PLL_perms_repeated(x, c, test_check)
    test_check <- res$test_check
    c <- res$counter
    x <- res$cube
  }
  if (test_check == 1)
  {
    x <- move(x, moves = "U")
    res <- PLL_perms_repeated(x, c, test_check)
    test_check <- res$test_check
    c <- res$counter
    x <- res$cube
  }
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube" = x, "counter" = c))
}
CFOP <- function(x,c)
{
  # Full CFOP solve: Cross -> F2L -> OLL -> PLL, threading the cube state
  # and the accumulated move counter through the four stages.
  #
  # FIX: every stage was previously executed twice (once for $counter, once
  # for $cube); one call per stage yields the same state and count while
  # halving the work and the redundant plotting.
  res <- CROSS(x, c)
  c <- res$counter
  x <- res$cube
  res <- F2L(x, c)
  c <- res$counter
  x <- res$cube
  res <- OLL(x, c)
  c <- res$counter
  x <- res$cube
  res <- PLL(x, c, 0)
  c <- res$counter
  x <- res$cube
  plot(x)
  #Sys.sleep(0.1)
  return(list("cube" = x, "counter" = c))
}
#####################################################################################
check_solve <- function(first = 801, last = 1000)
{
  # Regression check: apply each pre-generated scramble (objects named
  # "scramble<i>" in the search path) to a solved cube, solve it with
  # solve_LBL(), and print the index of any scramble that does not come back
  # to the solved state.
  #
  # Generalized: the scramble index range is now parameterized; the defaults
  # keep the original 801:1000 behaviour.
  # FIX: replaced eval(parse(text = ...)) with get(), the idiomatic and far
  # safer way to look up an object by a constructed name.
  solved <- getCubieCube("Solved")
  for (i in first:last)
  {
    x <- getCubieCube("Solved")
    sc <- get(paste0("scramble", i))
    x <- move(x, moves = sc)
    x <- solve_LBL(x, 0)$cube
    if (!(x == solved))
    {
      print(i)
    }
  }
}
|
12aa75f60c23c32f09e5c3d368161aa81e7c0613
|
4d4d672003cb95de01f224fc5d17b727bc6949f6
|
/man/getBenchMark.Rd
|
450415c967d8268ffdba3c72914b403a44ca47d3
|
[] |
no_license
|
lukas1421/chinaTrading
|
b2d98620cf8a1dda8df1aa79caac23446f173fec
|
a5b3ad12e96b45ecea62b56127024298d814d66c
|
refs/heads/master
| 2021-01-02T15:38:38.140732
| 2019-05-12T23:34:51
| 2019-05-12T23:34:51
| 99,306,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 370
|
rd
|
getBenchMark.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bench.R
\name{getBenchMark}
\alias{getBenchMark}
\title{get bench of a stock and output to folder (ticker, chn, index, correl, indexChn, indexChnCorrel)}
\usage{
getBenchMark()
}
\description{
get bench of a stock and output to folder (ticker, chn, index, correl, indexChn, indexChnCorrel)
}
|
fb6989179c81e975d0f63772d0962de375b481a0
|
197c4c93cc1c5417b7845c141b58a8f781213d74
|
/man/cksum.Rd
|
421b61c0b5cc4e3e8d9492bcf844e164c32a486e
|
[] |
no_license
|
genome-vendor/r-cran-bitops
|
89864b8e292984ba5b31ba694b97ab0bf754e4fe
|
f312c616ebab018c7e53f92302d2c88f72c4ec5a
|
refs/heads/master
| 2016-09-06T01:45:40.861289
| 2012-04-11T04:10:33
| 2012-04-11T04:10:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 999
|
rd
|
cksum.Rd
|
\name{cksum}
\alias{cksum}
\title{Compute Check Sum}
\description{
return a cyclic redundancy checksum for each element in the argument.
}
\usage{
cksum(a)
}
\arguments{
\item{a}{coerced to character vector}
}
\details{
NA's appearing in the argument are returned as NA's.
The default calculation is identical to that given in pseudo-code in the
ACM article (in the References).
}
\value{
numeric vector of the same length as \code{a}.
}
\references{
Fashioned from \code{cksum(1)} UNIX command line utility, i.e.,
\code{man cksum}.
Dilip V. Sarwate (1988).
Computation of Cyclic Redundancy Checks Via Table Lookup,
\emph{Communications of the ACM}, August 1988.
\bold{vol} 31, No.8 page 1008-1013
}
\author{Steve Dutky \email{sdutky@terpalum.umd.edu}
}
% \seealso{ ~~objects to See Also as \code{\link{~~fun~~}}, ~~~ }
\examples{
b <- "I would rather have a bottle in front of me than frontal lobotomy\n"
cksum(b) == 1342168430 ## -> TRUE
}
\keyword{arith}
\keyword{utilities}
|
43e5993bb8b284bbb18e943d80dc87d51b82aa8c
|
df87bbaf8fd9b169c9a27607fd0af1c098435256
|
/man/RMS.Rd
|
24b1193e608c017e36618aae1c6b0b645086afda
|
[] |
no_license
|
cran/Convolutioner
|
714b0168dc2e805b4a27edde1dd058cf114a633d
|
187d871def5abca0293bb2be006a74065f45e326
|
refs/heads/master
| 2023-03-15T07:41:37.819585
| 2021-03-11T09:40:02
| 2021-03-11T09:40:02
| 346,752,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 756
|
rd
|
RMS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filters.R
\name{RMS}
\alias{RMS}
\title{Running median smoothing.}
\usage{
RMS(raw_data, buffer_size = 5)
}
\arguments{
\item{raw_data}{Data upon which the algorithm is applied}
\item{buffer_size}{number of points the algorithm use to compute the median}
}
\value{
Smoothed data using running median algorithm
}
\description{
This function returns the data smoothed using the running median
algorithm. For each chunk of data of size equal to the buffer_size
parameter, the median is calculated and used as the i-th term of the newly
smoothed data.
For initial and final values, zero padding is applied.
}
\examples{
raw_data = c(1:100)
smoothed_data = RMS(raw_data)
}
|
2b03a561d1d16704cde5460f728c9fc6e745ca59
|
dd9f94ac6181401f0ea48e78f9731f1c337b85ca
|
/inst/code/gen_original_results.R
|
2aa13b45bb3e5bae510a4772891fb009d79f648c
|
[
"MIT"
] |
permissive
|
unagpal/susieR
|
67de44e321ec21599d56342df409fa2c3dc8eef1
|
46d37a49ccd680b1ff5fa9dfecf8928ca09018cf
|
refs/heads/master
| 2023-07-02T20:50:26.853819
| 2021-07-22T21:13:29
| 2021-07-22T21:13:29
| 276,458,113
| 0
| 0
|
MIT
| 2020-07-01T18:52:45
| 2020-07-01T18:52:44
| null |
UTF-8
|
R
| false
| false
| 2,126
|
r
|
gen_original_results.R
|
## results from original susie
devtools::install_github("stephenslab/susieR")
library(susieR)
# Build an n x p matrix of 0s and 1s in which a fraction (1 - sparsity) of
# the entries, chosen uniformly at random, are set to 1.
#
# Args:
#   sparsity  fraction of entries to leave at zero (e.g. 0.99)
#   n, p      number of rows and columns
# Returns: an n x p numeric matrix of 0/1 values.
create_sparsity_mat = function(sparsity, n, p){
  total_cells <- n * p
  n_nonzero <- round(total_cells * (1 - sparsity))
  flat <- numeric(total_cells)
  flat[sample(total_cells, n_nonzero)] <- 1
  matrix(flat, nrow = n, ncol = p)
}
# Generate reference fixtures from the original susieR implementation.
# Each intermediate quantity is computed once and serialized to an .rds file
# so refactored code can be compared against these "golden" results.
# NOTE: the fixed seed makes the random design matrix and noise reproducible;
# do not reorder the random draws below or the fixtures change.
set.seed(1)
n = 100
p = 200
# Sparse true effect: only the first four coefficients are nonzero.
beta = rep(0,p)
beta[1] = 10
beta[2] = 10
beta[3] = 10
beta[4] = 10
X.dense = create_sparsity_mat(0.99,n,p)
y = c(X.dense %*% beta + rnorm(n))
# Hand-constructed susie fit object `s` with arbitrary-but-fixed values,
# used to exercise the internal helper functions deterministically.
L = 10
residual_variance = 0.8
scaled_prior_variance = 0.2
s = list(alpha=matrix(1/p,nrow=L,ncol=p),
mu=matrix(2,nrow=L,ncol=p),
mu2=matrix(3,nrow=L,ncol=p),
Xr=rep(5,n), KL=rep(1.2,L),
sigma2=residual_variance, V=scaled_prior_variance * as.numeric(var(y)))
# NOTE(review): ::: accesses unexported susieR internals — these fixtures
# break silently if the internal API changes between package versions.
X = susieR:::set_X_attributes(X.dense)
Eb = rep(1, p)
Eb2 = rep(1, p)
s2 = residual_variance
V = scaled_prior_variance
# One fixture per internal helper, saved alongside the script.
objective.original.res = susieR::susie_get_objective(s)
saveRDS(objective.original.res, 'objective_original_res.rds')
Eloglik.original.res = susieR:::Eloglik(X,y,s)
saveRDS(Eloglik.original.res, 'Eloglik_original_res.rds')
ER2.original.res = susieR:::get_ER2(X,y,s)
saveRDS(ER2.original.res, 'ER2_original_res.rds')
SER.original.res = susieR:::SER_posterior_e_loglik(X,y,s2,Eb,Eb2)
saveRDS(SER.original.res, 'SER_original_res.rds')
singleReg.original.res = susieR:::single_effect_regression(y,X,V)
saveRDS(singleReg.original.res, 'singleReg_original_res.rds')
vbupdate.original.res = susieR:::update_each_effect(X, y, s)
saveRDS(vbupdate.original.res, 'vbupdate_original_res.rds')
# End-to-end fits under the four standardize/intercept combinations.
susiefit.original.res = susie(X.dense,y)
saveRDS(susiefit.original.res, 'susiefit_original_res.rds')
susiefit.original.res2 = susie(X.dense, y, standardize = TRUE, intercept = FALSE)
susiefit.original.res3 = susie(X.dense, y, standardize = FALSE, intercept = TRUE)
susiefit.original.res4 = susie(X.dense, y, standardize = FALSE, intercept = FALSE)
saveRDS(susiefit.original.res2, 'susiefit_original_res2.rds')
saveRDS(susiefit.original.res3, 'susiefit_original_res3.rds')
saveRDS(susiefit.original.res4, 'susiefit_original_res4.rds')
|
c0de03416c8f81eeaf9f49d565f5954eb7f64fd8
|
09e4ea6bf480ff5620a7ab24636ad111681b7371
|
/Multiple replicates - changing sex ratio.R
|
cd8418ee10cc49e93175313a9cd4ff2dc74030b7
|
[] |
no_license
|
eringiglio/project_sneaker
|
6e96203c11e55f5807bd9a6c179055583b279ad3
|
1b7fa646424bc7f547199fff162e37af2f8352ac
|
refs/heads/master
| 2020-05-21T04:24:30.794870
| 2018-09-13T22:25:01
| 2018-09-13T22:25:01
| 53,076,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,692
|
r
|
Multiple replicates - changing sex ratio.R
|
# Wright-Fisher-style simulation of a two-allele mating-strategy locus
# (S = sneaker, T = territorial) with a stochastic sex ratio, repeated over
# REPLICATES independent runs. Allele-frequency trajectories of each run are
# accumulated into MODEL_OUTPUT_S / MODEL_OUTPUT_T (one row per replicate).
#Collect runs
MODEL_OUTPUT_S<-data.frame(NULL)
MODEL_OUTPUT_T<-data.frame(NULL)
#Number of generations for simulation to run
#Also set number of replicates
#Useful for when we go to nested for loops or similar for multiple runs, to get summaries
REPLICATES<-100
GENERATIONS<-1000
for (j in 1:REPLICATES){
#Set initial parameter values - population size and allele frequency
POPULATION = 1000
FREQ_T = 0.5
FREQ_S = 1-FREQ_T
#Initial sex ratio
SEX_RATIO_F = 0.5
SEX_RATIO_M = 1-SEX_RATIO_F
#Get number of individuals for each sex and genotype
#Had to add t() here as for some idiotic reason it's giving me a column, rather than a row
#Might be specific to my lab desktop, but if this runs weird on another computer it will give lots of errors
#To fix, just remove the outer parenthesis and the t
#COlumn output makes data storage look weird
# Genotype counts drawn at Hardy-Weinberg proportions: columns are (TT, ST, SS).
NUMBERS_FEMALE<-t(rmultinom(1,POPULATION*SEX_RATIO_F,c(FREQ_T^2,FREQ_T*2*FREQ_S,FREQ_S^2)))
NUMBERS_MALE<-t(rmultinom(1,POPULATION*SEX_RATIO_M,c(FREQ_T^2,FREQ_T*FREQ_S*2,FREQ_S^2)))
DATA_MALE<-NULL
DATA_FEMALE<-NULL
#Add initial values to data output for plotting
# NOTE(review): DATA_MALE / DATA_FEMALE grow by rbind() each generation —
# O(n^2) copying; preallocating a (GENERATIONS+1) x 3 matrix would be faster.
DATA_MALE<-rbind(DATA_MALE,NUMBERS_MALE)
DATA_FEMALE<-rbind(DATA_FEMALE,NUMBERS_FEMALE)
#Collect allele frequency data
DATA_S<-FREQ_S
DATA_T<-FREQ_T
#Collect total population data
DATA_POPULATION<-POPULATION
#Diagnostic variable
CHECK_FREQS<-NULL
for (i in 1:GENERATIONS){
#New generation
#Probabilities for different genotypes of offpring
#HS - homozygote sneaker, HT - homozygote territorial, HET - heterozygote
#Notation: e.g. HSTOHS - probability of HS female having HS offspring
# Offspring genotype probabilities for each maternal genotype, conditional on
# the current male genotype frequencies (index 1 = TT, 2 = ST, 3 = SS).
SS_TO_SS<-(NUMBERS_MALE[3]/sum(NUMBERS_MALE))+(0.5*NUMBERS_MALE[2]/sum(NUMBERS_MALE))
SS_TO_ST<-(NUMBERS_MALE[1]/sum(NUMBERS_MALE))+(0.5*NUMBERS_MALE[2]/sum(NUMBERS_MALE))
TT_TO_TT<-(NUMBERS_MALE[1]/sum(NUMBERS_MALE))+(0.5*NUMBERS_MALE[2]/sum(NUMBERS_MALE))
TT_TO_ST<-(NUMBERS_MALE[3]/sum(NUMBERS_MALE))+(0.5*NUMBERS_MALE[2]/sum(NUMBERS_MALE))
ST_TO_TT<-((0.5*(NUMBERS_MALE[1]/sum(NUMBERS_MALE)))+(0.25*(NUMBERS_MALE[2]/sum(NUMBERS_MALE))))
ST_TO_SS<-((0.5*(NUMBERS_MALE[3]/sum(NUMBERS_MALE)))+(0.25*(NUMBERS_MALE[2]/sum(NUMBERS_MALE))))
ST_TO_ST<-(1-ST_TO_TT-ST_TO_SS)
#Now caclculating offspring numbers from females of each genotype...
# Each female contributes two offspring (hence the *2), keeping the census
# population size roughly constant across generations.
OFFSPRING_OF_SS<-rmultinom(1,NUMBERS_FEMALE[3]*2,c(SS_TO_SS,SS_TO_ST))
OFFSPRING_OF_TT<-rmultinom(1,NUMBERS_FEMALE[1]*2,c(TT_TO_ST,TT_TO_TT))
OFFSPRING_OF_ST<-rmultinom(1,NUMBERS_FEMALE[2]*2,c(ST_TO_TT,ST_TO_ST,ST_TO_SS))
#For next generation, let's find out how many offspring of each genotype...
POPULATION<-sum(OFFSPRING_OF_TT,OFFSPRING_OF_ST,OFFSPRING_OF_SS)
TOTAL_SS<-sum(OFFSPRING_OF_SS[1],OFFSPRING_OF_ST[3])
TOTAL_ST<-sum(OFFSPRING_OF_SS[2],OFFSPRING_OF_ST[2],OFFSPRING_OF_TT[1])
TOTAL_TT<-sum(OFFSPRING_OF_ST[1],OFFSPRING_OF_TT[2])
# Allele frequencies from genotype counts (each individual carries 2 alleles).
FREQ_T<-((2*TOTAL_TT) + TOTAL_ST)/(POPULATION*2)
FREQ_S<-(2*(TOTAL_SS) + TOTAL_ST)/(POPULATION*2)
#Stochastic sex ratio - draw from a beta distribution
#Parameters are 50/50 - this is completely arbitrary and we can play with it
ALPHA<-50
BETA<-50
SEX_RATIO_F<-rbeta(1, ALPHA,BETA)
SEX_RATIO_M<-1-SEX_RATIO_F
#Calculating our numbers of males and females for the next gen...
# ceiling()/floor() pair keeps female + male counts >= / <= the exact split;
# note this can make the sexes' totals differ from POPULATION by up to 3.
NUMBERS_FEMALE<-c(ceiling(SEX_RATIO_F*TOTAL_TT),ceiling(SEX_RATIO_F*TOTAL_ST),ceiling(SEX_RATIO_F*TOTAL_SS))
NUMBERS_MALE<-c(floor(SEX_RATIO_M*TOTAL_TT),floor(SEX_RATIO_M*TOTAL_ST),floor(SEX_RATIO_M*TOTAL_SS))
DATA_MALE<-rbind(DATA_MALE,NUMBERS_MALE)
DATA_FEMALE<-rbind(DATA_FEMALE,NUMBERS_FEMALE)
DATA_POPULATION<-c(DATA_POPULATION,POPULATION)
DATA_S<-c(DATA_S,FREQ_S)
DATA_T<-c(DATA_T,FREQ_T)
# Sanity check: allele frequencies should always sum to 1.
CHECK_FREQS[i]<-(FREQ_S+FREQ_T)
}
#Get total number of males and females at each step
TOTAL_NUMBER_OF_FEMALES<-NULL
TOTAL_NUMBER_OF_MALES<-NULL
# NOTE(review): '1:GENERATIONS+1' parses as (1:GENERATIONS)+1, i.e.
# 2:(GENERATIONS+1) — the first generation (row 1) is skipped and left NA.
# The intended form is seq_len(GENERATIONS + 1). Verify before relying on
# TOTAL_NUMBER_OF_* at index 1.
for(i in 1:GENERATIONS+1){
TOTAL_NUMBER_OF_FEMALES[i]<-sum(DATA_FEMALE[i,])
TOTAL_NUMBER_OF_MALES[i]<-sum(DATA_MALE[i,])
}
# Diagnostic expression: TRUE iff every generation's frequencies summed to 1.
# NOTE(review): result is discarded (not assigned or printed inside the loop).
sum(CHECK_FREQS)==GENERATIONS
#Get output for each run
MODEL_OUTPUT_S<-rbind(MODEL_OUTPUT_S,DATA_S)
MODEL_OUTPUT_T<-rbind(MODEL_OUTPUT_T,DATA_T)
}
# Post-processing: mean sneaker-allele frequency across replicates at each
# generation (column-wise mean of MODEL_OUTPUT_S).
X <- seq_len(GENERATIONS + 1)
OUTPUT_MEAN_S <- numeric(GENERATIONS + 1)
OUTPUT_MEAN_T <- NULL   # kept for compatibility; never filled in the original
# FIX: the original loop header 'for (i in 1:GENERATIONS+1)' parses as
# (1:GENERATIONS)+1, i.e. 2:(GENERATIONS+1), so the first generation's mean
# was never computed (left NA). seq_len() covers every column, 1..G+1.
for (i in seq_len(GENERATIONS + 1)) {
  OUTPUT_MEAN_S[i] <- mean(as.numeric(MODEL_OUTPUT_S[, i]))
}
#plot(cbind(X,DATA_T),ylim=c(0,1),col='red')
#points(cbind(X,DATA_S))
#plot(cbind(X,DATA_POPULATION))
#Marks initial population
#abline(h=1000)
#points(cbind(X,TOTAL_NUMBER_OF_MALES),col='red')
#points(cbind(X,TOTAL_NUMBER_OF_FEMALES),col='darkgoldenrod')
#Diagnostic, should be equal to generations
|
ce4396fbf87299553586b8ba54e54bc28fbb360d
|
6c4d4ed84895fc1fc8fad4f6470bf41d1932f573
|
/define_segments.R
|
a9ffc20b2cc8219ac00749e727a4490521c468f3
|
[] |
no_license
|
cise-midoglu/coverage-visualization
|
a94a45d8d994479d615c5fee3689193cd42b4321
|
319bf2b52c884cafc155ffc886fb53fc880f61b8
|
refs/heads/master
| 2020-03-19T14:24:11.787408
| 2018-06-08T13:46:58
| 2018-06-08T13:46:58
| 136,620,945
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,244
|
r
|
define_segments.R
|
#library(fields)
#require2(geosphere)
source("util.R")
# Total length of a polyline, summing the haversine (great-circle) distance
# between consecutive rows of `path` (one coordinate pair per row; relies on
# haversine_dist2() from util.R — assumed (lon, lat), TODO confirm order).
#
# FIX: 'for (i in 1:n)' with n == 0 iterated over c(1, 0); seq_len() with a
# lower bound of 0 yields an empty loop, so 0- or 1-point paths return 0.
get_total_dist <- function(path) {
  total <- 0
  for (seg in seq_len(max(nrow(path) - 1, 0))) {
    from <- as.numeric(path[seg, ])
    to <- as.numeric(path[seg + 1, ])
    total <- total + haversine_dist2(from, to)
  }
  total
}
# Total Euclidean (planar) length of a polyline given as rows of (x, y)
# coordinates. Companion to get_total_dist(), which uses haversine distance.
#
# FIX: 'for (i in 1:n)' with n == 0 iterated over c(1, 0), indexing past the
# data; seq_len() with a lower bound of 0 yields an empty loop instead, so
# 0- or 1-point paths correctly return 0.
get_total_dist2 <- function(path) {
  total <- 0
  for (i in seq_len(max(nrow(path) - 1, 0))) {
    p1 <- as.numeric(path[i, ])
    p2 <- as.numeric(path[i + 1, ])
    d <- p1 - p2
    total <- total + sqrt(sum(d * d))
  }
  total
}
# Polar representation of segment i of `path`: the vector from row i to
# row i + 1 expressed as c(r, theta), where r is the Euclidean length and
# theta the atan2() angle in radians.
get_polar <- function(path, i) {
  start_pt <- as.numeric(path[i, ])
  end_pt <- as.numeric(path[i + 1, ])
  delta <- end_pt - start_pt
  c(sqrt(sum(delta * delta)), atan2(delta[2], delta[1]))
}
# Inverse of get_polar(): convert a polar pair c(r, theta) back to a
# Cartesian vector c(x, y).
get_vec <- function(p) {
  r <- p[1]
  theta <- p[2]
  c(r * cos(theta), r * sin(theta))
}
# Resample `path` (rows of coordinate pairs) into points spaced a constant
# arc distance `w` apart, walking along each original segment and inserting
# interpolated points via fraction_circle(). Distances come from
# haversine_dist2(); both helpers are sourced from util.R.
# Returns the resampled path (value of the final assignment to path2).
rebin <- function(path, w) {
  n <- nrow(path) - 1
  # Start the output with the first original point.
  path2 <- path[1,]
  # dr = distance still to walk before emitting the next point;
  # r  = length of the current segment (0 before the first iteration).
  dr <- w
  r <- 0
  for(i in 1:n) {
    # 'point' is a zero-valued copy of an output row — adding it to a plain
    # numeric vector preserves the data-frame row structure/column names.
    point <- path2[1,]
    point <- point - point
    p1 <- as.numeric(path[i+0,])
    p2 <- as.numeric(path[i+1,])
    # Carry over the distance already walked on the previous segment.
    # NOTE(review): this pattern (dr <- dr - r before recomputing r) assumes
    # the while-loop below left r as the unwalked remainder — verify against
    # util.R's haversine_dist2/fraction_circle semantics.
    dr <- dr - r
    r <- haversine_dist2(p1,p2)
    while(dr<r) {
      # Step dr/r of the way along the current great-circle segment and
      # emit that interpolated point; then continue from it.
      p1 <- fraction_circle(p1, p2, dr/r)
      row <- p1
      path2 <- rbind(path2, point + row)
      r <- r - dr
      dr <- w
    }
  }
  # Always append the exact final point of the original path.
  p1 <- as.numeric(path2[nrow(path2),])
  p2 <- as.numeric(path[nrow(path)-0,])
  r <- haversine_dist2(p1,p2)
  p1 <- fraction_circle(p1, p2, 1.0)
  path2 <- rbind(path2, point + p1)
}
# Resample a path into equal-length segments of roughly 5 distance units:
# the exact bin width is chosen so an integer number of segments tiles the
# path's total haversine length. Prints the total length and chosen width
# as progress output, then delegates to rebin().
resample = function(path) {
  target_size <- 5.0
  total_length <- get_total_dist(path)
  n_bins <- as.integer(total_length / target_size)
  width <- total_length / n_bins
  print(total_length)
  print(width)
  rebin(path, width)
}
# Driver script: load the saved paths, plot the originals (top panel),
# resample every path to ~5-unit segments, and plot the result (bottom).
par(mfrow=c(2,1))
p = readRDS("paths.rds")
# Small helper combining points and connecting lines for one path.
# NOTE(review): defined but never called below — candidate for removal or
# for replacing the duplicated plot/lines calls.
myplot = function(p) {
plot(p)
lines(p)
}
#plot(p[[1]], xlim = c(5,12), ylim=c(58, 64))
plot(p[[1]])
lines(p[[1]])
# Overlay the remaining original paths on the same axes.
for(i in 2:length(p)) {
lines(p[[i]])
points(p[[i]])
}
# Resample each path; resample() prints total length and bin width.
pn = lapply(p, resample)
##plot(pn[[1]], xlim = c(5,12), ylim=c(58, 64))
plot(pn[[1]])
lines(pn[[1]])
for(i in 2:length(pn)) {
lines(pn[[i]])
points(pn[[i]])
}
##xsaveRDS(pn,"paths_fix.rds")
|
01f405f893fc5b0ecbada9ea2cadcfcef9e932d8
|
5cc023e4961c5a6ab8a2bce04c69e01de18d5400
|
/Twitter Analysis.R
|
2f50089d0a45b700da301520813969ec9a471ae5
|
[] |
no_license
|
pranavsinha88/pranavsinha88-Code-Repo
|
34883af0425238a9c66f7841852b4b9a9cf3cfe7
|
d1fe51c1b3ea6f16ec84bd205939febf4276ed54
|
refs/heads/master
| 2021-01-25T06:49:14.530018
| 2017-06-07T09:06:30
| 2017-06-07T09:06:30
| 93,615,116
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,910
|
r
|
Twitter Analysis.R
|
# Install any packages in `pkg` that are not yet installed, then attach them
# all. Returns a named logical vector (one entry per package) indicating
# whether each package could be attached.
ipak <- function(pkg){
  missing_pkgs <- pkg[!pkg %in% installed.packages()[, "Package"]]
  if (length(missing_pkgs)) {
    install.packages(missing_pkgs, dependencies = TRUE)
  }
  sapply(pkg, require, character.only = TRUE)
}
# Packages required for the tweet analysis pipeline; ipak() installs any
# that are missing and attaches them all.
tweet_analysis_package <- c("tm" , "NLP" , "twitterMap", "openxlsx", "xlsx", "topicmodels", "lda", "sna", "twitteR",
"ROAuth", "ggplot2", "wordcloud", "igraph", "Rgraphviz","sentR", "qdap", "lazy","plyr",
"Rcpp")
ipak(tweet_analysis_package)
# SECURITY NOTE(review): live Twitter API credentials are hardcoded below.
# These should be revoked and moved to environment variables or a gitignored
# config file — never committed to source control.
consumer_key <- "VeHLTFMWhVQjwIWfjHpZpTw0d"
consumer_secret <- "vhx2fR9Ac9Psj0CJruCiJTENBjMVcn5z4k1AbKIRD5cfMRaQyO"
access_token <- "809685705710407680-48HBe2AKpFAnPipBxmSGKZ4eyw2w5KK"
access_secret <- "2ZrjRAdZoywAU39AeeDM6rbIRWSXIAE6hcFE4DloyMcwO"
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# Pull up to 3200 tweets (the API maximum) from the target account and
# snapshot them to CSV for offline reuse.
tweets <- userTimeline("ABBgroupnews", n = 3200)
tweets_df <- do.call("rbind", lapply(tweets, as.data.frame))
write.csv(tweets_df,file="ABBtwitterList.csv")
n.tweet <- length(tweets)
tweets.df <- twListToDF(tweets)
# Spot-check one record and wrap its text for readable console output.
tweets.df[100, c("id", "created", "screenName", "replyToSN", "favoriteCount", "retweetCount",
"longitude", "latitude", "text")]
writeLines(strwrap(tweets.df$text[100], 60))
# Reload the snapshot; dataHandling() reads `dataworking` plus the flag
# globals below from the global environment.
data.to.clean <- read.csv("ABBtwitterList.csv")
dataworking <- data.to.clean
names(dataworking)
# You need to use one variable at a time
varName = c("text") ### THIS IS THE VARIABLE(CONTENTS) THAT NEEDS TO BE CLEANED
newVarName=c("cleansed")
# Global switches consumed by dataHandling(); each toggles one cleaning step.
removeEmails = TRUE
removeUrls = TRUE
removePhoneNumber = TRUE
removeNumber = TRUE
removePunctuations = TRUE
removeStopwords = TRUE
stripWhitespaces = TRUE
stemDoc = TRUE
# Clean one text column of the global data frame `dataworking`: strip
# emails/URLs/phone numbers, drop very short/long words, apply tm corpus
# transformations (stopwords, numbers, punctuation, whitespace), and append
# the result as a new column named "<newVarName>_<varName>".
#
# Args:
#   varName     name of the column in `dataworking` to clean
#   newVarName  prefix for the new cleaned column
# Side effects: reassigns `dataworking` in the GLOBAL environment via
# assign(); also reads the global flag variables removeEmails, removeUrls,
# removePhoneNumber, removeNumber, removePunctuations, removeStopwords,
# stripWhitespaces set by the calling script.
# NOTE(review): heavy reliance on globals makes this hard to reuse — the
# flags and `dataworking` would be better passed as arguments.
dataHandling <- function(varName, newVarName)
{
# Regular expressions to match 1. Email, 2. URL, 3. Phone number
#---------------------------------------------------------------
# NOTE(review): the email pattern appears to contain an unbalanced ')'
# before "|\\.travel" — verify that gsub() accepts it; if it errors, the
# removeEmails branch will fail at runtime.
email.expression <- "[A-Za-z0-9-]+[.A-Za-z0-9-]*@[A-Za-z0-9-]+(\\.com|\\.co.in|\\.net|\\.org|\\.info|\\.edu|\\.mil|\\.gov|\\.biz|\\.ws|\\.us|\\.tv|\\.cc|\\.aero|\\.arpa|\\.coop|\\.int|\\.jobs|\\.museum|\\.name|\\.pro)|\\.travel|\\.nato)"
url.expression <- "(http://|https://|www.)[[:alnum:]~!#$%&+-=?,:/;._]*"
phonenumber.expression <- "\\+?(\\d{2,3})[- ]?\\(?(\\d{3,5})\\)?[- ]?(\\d{3,5})[- ]?(\\d{4})?"
# To read data from a single csv file and create a dataset of required column
#----------------------------------------------------------------------------
varIndex <- which(colnames(dataworking)==varName)
corpus <- tolower(dataworking[,varIndex])
# To remove emails from dataset
#----------------------------------------------------------------------------
if(removeEmails) {
corpus <- gsub(email.expression,' ', corpus, ignore.case = TRUE)
}
# To remove urls from dataset
#----------------------------------------------------------------------------
if(removeUrls) {
corpus <- gsub(url.expression,' ', corpus, ignore.case = TRUE)
}
# To remove phone numbers from dataset
#----------------------------------------------------------------------------
if(removePhoneNumber) {
corpus <- gsub(phonenumber.expression,' ', corpus, ignore.case = TRUE)
}
# split into distinct words
w <- strsplit( corpus , " " )
# NOTE(review): corpus1 grows via c() inside the loop — O(n^2); consider
# vapply() over w instead.
corpus1<-c()
for(n in 1:length(w)){
# calculate the length of each word
x <- nchar( w[[n]] )
# keep only words with length 3 to 200
y <- w[[n]][ x %in% 3:200 ]
# string 'em back together
y <- paste( unlist( y ), collapse = " " )
corpus1<- c(corpus1,y)
}
# To covert dataset into a Corpus; required for executing 'tm_map' functions
#----------------------------------------------------------------------------
corpus <- Corpus(VectorSource(corpus1))
# To remove stopwords from corpus
#----------------------------------------------------------------------------
# Negation words (no/nor/not) are kept because they carry sentiment signal.
if(removeStopwords) {
corpus <- tm_map(corpus, removeWords, stopwords("english")[!(stopwords("english") %in% c("no","nor","not"))])
}
# To remove numbers from corpus
#----------------------------------------------------------------------------
if(removeNumber) {
corpus <- tm_map(corpus, removeNumbers)
}
# To remove punctuations from corpus
#----------------------------------------------------------------------------
if(removePunctuations) {
corpus <- tm_map(corpus, removePunctuation, preserve_intra_word_dashes = TRUE)
}
# To remove additional whitespaces from corpus
#----------------------------------------------------------------------------
if(stripWhitespaces) {
corpus <- tm_map(corpus, stripWhitespace)
}
# To add data post pre-processing as a new column in the original dataset
#----------------------------------------------------------------------------
dataSize <- nrow(dataworking)
newCol <- unlist(corpus[1:dataSize])
# NOTE(review): the stride of 12 below presumably compensates for unlist()
# flattening each tm document into 12 fields (content + metadata) — this is
# fragile across tm versions; confirm the field count before relying on it.
x=NULL
i=1
while(i<=length(newCol))
{
x[i] = newCol[i]
i=i+12
}
newCol=x[!is.na(x)]
tmDataSetNew <- as.data.frame(newCol)
newColIndex <- which(colnames(tmDataSetNew)=='newCol')
colnames(tmDataSetNew) = paste(newVarName,varName,sep="_")
# To clear all variable used
#----------------------------------------------------------------------------
# NOTE(review): several names here (filePath, fileName, fileLoc, tmDataSet)
# are never defined in this function, so rm() emits warnings for them.
rm(list=c("filePath","fileName","fileLoc", "varName","newVarName","tmDataSet"
,"corpus","newCol","newColIndex","varIndex","dataSize"))
dataworking <- cbind(dataworking,tmDataSetNew)
assign("dataworking",dataworking,envir=.GlobalEnv)
}
# Run the cleaning pipeline (appends `cleansed_text` to global `dataworking`),
# strip non-ASCII characters, and snapshot the cleansed table.
dataHandling(varName, newVarName)
dataworking$cleansed_text <- iconv(dataworking$cleansed_text, "latin1", "ASCII", sub="")
write.csv(dataworking,"cleansed_finaldata.csv")
# Build a tm corpus from the cleansed text and stem it; CorpusCopy keeps the
# unstemmed version as the dictionary for stem completion later.
mycorpus <- dataworking
names(mycorpus)
mycorpus$cleansed_text <- tolower(mycorpus$cleansed_text)
corpus <- Corpus(VectorSource(mycorpus$cleansed_text))
dtm_train <- DocumentTermMatrix(corpus)
sparse_tr <- as.matrix(removeSparseTerms(dtm_train, .995))
col_tr <- (colnames(sparse_tr))
col_tr1 <- col_tr
class(corpus)
CorpusCopy <- corpus
corpus <- tm_map(corpus, stemDocument)
writeLines(strwrap(corpus[[100]]$content, 60))
# Complete the stems of every word in one document back to full words using
# `dictionary` (an unstemmed reference corpus), then reassemble the words
# into a single whitespace-normalized PlainTextDocument.
stemCompletion2 <- function(x, dictionary) {
  words <- unlist(strsplit(as.character(x), " "))
  words <- words[words != ""]
  completed <- stemCompletion(words, dictionary = dictionary)
  text <- paste(completed, sep = "", collapse = " ")
  PlainTextDocument(stripWhitespace(text))
}
# Un-stem the corpus using the unstemmed copy as dictionary, then rebuild it.
corpus <- lapply(corpus, stemCompletion2, dictionary=CorpusCopy)
corpus <- Corpus(VectorSource(corpus))
writeLines(strwrap(corpus[[100]]$content, 60))
# Term frequencies: keep terms appearing at least 7 times and bar-plot them.
tdm <- TermDocumentMatrix(corpus, control = list(wordLengths = c(1, Inf)))
freq.terms <- findFreqTerms(tdm, lowfreq = 7)
freq.terms
term.freq <- rowSums(as.matrix(tdm))
term.freq <- subset(term.freq, term.freq >=7)
df <- data.frame (term = names(term.freq), freq = term.freq)
ggplot(df, aes(x=term, y=freq)) + geom_bar(stat = "identity") + xlab("Terms") +ylab("Count") + coord_flip() +
theme(axis.text = element_text(size=10))
# Word cloud of all terms (min frequency 3).
m <- as.matrix(tdm)
word.freq <- sort(rowSums(m), decreasing = T)
pal <- brewer.pal(9, "BuGn") [-(1:4)]
pal2 <- brewer.pal(8,"Dark2")
wordcloud(words = names(word.freq), freq = word.freq, min.freq = 3, random.order = F, colors = pal2)
# Term associations (correlation >= 0.2) with selected keywords.
findAssocs(tdm, "abb", 0.2)
findAssocs(tdm, "technologies", 0.2)
# NOTE(review): installing packages mid-analysis (here and below) is bad
# practice — move all installs to a setup script; biocLite is also deprecated
# in favour of BiocManager.
source("http://bioconductor.org/biocLite.R")
biocLite("Rgraphviz")
plot(tdm, term = freq.terms, corThreshold = 0.1, weighting = T)
# Topic modelling: fit an 8-topic LDA and plot topic prevalence over time.
dtm <- as.DocumentTermMatrix(tdm)
lda <- LDA(dtm, k = 8)
term <- terms(lda, 7)
(term <- apply(term, MARGIN = 2, paste, collapse = ", "))
topics <- topics(lda)
topics <- data.frame(date=as.Date(tweets.df$created), topic=topics)
ggplot(topics, aes(date, fill = term[topic])) + geom_density(position = "stack")
# Sentiment scoring with the sentiment140 package; daily net score plotted.
install.packages(c('devtools','curl'))
library("devtools")
require(devtools)
library(plyr)
install_github("sentiment140","okugami79")
library(sentiment)
library(sentiment)
sentiments <- sentiment(tweets.df$text)
table(sentiments$polarity)
sentiments$score <- 0
sentiments$score[sentiments$polarity == "positive"] <- 1
sentiments$score[sentiments$polarity == "negative"] <- -1
sentiments$date <- as.Date(tweets.df$created)
result <- aggregate(score ~ date, data = sentiments, sum)
plot(result, type = "l")
# Account network: fetch friends/followers of the target user.
user <- getUser("ABBgroupnews")
user$toDataFrame()
friends <- user$getFriends()
followers <- user$getFollowers()
#followers2 <- followers[[1]]$getFollowers()
# Retweet analysis: highlight and label the most-retweeted tweets (>= 38)
# on the retweet-count timeline.
table(tweets.df$retweetCount)
selected <- which(tweets.df$retweetCount >= 38 )
dates <- strptime(tweets.df$created, format="%Y-%m-%d")
plot(x=dates, y=tweets.df$retweetCount, type="l", col="grey", xlab="Date", ylab="Times retweeted")
colors <- rainbow(10)[1:length(selected)]
points(dates[selected], tweets.df$retweetCount[selected], pch=19, col=colors)
text(dates[selected], tweets.df$retweetCount[selected], tweets.df$text[selected], col=colors, cex=.9)
|
67610d2e35bbf04bfa4b876520e5df328db3ca65
|
92a0b69e95169c89ec0af530ed43a05af7134d45
|
/man/load.source.directory.Rd
|
6dad5e0c739d7ec9c8413ba2d2e19365d3d1316c
|
[] |
no_license
|
gelfondjal/IT2
|
55185017b1b34849ac1010ea26afb6987471e62b
|
ee05e227403913e11bf16651658319c70c509481
|
refs/heads/master
| 2021-01-10T18:46:17.062432
| 2016-01-20T17:51:29
| 2016-01-20T17:51:29
| 21,449,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 525
|
rd
|
load.source.directory.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/load_source_directory.R
\name{load.source.directory}
\alias{load.source.directory}
\title{Runs all source files within the directory source.directory}
\usage{
load.source.directory(source.directory)
}
\arguments{
\item{source.directory}{is a directory with R source files to load}
}
\value{
source file list
}
\description{
Runs all source files within the directory source.directory
}
\details{
Looks for files with .R or .r suffixes.
}
|
8280c20d6adf2bdb4538d2df1ab634495d3da6f4
|
e8aa4ec68533b288ee18c609328086430b2322e4
|
/man/uv_charts.Rd
|
691d9bf8ed6025a1194f0279d5da452d121a612a
|
[] |
no_license
|
JohnCoene/uvcharts
|
473b40bcbcdcbd1e98ad154d5bd4e88def00ec57
|
bf5c7c9a2a768c59979b4a4f7a61ffb7222d679b
|
refs/heads/master
| 2021-01-21T10:30:06.544566
| 2017-04-05T08:30:39
| 2017-04-05T08:30:39
| 83,441,711
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,103
|
rd
|
uv_charts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uvcharts.R
\name{uv_charts}
\alias{uv_area}
\alias{uv_bar}
\alias{uv_charts}
\alias{uv_donut}
\alias{uv_line}
\alias{uv_percentarea}
\alias{uv_percentbar}
\alias{uv_pie}
\alias{uv_polar}
\alias{uv_stackarea}
\alias{uv_stackbar}
\alias{uv_stepupbar}
\alias{uv_waterfall}
\title{Setup a uvchart}
\usage{
uv_charts(data, x, type = "Line", width = "100\%", height = "100\%",
elementId = NULL)
uv_bar(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_line(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_area(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_stackbar(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_stackarea(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_pie(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_donut(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_percentbar(data, x, width = "100\%", height = "100\%",
elementId = NULL)
uv_percentarea(data, x, width = "100\%", height = "100\%",
elementId = NULL)
uv_stepupbar(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_polar(data, x, width = "100\%", height = "100\%", elementId = NULL)
uv_waterfall(data, x, width = "100\%", height = "100\%", elementId = NULL)
}
\arguments{
\item{data}{data.frame of data to plot.}
\item{x}{\code{x} variable.}
\item{type}{type of chart to plot, see details.}
\item{width, height}{Must be a valid CSS unit (like \code{'100\%'},
\code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
string and have \code{'px'} appended.}
\item{elementId}{id of div element containing chart.}
}
\description{
Initiate a uvchart
}
\examples{
mtcars \%>\%
uv_charts(mpg, type = "Area") \%>\%
uv_add(qsec)
mtcars \%>\%
uv_bar(mpg) \%>\%
uv_add(qsec)
mtcars \%>\%
uv_line(mpg) \%>\%
uv_add(qsec)
mtcars \%>\%
uv_pie(mpg) \%>\%
uv_add(qsec)
mtcars \%>\%
uv_stackbar(mpg) \%>\%
uv_add(qsec)
mtcars \%>\%
uv_percentarea(mpg) \%>\%
uv_add(qsec)
}
|
ebca8173b3826e96abd44636716c07bec7f34fde
|
f79cd4e052c5cbb24e7ef3e4bec1c39f9ce4e413
|
/BEMTOOL-ver2.5-2018_0901/src/biol/bmtALADYM/ALADYM-ver12.3-2017_0501/gui/guicontrols/fisheryControls/deactivate_FishingEffort_unused_params.r
|
1e10edbea5023fe58ea63eb44c807b72f8b33563
|
[] |
no_license
|
gresci/BEMTOOL2.5
|
4caf3dca3c67423af327a8ecb1e6ba6eacc8ae14
|
619664981b2863675bde582763c5abf1f8daf34f
|
refs/heads/master
| 2023-01-12T15:04:09.093864
| 2020-06-23T07:00:40
| 2020-06-23T07:00:40
| 282,134,041
| 0
| 0
| null | 2020-07-24T05:47:24
| 2020-07-24T05:47:23
| null |
UTF-8
|
R
| false
| false
| 5,068
|
r
|
deactivate_FishingEffort_unused_params.r
|
# ALADYM Age length based dynamic model - version 12.3
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# ALADYM is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
deactivate_FishingEffort_unused_params <-function(w){
  # Enable/disable the fishery-effort GUI controls according to the selected
  # input mode: raw effort data (vessels / days-at-sea / GT) or pre-computed
  # fishing coefficients. All controls are first enabled, then the controls
  # belonging to the non-selected mode are disabled.
  # `w` (the emitting widget) is unused, as in the original signature.
  #
  # Refactor: the long repeated runs of gtkWidgetSetSensitive() calls are
  # replaced by a small local helper applied to widget groups; the final
  # sensitivity state of every widget is unchanged from the original.

  # Apply one sensitivity state to each widget in `widgets`, in order.
  set_all <- function(widgets, state) {
    for (widget in widgets) {
      gtkWidgetSetSensitive(widget, state)
    }
  }

  # Widgets tied to raw effort-data input (vessels / days-at-sea / GT).
  effort_inputs <- list(VESSELS.treeview, entry_VESSELS_seedvalue, btn_load_seed_VESSELS,
                        DAYS.treeview, entry_DAYS_seedvalue, btn_load_seed_DAYS,
                        GT.treeview, entry_GT_seedvalue, btn_load_seed_GT)
  # Widgets tied to fishing-coefficient input.
  fc_inputs <- list(FISHINGEFFORT.treeview, btn_load_seed_FISHINGEFFORT,
                    entry_FISHINGEFFORT_seedvalue)
  # Forecast-phase counterparts (only touched outside BEMTOOL, or inside
  # BEMTOOL during the FORECAST phase — same condition as the original).
  effort_fore_inputs <- list(VESSELS_fore.treeview, DAYS_fore.treeview, GT_fore.treeview)
  forecast_active <- (!IN_BEMTOOL) | (IN_BEMTOOL & phase=="FORECAST")

  # Step 1: enable everything.
  set_all(effort_inputs, TRUE)
  set_all(fc_inputs, TRUE)
  gtkWidgetSetSensitive(button_load_fishingcoeff, TRUE)
  gtkWidgetSetSensitive(button_load_effortdata, TRUE)
  # gtkWidgetSetSensitive(button_exp_fishingcoeff, TRUE)   # disabled in original
  gtkWidgetSetSensitive(button_exp_effortdata, TRUE)
  if (forecast_active) {
    gtkWidgetSetSensitive(button_load_fc_fore, TRUE)
    gtkWidgetSetSensitive(button_load_effortdata_fore, TRUE)
    set_all(effort_fore_inputs, TRUE)
    gtkWidgetSetSensitive(FISHINGEFFORT_fore.treeview, TRUE)
    gtkWidgetSetSensitive(button_saveall_effortdata_fore, TRUE)
  }

  # Step 2: disable the controls of whichever mode is NOT selected. The
  # radios are checked in the same if/else-if order as the original, so the
  # effort-data mode takes precedence if both somehow report active.
  use_effort <- gtkToggleButtonGetActive(radio_effortdata)
  use_fc <- (!use_effort) && gtkToggleButtonGetActive(radio_fishingcoeff)
  if (use_effort || use_fc) {
    set_all(effort_inputs, use_effort)
    set_all(fc_inputs, use_fc)
    gtkWidgetSetSensitive(button_load_effortdata, use_effort)
    gtkWidgetSetSensitive(button_load_fishingcoeff, use_fc)
    # gtkWidgetSetSensitive(button_exp_fishingcoeff, use_fc)  # disabled in original
    gtkWidgetSetSensitive(button_exp_effortdata, use_effort)
    if (forecast_active) {
      gtkWidgetSetSensitive(button_load_fc_fore, use_fc)
      gtkWidgetSetSensitive(button_load_effortdata_fore, use_effort)
      set_all(effort_fore_inputs, use_effort)
      gtkWidgetSetSensitive(FISHINGEFFORT_fore.treeview, use_fc)
      gtkWidgetSetSensitive(button_saveall_effortdata_fore, use_effort)
      # gtkWidgetSetSensitive(button_saveall_fc_fore, use_fc)  # disabled in original
    }
  }
}
|
f2f032f660ded7c6165fe31cf1437fe28038bd13
|
6c812b8136e52e760b4064de4d6fdf24ffe1f590
|
/man/f2apply.Rd
|
5d6ee66ae499cf5e9dcc30add80bd15aea17ead2
|
[] |
no_license
|
cran/FuzzyNumbers.Ext.2
|
39d848d6de545fafd85f971edde282dcbee175b6
|
65239db610a722ef40ae261619b632cdf21c906e
|
refs/heads/master
| 2021-01-21T17:28:01.400250
| 2017-09-05T06:29:09
| 2017-09-05T06:29:09
| 85,421,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,938
|
rd
|
f2apply.Rd
|
\name{f2apply}
\alias{f2apply}
\title{
Apply a two-variable function on two fuzzy numbers
}
\description{
Suppose that we want to apply a monotonic two-variable function \eqn{f(x,y)} to two fuzzy numbers \eqn{x} and \eqn{y}. The usual approach uses Zadeh's extension principle, which is computationally complex.
Function \code{f2apply} easily applies a monotonic two-variable function to two fuzzy numbers. Although the computation in \code{f2apply} is based on Zadeh's extension principle, it works with the \eqn{\alpha}-cuts of the two inputted fuzzy numbers for all \eqn{\alpha \in (0,1]}. Note that the ability to compute the \eqn{\alpha}-cuts of the result was added in Version 2.0.
}
\usage{
f2apply(x, y, fun, knot.n=10, I.O.plot="TRUE", ...)
}
\arguments{
\item{x}{
the first fuzzy number, which must be according to the format of \code{FuzzyNumbers} package
}
\item{y}{
the second fuzzy number, which must be according to the format of \code{FuzzyNumbers} package
}
\item{fun}{
a two-variable function which is monotone function on the supports of \code{x} and \code{y} fuzzy numbers
}
\item{knot.n}{
the number of knots; see package \code{FuzzyNumbers}
}
\item{I.O.plot}{
a logical argument with default \code{TRUE}. If \code{I.O.plot=TRUE}, then three membership functions of \eqn{x}, \eqn{y} (Inputted fuzzy numbers) and \eqn{f(x,y)} (Outputted fuzzy number) are drawn in a figure. If \code{I.O.plot=FALSE}, then just the membership function of Outputted fuzzy number \eqn{f(x,y)} will be shown in figure.
}
\item{\dots}{
additional arguments passed from \code{plot}
}
}
\value{
This function returns piecewise linear fuzzy number \eqn{f(x,y)} and also plot the result.
\item{fun.rep }{describes the monotonic behavior of the considered function}
\item{cuts }{returns the \eqn{\alpha}-cuts of the computed fuzzy number \eqn{f(x,y)} }
\item{core }{returns the core of the computed fuzzy number \eqn{f(x,y)} }
\item{support }{returns the support of the computed fuzzy number \eqn{f(x,y)} }
}
\references{
Gagolewski, M., Caha, J., FuzzyNumbers Package: Tools to Deal with Fuzzy Numbers in R. R package version 0.4-1, 2015. https://cran.r-project.org/package=FuzzyNumbers
Klir, G.J., Yuan, B., Fuzzy Sets and Fuzzy Logic: Theory and Applications, Prentice Hall PTR, New Jersey (1995).
Viertl, R., Statistical methods for fuzzy data. New York: John Wiley & Sons (2011)
Zadeh, L.A., Fuzzy sets. Information and Control 8, 338-359 (1965)
Zadeh, L.A., Probability measures of fuzzy events. Journal of Mathematical Analysis and Applications 23, 421-427 (1968)
}
\author{
Abbas Parchami
}
\note{
\code{f2apply} is an extended version of \code{fapply} from package \code{FuzzyNumbers}. The functions \code{fapply} and \code{f2apply} apply one-variable and two-variable functions, respectively, to fuzzy numbers.
The two fuzzy numbers passed to \code{f2apply} must be converted to piecewise linear form by the \code{PiecewiseLinearFuzzyNumber} function in package \code{FuzzyNumbers}. Moreover, the considered function \eqn{f(x,y)} must be monotone in \eqn{x} and \eqn{y}.
}
\seealso{
See \code{PiecewiseLinearFuzzyNumber}, \code{as.PiecewiseLinearFuzzyNumber} and \code{piecewiseLinearApproximation} from package \code{FuzzyNumbers}.
}
\examples{
library(FuzzyNumbers) # Load the 'FuzzyNumbers' package, after its installation
# Example 1: Four different cases of function (in respect to increasing/decreasing on x and y)
x = TriangularFuzzyNumber(1,2,5)
y = TrapezoidalFuzzyNumber(3,4,5,6)
g1 = function(x,y) 2*x+y
f2apply(x, y, g1, knot.n=5, type="l", I.O.plot=TRUE)
f2apply(x, y, g1, knot.n=10, xlim=c(0,18), col=4, type="b", I.O.plot=FALSE)
plot(2*x+y, col=2, lty=4, lwd=3, add=TRUE) #Compare the result from "FuzzyNumbers" package
g2 = function(x,y) -2*pnorm(x)+y
f2apply(x, y, g2, type="b")
g3 = function(x,y) 2*x-punif(y, min=1, max=8)
f2apply(x, y, g3, type="l")
g4 = function(x,y) -2*x-y^3
f2apply(x, y, g4, knot.n=20, type="b" )
# Example 2:
knot.n = 10
A <- FuzzyNumber(-1, .5, 1, 3,
lower=function(alpha) qbeta(alpha,0.4,3),
upper=function(alpha) (1-alpha)^4
)
B = PowerFuzzyNumber(1,2,2.5,4, p.left=2, p.right=0.5)
f2apply(A, B, function(x,y) -2*x-y^3, knot.n=knot.n, type="l", col=2, lty=5, lwd=3, I.O.plot=FALSE)
f2apply(A, B, function(x,y) -2*x-y^3, knot.n=knot.n, type="l", col=2, lty=5, lwd=3)
# As another example, change the function and work with the cuts of result:
Result <- f2apply(A, B, function(x,y) abs(y+x-10),knot.n=knot.n,type="l",I.O.plot=TRUE,col=3,lwd=2)
Result
class(Result)
#The result of alphacut for alpha=0.444:
Result$cuts["0.444",] #Or equivalently,
Result$cuts[6,]
# Upper bounds of alphacuts:
Result$cuts[,"U"] #Or equivalently,
Result$cuts[,2]
#The core of the result:
Result$core
# The support of the result:
Result$support # Or, equivalently: Result$s
# Example 3:
knot.n = 10
x = PowerFuzzyNumber(0,1,1,1.3, p.left=1, p.right=1)
y = PowerFuzzyNumber(3,4,4,6, p.left=1, p.right=1)
f = function(x,y) 3*x - 2*y
f2apply(x, y, f, knot.n=knot.n, type="l", I.O.plot=TRUE)
g = function(x,y) exp(x^2) + 3*log(sqrt(y+4))
f2apply(x, y, g, knot.n=knot.n, type="l", I.O.plot=TRUE)
# Example 4:
knot.n = 20
A = PowerFuzzyNumber(.1,.5,.5,.6, p.left=2, p.right=0.5)
B <- FuzzyNumber(.5, .6, .7, .9,
lower=function(alpha) qbeta(alpha,0.4,3),
upper=function(alpha) (1-alpha)^4
)
fun1 <- function(x,y) qnorm(x)-qgamma(y,2,4)
f2apply(A, B, fun1, knot.n=knot.n, type="l", I.O.plot=TRUE, col=2, lwd=2)
fun2 <- function(x,y) 0.3*sin(qnorm(x))+tan(qgamma(y,2,4))
f2apply(A, B, fun2, knot.n=knot.n, type="l", I.O.plot=TRUE)
# Example 5: One of the considered inputs may be crisp.
knot.n = 10
A = 27
B = PowerFuzzyNumber(1,2,2.5,4, p.left=2, p.right=0.5)
f2apply(A, B, function(x,y) -2*x-y^3, knot.n=knot.n, I.O.plot=TRUE)
f2apply(x=4, y=3, function(x,y) sqrt(x)*y^2, knot.n=knot.n, I.O.plot=TRUE)
f2apply(x=4, y=TriangularFuzzyNumber(2,3,5), function(x,y) sqrt(x)-y^2,knot.n=knot.n,I.O.plot=TRUE)
f2apply(x=TriangularFuzzyNumber(2,4,6), y=3, function(x,y) sqrt(x)-y^2,knot.n=knot.n,I.O.plot=TRUE)
f2apply(x=TriangularFuzzyNumber(2,4,6), y=TriangularFuzzyNumber(2,3,5), function(x,y) sqrt(x)-y^2,
knot.n=knot.n, I.O.plot=TRUE)
## The function is currently defined as
function (x, y, fun, knot.n = 10, I.O.plot = "TRUE", ...)
{
x.input <- x
y.input <- y
if (class(x) == "numeric") {
x <- x.input.fuzzy <- TriangularFuzzyNumber(x, x, x)
}
if (class(x) == "TriangularFuzzyNumber" | class(x) == "TrapezoidalFuzzyNumber") {
x.input.fuzzy <- x
x <- as.PiecewiseLinearFuzzyNumber(x, knot.n)
}
if (class(x) == "FuzzyNumber" | class(x) == "PowerFuzzyNumber" |
class(x) == "PiecewiseLinearFuzzyNumber" ){
x.input.fuzzy <- x
x <- piecewiseLinearApproximation(x, method = "Naive")
}
if (class(y) == "numeric") {
y <- y.input.fuzzy <- TriangularFuzzyNumber(y, y, y)
}
if (class(y) == "TriangularFuzzyNumber" | class(y) == "TrapezoidalFuzzyNumber") {
y.input.fuzzy <- y
y <- as.PiecewiseLinearFuzzyNumber(y, knot.n)
}
if (class(y) == "FuzzyNumber" | class(y) == "PowerFuzzyNumber" |
class(y) == "PiecewiseLinearFuzzyNumber" ){
y.input.fuzzy <- y
y <- piecewiseLinearApproximation(y, method = "Naive")
}
step.x = length(supp(x))/30
step.y = length(supp(y))/30
if (class(x.input) == "numeric") {
is.inc.on.x <- TRUE
is.dec.on.x <- FALSE
}
else {
is.inc.on.x = is.increasing.on.x(fun, x.bound = supp(x),
y.bound = supp(y), step.x)
is.dec.on.x = is.decreasing.on.x(fun, x.bound = supp(x),
y.bound = supp(y), step.x)
}
if (class(y.input) == "numeric") {
is.inc.on.y <- TRUE
is.dec.on.y <- FALSE
}
else {
is.inc.on.y = is.increasing.on.y(fun, x.bound = supp(x),
y.bound = supp(y), step.y)
is.dec.on.y = is.decreasing.on.y(fun, x.bound = supp(x),
y.bound = supp(y), step.y)
}
if ((is.inc.on.x == TRUE) & (is.inc.on.y == TRUE)) {
fun.rep = "fun is an increasing function from x and y on introduced bounds"
L.result = fun(alphacut(x.input.fuzzy, seq(0, 1, len = knot.n))[,
"L"], alphacut(y.input.fuzzy, seq(0, 1, len = knot.n))[,
"L"])
U.result = fun(alphacut(x.input.fuzzy, seq(0, 1, len = knot.n))[,
"U"], alphacut(y.input.fuzzy, seq(0, 1, len = knot.n))[,
"U"])
result = c(L.result, U.result[length(U.result):1])
}
else {
if ((is.dec.on.x == TRUE) & (is.inc.on.y == TRUE)) {
fun.rep = "fun is a decreasing function on x and increasing function on y on introduced bounds"
L.result = fun(alphacut(x.input.fuzzy, seq(0, 1,
len = knot.n))[, "U"], alphacut(y.input.fuzzy,
seq(0, 1, len = knot.n))[, "L"])
U.result = fun(alphacut(x.input.fuzzy, seq(0, 1,
len = knot.n))[, "L"], alphacut(y.input.fuzzy,
seq(0, 1, len = knot.n))[, "U"])
result = c(L.result, U.result[length(U.result):1])
}
else {
if ((is.inc.on.x == TRUE) & (is.dec.on.y == TRUE)) {
fun.rep = "fun is an increasing function on x and decreasing function on y on introduced bounds"
L.result = fun(alphacut(x.input.fuzzy, seq(0,
1, len = knot.n))[, "L"], alphacut(y.input.fuzzy,
seq(0, 1, len = knot.n))[, "U"])
U.result = fun(alphacut(x.input.fuzzy, seq(0,
1, len = knot.n))[, "U"], alphacut(y.input.fuzzy,
seq(0, 1, len = knot.n))[, "L"])
result = c(L.result, U.result[length(U.result):1])
}
else {
if ((is.dec.on.x == TRUE) & (is.dec.on.y == TRUE)) {
fun.rep = "fun is a decreasing function from x and y on introduced bounds"
L.result = fun(alphacut(x.input.fuzzy, seq(0,
1, len = knot.n))[, "U"], alphacut(y.input.fuzzy,
seq(0, 1, len = knot.n))[, "U"])
U.result = fun(alphacut(x.input.fuzzy, seq(0,
1, len = knot.n))[, "L"], alphacut(y.input.fuzzy,
seq(0, 1, len = knot.n))[, "L"])
result = c(L.result, U.result[length(U.result):1])
}
else {
return(print("fun is not a monoton function on x and y for the introduced bounds.
Therefore this function is not appliable for computation."))
}
}
}
}
if (class(x.input) == "numeric" | class(y.input) == "numeric") {
fun.rep = "supports of one/both inputted points are crisp and the exact report on function
is not needed"
}
Alphacuts = c(seq(0, 1, len = knot.n), seq(1, 0, len = knot.n))
if (I.O.plot == TRUE) {
op <- par(mfrow = c(3, 1))
if (class(x.input) == "numeric") {
plot(TriangularFuzzyNumber(x.input, x.input, x.input),
ylab = "membership func. of x")
}
else {
plot(x.input, ylab = "membership func. of x")
}
if (class(y.input) == "numeric") {
plot(TriangularFuzzyNumber(y.input, y.input, y.input),
xlab = "y", ylab = "membership func. of y")
}
else {
plot(y.input, col = 1, xlab = "y", ylab = "membership func. of y")
}
plot(result, Alphacuts, xlab = "fun(x,y)", ylab = "membership func. of fun(x,y)",
...)
abline(v = fun(core(x), core(y)), lty = 3)
par(op)
}
if (I.O.plot == "FALSE") {
plot(result, Alphacuts, xlab = "fun(x,y)", ylab = "membership func. of fun(x,y)",
...)
}
result2 <- c(L.result[length(L.result):1], U.result[length(U.result):1])
cuts <- matrix(result2, ncol = 2, byrow = FALSE, dimnames = list(round((length(L.result) -
1):0/(length(L.result) - 1), 3), c("L", "U")))
return(list(fun.rep = noquote(fun.rep), cuts = cuts, core = cuts[1,
], support = cuts[dim(cuts)[1], ]))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
\keyword{ fapply }
\keyword{ f2apply }
\keyword{ FuzzyNumbers }
\keyword{ monoton function }
\keyword{ is.increasing }
\keyword{ is.decreasing }
\keyword{ is.increasing.on.x }
\keyword{ is.decreasing.on.x }
\keyword{ is.increasing.on.y }
\keyword{ is.decreasing.on.y }
|
ad2a48d64953ff61640b125151d580c33f4d7cd5
|
a5597207c2e2c6bff92c045b3a2e0cdbd0e2ee7a
|
/R_databases/LS_ROM_data/plot_bars_ROM.R
|
ff2d20ecd37887e62860bc303535ce1c79ef59f9
|
[] |
no_license
|
Gavinlenton/loadSharing_processing
|
a3a7a3dd3c5bb5fdb2f096023a1fef3b06eea369
|
a16faf77df2e74316afc384be3417f55898bde5e
|
refs/heads/master
| 2021-01-20T18:20:25.903996
| 2018-02-22T01:21:19
| 2018-02-22T01:21:19
| 60,587,136
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,313
|
r
|
plot_bars_ROM.R
|
## Plots the input data using a column chart design
## data: a data frame.
## ylimits: a row vector specifying the min and max limits of the y-axis
## xData: data to plot on x-axis
## yData: data to plot on y-axis
## fillData: labels to use in the legend
## scale_cont_breaks: vector specifying the spacing of the y-axis (e.g., 0:9*10 starts at zero and increments in 10 until 90)
plot_bars_ROM <- function(data = NULL, ylimits, xData = NULL, yData = NULL, fillData = NULL,
                          scale_cont_breaks) {
  # Base mapping: one bar per x category, grouped/coloured by the fill factor.
  p <- ggplot(data, aes(x = xData, y = yData, fill = fillData))
  # Dodged bars with black outlines.
  p <- p + geom_bar(position = position_dodge(0.9), colour = "black", stat = "identity")
  # Error bars at +/- one standard error.
  # NOTE: `se` is taken from a column of `data` — the input data frame must
  # contain it.
  p <- p + geom_errorbar(aes(ymin = yData - se, ymax = yData + se),
                         width = .2, # Width of the error bars
                         position = position_dodge(0.9))
  # Axis labels and y-axis window (coord_cartesian zooms without dropping data).
  p <- p + xlab("Armour type") +
    ylab("Range of motion (deg)") +
    coord_cartesian(ylim = ylimits)
  # Legend: the two carried-mass conditions, in light/dark grey.
  p <- p + scale_fill_manual(name = "Mass", # Legend label, use darker colors
                             breaks = c("15", "30"),
                             labels = c("15 kg", "30 kg"),
                             values = c("#D5D5D5", "#545354"))
  # Custom y-axis tick spacing and a light theme.
  p + scale_y_continuous(breaks = scale_cont_breaks) +
    theme_light(base_size = 12, base_family = "Calibri")
}
|
b35daec06ae559a06de0e5ef285ba2345b5ad561
|
46e8512ab84cb14900ff6ed3bfbc9c93328a657d
|
/Stimates.R
|
970da657d3ff25ab0ec417c7a9b5f8727cbb0e98
|
[] |
no_license
|
Gabo226/R
|
eefaec0ac4a37e1d2f5d4fcba914fa8773e0659b
|
3bca72ee7ad632c386dbdc65eacdc44e337a2013
|
refs/heads/master
| 2022-06-17T22:29:39.286270
| 2020-05-14T22:44:16
| 2020-05-14T22:44:16
| 255,984,712
| 0
| 0
| null | 2020-04-15T17:15:31
| 2020-04-15T17:07:54
| null |
UTF-8
|
R
| false
| false
| 142
|
r
|
Stimates.R
|
# Association test between allele and case status from assoctest.csv.
d <- read.csv("assoctest.csv")
# Quick look at the raw counts across all columns.
table(d)
# Contingency table of allele by case status.
# (Computed once and reused; the original rebuilt the identical table before
# each test.)
tab <- table(d$allele, d$case)
# Chi-squared test of independence.
chisq.test(tab)
# Fisher's exact test on the same table.
fisher.test(tab)
|
4d3f94fd1b449bfea56b9da363e84ddcf316a242
|
c3ee012b76453254dd76ed3d69ba8a4c97a9a90d
|
/TestMVST/R/install-Rpkg.R
|
4ddd54be90cc3aa86cab9559e91e3ac112e2ac04
|
[] |
no_license
|
shazhe/glbm
|
7cca75e150a106261bae446b128bbfc6fe6b35b6
|
567f3314b446240dd3a7c2aeba09ad31274bbe1d
|
refs/heads/master
| 2021-01-13T15:12:30.348361
| 2018-01-11T15:15:28
| 2018-01-11T15:15:28
| 76,252,126
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,958
|
r
|
install-Rpkg.R
|
#######################################################################
## Install R packages in a cluster environment with job scheduler ##
## Run this script before running any BHM scripts. ##
## Script copied from https://orfe.princeton.edu/help/r-packages ##
## Note BlueCrystal does not allow installing from remote repo, ##
## so all required packages are installed from source files under ##
## ../myPkgs ##
#######################################################################
## Create the personal library if it doesn't exist.
## Ignore a warning if the directory already exists.
dir.create(Sys.getenv("R_LIBS_USER"), showWarnings = FALSE, recursive = TRUE)
## Packages need special care
## rgdal -- need to load module gdal and proj and user config path
## NOTE(review): the configure.args point at this cluster's specific proj
## build; installation ORDER matters (source installs first, then CRAN
## dependencies, then INLA and MVST last) -- do not reorder.
install.packages('~/myPkgs/rgdal_1.2-5.tar.gz', type = "source",
configure.args= c('--with-proj-include=/cm/shared/libraries/gnu_builds/proj-4.9.3/include',
'--with-proj-lib=/cm/shared/libraries/gnu_builds/proj-4.9.3/lib'))
install.packages('~/myPkgs/gpclib_1.5-5.tar.gz', type = "source")
## Some basic packages
pkgs1 <- c("R.utils", "magic", "rgeos")
install.packages(pkgs1, Sys.getenv("R_LIBS_USER"), repos = "http://www.stats.bris.ac.uk/R/",
dependencies = TRUE)
## Packages MVST depends on
pkgs2 <- c("spam", "deldir", "SDMTools", "network", "fields", "matlab", "actuar", "akima", "geometry", "GEOmap")
install.packages(pkgs2, Sys.getenv("R_LIBS_USER"), repos = "http://www.stats.bris.ac.uk/R/",
dependencies = TRUE)
## Finally install INLA (from its own repository) and MVST (local source)
install.packages('INLA', Sys.getenv("R_LIBS_USER"),
repos="https://www.math.ntnu.no/inla/R/stable")
install.packages('~/myPkgs/MVST_1.0.1.tar.gz', type = "source")
## Submit the command R CMD BATCH install-Rpkg.R to the cluster's queue or job scheduler.
## Confirm installation by listing the contents of ~/R.
## Retrieve any error messages from install-Rpkg.Rout,
## which is generated as a result of running install-Rpkg.R.
#### Some other examples (kept for reference; all commented out)
## Install a package that you have copied to the remote system.
## Standard packages
#system("ls ~/myPkgs/std | grep 'tar.gz' > pkgnames.txt")
#pkgs <- paste0("~/myPkgs/std/", scan("pkgnames.txt", "char"))
#install.packages(pkgs, Sys.getenv("R_LIBS_USER"), repos = NULL)
#install.packages("~/myPkgs/gpclib_1.5-5.tar.gz", Sys.getenv("R_LIBS_USER"))
#install.packages("~/myPkgs/MVST_1.0.1.tar.gz", Sys.getenv("R_LIBS_USER"))
## install.packages("~/myPkgs/MVST_1.0.1.tar.gz", Sys.getenv("R_LIBS_USER"))
## Install packages from other source
## install.packages("INLA", Sys.getenv("R_LIBS_USER"),
## repos="https://www.math.ntnu.no/inla/R/stable")
## Install Views
## library("ctv")
## install.views("Spatial")
## Install the packages.
## install.packages(c("rgdal", "rgeos", "dplyr"), Sys.getenv("R_LIBS_USER"),
## repos = "https://cran.r-project.org")
|
759fb5b15a6f91540fd92cf337bb35aeaf750e76
|
95b9aa43e1158b082930318cc4b46d187e658a4d
|
/df.R
|
820f3aa1de87ab958232c8785e0978b1f7ec09c8
|
[] |
no_license
|
bbolker/hmm_temphet
|
8a4317bec6b940dd52685c2ab1d7507a3aef0fd8
|
1b333433608cc76c89af34c428ded2e7b32aa91f
|
refs/heads/master
| 2021-01-20T12:06:28.396963
| 2016-12-22T06:39:49
| 2016-12-22T06:39:49
| 44,646,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 112
|
r
|
df.R
|
# Configure run size: full run by default, reduced run for debugging.
# NOTE(review): `cat` here is a data frame presumably loaded by an earlier
# script in the pipeline; it shadows base::cat -- confirm against the caller.
default <- 1  # 1 = full run; 0 = small debugging run
num <- nrow(cat)
iter <- 500
if (default == 0) {
  # Debug settings: fewer rows and fewer iterations.
  num <- 500
  iter <- 30
}
# Keep only the first `num` rows.
cat <- head(cat, num)
|
01763e7c03678dbf6cee9e5b22fca6cd9592792c
|
49e8370414d355292412bf7f7ae03a7851506403
|
/prep23.r
|
37b95c90a7e4fbcd90adb8aaa2c7b65c8b148340
|
[] |
no_license
|
hamparmin/causal-inference-2018
|
d3578d38741585ebe08a15c22d162ed01a472564
|
580cc509414e7a9f656b52fba3ce50d249b788a1
|
refs/heads/master
| 2020-04-30T08:31:05.308309
| 2019-03-20T11:25:45
| 2019-03-20T11:25:45
| 176,718,437
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 913
|
r
|
prep23.r
|
# Fit price models on the 'fred' training data, evaluate on a held-out test
# set ('apple.csv'), then repeat the exercise on simulated virulence data.
fred <- read.csv("fred.csv")
# fred training data
x <- fred$size
y <- fred$sweetness
z <- fred$price
# fred training - RMSE
# NOTE: the formula refers to the global vectors x, y, z, so `data = fred` is
# effectively ignored (fred's columns are size/sweetness/price, not x/y/z).
fit_best <- lm(z ~ x + y, data = fred)
fit_second <- loess(z ~ x, span = 1, degree = 1)
summary(fit_best)
RMSE_best <- sqrt(mean((z - predict(fit_best))^2))
RMSE_best2 <- sqrt(mean((z - predict(fit_second))^2))
# fred test - RMSE
fred_test <- read.csv("apple.csv")
x1 <- fred_test$size
y1 <- fred_test$sweetness
z1 <- fred_test$price
# BUG FIX: the original compared the test prices z1 against IN-SAMPLE
# predictions (predict(fit) with no newdata), which is not a test-set RMSE.
# Predict on the test predictors instead.
RMSE_test_best <- sqrt(mean((z1 - predict(fit_best, newdata = data.frame(x = x1, y = y1)))^2))
RMSE_test_loess <- sqrt(mean((z1 - predict(fit_second, newdata = data.frame(x = x1)))^2))
summary(fit_best)
# own data - without random element
year <- c(seq(1801, 2000, 1))
infected <- c(rnorm(200, 1000, 90))
virulence <- 50 + ((2000 - year) / 4) * infected / 100
#virulence <- jitter(virulence, factor = 100)
virus_df <- data.frame(virulence, year, infected)
fit_virus <- lm(virulence ~ year + infected)
# BUG FIX: the original computed this RMSE against z (the fred prices); the
# residuals of fit_virus must be taken against virulence.
RMSE_virus <- sqrt(mean((virulence - predict(fit_virus))^2))
|
b9b9ece9d93a9d2d5f054c518c04fe05bf75efbc
|
ecf24c2aae9f8da0527d89963616f1a39089798c
|
/cachematrix.R
|
87bf6f3aaf25834fed1ac8ff53822d5d3d45674a
|
[] |
no_license
|
HLueckhoff/ProgrammingAssignment2
|
52dd98dbd093a92dfc66baa81f81b19c00c0a6f0
|
e3553179f457e3ab307794c8994dd816c242ee17
|
refs/heads/master
| 2021-01-18T05:12:30.703691
| 2014-10-26T19:01:45
| 2014-10-26T19:01:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,997
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Creates a cache object for the inverse of an invertible matrix. The cache object contains gettters and setters for the
## original matrix as well as the original matirx and its inverse (once it has been computed).
makeCacheMatrix <- function(x = matrix()) {
  # Cache object wrapping a matrix and a slot for its (lazily computed)
  # inverse. Returns a list of four accessor closures; assigning a new matrix
  # via set() invalidates any previously cached inverse.
  cached_inv <- NULL
  set <- function(y) {
    # Replace the stored matrix and drop the stale inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inv <<- inverse
  }
  getinv <- function() {
    cached_inv
  }
  # Public interface (same element names as callers expect).
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## Returns the inverse of a cache object of an invertible object created by calling makeCacheMatrix(). The inverse is only computed
## the first time cacheSolve() is called. For all subsequent calls the value is retrieved from the cache object.
## Instructions are somewhat unclear what parameters could be passed via '...'. It is assumed that a matrix could be passed. In that case
## the value of this matrix is compared to the cached matrix. If it is different then the inverse for this new matrix will be computed
## and cached
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held in the cache object `x` (created by
  ## makeCacheMatrix). The inverse is computed with solve() only on the first
  ## call; subsequent calls return the cached value.
  ##
  ## A replacement matrix may be passed via `...`: if it differs from the
  ## cached matrix it becomes the new data and the inverse is recomputed.
  ## FIX: the original detected extra arguments with
  ## length(match.call(expand.dots = TRUE)), which is fragile (it counts call
  ## components, breaks under do.call, and passed ALL of `...` to both
  ## identical() and set()). Capturing list(...) is the robust equivalent.
  extras <- list(...)
  if (length(extras) > 0) {
    new_mat <- extras[[1]]
    if (!identical(x$get(), new_mat)) {
      ## set() stores the new matrix AND clears the cached inverse, so the
      ## original's separate setinv(NULL) call was redundant.
      x$set(new_mat)
    }
  }
  inv <- x$getinv()
  if (!is.null(inv)) {
    ## Cache hit: reuse the previously computed inverse.
    message("getting cached data")
    return(inv)
  }
  ## Cache miss: compute, store, and return the inverse.
  data <- x$get()
  inv <- solve(data)
  x$setinv(inv)
  inv
}
|
e0424331da8994beae360e9b13636db551f24688
|
afd286a06eff008fc7dd8b4716454575647f0a57
|
/R/connect_analysis.R
|
71f1ed9dc38bd30744c9484b3059930aadf41be2
|
[] |
no_license
|
luiscartor/PACCproject
|
5845cb86182c7492939f20aa719836f605693d7c
|
58c5e3fc721a3583cc38876e6dd70cb5fc61b0f6
|
refs/heads/master
| 2020-04-25T12:35:39.962882
| 2019-05-10T20:04:40
| 2019-05-10T20:04:40
| 172,777,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,206
|
r
|
connect_analysis.R
|
# connect_analysis.R
# on 3 May 2019
# by Luis
# compares connectivity (protconnbound) between 2011 and present
library(rgdal)
library(maptools)
library(rgeos)
library(sf)
library(raster)
library(ggplot2)
library(viridis)
library(plyr)
library(scales)
library(broom)
library(mapproj)
library(shades)
# 1. INPUTS
# NOTE(review): absolute, user-specific paths -- update before running on
# another machine.
OUTconnectfolder <- '/home/lcarrasco/Documents/research/protectedareas/connectivity/results/'
INtable2010 <- '/home/lcarrasco/Documents/research/protectedareas/connectivity/results/protconnbound_till2010_dis10.txt'
INtableall <- '/home/lcarrasco/Documents/research/protectedareas/connectivity/results/protconnbound_allyears_dis10.txt'
INgadmfolder <- '/home/lcarrasco/Documents/research/protectedareas/data/GADM/'
INgadmfile <- 'gadm36_0_simplify'
#INgadmfile <- 'gadm36_0_simplify_robinson_buff0'
#INgdpfile <- '/home/lcarrasco/Documents/research/protectedareas/data/GDP/IMF_GDPPPP_Data.csv'
# 2. READ DATA
table_2010 <- read.table(INtable2010,header = TRUE)
table_all <- read.table(INtableall,header = TRUE)
gadm <- readOGR(INgadmfolder, INgadmfile)
#gdp <- read.csv2(INgdpfile, header = TRUE, sep=",", stringsAsFactors=FALSE)
# 3. ANALYSIS
# Creates column with proportion of connected/protected
table_2010$connprop <- 100*(table_2010$protconnbound/table_2010$prot)
table_all$connprop <- 100*(table_all$protconnbound/table_all$prot)
# Select common countries (can be the case of countries without PA in 2010 but present in allyears)
commoncoun <- intersect(table_2010$country, table_all$country)
table_2010 <- table_2010[table_2010$country %in% commoncoun,]
table_all <- table_all[table_all$country %in% commoncoun,]
# Differences (present minus 2010) in connected proportion and in the raw
# protconnbound value.
# NOTE(review): these subtractions assume table_2010 and table_all list the
# common countries in the same row order after the %in% filtering -- verify.
table_all$propdiff <- (table_all$connprop - table_2010$connprop)
table_all$diff <- (table_all$protconnbound - table_2010$protconnbound)
table_all$oldprotconnbound <- table_2010$protconnbound
# Add columns to gadm: join the difference columns onto the GADM attribute
# table by country code (GID_0 -- presumably ISO3; confirm against the data).
colnames(table_all)[1] <- "GID_0"
gadm@data <- merge(gadm@data,table_all[,c("GID_0","propdiff","diff")],all.x=TRUE,all=TRUE)
# 4. PLOTS
# 4.1 MAPS: Prepare plots
# Delete Antartica
gadm <- subset(gadm, GID_0 != "ATA")
# Create df for ggplot maps
gadm@data$id <- rownames(gadm@data)
gadm_df <- fortify(gadm, region="id")
gadm_df <- join(gadm_df, gadm@data, by="id")
# We want log(diff)
# First we put negative values and zeroes to very small values
# (so log() below is defined; they all map to the darkest legend bin "0")
gadm_df$logdiff <- gadm_df$diff
gadm_df$logdiff[gadm_df$logdiff <= 0] <- 0.00000001
# Then we transform to log
gadm_df$logdiff <- log(gadm_df$logdiff)
# Ticks for log-transformed legend
logbreaks <- c(log(20),log(1),log(0.01),log(0.00000001))
loglabels <- c(20,1,0.01,0)
# Plot protconn diff
ggplot(gadm_df) +
  geom_polygon(aes(long, lat, group=group, fill = logdiff), color="white",size=0.15) +
  scale_fill_viridis(option = 'plasma', breaks=logbreaks, labels=loglabels)+
  labs(fill = "Difference in\nconnected\nand protected (%)")+
  ggtitle("Changes in protected and connected areas from 2011 to present",
          subtitle = "Difference in % of protected and connected area using the protconnbound index")+
  theme_bw()+
  theme(axis.line=element_blank(),axis.text.x=element_blank(),axis.text.y=element_blank(),axis.ticks=element_blank(),
        axis.title.x=element_blank(),axis.title.y=element_blank(),panel.border = element_blank(),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())
#ggsave(file=paste(OUTconnectfolder,"map_protconndif.eps",sep=""))
# PROPDIFF PLOT
# Transform propdiff values
# NOTE(review): this asinh() result is immediately overwritten by the
# signed-log transform two lines below -- it looks like leftover code.
gadm_df$logpropdiff <- asinh(gadm_df$propdiff)
# Then we transform to log (signed log of |propdiff|, shifted by +17 so the
# sign of propdiff is preserved in the transformed value)
gadm_df$logpropdiff <- log(abs(gadm_df$propdiff+0.0000001))
gadm_df$logpropdiff <- sign(gadm_df$propdiff+0.0000001)*(gadm_df$logpropdiff+17)
# Ticks for log-transformed legend
# NOTE(review): the breaks use asinh() while the plotted values use the
# signed-log(+17) transform above, so the legend labels may not line up with
# the data -- verify.
logbreaks <- c(asinh(20),asinh(2),asinh(0),asinh(-2),asinh(-15))
loglabels <- c(20,2,0,-2,-15)
# Plot percentage of protconn diff between years
ggplot(gadm_df) +
  geom_polygon(aes(long, lat, group=group, fill = logpropdiff), color="white",size=0.15) +
  scale_fill_viridis(option = 'plasma', breaks=logbreaks, labels=loglabels)+
  labs(fill = "Difference in proportion \nof connected PA in \nrespect of total \nprotected land (%)")+
  ggtitle("Changes in proportion of connected areas in respect of total protected areas from 2011 to present",
          subtitle = "Difference in % proportion of connected areas using the protconnbound index")+
  theme_bw()+
  theme(axis.line=element_blank(),axis.text.x=element_blank(),axis.text.y=element_blank(),axis.ticks=element_blank(),
        axis.title.x=element_blank(),axis.title.y=element_blank(),panel.border = element_blank(),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())
#ggsave(file=paste(OUTconnectfolder,"map_connproportiondif.eps",sep=""))
# 4.2 Scatter plot New PAs increase VS total increased area
#tdivtable$outweighted <- tdivtable$difpas_out/tdivtable$patot
# Plot PAs topdiv difference with available land, against total new PA
table_all_df <- data.frame(table_all)
ggplot(table_all_df, aes(y=diff, x=prot))+
  geom_point(aes(size = countryarea, colour=connprop)) +
  ylim(-3, 30)+
  xlim(-5,100)+
  geom_text(data = subset(table_all_df, diff > 8 | prot > 66), aes(label=country), size=4, hjust = -0.27)+
  scale_size_continuous(range=c(1,30),
                        name= expression("Country's total\n area (km"^2*")"), breaks=c(1e+11,1e+12,1e+13))+
  scale_colour_viridis(name="Proportion of\nconnected\nand total\n(areas)")+
  ylab("Difference in connected and protected between 2010 and present")+
  xlab(expression("Country protected area (%)"))
#ggsave(file=paste(OUTconnectfolder,"scatter_diffVSpaarea.eps",sep=""))
# 4.3 PIE charts
# 4.2.1 Diff in protconn
# % of countries with certain level of ccvel increase
# Bin the raw protconnbound difference into four ordered classes
# (placeholder codes 100..400, relabelled just below).
table_all_df$diff_class <- table_all_df$diff
table_all_df$diff_class[table_all_df$diff_class > 10] <- 400
table_all_df$diff_class[table_all_df$diff_class <= 10 & table_all_df$diff_class >= 1] <- 300
table_all_df$diff_class[table_all_df$diff_class < 1 & table_all_df$diff_class >= 0.1] <- 200
table_all_df$diff_class[table_all_df$diff_class < 0.1] <- 100
table_all_df$diff_class <- as.factor(table_all_df$diff_class)
table_all_df$diff_class <- revalue(table_all_df$diff_class, c("400"="Increase > 10", "300"="Increase > 1",
                                                              "200"="Increase > 0.1", "100"="Increase < 0.1"))
diffclass_df <- data.frame(table(table_all_df$diff_class))
# Reverse order
#outclass_df <- outclass_df[nrow(outclass_df):1, ]
diffclass_df
#mycols <- c("#0073C2FF", "#EFC000FF", "#868686FF", "#CD534CFF")
# NOTE(review): the label y-positions in geom_text below are hard-coded for
# the current class counts; they must be updated if the data change.
ggplot(diffclass_df, aes(x = "", y = Freq, fill = Var1)) +
  geom_bar(width = 1, stat = "identity", color = "white") +
  geom_text(aes(y = c(0,19,43,132), label=Freq), color = "darkgray", size=10)+
  scale_fill_viridis(option = 'plasma',discrete = TRUE,direction=-1)+
  coord_polar("y", start = 0)+
  #scale_fill_manual(values = mycols) +
  ggtitle("Number of countries certain increase in connected and protected area")+
  theme_void()+
  theme(legend.title = element_blank(),legend.text = element_text(size=11))+
  guides(fill = guide_legend())
#ggsave(file=paste(OUTconnectfolder,"pie_conndiff.eps",sep=""))
|
f9f85a2c45ed2eb7f9576515cb0d191d31f98d75
|
08945878c824498f99548b4b0b4171b9bc6f2091
|
/fig_for_e2.R
|
882f83e3d8d3362b8e4185b79a7143e6fd9f2a0c
|
[
"MIT"
] |
permissive
|
klaricch/TransposonFigures
|
3fc3eeb0f081e31a045e49f656f2b5d1db761081
|
41396eee6ec62cad6e4d2b04dea8e86a7eb5bdf6
|
refs/heads/master
| 2021-01-19T04:37:36.786432
| 2017-05-20T20:03:22
| 2017-05-20T20:03:22
| 43,438,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,840
|
r
|
fig_for_e2.R
|
#!/usr/bin/R
# Figure-generation script for transposon (TE) analyses. Each numbered section
# below is self-contained: it loads data, builds ggplot objects, and writes a
# TIFF to disk.
#1. Correlations of absence with insertion and reference with insertion (square 4x4 inch, 16 point fonts, no title)
#2. Numbers of TE families, numbers of TEs, etc. I can probably get them from the results section, but I don't know if it is done enough for me to copy
#3. Manhattan plots for our good traits (9 inches wide, 2.5 inches high, 14 point font, no title, family on the side) - just like your paper figure, maybe just your paper figure
#4. Plot of TE locations across genome by type (9 inches wide, 4.5 inches high, 16 point font, no title)
#5. Plot of TE insertions into genomic features (9 inches wide, 4.5 inches high, 16 point font, no title)
#############################################################################
#############################################################################
#############################################################################
#5
# Section 5: histograms of TE insertion sites across each chromosome, split by
# genomic feature (panel a: Genic/Intergenic/Pseudogene; panel b: feature types
# within protein-coding genes).
library(ggplot2)
library(grid)
library(dplyr)
library(cowplot)
setwd("/Users/kristen/Documents/transposon_figure_data/data")
data <- read.table("essentiality_nonredundant_GO.txt",sep="\t",header=TRUE,stringsAsFactors = F)
# keep only TE calls from the "new" (insertion) caller
data<-filter(data, Method=="new")
# simplify UTRs
data<-mutate(data, Region=ifelse(Region=="three_prime_UTR"|Region=="five_prime_UTR","UTR",Region))
# NOTE(review): in dplyr >= 0.5 distinct(data, Chromosome, TE_start) returns
# ONLY those two columns unless .keep_all = TRUE, which would break the mutate
# on Region/Biotype below — this script relies on older dplyr behavior; verify
# the installed version.
data<-distinct(data,Chromosome, TE_start)
# simplify Biotypes
data<-mutate(data,final_bio=ifelse(Region=="intergenic","Intergenic",ifelse(Biotype=="pseudogene"|Biotype=="transposon_pseudogene","Pseudogene","Genic")))
#-split plot: A) intergenic, genic, pseudogene, B) CDS, promoter, intron
#-potential table with pseudogenes for loss of function caused by TE
# Panel a: counts of TE sites in 0.25 Mb bins, faceted by chromosome.
a <- ggplot(data,aes(x=TE_start/1e6,fill=final_bio))
a <- a + geom_histogram(binwidth=.25)+
facet_grid(.~Chromosome, scale="free", space="free_x")+
scale_y_continuous(expand = c(0,0)) + scale_x_continuous(expand = c(0,0))+
# invisible "phantom" point forces the y-axis to include at least 30
geom_point(aes(y=30), alpha=0)+
labs(x="", y= "Count")+
theme(strip.background = element_blank(),
strip.text = element_text(size = 16, colour = "black", face = "bold"),
panel.margin = unit(.25, "lines"),
panel.border = element_rect(fill=NA, colour="black",size=1, linetype="solid"),
panel.background = element_blank(),
legend.title = element_blank(),
legend.text=element_text(size=16),
legend.text.align = 0,
plot.margin=unit(c(.1,.1,-.5,.1), "cm"),
axis.title = element_text(size=16,face="bold"),
axis.text.y = element_text(colour="black", size=16,face="bold"),
axis.text.x = element_blank(),
#axis.text.x = element_text(colour="black", size=11,face="bold"),
axis.ticks = element_line(colour="black"),
axis.line.y = element_line(colour = "black"),
axis.line.x = element_line(colour = "black"))+
scale_fill_manual(values = c('Genic'="gray17",'Intergenic' = "gray60", "Pseudogene"="tan3"))
a
# NOTE(review): ggplot_build(a)$panel$ranges is internal API that was removed
# in ggplot2 >= 2.2 (renamed layout$panel_ranges, later panel_params) — this
# only works with the ggplot2 version the script was written against.
max_y<-ggplot_build(a)$panel$ranges[[1]]$y.range
max_y<-max_y[2]
# pad the y-limit by 7.5% headroom above the tallest bar
a<- a + scale_y_continuous(expand = c(0,0),limits=c(0,max_y*1.075))
a
# Panel b: restrict to protein-coding genes and plot by feature type
# (Promoter / CDS / Intron / UTR), dropping the redundant exon/gene rows.
protein_coding<-filter(data,final_bio=="Genic", Biotype=="protein_coding")
protein_coding<-filter(protein_coding,Region!="exon")
protein_coding<-filter(protein_coding,Region!="gene")
protein_coding$Region <- factor(protein_coding$Region,
levels = c("promoter", "CDS","intron","UTR"),
labels = c("Promoter", "CDS","Intron","UTR"))
b <- ggplot(protein_coding,aes(x=TE_start/1e6,fill=Region))
b <- b + geom_histogram(binwidth=.25)+
facet_grid(.~Chromosome, scale="free", space="free_x")+
scale_y_continuous(expand = c(0,0)) + scale_x_continuous(expand = c(0,0))+
# phantom point forces the y-axis to include at least 25
geom_point(aes(y=25), alpha=0)+
theme(strip.background = element_blank(),
strip.text = element_blank(),
#strip.text = element_text(size = 11, colour = "black", face = "bold"),
panel.margin = unit(.25, "lines"),
panel.background = element_blank(),
panel.border = element_rect(fill=NA, colour="black",size=1, linetype="solid"),
legend.title = element_blank(),
legend.text=element_text(size=16),
legend.text.align = 0,
#plot.margin=unit(c(-.5,.1,.1,.1), "cm"),
axis.title = element_text(size=16,face="bold"),
axis.text.y = element_text(colour="black", size=16,face="bold"),
axis.text.x = element_blank(),
#axis.text.x = element_text(colour="black", size=11,face="bold"),
axis.ticks = element_line(colour="black"),
axis.line.y = element_line(colour = "black"),
axis.line.x = element_line(colour = "black"))+
labs(x="Chromosome Position (Mb)", y= "Count")+
scale_fill_manual(values = c('CDS'="orange", 'Intron' = "plum2", 'Promoter' = "cornflowerblue","UTR"="olivedrab3"))
b
# same internal-API y-range extraction and 7.5% headroom as panel a
max_y<-ggplot_build(b)$panel$ranges[[1]]$y.range
max_y<-max_y[2]
b<- b + scale_y_continuous(expand = c(0,0),limits=c(0,max_y*1.075))
b
# Stack the two panels vertically and save the combined figure, then a
# standalone copy of panel b with facet strips restored.
all<-plot_grid(a,b,ncol=1,align="v" )+ background_grid(major = "xy", minor = "none")
all
setwd("/Users/kristen/Documents/transposon_figure_data/fig_for_e2")
ggsave(filename="Genic_Features.tiff",
dpi=300,
width=9,
height=4.5,
units="in")
b<-b+theme(strip.background = element_blank(),
strip.text = element_text(size = 16, colour = "black", face = "bold"))
ggsave(b,filename="Genic_Features_b.tiff",
dpi=300,
width=10,
height=4,
units="in")
#############################################################################
#############################################################################
#############################################################################
#4
# Section 4: genome-wide distribution of TE sites in 0.25 Mb bins, faceted by
# detection method (Insertion/Reference/Absence, rows) and chromosome (cols),
# filled by TE class.
library(ggplot2)
library(grid)
library(dplyr)
setwd("/Users/kristen/Documents/transposon_figure_data/data")
summarydata <- read.table("CtCp_all_nonredundant.txt")
names(summarydata)
names(summarydata)<-c("chr","start","end","TE","orientation","method","strain","class")
#3X-BIN .25MB
# collapse strains: one row per unique site/method/orientation/class combo
summarydata <- distinct(summarydata, chr,start,method, orientation,class)
# Add y coordinates for "phantom" points
names(summarydata)
# invisible points at these y-values force per-method minimum y-axis ranges
summarydata$top <- NA
summarydata$top[summarydata$method=="absent"] <- 6
summarydata$top[summarydata$method=="new"] <- 30
summarydata$top[summarydata$method=="reference"] <- 8
levels(summarydata$class)
#revalue classes
summarydata$class <- factor(summarydata$class,
levels = c("dnatransposon", "retrotransposon","unknown"),
labels = c("DNA Transposon", "Retrotransposon", "Unknown"))
#revalue methods
summarydata$method <- factor(summarydata$method,
levels = c("new","reference","absent"),
labels = c("Insertion", "Reference","Absence"))
m <- ggplot(summarydata, aes(x=start/1e6,fill=class))
m <-m + geom_histogram(binwidth=.25)+
scale_y_continuous(expand = c(0,0)) + scale_x_continuous(expand = c(0,0))+
facet_grid(method ~ chr,scale="free",space = "free_x")+
geom_point(data = subset(summarydata, method=="Absence"),aes(y=top),alpha=0) +
geom_point(data = subset(summarydata, method=="Insertion"),aes(y=top),alpha=0) +
geom_point(data = subset(summarydata, method=="Reference"),aes(y=top),alpha=0) +
labs(x="Chromosome Position (Mb)", y="Number of Sites")+
# NOTE(review): this theme() call passes axis.text.x twice (element_blank at
# the top, element_text(colour="black") near the bottom) and axis.text.y
# twice — which setting wins depends on theme()'s argument handling; the
# duplicates should be reconciled.
theme(strip.background = element_blank(),
strip.text = element_text(size = 16, colour = "black",face="bold"),
#panel.margin = unit(.25, "lines"),
panel.border = element_rect(fill=NA, colour="black",size=1, linetype="solid"),
panel.background = element_blank(),
panel.margin.y=unit(.75,"cm"),
plot.margin=unit(c(.1,.1,0,.1), "cm"),
#panel.margin = unit(.75, "cm"),
#panel.margin = unit(c(.5,.5,.5,.5), "cm"),
#panel.margin = unit(c(.5,.5,.5,.5), "cm"),
axis.title=element_text(size=16,face="bold"),
axis.text.y = element_text(colour = "black",size=16),
axis.text.x=element_blank(),
#axis.text.x = element_text(colour = "black",size=9),
axis.ticks =element_line(colour = "black"),
axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
legend.title=element_blank(),
legend.position="none",
legend.key.size=unit(1,"cm"),
legend.text=element_text(size=16))+
#scale_fill_manual(values = c("navy", "brown3", "darkgoldenrod2"))
scale_fill_manual(values = c("DNA Transposon" = "navy", "Retrotransposon"="brown3","Unknown"="darkgoldenrod2"))
m <- m
m
setwd("/Users/kristen/Documents/transposon_figure_data/fig_for_e2")
ggsave(filename="Chromosome_Distribution.tiff",
dpi=300,
width=9,
height=4.5,
units="in")
#############################################################################
#############################################################################
#############################################################################
#3
# Section 3 (part 1): load GWAS mapping results and TE position data, derive
# TE family / caller-method columns from the trait names via regex, and build
# `selection` (count-based traits with significant QTL) plus an annotated
# `processed_mapping_df` for the Manhattan-plot loop that follows.
library(dplyr)
library(ggplot2)
library(data.table)
library(grid)
library(stringr)
library(gridExtra)
library(tidyr)
library(scales)
library(gtable)
library(cowplot)
setwd("/Users/kristen/Documents/transposon_figure_data/data")
load("Processed_Transposon_Mappings_SUBSET2.Rda")
unique(processed_mapping_df$trait)
load("count_QTL.Rda")
# pull unique combos, remove strain column(don't need specific strain info at this point)
processed_mapping_df<- processed_mapping_df %>% distinct(trait,marker,strain)
#create family and method columns
# trait names are "<method>_TRANS_<family>"; split on the literal marker
processed_mapping_df$family <- stringr::str_split_fixed(processed_mapping_df$trait, "_TRANS_",2)[,2]
processed_mapping_df$method <- stringr::str_split_fixed(processed_mapping_df$trait, "_TRANS_",2)[,1]
#read in position data and create family column
positions <- read.table("CtCp_all_nonredundant.txt")
names(positions)<-c("CHROM","start","end","TE","orientation","method","strain","class")
positions$family<- stringr::str_split_fixed(positions$TE, regex("_(non-)?reference"),2)[,1]
positions$family<- paste(stringr::str_split_fixed(positions$family, "_",4)[,3],stringr::str_split_fixed(positions$family, "_",4)[,4],sep="_")
positions$family <- gsub("_$" ,"",positions$family)
positions$family <- gsub("_non-reference(.*)$" ,"",positions$family)
#select traits above BF.....this step not needed, double checking everything is above BF
selection<-filter(processed_mapping_df, log10p > BF)
#extract the count base traits
base_traits <-selection[(selection$method=="absent"| selection$method=="new" |selection$method=="reference"|selection$method=="ZERO_new"|selection$method=="ONE_new"), ]
# count traits carry a "_C" suffix on the family name
counts<-subset(base_traits, grepl("_C$", base_traits$family))
counts$family <- gsub("_C$" ,"",counts$family)
processed_mapping_df <- distinct(select(processed_mapping_df, -strain,-allele,-value))
processed_mapping_df<- processed_mapping_df %>% distinct(trait,marker)
#pull out only position traits from mappings dataframe
position_traits<-subset(selection,
grepl('^I', selection$trait) |
grepl('^V', selection$trait) |
grepl('^X', selection$trait))
#create family column
position_traits$family <- paste(stringr::str_split_fixed(position_traits$trait, "_",4)[,3],stringr::str_split_fixed(position_traits$trait, "_",4)[,4],sep="_")
position_traits$family <- gsub("_$" ,"",position_traits$family)
position_traits$family <- gsub("_non-reference(.*)$" ,"",position_traits$family)
# add position TRAIT_col family info to processed_mapping_df
processed_mapping_df<-processed_mapping_df %>%mutate(family = ifelse(processed_mapping_df$trait %in% position_traits$trait, (paste(stringr::str_split_fixed(processed_mapping_df$trait, "_",4)[,3],stringr::str_split_fixed(processed_mapping_df$trait, "_",4)[,4],sep="_")), processed_mapping_df$family))
selection<-counts
#strip count marker and remnant marks from dataframes
selection$trait <- gsub("_C$" ,"",selection$trait)
processed_mapping_df$trait <- gsub("_C$" ,"",processed_mapping_df$trait)
processed_mapping_df$family <- gsub("_C$" ,"",processed_mapping_df$family)
processed_mapping_df$family <- gsub("_$" ,"",processed_mapping_df$family)
processed_mapping_df$family <- gsub("_non-reference(.*)$" ,"",processed_mapping_df$family)
# SNP_col marks markers whose trait+peak combo passed QTL filtering; the
# Manhattan loop colors these red and shades their confidence intervals
processed_mapping_df<-mutate(processed_mapping_df,ID=paste(trait,peak_id,sep="_"))
copy<-processed_mapping_df
processed_mapping_df<-mutate(processed_mapping_df,SNP_col=ifelse(ID %in% count_QTL$trait,"PASS","FAIL" ))
count_QTL<-mutate(count_QTL, trait2=gsub("_\\d+$","",trait))
selection <- filter(selection, (trait %in% count_QTL$trait2))
processed_mapping_df<-filter(processed_mapping_df,CHROM != "MtDNA")
# attach TE class (DNA/retro/unknown) to each trait via its family
class_subset<- positions %>% distinct(class,family) %>% select(class,family)
selection <-merge(selection, class_subset, by="family")
selection<-arrange(selection,class,family,method)
unique(selection$trait)
# Per-trait Manhattan plots: for each significant trait, build a faceted plot
# of -log10(p) along each chromosome, shading QTL confidence intervals
# (SNP_col == "PASS") and coloring significant peak markers red. The first
# plots are captured as `first`..`sixth` for later assembly with plot_grid().
count<-0
for (i in unique(selection$trait)){
  specific_trait<- processed_mapping_df[processed_mapping_df$trait == i, ]
  # BUG FIX: original used `specific_trait$method==NA`, which compares against
  # NA and always yields NA (selecting only rows of NAs); is.na() is the
  # correct test. NOTE(review): `empty` is never used afterwards and could be
  # deleted outright.
  empty <-specific_trait[is.na(specific_trait$method),]
  #specific_trait_mx <- max(specific_trait$log10p)
  class_TE<-unique(filter(selection,trait==i)$class)
  # Drop infinite -log10(p) values before computing the y-axis maximum.
  # (Original compared log10p != "Inf", which only worked through implicit
  # character coercion; comparing against numeric Inf keeps the same rows.)
  pvalues<-filter(specific_trait,log10p != Inf)
  specific_trait_mx <- max(pvalues$log10p)
  TE<-specific_trait$family[1]
  rect_data<-filter(specific_trait,SNP_col=="PASS")
  # clean up the trait name for use as the legend title
  plot_title<-gsub(".*_TRANS_","",i)
  plot_title<-gsub("_CE$","",plot_title)
  plot_title<-gsub("WBTransposon","WBT",plot_title)
  A<- processed_mapping_df %>%
    filter(trait == i)%>%
    .[order(.$peak_id,na.last=FALSE),]%>%
    ggplot(.)+
    aes(x=POS/1e6,y=log10p,fill=BF)+ #fill to get legend
    geom_rect(data=rect_data,mapping=aes(xmin=startPOS/1e6, xmax=endPOS/1e6, ymin=0, ymax= Inf),fill="thistle1", alpha=1) +
    geom_point(aes( color=ifelse(log10p> BF & SNP_col=="PASS", 'red', 'black')),size=1)+
    facet_grid(.~CHROM,scale="free_x",space = "free_x") + #scale_color_identity() +
    geom_hline(aes(yintercept=BF),color="grey60",linetype="dashed")+
    # axis title and legend title are colored by TE class
    theme(strip.background = element_blank(),
      strip.text.x = element_blank(),
      panel.background = element_rect(fill = "white"),
      panel.border = element_rect(color="black", size=0.5, linetype="solid", fill=NA),
      panel.margin = unit(.6, "lines"),
      panel.background = element_blank(),
      axis.ticks =element_line(colour = "black"),
      axis.text.x = element_blank(),
      axis.text.y = element_text(colour = "black",size=14),
      axis.title.y = element_text(size=14,colour=ifelse(class_TE=="dnatransposon","navy",ifelse(class_TE=="retrotransposon","brown3","darkgoldenrod2"))),
      axis.title=element_text(size=14),
      plot.margin=unit(c(.05,.30,-.5,.30), "cm"),
      legend.title = element_text(size = 14, colour = ifelse(class_TE=="dnatransposon","navy",ifelse(class_TE=="retrotransposon","brown3","darkgoldenrod2")), angle = 270),
      legend.text = element_blank(),
      legend.key.size=unit(0,"cm"),
      legend.key = element_rect(colour = "pink"),
      legend.position=('right'))+
    labs(x="",y="",colour="black",size=16,face="bold")+
    scale_color_identity()+
    scale_fill_continuous(name=plot_title)+
    scale_y_continuous(breaks= pretty_breaks(),expand=c(0,0),limits=c(0,specific_trait_mx+.075*specific_trait_mx),labels = function(x) format(x,width = 4))
  A
  # only the first plot keeps its facet strip labels; the rest omit them
  if (count==0){B<-A+theme(strip.background = element_rect(fill = "white"),
    strip.text.x = element_text(size = 14, colour = "black",face="bold"));
  first<-B}
  if (count==1){second<-A}
  if (count==2){third<-A}
  if (count==3){fourth<-A}
  if (count==4){fifth<-A}
  if (count==5){sixth<-A}
  count<-count+1
}
# Assemble the first five per-trait Manhattan plots into one column, add a
# shared rotated y-axis label and a shared x-axis label, and save. Then save
# plot `fourth` separately with its facet strips and axis title restored.
# NOTE(review): this errors if the preceding loop produced fewer than five
# plots (first..fifth would be undefined).
a_all<-plot_grid(first,second,third,fourth,fifth,ncol=1) #ZER)_new_TRANS_NeSL-1_C no longer in here so don't need fifth
label<-expression(bold(-log["10"](p)))
a_all<- a_all + draw_label(label, x = .04, y = 0.5, hjust = .5, vjust = .5,
fontfamily = "", fontface = "bold", colour = "black", size = 14,
angle = 90, lineheight = 0.9, alpha = 1)
# invisible placeholder plot reserves space below the grid for the x label
df <- data.frame(1,2)
blank_plot<-ggplot(df,aes(x=1,y=1)) + geom_point(color="white") + theme(axis.line=element_blank(),axis.text =element_blank(),axis.ticks =element_blank(),axis.title =element_blank(),panel.background = element_blank(),panel.grid = element_blank())
a_all<-plot_grid(a_all,blank_plot,ncol=1,rel_heights = c(1, .03))
a_all<- a_all + draw_label("Chromosome Position (Mb)", x = .5, y = 0.020, hjust = .5, vjust = .5,
fontfamily = "", fontface = "bold", colour = "black", size = 14,
angle = 0, lineheight = 0.9, alpha = 1)
setwd("/Users/kristen/Documents/transposon_figure_data/fig_for_e2")
ggsave(filename="five_trait_QTL.tiff",
dpi=300,
width=7.5,
height=12.5,
units="in")
fourth<-fourth+theme(strip.background = element_rect(fill = "white"),
plot.margin=unit(c(.05,.30,.1,.30), "cm"),
axis.title=element_text(size=16,colour="black",face="bold"),
strip.text.x = element_text(size = 14, colour = "black",face="bold"))+
labs(x="Chromosome Position (Mb)",y="",colour="black",size=16)
fourth<-plot_grid(fourth) + draw_label(label, x = .04, y = 0.5, hjust = .5, vjust = .5,
fontfamily = "", fontface = "bold", colour = "black", size = 16,
angle = 90, lineheight = 0.9, alpha = 1)
fourth
ggsave(fourth,filename="fourth.tiff",
dpi=300,
width=7.5,
height=2.5,
units="in")
#############################################################################
#############################################################################
#############################################################################
#1
# Section 1: per-strain totals of TE sites by caller (insertion / reference /
# absence) and Spearman correlations between them, plotted as scatter plots
# with an identity line and a linear fit.
library(ggplot2)
library(dplyr)
library(tidyr)
library(stringr)
library(cowplot)
library(grid)
setwd("/Users/kristen/Documents/transposon_figure_data/data")
summarydata <- read.table("T_kin_C_matrix_full.txt",header=TRUE)
#remove ZERO_new traits
summarydata<-subset(summarydata,!grepl('^ZERO_new', summarydata$trait))
summarydata<-subset(summarydata,!grepl('^coverage', summarydata$trait))
#clean trait names
summarydata$trait <- gsub("_C$" ,"",summarydata$trait)
summarydata$trait <- gsub("^ONE_new" ,"new",summarydata$trait)
#new column that specifies what caller was used
summarydata$method<- stringr::str_split_fixed(summarydata$trait, "_TRANS_",2)[,1]
#new column that specifies TE family
summarydata$transposon<- stringr::str_split_fixed(summarydata$trait, "_TRANS_",2)[,2]
summarydata<-filter(summarydata,transposon=="total") # this will get total ins,ref,abs calls, NOT total DNA, Retro, Unknonwn
unique(summarydata$transposon)
#names(summarydata)
# reshape wide (one column per strain) to long (sample, total_tes).
# NOTE(review): gather() is superseded — pivot_longer() is the modern
# equivalent if this script is ever updated.
summarydata<-gather(summarydata, "sample","value",2:(ncol(summarydata)-2))
summarydata<-rename(summarydata,total_tes=value)
#reformat the data
total_absence<-filter(summarydata,method=="absent")
total_reference<-filter(summarydata,method=="reference")
total_insertion<-filter(summarydata,method=="new")
#SCATTER
# merge the three per-method tables on sample (strain) into one wide table
final_merge<- Reduce(function(x, y) merge(x, y, all=TRUE,by="sample"), list(total_absence, total_reference, total_insertion))
names(final_merge)<-c("sample", "trait.x", "method.x", "transposon.x", "total_absences", "trait.y", "method.y", "transposon.y", "total_references", "trait", "method", "transposon", "total_insertions")
#1 ABSENCE vs INSERTION
#spearman correlation
correlation<-cor.test(final_merge$total_absences, final_merge$total_insertions,method="spearman",exact=FALSE)
rho<-round(correlation$estimate,3)
max_insertions<-max(final_merge$total_insertions)
max_absences<-max(final_merge$total_absences)
la <- paste("italic(rho) == ", rho)
# NOTE(review): both axes use max_insertions — presumably intentional to keep
# the plot square with equal axes; confirm max_absences <= max_insertions or
# points will be silently dropped by ylim.
m1 <- ggplot(final_merge, aes(x=total_insertions, y=total_absences))
m1 <- m1 + geom_point(size=1.25) + xlim(0,max_insertions)+ ylim(0,max_insertions)+
geom_smooth(method="lm",se=FALSE,col="red")+
geom_abline(slope=1,linetype="dashed",colour="gray52")+
annotate("text", x=.2*max_insertions, y=.9*max_insertions,label=la,parse=TRUE, colour="red",size=4.5)+
theme(strip.text.x = element_text(size = 6, colour = "black"),
strip.background = element_blank(),
legend.position=c(.90,0.75),
legend.background = element_rect(fill=FALSE),
legend.text=element_text(size=16),
panel.background = element_rect(fill = "white"),
axis.ticks =element_line(colour = "black"),
axis.text.y = element_text(colour = "black",size=16),
axis.text.x = element_text(colour = "black",size=16),
axis.line.y = element_line(colour = "black"),
axis.line.x = element_line(colour = "black"),
axis.title=element_text(size=16,face="bold"))+
guides(fill=FALSE) +
labs(x = "Insertion Sites", y = "Absence Sites")
m1
setwd("/Users/kristen/Documents/transposon_figure_data/figures")
ggsave(filename="Absence_vs_Insertion.tiff",
dpi=300,
width=4,
height=4,
units="in")
#3 INSERTION vs REFERENCE
#spearman correlation
correlation<-cor.test(final_merge$total_insertions, final_merge$total_references,method="spearman",exact=FALSE)
rho<-round(correlation$estimate,3)
max_references<-max(final_merge$total_references)
max_insertions<-max(final_merge$total_insertions)
la <- paste("italic(rho) == ", rho)
max_references<-max(final_merge$total_references)
m3 <- ggplot(final_merge, aes(x=total_references, y=total_insertions))
m3 <- m3 + geom_point(size=1.25) + xlim(0,max_references)+ ylim(0,max_references)+
geom_smooth(method="lm",se=FALSE,col="red")+
geom_abline(slope=1,linetype="dashed",colour="gray52")+
annotate("text", x=.2*max_references, y=.9*max_references,label=la,parse=TRUE, colour="red",size=4.5)+
theme(strip.text.x = element_text(size = 16, colour = "black"),
strip.background = element_blank(),
legend.position=c(.90,0.75),
legend.background = element_rect(fill=FALSE),
legend.text=element_text(size=16),
panel.background = element_rect(fill = "white"),
axis.ticks =element_line(colour = "black"),
axis.text.y = element_text(colour = "black",size=16),
axis.text.x = element_text(colour = "black",size=16),
axis.line.y = element_line(colour = "black"),
axis.line.x = element_line(colour = "black"),
axis.title=element_text(size=16,face="bold"))+
guides(fill=FALSE) +
labs(x = "Reference Sites", y = "Insertion Sites")
ggsave(filename="Insertion_vs_Reference.tiff",
dpi=300,
width=4,
height=4,
units="in")
setwd("/Users/kristen/Documents/transposon_figure_data/fig_for_e2")
# combined two-panel figure (A: absence vs insertion, B: insertion vs reference)
plot_grid(m1, m3,ncol=2,labels=c('A', 'B'))+ background_grid(major = "xy", minor = "none")
ggsave(filename="All_vs_All.tiff",
dpi=300,
width=8,
height=4,
units="in")
#TRANSPOSONS vs STRAINS
# Dot plots of total insertion / absence sites per strain, strains ordered by
# their totals along the x axis.
# NOTE(review): referencing columns as insertions$sample inside aes() is an
# anti-pattern (bypasses ggplot's data masking); it works here only because
# the same data frame is passed to ggplot().
names(summarydata)
#INSERTIONS
insertions<-summarydata[summarydata$method=="new",]
insertions<-(insertions[ order(insertions$total_tes), ])
#plot(insertions$total_tes~insertions$sample)
#pdf(file = "insertions_per_strain.pdf")
m1 <- ggplot(insertions, aes(x=reorder(insertions$sample,insertions$total_tes), y=insertions$total_tes))
m1<- m1 + geom_point(size=.75) +aes(group=1)+
theme(axis.text.x = element_text(color="black",size=8,angle=90,hjust=1),
axis.text.y = element_text(color="black",size=16,face="bold"),
axis.title = element_text(color="black",size=16,face="bold"),
axis.line.y = element_line(colour = "black"),
axis.line.x = element_line(colour = "black"),
axis.ticks =element_line(colour = "black"))+
labs(x="", y="Number of Insertion Sites")
m1
ggsave(filename="Insertions_per_Strain.tiff",
dpi=300,
width=7.5,
height=10,
units="in")
#ABSENCES
absences<-summarydata[summarydata$method=="absent",]
absences<-(absences[ order(absences$total_tes), ])
#plot(absences$total_tes~absences$sample)
#pdf(file = "absences_per_strain.pdf")
m2 <- ggplot(absences, aes(x=reorder(absences$sample,absences$total_tes), y=absences$total_tes))
m2<- m2 + geom_point(size=.75) +aes(group=1)+
theme(axis.text.x = element_text(color="black",size=8,angle=90,hjust=1),
axis.text.y = element_text(color="black",size=16,face="bold"),
axis.title = element_text(color="black",size=16,face="bold"),
axis.line.y = element_line(colour = "black"),
axis.line.x = element_line(colour = "black"),
axis.ticks =element_line(colour = "black"))+
labs(x="", y="Number of Absence Sites")
m2
ggsave(filename="Absences_per_Strain.tiff",
dpi=300,
width=7.5,
height=10,
units="in")
# stacked two-panel version of the same plots
plot_grid(m1, m2,ncol=1)
ggsave(filename="All_per_Strain.tiff",
dpi=300,
width=10.5,
height=7,
units="in")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.