| blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 2–327) | content_id (string, length 40) | detected_licenses (list, length 0–91) | license_type (string, 2 classes) | repo_name (string, length 5–134) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 46 classes) | visit_date (timestamp[us], 2016-08-02 22:44:29 to 2023-09-06 08:39:28) | revision_date (timestamp[us], 1977-08-08 00:00:00 to 2023-09-05 12:13:49) | committer_date (timestamp[us], 1977-08-08 00:00:00 to 2023-09-05 12:13:49) | github_id (int64, 19.4k to 671M, nullable) | star_events_count (int64, 0 to 40k) | fork_events_count (int64, 0 to 32.4k) | gha_license_id (string, 14 classes) | gha_event_created_at (timestamp[us], 2012-06-21 16:39:19 to 2023-09-14 21:52:42, nullable) | gha_created_at (timestamp[us], 2008-05-25 01:21:32 to 2023-06-28 13:19:12, nullable) | gha_language (string, 60 classes) | src_encoding (string, 24 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7 to 9.18M) | extension (string, 20 classes) | filename (string, length 1–141) | content (string, length 7 to 9.18M) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 7b82f5cb510af8682299b1b19234bb1a8259437d | 80d1f66c8183cd557d745fbbd674338c53c45ac2 | /man/required_pkgs.step.Rd | 664451e986ef53eb215a1576c00991ff6c5ec71a | ["MIT"] | permissive | tidymodels/textrecipes | ece87dc5add9c0f0f120fddd80d5e9caadbd966c | 436eb6e59415d214934a426167928d8719280b55 | refs/heads/main | 2023-08-20T05:16:01.508229 | 2023-08-17T21:41:30 | 2023-08-17T21:41:30 | 148,230,862 | 141 | 16 | NOASSERTION | 2023-08-17T21:41:32 | 2018-09-10T23:15:56 | R | UTF-8 | R | false | true | 2,852 | rd | required_pkgs.step.Rd | (content below) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_levels.R, R/clean_names.R,
% R/dummy_hash.R, R/lda.R, R/lemma.R, R/ngram.R, R/pos_filter.R,
% R/sequence_onehot.R, R/stem.R, R/stopwords.R, R/text_normalization.R,
% R/textfeature.R, R/texthash.R, R/tf.R, R/tfidf.R, R/tokenfilter.R,
% R/tokenize.R, R/tokenize_bpe.R, R/tokenize_sentencepiece.R,
% R/tokenize_wordpiece.R, R/tokenmerge.R, R/untokenize.R, R/word_embeddings.R
\name{required_pkgs.step_clean_levels}
\alias{required_pkgs.step_clean_levels}
\alias{required_pkgs.step_clean_names}
\alias{required_pkgs.step_dummy_hash}
\alias{required_pkgs.step_lda}
\alias{required_pkgs.step_lemma}
\alias{required_pkgs.step_ngram}
\alias{required_pkgs.step_pos_filter}
\alias{required_pkgs.step_sequence_onehot}
\alias{required_pkgs.step_stem}
\alias{required_pkgs.step_stopwords}
\alias{required_pkgs.step_text_normalization}
\alias{required_pkgs.step_textfeature}
\alias{required_pkgs.step_texthash}
\alias{required_pkgs.step_tf}
\alias{required_pkgs.step_tfidf}
\alias{required_pkgs.step_tokenfilter}
\alias{required_pkgs.step_tokenize}
\alias{required_pkgs.step_tokenize_bpe}
\alias{required_pkgs.step_tokenize_sentencepiece}
\alias{required_pkgs.step_tokenize_wordpiece}
\alias{required_pkgs.step_tokenmerge}
\alias{required_pkgs.step_untokenize}
\alias{required_pkgs.step_word_embeddings}
\title{S3 methods for tracking which additional packages are needed for steps.}
\usage{
\method{required_pkgs}{step_clean_levels}(x, ...)
\method{required_pkgs}{step_clean_names}(x, ...)
\method{required_pkgs}{step_dummy_hash}(x, ...)
\method{required_pkgs}{step_lda}(x, ...)
\method{required_pkgs}{step_lemma}(x, ...)
\method{required_pkgs}{step_ngram}(x, ...)
\method{required_pkgs}{step_pos_filter}(x, ...)
\method{required_pkgs}{step_sequence_onehot}(x, ...)
\method{required_pkgs}{step_stem}(x, ...)
\method{required_pkgs}{step_stopwords}(x, ...)
\method{required_pkgs}{step_text_normalization}(x, ...)
\method{required_pkgs}{step_textfeature}(x, ...)
\method{required_pkgs}{step_texthash}(x, ...)
\method{required_pkgs}{step_tf}(x, ...)
\method{required_pkgs}{step_tfidf}(x, ...)
\method{required_pkgs}{step_tokenfilter}(x, ...)
\method{required_pkgs}{step_tokenize}(x, ...)
\method{required_pkgs}{step_tokenize_bpe}(x, ...)
\method{required_pkgs}{step_tokenize_sentencepiece}(x, ...)
\method{required_pkgs}{step_tokenize_wordpiece}(x, ...)
\method{required_pkgs}{step_tokenmerge}(x, ...)
\method{required_pkgs}{step_untokenize}(x, ...)
\method{required_pkgs}{step_word_embeddings}(x, ...)
}
\arguments{
\item{x}{A recipe step}
}
\value{
A character vector
}
\description{
Recipe-adjacent packages always list themselves as a required package so that
the steps can function properly within parallel processing schemes.
}
\keyword{internal}
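% Usage sketch (hypothetical data; \code{recipe()} and \code{step_tokenize()} come
% from the recipes and textrecipes packages, and the exact return vector depends
% on the step and its engine):
\examples{
\dontrun{
library(recipes)
library(textrecipes)
rec <- recipe(~ text, data = data.frame(text = "hello world"))
rec <- step_tokenize(rec, text)
required_pkgs(rec$steps[[1]]) # expected to include "textrecipes"
}
}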
| eae6a1e9e6bd325354460e19c5bd2428ccdb82e5 | 97032e64d741ae596886b73f33678f2fe447d05a | /inst/doc/Analysis_pipelines_for_working_with_sparkR.R | 2e8efc53c84e1e3e9cda6953782d8d1c8e761615 | [] | no_license | cran/analysisPipelines | 1c3f75776d79169be3df4e20f3c12fcf73cde70f | e8b5e3257583489b1e34501b02f055ce7c4fb355 | refs/heads/master | 2021-07-12T05:00:09.754080 | 2020-06-12T07:00:02 | 2020-06-12T07:00:02 | 164,692,478 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,911 | r | Analysis_pipelines_for_working_with_sparkR.R | (content below) |
## ----eval = F------------------------------------------------------------
# devtools::install_github('apache/spark@v2.x.x', subdir='R/pkg')
## ------------------------------------------------------------------------
knitr::opts_chunk$set(
eval = FALSE
)
## ---- include=FALSE------------------------------------------------------
#
# library(ggplot2)
# library(analysisPipelines)
# library(SparkR)
#
# ## Define these variables as per the configuration of your machine. This is just an example.
# sparkHome <- "/Users/naren/softwares/spark-2.3.1-bin-hadoop2.7/"
# sparkMaster <- "local[1]"
# sparkPackages <- c("org.apache.spark:spark-sql-kafka-0-10_2.11:2.3.1")
# # Set spark home variable if not present
# if(Sys.getenv("SPARK_HOME") == "") {
# Sys.setenv(SPARK_HOME = sparkHome)
# }
## ------------------------------------------------------------------------
# sparkRSessionCreateIfNotPresent(master = sparkMaster, sparkPackages = sparkPackages)
## ------------------------------------------------------------------------
# inputDataset <- iris
#
# # Replacing '.' in column names with '_' as SparkR is not able to deal with '.' in column names
# colnames(inputDataset) <- gsub(".", "_", colnames(inputDataset), fixed = T)
#
# pipelineObj <- AnalysisPipeline(input = inputDataset)
## ------------------------------------------------------------------------
# meanByGroup <- function(inputDataset, groupByColumn, colToSummarize) {
# groupSummary <- SparkR::summarize( SparkR::groupBy(inputDataset,inputDataset[[groupByColumn]]),
# avg = SparkR::mean(inputDataset[[colToSummarize]]))
# return(groupSummary)
# }
## ------------------------------------------------------------------------
# # Register user-defined functions
# registerFunction("meanByGroup", "Mean By Group",
# engine = "spark")
#
# # List all registered functions
# getRegistry()
#
# # Define pipeline from list of registered functions
# pipelineObj %>% meanByGroup_spark(groupByColumn = "Species", colToSummarize = "Sepal_Length", storeOutput = T) %>%
# meanByGroup_spark(groupByColumn = "Species", colToSummarize = "Petal_Length", storeOutput = T) -> pipelineObj
#
# pipelineObj %>>% getPipeline
# pipelineObj %>>% visualizePipeline
## ----fig.width=6, fig.height=3-------------------------------------------
# pipelineObj %>% generateOutput -> pipelineObj
#
# sepalLengthBySpecies <- pipelineObj %>>% getOutputById(1)
# sepalLengthBySpeciesDf <- as.data.frame(sepalLengthBySpecies)
# DT::datatable(head(sepalLengthBySpeciesDf),options = list(scrollX = T, scrollY = T))
#
# petalLengthBySpecies <- pipelineObj %>>% getOutputById(2)
# petalLengthBySpeciesDf <- as.data.frame(petalLengthBySpecies)
# DT::datatable(head(petalLengthBySpeciesDf),options = list(scrollX = T, scrollY = T))
| 49bdccfb51ff08438af21ba4dd894a8a2bc7f2cd | 6ae2f0e4cf4b0faaad5fea6c14e13a7875e1630f | /FinalRProject.R | d09a03e02482a817a87c719faa344980f9a6ba4d | [] | no_license | swiesenfeld0/OpenDataInR | 9d8bc7c361a1d3a353ef76f74b1b0bbadd1b9711 | a7b5bc77620ba26a15c3d7a53b78142effdfa3e2 | refs/heads/master | 2020-07-26T12:52:22.865159 | 2019-09-15T20:06:20 | 2019-09-15T20:06:20 | 208,650,360 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,143 | r | FinalRProject.R | (content below) |
# Open Data with R - Final Project
## Sophia Wiesenfeld (swiesenfeld0@gatech.edu)
## September 20th, 2019
#Packages Used: installing and loading
#install.packages("corrplot")
#install.packages("tidyverse")
#install.packages("skimr")
#install.packages("Hmisc")
#install.packages("manifestoR")
#install.packages("ggplot2")
#install.packages("predict3d")
library(tidyverse)
library(skimr)
library(Hmisc)
library(ggplot2)
library(manifestoR)
library(corrplot)
library(predict3d)
#Accessing and Loading the Manifesto Project Database-- account needed for API key
#save your generated API key in a txt file named manifesto_apikey.txt
#Codebook found at https://manifesto-project.wzb.eu/down/data/2019a/codebooks/codebook_MPDataset_MPDS2019a.pdf
mp_setapikey("manifesto_apikey.txt")
fullDataset <- mp_maindataset()
###################################
#Data Transformations - for ease of use
#filter by USA manifestos
manifestoUSA <- filter(fullDataset, country=="61")
manifestoUSA <- manifestoUSA %>% mutate(fraKai = franzmann_kaiser(manifestoUSA))
#Removing the two outlier parties that exist in the dataset, only Republican and Democrat manifestos remain
manifestoUSA <- filter(manifestoUSA, parfam != 20)
manifestoUSA <- filter(manifestoUSA, parfam != 90)
#Making the dataset numeric and simplifying some columns
#date is in the format YYYYMM
#parfam - "Tentative grouping of political parties and alliances into party families"
#per101-per706 are mentions of political stances
#100s - External Relations (ie Internationalism)
#200s - Freedom and Democracy
#300s - Political System
#400s - Economy
#500s - Welfare and Quality of Life (ie Education Expansion)
#600s - Fabric of Society (ie Positive/Negative Multiculturalism)
#700s - Social Groups (ie Labor Groups, Minority Groups)
#RILE is the most common method of calculating a manifesto's left-right position based on selected per-values by Laver and Budge
mpNumeric <- select(manifestoUSA,date,parfam,
per101,per102,per103,per104,per105,per106,per107,per108,per109,per110,
per201,per202,per203,per204,
per301,per302,per303,per304,per305,
per401,per402,per403,per404,per405,per406,per407,per408,per409,per410,per411,per412,per413,per414,per415,per416,
per501,per502,per503,per504,per505,per506,per507,
per601,per602,per603,per604,per605,per606,per607,per608,
per701,per702,per703,per704,per705,per706,
rile,planeco,markeco,welfare,intpeace, fraKai)
#Creating a value 'politParty' of 0 or 1 for the Democratic and Republican Parties, respectively.
#For future linear regression model
mpNumericRepublicanDemocratOnly <- mpNumeric %>% mutate(politParty = parfam/30 - 1)
############################
#Exploratory Data Analysis
#Investigating the hypothesis that USA manifestos reflect the idea that the US emerged as a global superpower after WWII: was that belief reflected at that time as well as now?
#Seeking correlation between potential per-values that might reflect this
#per107 - Internationalism: Positive. Need for international co-operation, including co-operation with specific countries other than those coded in 101.
#per202 - Democracy. Favorable mentions of democracy as the "only game in town".
#per305 - Political Authority. References the manifesto party's competence to govern and/or other party's lack of such competence. Also includes favorable mentions of the desirability of a strong and/or stable government in general.
#per601 - National Way of Life: Positive. Favorable mentions of the manifesto country's nation, history, and general appeals.
explDataAnalysis <- select(mpNumericRepublicanDemocratOnly, date, politParty, per107, per202, per305, per601)
#Showing general correlations
corrplot(cor(explDataAnalysis))
#Shows spike in political authority ideology for both political parties after WWII
ggplot(explDataAnalysis, aes(x=date, y=per305, color=politParty)) + geom_point()
#and that spike is not driven by or limited to either Party
ggplot(explDataAnalysis, aes(x=date, y=per305, color=politParty)) + geom_point() +geom_line() + facet_grid(. ~ politParty)
#############################
#Data Models
#Comparing left/right scaling methods for determining political party stance by per-values alone
# manifestoR package very necessary
#Variation in RILE score for Democratic Party manifestos and Republican manifestos(Laver & Budge 1992)
ggplot(mpNumericRepublicanDemocratOnly, aes(x=date, y=rile, color=politParty)) + geom_point()
#Variations in fraKai score, another L/R scaling method (Franzmann & Kaiser 2006)
ggplot(mpNumericRepublicanDemocratOnly, aes(x=date, y=fraKai, color=politParty)) + geom_point()
#vanilla
mpNumericRepublicanDemocratOnly <- mpNumericRepublicanDemocratOnly %>% mutate(vanilla = vanilla(manifestoUSA))
ggplot(mpNumericRepublicanDemocratOnly, aes(x=date, y=vanilla, color=politParty)) + geom_point()
#creating the models:
scalingModelRile <- glm(politParty ~ rile,
family=binomial(link=logit), data=mpNumericRepublicanDemocratOnly)
summary(scalingModelRile)
scalingModelfraKai <- glm(politParty ~ fraKai + date,
family=binomial(link=logit), data=mpNumericRepublicanDemocratOnly)
summary(scalingModelfraKai)
#it seems like the fraKai model doesn't work, potentially due to the NA values created by the fraKai method
scalingModelVanilla <- glm(politParty ~ vanilla,
family=binomial(link=logit), data=mpNumericRepublicanDemocratOnly)
summary(scalingModelVanilla)
#visualizing the differences
ggpredict(scalingModelRile,se=TRUE,interactive=TRUE, show.text=FALSE)
ggpredict(scalingModelVanilla,se=TRUE,interactive=TRUE, show.text=FALSE)
#It seems like both RILE and vanilla scores can predict a political party with a high level of accuracy
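# Usage sketch for the fitted logits above (the score values are arbitrary):
# type = "response" returns the predicted probability that a manifesto is
# Republican (politParty = 1)
predict(scalingModelRile, newdata = data.frame(rile = 10), type = "response")
predict(scalingModelVanilla, newdata = data.frame(vanilla = 0.5), type = "response")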
| 1313d8e20c732dfab41ea76aaed0e3611fb5e569 | b184c4c14b8710675ecbeffa432b53ad406850f0 | /3rd_homework(03-2).R | 87378b26b825c2fe975d41ad4bc7ff144e06117b | [] | no_license | penpen-dongE/R-test | 070236010352870de92e6961f5df9c118a5d8d69 | 12f6e9526d14c8365afeb5301dff688704cbc1ec | refs/heads/master | 2020-07-16T16:09:29.255144 | 2019-10-05T05:44:31 | 2019-10-05T05:44:31 | 205,821,484 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,769 | r | 3rd_homework(03-2).R | (content below) |
setwd("C:/education/R/workspace")
customer <- read.csv("./data/loarn/customer.csv",
sep="|",header = TRUE,
stringsAsFactors=FALSE)
View(customer)
library(lubridate)
end <- parse_date_time(2015, "%Y")
end
start <- parse_date_time(customer$AGE, "%Y")
start
diff_in_yr=(end-start)/365
diff_in_yr
customer$AGE <- floor(diff_in_yr)
View(customer)
findInterval(25, c(10,29,39,49,59,90))
findInterval(45,c(10,29,39,49,59,90))
customer$AGE <- findInterval(customer$AGE,c(10,29,39,49,59,90))
View(customer)
customer$AGE <- factor(customer$AGE, labels=c("20s&below",
"30s","40s","50s",
"60s&up"))
View(customer)
loarn <- read.csv("./data/loarn/loarn.csv",
sep="|",header = TRUE, stringsAsFactors = FALSE)
View(loarn)
start <- parse_date_time("199312","%Y%m")
start_ln <- parse_date_time(loarn$LN_FROM,"%Y%m")
start_ln
end_ln <- parse_date_time(loarn$YM,"%Y%m")
end_ln
diff_in_month=as.double(difftime(end_ln,start_ln,
units="days")*12/365)
loarn$DUR_LN <- floor(diff_in_month)
View(loarn)
library(dplyr)
loarn_1512 <- loarn %>% dplyr::filter(YM == 201512)
View(loarn_1512)
overdue <- read.csv("./data/loarn/overdue.csv",
sep="|", header = TRUE)
View(overdue)
start_dlq <- parse_date_time(overdue$DLQ_FROM,"%Y%m")
end_dlq <- parse_date_time(as.character(overdue$YM),"%Y%m")
diff_in_month=as.double(difftime(end_dlq,start_dlq,units = "days")*12/365)
overdue$DUR_DLQ <- floor(diff_in_month)
View(overdue)
overdue_1512 <- overdue %>% dplyr::filter(YM==201512)
View(overdue_1512)
left_join(loarn_1512,customer,by=c("CustomerId"))
df<-left_join(loarn_1512,customer,by=c("CustomerId"))
View(df)
JOIN_KEY <- df %>% group_by(CustomerId) %>%
summarise(NUM_LN=n())
x <- right_join(customer,JOIN_KEY,by="CustomerId")
View(x)
LN_AMT <- df %>%
group_by(CustomerId) %>%
summarise(AMT_LN=sum(LN_AMT))
x <- right_join(x, LN_AMT, by="CustomerId")
View(x)
NUM_NBLN <-
df %>% dplyr::filter(SCTR_CD!=1) %>%
group_by(CustomerId) %>%
dplyr::summarise(NUM_NBLN=n())
x<-right_join(x, NUM_NBLN, by="CustomerId")
View(x)
NBLN_AMT <- df %>%
dplyr::filter(SCTR_CD != 1) %>%
group_by(CustomerId) %>%
dplyr::summarise(NBLN_AMT=sum(LN_AMT))
x<-right_join(x,NBLN_AMT,by="CustomerId")
View(x)
library(plyr)
df1 <- join(loarn_1512, overdue_1512,by=c("CustomerId", "COM_KEY"))
View(df1)
df1$DLQ_1512 <- ifelse(is.na(df1$DLQ_CD_1),0,1)
View(df1)
NUM_DLQ_1512 <- overdue_1512 %>%
group_by(CustomerId) %>%
dplyr::summarise(NUM_DLQ_1512=n())
y <- left_join(x, NUM_DLQ_1512, by="CustomerId")
y$NUM_DLQ_1512[is.na(y$NUM_DLQ_1512)]<-0
View(y)
overdue_2016 <-
overdue %>% dplyr::filter(YM>201512& YM<=201612)
NUM_DLQ_2016 <- overdue_2016 %>%
group_by(CustomerId) %>%
dplyr::summarise(NUM_DLQ_2016=n())
y <- left_join(y, NUM_DLQ_2016, by="CustomerId")
y$NUM_DLQ_2016[is.na(y$NUM_DLQ_2016)]<-0
DLQ_AMT_1512 <- overdue_1512 %>%
group_by(CustomerId) %>%
dplyr::summarise(DLQ_AMT_1512=sum(DLQ_AMT))
y <- join(y, DLQ_AMT_1512, by="CustomerId")
y$DLQ_AMT_1512[is.na(y$DLQ_AMT_1512)]<-0
View(y)
y$DLQ_AMT_1512 <- ifelse(y$NUM_DLQ_1512>0,1,0)
View(y)
library(data.table)
NUM_COM <- data.frame(setDT(df)[, .(NUM_COM=uniqueN(COM_KEY)),
.(CustomerId)])
View(NUM_COM)
z <- left_join(y, NUM_COM, by="CustomerId")
View(z)
z$NUM_DLQ_2016 <- ifelse(z$NUM_DLQ_2016>0,1,0)
input <- z
str(input)
input$NUM_DLQ_2016 <- factor(input$NUM_DLQ_2016)
input$NUM_DLQ_1512 <- factor(input$NUM_DLQ_1512)
input$GENDER <- factor(input$GENDER)
str(input)
input <- input %>%
select(-CustomerId)
View(input)
| 7c61ad32b8b765eaf86770da5d70d0bea26b17e4 | d7bcf93820a7d4666886a74d425bc771a55a0ddb | /R/svg.R | fddaf8277cc261b25f717ea3eca8a77a1d9e630a | [] | no_license | adamwk97/ggiraph | f74f32421c1f624081c13078bb99a3e952f13ebf | 0475eefe51762c703f4e54ef625610d5fad2553a | refs/heads/master | 2021-03-10T20:30:47.906710 | 2019-12-15T11:30:59 | 2019-12-15T11:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,298 | r | svg.R | (content below) |
#' Reads the interactive attributes from xml and assigns them to their svg elements.
#' @noRd
#' @importFrom purrr walk
#' @importFrom xml2 xml_find_first xml_add_child xml_cdata
set_svg_attributes <- function(data, canvas_id) {
comments <- xml_find_all(data, "//*[local-name() = 'comment']")
if (length(comments) == 0) {
return()
}
idprefix <- paste0(canvas_id, "_el_")
env_data_hover <- new.env(parent = emptyenv())
env_key_hover <- new.env(parent = emptyenv())
env_theme_hover <- new.env(parent = emptyenv())
env_data_selected <- new.env(parent = emptyenv())
env_key_selected <- new.env(parent = emptyenv())
env_theme_selected <- new.env(parent = emptyenv())
errored <- 0
walk(comments, function(comment) {
targetIndex <- xml_attr(comment, "target")
attrName <- xml_attr(comment, "attr")
attrValue <- xml_text(comment)
target <- xml_find_first(data,
paste0("//*[@id='", idprefix, targetIndex, "']"))
if (!inherits(target, "xml_missing")) {
if (attrName == "hover_css") {
# collect unique combinations of hover_css per data/key/theme id
collect_css(target,
attrValue,
env_data_hover,
env_key_hover,
env_theme_hover)
} else if (attrName == "selected_css") {
# collect unique combinations of selected_css per data/key/theme id
collect_css(target,
attrValue,
env_data_selected,
env_key_selected,
env_theme_selected)
} else {
# set the attribute directly
xml_attr(target, attrName) <- attrValue
}
} else {
errored <<- errored + 1
}
})
# now place the individual styles
css <- make_css(env_data_hover,
"data-id",
"hover_",
canvas_id)
css <- c(css,
make_css(env_key_hover,
"key-id",
"hover_key_",
canvas_id))
css <- c(css,
make_css(env_theme_hover,
"theme-id",
"hover_theme_",
canvas_id))
css <- c(css,
make_css(env_data_selected,
"data-id",
"selected_",
canvas_id))
css <- c(css,
make_css(env_key_selected,
"key-id",
"selected_key",
canvas_id))
css <- c(css,
make_css(env_theme_selected,
"theme-id",
"selected_theme",
canvas_id))
if (length(css) > 0) {
style_tag <-
xml_add_child(data, "style", type = "text/css", .where = 0)
xml_add_child(style_tag, xml_cdata(paste(css, collapse = '\n')))
}
# clear the comments
xml_remove(comments)
if (errored > 0) {
stop("Could not set svg attributes for some elements (",
errored,
" cases)")
}
}
collect_css <- function(target,
attrValue,
env_data,
env_key,
env_theme) {
data_id <- xml_attr(target, "data-id")
key_id <- xml_attr(target, "key-id")
theme_id <- xml_attr(target, "theme-id")
if (!is.null(data_id) && !is.na(data_id)) {
env_data[[data_id]] <- attrValue
} else if (!is.null(key_id) && !is.na(key_id)) {
env_key[[key_id]] <- attrValue
} else if (!is.null(theme_id) && !is.na(theme_id)) {
env_theme[[theme_id]] <- attrValue
}
}
make_css <- function(envir, data_attr, cls_prefix, canvas_id) {
lapply(ls(envir, all.names = TRUE, sorted = FALSE), function(x) {
check_css(
css = get(x, envir = envir),
default = "",
cls_prefix = cls_prefix,
canvas_id = canvas_id,
filter = paste0("[", data_attr, " = \"", x , "\"]")
)
})
}
#' CSS creation helper
#'
#' It allows specifying individual styles for various SVG elements.
#'
#' @param css The generic css style
#' @param text Override style for text elements (svg:text)
#' @param point Override style for point elements (svg:circle)
#' @param line Override style for line elements (svg:line, svg:polyline)
#' @param area Override style for area elements (svg:rect, svg:polygon, svg:path)
#' @param image Override style for image elements (svg:image)
#'
#' @return css as scalar character
#' @examples
#' library(ggiraph)
#'
#' girafe_css(
#' css = "fill:orange;stroke:gray;",
#' text = "stroke:none; font-size: larger",
#' line = "fill:none",
#' area = "stroke-width:3px",
#' point = "stroke-width:3px",
#' image = "outline:2px red"
#' )
#' @export
girafe_css <- function(css,
text = NULL,
point = NULL,
line = NULL,
area = NULL,
image = NULL) {
css <- paste("/*GIRAFE CSS*/ ._CLASSNAME_ {", css, "}\n")
if (!is.null(text))
css <- paste(css, paste("text._CLASSNAME_ {", text, "}\n"))
if (!is.null(point))
css <- paste(css, paste("circle._CLASSNAME_ {", point, "}\n"))
if (!is.null(line))
css <- paste(css,
paste("line._CLASSNAME_, polyline._CLASSNAME_ {", line, "}\n"))
if (!is.null(area))
css <- paste(css,
paste(
"rect._CLASSNAME_, polygon._CLASSNAME_, path._CLASSNAME_ {",
area,
"}\n"
))
if (!is.null(image))
css <- paste(css, paste("image._CLASSNAME_ {", image, "}\n"))
return(css)
}
#' Helper to check css argument, given in other functions
#' @noRd
#' @importFrom rlang is_scalar_character
check_css <- function(css,
default,
cls_prefix,
name = cls_prefix,
canvas_id = "SVGID_",
filter = NULL) {
if (is.null(css)) {
css <- default
} else if (!is_scalar_character(css)) {
stop(name, ": css must be a scalar character", call. = FALSE)
}
pattern = "\\/\\*GIRAFE CSS\\*\\/"
if (!grepl(pattern, css)) {
css <- girafe_css(css)
}
css <- gsub("_CLASSNAME_", paste0(cls_prefix, canvas_id, filter), css)
css <- gsub(pattern, "", css)
css
}
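# Illustration of how check_css() expands the _CLASSNAME_ token (the prefix,
# canvas id, and filter values here are arbitrary examples):
cat(check_css("fill:red;", default = "", cls_prefix = "hover_",
              canvas_id = "svg1", filter = "[data-id = \"x\"]"))
# -> .hover_svg1[data-id = "x"] { fill:red; }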
| 20c88ab8bc082e8c959bf2356b8dc6044871f904 | c2a66d7be82f722cf15c3cfa4c116d7ef9fa912d | /scripts/pls_mk_spm_betas_X_design.R | c04021d915c0413ac55aa71e75a30a6a86825f14 | [] | no_license | jennyrieck/NARPS | b6ebbfd54b9e5cd50c086233fe7c340ddb20cb6a | 14c514114ad30bd42b17a48c7b52c9398f204cde | refs/heads/master | 2021-07-07T19:45:17.137827 | 2020-12-28T20:37:49 | 2020-12-28T20:37:49 | 218,890,637 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,512 | r | pls_mk_spm_betas_X_design.R | (content below) |
## various changes made which need to be undone...
rm(list=ls())
library(oro.nifti)
library(TExPosition)
library(neuroim)
library(ours)
use.parametric <- T
pls.dir <- '../../pls/'
mask <- loadVolume('./masks/HarvardOxford-cort_subcort_cereb_binary_mask-thr0_2mm.nii')
mask.ind <- which(mask@.Data==1)
all.subs <- read.delim('../../event_tsvs_Nov16/participants.tsv', sep='\t',stringsAsFactors = F)
first.level.dir<-'parametric_new_preproc'
condition.labels<-c('gain_increase', 'loss_increase')
n.cond<-length(condition.labels)
nii.dir<-paste0(pls.dir,'spm_con_first_level/',first.level.dir)
spm.betas<-matrix(0,dim(all.subs)[1]*n.cond,length(mask.ind))
aggregate.design <- matrix(NA,dim(all.subs)[1]*n.cond, 3)
colnames(aggregate.design) <- c("SUBJECT","GROUP","CONDITION")
for(s in 1:dim(all.subs)[1]){
for(c in 1:n.cond){
this.con <- loadVector(paste0(nii.dir,'/',all.subs$group[s],'_',all.subs$participant_id[s],'_con_',condition.labels[c],'.nii'),mask=mask)
this.row <- (s-1)*n.cond + c
spm.betas[this.row,] <- this.con@data
aggregate.design[this.row,"SUBJECT"] <- all.subs$participant_id[s]
aggregate.design[this.row,"GROUP"] <- all.subs$group[s]
aggregate.design[this.row,"CONDITION"] <- condition.labels[c]
}
print(s)
}
rownames(aggregate.design) <- aggregate.design[,"SUBJECT"]
save(aggregate.design,file='../../rdata/aggregate.design_2mm.rda')
save(spm.betas,file='../../rdata/spm.betas_2mm.rda')
| bc2b708287b9c36d201605353f0065c834ea23fb | dbc1e9de2d73e674d7345bf863360eba20a2f9c1 | /rscript/4_va_mustela_lem_fullmodel_withindicators.R | c8a10d0148d295d25e5e832dfc7d6aed3c9378e3 | [] | no_license | efkleiven/MustelidsAndRodents- | 71b245fc1e44c38b462e0784d1b95fd970d085a3 | 822e5b4229be770ab4dc95438086bb0f2a07562c | refs/heads/main | 2023-06-16T21:08:22.643348 | 2021-06-25T09:00:53 | 2021-06-25T09:00:53 | 351,065,598 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,283 | r | 4_va_mustela_lem_fullmodel_withindicators.R | (content below) |
###################################################################################################################
## A dynamic occupancy model for interacting species with two spatial scales model analyzing data from ##
## a long-term monitoring program of small mammals on the arctic tundra ##
## by Eivind Flittie Kleiven and Frederic Barraquand ##
## ##
###################################################################################################################
rm(list=ls())
# Call jags(and other packages)
library(jagsUI)
require(rjags)
## import data
setwd("./data") # set wd to where the data is stored
load("occm_var_lem_rmNA.rda")
#load("case_study_data.RData")
yb <-occm_va_lem # change name of imported object to fit with the rest of the code
dim(yb) # check that dimensions are ok
## import hab cov
load("hab.rda")
## import seasonal cov
load("season_cov.rda")
load("season_cov_block.rda")
seas <- season_cov
seas_block <- season_cov_block
#
seas[seas==1]<-0
seas[seas==2]<-1
seas_block[seas_block==1]<-0
seas_block[seas_block==2]<-1
# give data
data <-list(nseason = dim(yb)[3], nblock = dim(yb)[2], nsite = dim(yb)[1], nsurvey = dim(yb)[4],
nout=4, y = yb, hab=hab, seas=seas, seas_block=seas_block)
# naming some parameters for loops further down in this script
nseason = dim(yb)[3]; nblock = dim(yb)[2]; nsite = dim(yb)[1]; nsurvey = dim(yb)[4]
# Initial values for state
sp_inits <- apply(yb,c(1,2,3),max)
# loop so that cases where both state 2 and state 3 are observed within the same primary occasion get initial value 4
dataL <- array(0,dim=c(nsite,nblock,nseason))
for(j in 1:nsite){
for(b in 1:nblock){
for(i in 1:nseason){
if (is.element(0, match(c(2,3),yb[j,b,i,], nomatch = FALSE, incomparables = FALSE)))
dataL[j,b,i] <- "FALSE"
else
dataL[j,b,i] <- "TRUE"
}}}
for(j in 1:nsite){
for(b in 1:nblock){
for(i in 1:nseason){
if(dataL[j,b,i]==TRUE){
sp_inits[j,b,i] <- 4}
}}}
# replace NA in initial values with the highest state
#sp_inits[is.na(sp_inits)] <- 4
# give initial values
inits=function(){list(
z = sp_inits, alphaA0=runif(1,0.1,0.9), alphaB0=runif(1,0.1,0.9),
beta0_gamA=runif(1,0.1,0.9), beta0_gamB=runif(1,0.1,0.9), beta0_gamAB=runif(1,0.1,0.9), beta0_gamBA=runif(1,0.1,0.9),
beta0_epsA=runif(1,0.1,0.9), beta0_epsB=runif(1,0.1,0.9), beta0_epsAB=runif(1,0.1,0.9), beta0_epsBA=runif(1,0.1,0.9),
beta0_GamA=runif(1,0.1,0.9), beta0_GamB=runif(1,0.1,0.9), beta0_GamAB=runif(1,0.1,0.9), beta0_GamBA=runif(1,0.1,0.9),
beta0_EpsA=runif(1,0.1,0.9), beta0_EpsB=runif(1,0.1,0.9), beta0_EpsAB=runif(1,0.1,0.9), beta0_EpsBA=runif(1,0.1,0.9),
beta=runif(28, 0.1, 0.9), ind = rep(0,14)
)}
# Parameters monitored
params <- c("gamA","gamB","gamAB","gamBA","epsA","epsB","epsAB","epsBA","psi",
"GamA","GamB","GamAB","GamBA","EpsA","EpsB","EpsAB","EpsBA", "pA","pB","z","x",
"alphaA0","alphaB0",
"beta0_gamA", "beta0_gamAB", "beta0_gamB", "beta0_gamBA",
"beta0_epsA", "beta0_epsAB", "beta0_epsB", "beta0_epsBA",
"beta0_GamA", "beta0_GamAB", "beta0_GamB", "beta0_GamBA",
"beta0_EpsA", "beta0_EpsAB", "beta0_EpsB", "beta0_EpsBA",
"beta", "ind", "pmdl" )
# MCMC settings
#ni <- 10 ; nt <- 1 ; nb <- 0 ; nc <- 4 ; na <- 0
# run model in jags
setwd("../")
#va_mustela_lemming_fullmod_withindicators <- jags(data, inits=inits, params, "fullmod_withindicators.txt", n.chains = nc,
# n.thin = nt, n.iter = ni, n.burnin = nb, n.adapt=na, parallel = T)
model <- jags.model("fullmod_withindicators.txt",
data = data,
inits = inits)
update(model, n.iter = 10)
output <- coda.samples(model = model,
variable.names = params,
n.iter = 10,
thin = 1,
n.adapt=5)
# Save model
setwd("./model_output")
save(output, file="va_mustela_lemming_fullmod_withindicators.rda") # 'output' holds the coda samples produced above
# extra stuff
output <- as.mcmc(output)
print(summary(output))
ind <- function(p){
if(p == 0) {return(t <- 0)}
else if(p == 1) {return(t <- rbind(0, 1))}
else if(p == 2) {return(t <- rbind(c(0, 0), c(1, 0), c(0, 1), c(1, 1)))}
else {
t <- rbind(cbind(ind(p - 1), rep(0, 2^(p - 1))),
cbind(ind(p - 1), rep(1, 2^(p - 1))))
return(t)
}
}
# creates all possible combinations if p = 5 covariates
# the first row of mat.modeles corresponds to the intercept-only model
# the last row of mat.modeles corresponds to the model with all covariates
mat.modeles <- ind(14)
mat.modeles
nrow(mat.modeles)
output[1,]
grep("pmdl", names(output[1,]))
pmdl <- output[,grep("pmdl", names(output[1,]))]
pmp <- apply(pmdl,2,mean) # posterior probabilities of the models
# rank the posterior model probabilities from largest to smallest
ii <- order(pmp, decreasing = T)
# display the models
res <- cbind(mat.modeles[ii,], pmp[ii])
res
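# Illustration of ind(): all 2^p indicator combinations, e.g. for p = 2
ind(2)
#      [,1] [,2]
# [1,]    0    0
# [2,]    1    0
# [3,]    0    1
# [4,]    1    1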
#~ End of script
| aaa7d3f9089470cfe6aad9eb22ac9573109a2501 | bc47e3e767aa32732018de0241c45611fbab9f7d | /man/LightResponseCurveFitter_optimLRCBounds.Rd | cbe9409188025da9f2dd9759df7d80f4c591ddf6 | [] | no_license | sundawei/REddyProc | 7fb6eb6a7c91553526cc2c387dd0481f341509c7 | 0068e19fe4c543a902a45f3dc4050c8738e57e14 | refs/heads/master | 2022-04-08T04:45:24.056822 | 2020-02-07T12:03:08 | 2020-02-07T12:03:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,051 | rd | LightResponseCurveFitter_optimLRCBounds.Rd | (content below) |
\name{LightResponseCurveFitter_optimLRCBounds}
\alias{LightResponseCurveFitter_optimLRCBounds}
\title{LightResponseCurveFitter optimLRCBounds}
\description{Optimize parameters with refitting with some fixed parameters if outside bounds}
\usage{LightResponseCurveFitter_optimLRCBounds(theta0,
parameterPrior, ..., lastGoodParameters,
ctrl)}
\arguments{
\item{theta0}{initial parameter estimate}
\item{parameterPrior}{prior estimate of model parameters}
\item{\dots}{further parameters to \code{.optimLRC}, such as \code{dsDay}}
\item{lastGoodParameters}{parameters vector of last successful fit}
\item{ctrl}{list of further controls, such as
\code{isNeglectVPDEffect = TRUE}}
}
\details{If parameters alpha or k are outside bounds (Table A1 in Lasslop 2010),
the fit is repeated with some parameters fixed
to the values from the fit of the previous window.
No parameters are reported if alpha < 0, RRef < 0, beta0 < 0,
or beta0 > 250.
No parameters are reported if the data did not contain records that
are near light saturation.
This is checked by comparing the prediction at highest PAR with the
beta parameter.}
\value{list result of optimization as of
\code{\link{LightResponseCurveFitter_optimLRCOnAdjustedPrior}} with entries
\item{theta}{ numeric parameter vector that includes the fixed components}
\item{iOpt}{ integer vector of indices of the vector that have been optimized}
\item{convergence}{ scalar integer indicating bad conditions on fitting
(see \code{\link{LightResponseCurveFitter_fitLRC}}) }}
\author{TW, MM
Department for Biogeochemical Integration at MPI-BGC, Jena, Germany <REddyProc-help@bgc-jena.mpg.de> [cph], Thomas Wutzler <twutz@bgc-jena.mpg.de> [aut, cre], Markus Reichstein <mreichstein@bgc-jena.mpg.de> [aut], Antje Maria Moffat <antje.moffat@bgc.mpg.de> [aut, trl], Olaf Menzer <omenzer@bgc-jena.mpg.de> [ctb], Mirco Migliavacca <mmiglia@bgc-jena.mpg.de> [aut], Kerstin Sickel <ksickel@bgc-jena.mpg.de> [ctb, trl], Ladislav Šigut <sigut.l@czechglobe.cz> [ctb]}
\seealso{\code{\link{LightResponseCurveFitter_fitLRC}}}
| 62aad19bba564fd660ee2cdfeed5bec53b29ba91 | 4c675085168d08e59a36e175cd56ecf4aefeebfb | /SM-(P-E) feedbacks/era5/ERA5_sm_w.R | 46344f0ce9cf541b4b2eb32536387c5ff8fc6196 | [] | no_license | shazhou09/dryland_water_availability | 3574677ccbddd77d7cc2a7577d824b966e27acf3 | 81124d54bc979fa556daf3deaa996a3ebe0c7e0b | refs/heads/master | 2022-12-20T11:34:01.888790 | 2020-08-27T18:51:44 | 2020-08-27T18:51:44 | 287,588,206 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,010 | r | ERA5_sm_w.R | (content below) |
library(ncdf4)
library(parallel)
library(plsRglm)
library(plsdof)
library(boot)
pls_reg<-function(x){
if (sum(is.na(x))>400){
return(rep(NA,each=4))
}
tt<-length(x)/2
pr<-x[1:tt]
sm<-x[(1+tt):(tt*2)]
if (max(abs(sm))<1e-3){
return(rep(NA,each=4))
}else{
tryCatch({
dim(pr)<-c(12,tt/12)
pr_mm<-apply(pr,1,mean,na.rm=T)
pr01<-rep(pr_mm,times=tt/12)
pr1<-pr-pr01
dim(pr1)<-tt
dim(sm)<-c(12,tt/12)
sm_mm<-apply(sm,1,mean,na.rm=T)
sm01<-rep(sm_mm,times=tt/12)
sm1<-sm-sm01
dim(sm1)<-tt
tm<-1:tt
data<-cbind(pr1,tm)
data<-na.omit(data)
pr02<-pr1-tm*lm(data[,1]~data[,2])$coefficients[2]
data<-cbind(sm1,tm)
data<-na.omit(data)
sm02<-sm1-tm*lm(data[,1]~data[,2])$coefficients[2]
apr1<-pr02[1:(tt-1)]
asm1<-sm02[1:(tt-1)]
apr2<-pr02[2:tt]
asm2<-sm02[2:tt]
out<-array(NA,dim=4)
data1<-cbind(asm2,apr1,asm1)
data2<-cbind(apr2,asm1,apr1)
data1<-na.omit(data1)
data2<-na.omit(data2)
reg1<-plsRglm(data1[,1],data1[,2:3],2)
reg2<-plsRglm(data2[,1],data2[,2:3],2)
reg1_boot<-bootpls(reg1, R=500, verbose=FALSE)
reg2_boot<-bootpls(reg2, R=500, verbose=FALSE)
conf1<-boot.ci(reg1_boot, conf = 0.95, type = "bca",index=2)$bca[4:5]
conf2<-boot.ci(reg2_boot, conf = 0.95, type = "bca",index=2)$bca[4:5]
out[1]<-reg1$Std.Coeffs[2]
out[2]<-reg2$Std.Coeffs[2]
if (conf1[2]*conf1[1]>0){
out[3]<-1
}
if (conf2[2]*conf2[1]>0){
out[4]<-1
}
return(out)
},error=function(e){return(rep(NA,each=4))})
}
}
##########################################
CAN<-list.files('/rigel/glab/users/sz2766/paper_four/ERA5M/var/',pattern='.nc',full.names=TRUE)
###################01#####################
nc<-nc_open(CAN[10])
v1<-nc$var[[1]]
vv<-ncvar_get(nc,v1)
dim(vv)<-c(360*181,480)
nc<-nc_open(CAN[8])
v1<-nc$var[[1]]
sm1<-ncvar_get(nc,v1)
dim(sm1)<-c(360*181,480)
v1<-nc$var[[2]]
sm2<-ncvar_get(nc,v1)
dim(sm2)<-c(360*181,480)
v1<-nc$var[[3]]
sm3<-ncvar_get(nc,v1)
dim(sm3)<-c(360*181,480)
sm<-(sm1*7+sm2*21+sm3*72)/100
#################
input1<-cbind(vv,sm)
cl<-makeCluster(getOption("cl.cores",5))
clusterEvalQ(cl,library(parallel))
clusterEvalQ(cl,library(ncdf4))
clusterEvalQ(cl,library(plsRglm))
clusterEvalQ(cl,library(plsdof))
clusterEvalQ(cl,library(boot))
out1<-parRapply(cl,input1,pls_reg)
dim(out1)<-c(4,360,181)
lat<-ncvar_get(nc,varid = 'latitude')
long<-ncvar_get(nc,varid = 'longitude')
result1<-1:4
dimlat<-ncdim_def('latitude','deg',lat)
dimlong<-ncdim_def('longitude','deg',long)
dimout1<-ncdim_def('result','',result1)
ncdem<-ncvar_def('sc','',list(dimout1,dimlong,dimlat),-9999,longname="sensitivity coefficient",prec='double')
ncout<-nc_create('/rigel/glab/users/sz2766/paper_four/ERA5M/out/ERA5M_plsr_boot_2var_month_vv.nc',ncdem)
ncvar_put(ncout,varid=ncdem,out1)
nc_close(ncout)
| 0387a9f9761947216c6b7c5091be2a140c9db327 | f3cd398ea5574667526bfdd9ddae4dcadbcb4ef4 | /man/plot.LocallyWeightedPolynomial.Rd | 66b847ecffdf8316b1974cf65eae429a94fdce0a | [] | no_license | cran/SiZer | 8448d5deefd6c36c0836051062f183bd1411ae33 | 48c153668abae423d1b9841d21ea50ac1c86bc6e | refs/heads/master | 2022-08-02T20:18:51.493445 | 2022-07-09T18:40:02 | 2022-07-09T18:40:02 | 17,693,675 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,294 | rd | plot.LocallyWeightedPolynomial.Rd | (content below) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locally.weighted.polynomial.R
\name{plot.LocallyWeightedPolynomial}
\alias{plot.LocallyWeightedPolynomial}
\title{Creates a plot of an object created by \code{locally.weighted.polynomial}.}
\usage{
\method{plot}{LocallyWeightedPolynomial}(
x,
derv = 0,
CI.method = 2,
alpha = 0.05,
use.ess = TRUE,
draw.points = TRUE,
...
)
}
\arguments{
\item{x}{LocallyWeightedPolynomial object}
\item{derv}{Derivative to be plotted. Default is 0 - which plots the smoothed function.}
\item{CI.method}{What method should be used to calculate the confidence interval about the estimated line.
The methods are from Hannig and Marron (2006), where 1 is the point-wise estimate, and 2 is
the row-wise estimate.}
\item{alpha}{The alpha level such that the CI has a 1-alpha/2 level of significance.}
\item{use.ess}{ESS stands for the estimated sample size. If at any point along the x-axis, the ESS is
too small, then we will not plot unless use.ess=FALSE.}
\item{draw.points}{Should the data points be included in the graph? Defaults to TRUE.}
\item{\dots}{Additional arguments to be passed to the graphing functions.}
}
\description{
Creates a plot of an object created by \code{locally.weighted.polynomial}.
}
| bc299eadd61a504d976fab55a00521b2aeba5840 | c7e9a7fe3ee4239aad068c6c41149a4a09888275 | /OLD_GALLERY_RSCRIPT/#46_genetic_map.R | 73d89b6852a85786cd0dffedf0e5a6c03888b083 | ["MIT"] | permissive | holtzy/R-graph-gallery | b0dfee965ac398fe73b3841876c6b7f95b4cbae4 | 7d266ad78c8c2d7d39f2730f79230775930e4e0b | refs/heads/master | 2023-08-04T15:10:45.396112 | 2023-07-21T08:37:32 | 2023-07-21T08:37:32 | 31,253,823 | 591 | 219 | MIT | 2023-08-30T10:20:37 | 2015-02-24T09:53:50 | HTML | UTF-8 | R | false | false | 1,235 | r | #46_genetic_map.R | (content below) |
#Intro
png("#46_genetic_map.png" , width = 480, height = 480 )
marker=paste("marker" , sample(seq(1,1200) , 1200 ) , sep="_")
chromosome=as.factor(sort(rep(seq(1,6) , 200)))
position=abs(c( rep(seq(1,200)+runif(200 , -20 , 100)) , rep(seq(1,200)+runif(200 , -20 , 100)) ,rep(seq(1,200)+runif(200 , -20 , 10)) ,rep(seq(1,200)+runif(200 , -20 , 100)) ,rep(seq(1,200)+runif(200 , -20 , 100)) ,rep(seq(1,200)+runif(200 , -50 , 300)) ))
type=sample(c("SNP","Dart") , 1200 , replace=T)
map=data.frame(chromosome, marker, position , type)
#Drawing the plot
par(mar=c(1,1,0,0))
par(bg="grey")
nb_K=nlevels(map$chromosome)
num=0
plot(0,0 , xlim=c(1,nb_K) , ylim = rev(range(map$position)) , bty="n" , xaxt="n" , yaxt="n" )
abline(h=seq(0 , max(map$position) , 50) , col="white" , lwd=0.2)
for (i in levels(map$chromosome)){
num=num+1
text(num-0.3 , 0 , i)
a=map[map$chromosome==i , ]
points( rep(num,nrow(a)) , a$position , pch=20 , cex=1.5, col=ifelse(a$type=="Dart" , rgb(1,0.1,0.1,0.4) , rgb(0.1,0.1,1,0.4) ) )
}
legend("bottomleft", legend = c("SNP markers" , "Darts markers" ) , col = c(rgb(0.1,0.1,1,0.4) , rgb(1,0.1,0.1,0.4)) , bty = 1 , pch=20 , pt.cex = 2, cex = 0.8, horiz = FALSE, inset = c(0.1, 0.1))
dev.off()
| d80d39fd90e6bd91f8deca92433808197bf77388 | 582f3296bc60ebaf9570701ae0d0b11d9e00884b | /man/childrenOrNothing.Rd | bdec1fa61a241206ea1491c3e91a68369c1885bd | ["MIT"] | permissive | cwhd/hdp-r | 71fa3119380a060571e56f9acf0699a4cabbb1da | 95a44cd4f3ff631a716d64ed68aae4f806339715 | refs/heads/master | 2021-06-05T00:50:21.957276 | 2018-09-30T23:41:59 | 2018-09-30T23:41:59 | 139,089,737 | 3 | 3 | NOASSERTION | 2020-02-14T06:19:17 | 2018-06-29T02:13:40 | R | UTF-8 | R | false | true | 435 | rd | childrenOrNothing.Rd | (content below) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.r
\name{childrenOrNothing}
\alias{childrenOrNothing}
\title{If a node has children return them, otherwise return nothing}
\usage{
childrenOrNothing(currentNode)
}
\arguments{
\item{currentNode}{the node you want to check}
}
\description{
Sometimes you want either the children of a node or nothing at all. This
function handles that for you.
}
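% Hypothetical usage sketch: the node class is not stated in this file;
% data.tree nodes are assumed here.
\examples{
\dontrun{
library(data.tree)
root <- Node$new("root")
root$AddChild("leaf")
childrenOrNothing(root)      # the children of "root"
childrenOrNothing(root$leaf) # a leaf, so presumably nothing is returned
}
}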
| 3c6d9f1f933ea60b791ecf29925734fc221c42b7 | 630f2306b108431bbc99fdf8ff24bed3171c4a1a | /R/SplitAt.R | 7304ed9bb866d8459e5588b2af412349b8a36ed0 | [] | no_license | senhu/FCBMA | 69820c51fde3d350de0e1e1d99630f5bad1ac5d2 | bede23284d19acba899730eb412bb4ee7cf9d6fd | refs/heads/master | 2022-01-17T23:42:33.474468 | 2019-08-05T17:34:49 | 2019-08-05T17:34:49 | 107,567,975 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 359 | r | SplitAt.R | (content below) |
#' Split a vector at a certain location/position
#'
#' Split a vector at a certain location/position
#'
#' @param x a vector
#' @param pos position within the vector x
#' @return a list of the split vectors
#' @examples
#' x <- c(1,2,3,4,5)
#' SplitAt(x, 3)
#'
#' @export SplitAt
SplitAt <- function(x, pos) unname(split(x, cumsum(seq_along(x) %in% (pos+1))))
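# Worked example of the cumsum() grouping trick above: entries after each split
# position get a new group index, which split() then collects.
SplitAt(c(1, 2, 3, 4, 5), c(2, 4)) # list(c(1, 2), c(3, 4), 5)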
| 1be3305e9bf8c4593aa6a811c64291cd33035e58 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/cwhmisc/examples/normalize.Rd.R | deca1598c4b1ece5b7f1c653042445d78d46fa6a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,535 | r | normalize.Rd.R | (content below) |
library(cwhmisc)
### Name: normalize
### Title: base power and multiplier, significant places
### Aliases: normalize normalize1 Nd sigplaces checkNormalize
### Keywords: math
### ** Examples
(xx <- c(exp(1),pi,NA, Inf, -Inf,10,100,c(1,10)*exp(1)) )
(x2 <- normalize(xx,2))
# A B C D E F G H I
# a 1.3591409 1.5707963 NA Inf Inf 1.25 1.5625 1.3591409 1.6989261
# e 1.0000000 1.0000000 0 0 0 3.00 6.0000 1.0000000 4.0000000
# b 2.0000000 2.0000000 2 2 2 2.00 2.0000 2.0000000 2.0000000
(x32 <- normalize1(xx,2))
# A B C D E F G H I
# a 0.67957046 0.785398 NA Inf Inf 0.625 0.78125 0.67957 0.849463
# e 2.00000000 2.000000 1 1 1 4.000 7.00000 2.00000 5.000000
# b 2.00000000 2.000000 2 2 2 2.000 2.00000 2.00000 2.000000
(x10 <- normalize(xx,10))
# A B C D E F G H I
# a 2.7182818 3.1415927 NA Inf Inf 1 1 2.7182818 2.7182818
# e 0.0000000 0.0000000 0 0 0 1 2 0.0000000 1.0000000
# b 10.0000000 10.0000000 10 10 10 10 10 10.0000000 10.0000000
(x7 <- normalize(xx,7))
# A B C D E F G H I
# a 2.7182818 3.1415927 NA Inf Inf 1.42857 2.0408 2.71828 3.8832598
# e 0.0000000 0.0000000 0 0 0 1.00000 2.0000 0.00000 1.0000000
# b 7.0000000 7.0000000 7 7 7 7.00000 7.0000 7.00000 7.0000000
sigplaces(-9.999) #
sigplaces(pi/100) #
all.equal(checkNormalize(x2), checkNormalize(x7)) # TRUE
| 7a9f1b965ce28aae6cb3d989f28ef65667c4d48b | 49b8ff57b4184c137dde8ed358b3372f3020d9b0 | /RStudioProjects/mbDiscoveryR/learnBN/parentsList2BN.R | 48d5480bbde0f926423931e5d39b51231d60f138 | [] | no_license | kelvinyangli/PhDProjects | c70bad5df7e4fd2b1803ceb80547dc9750162af8 | db617e0dbb87e7d5ab7c5bfba2aec54ffa43208f | refs/heads/master | 2022-06-30T23:36:29.251628 | 2019-09-08T07:14:42 | 2019-09-08T07:14:42 | 59,722,411 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 621 | r | parentsList2BN.R | (content below) |
# resultList is a list returned by netica2bnlearn, it contains nodes and parentsList
parentsList2BN = function(resultList) {
dag = empty.graph(resultList$node)
for (i in 1:length(resultList$parent)) {# add directed arc from its parents to each node i
if (length(resultList$parent[[i]]) > 0) {# only add arc if there is at least 1 parent for node i
for (j in 1:length(resultList$parent[[i]])) {# for each parent j
dag = set.arc(dag, resultList$parent[[i]][j], resultList$node[i])
}# end for j
}# end if
}# end for i
return(dag)
}
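# Small self-contained check (requires the bnlearn package for empty.graph() and
# set.arc(); the resultList here is hand-built to match the structure described above):
library(bnlearn)
rl <- list(node = c("A", "B", "C"),
           parent = list(character(0), "A", c("A", "B")))
dag <- parentsList2BN(rl)
arcs(dag) # A->B, A->C, B->C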
| df705b47e0234db8572a7807c1d656b6449dacd4 | 19be8b8e92bdcdb3a185232f27495b9acb10a3c5 | /tests/testthat/test_makePeptideTable.R | 14c51d3fbe428868f9dd385ceefceb075c45806b | ["MIT"] | permissive | bartongroup/Proteus | b087402b9fba7edd39ec992dca3c1a3e65fbcaca | b83c8a797fd13943ccd0b83db7fdd9fb491a68e7 | refs/heads/master | 2023-09-03T06:14:14.076690 | 2023-04-17T11:43:43 | 2023-04-17T11:43:43 | 93,496,297 | 37 | 6 | null | 2020-06-15T11:45:07 | 2017-06-06T08:45:18 | R | UTF-8 | R | false | false | 1,927 | r | test_makePeptideTable.R | (content below) |
library(testthat)
# expected result
tab.1 <- structure(
c(7, 7, 4, 2, 5, 5, 5, 5, 8, 7,
8, NA, 2, 4, 4, 9, 2, 2, NA, NA,
9, 5, 4, NA, 7, NA, 2, 4, 5, 3),
.Dim = c(10L, 3L),
.Dimnames = list(
c("AA", "AB", "AC", "AD", "AE", "BA", "BB", "BC", "BD", "BE"),
c("WT1", "WT2", "KO1")
)
)
tab.2 <- structure(
c(3, 8, 2, 7, 2, 8, 5, 2, 2, NA,
4, 1, 2, NA, 1, 7, NA, 2, NA, NA,
1, NA, 6, NA, 5, 6, 1, 7, 3, 2),
.Dim = c(10L, 3L),
.Dimnames = list(
c("AA", "AB", "AC", "AD", "AE", "BA", "BB", "BC", "BD", "BE"),
c("WT1", "WT2", "KO1")
)
)
# input data:
evi <- read.table("../testdata/data_makePeptide_evi.txt", header=TRUE, sep="\t")
meta <- read.table("../testdata/data_makePeptide_meta.txt", header=TRUE, sep="\t")
# make sure that even if the levels order is not correct, makePeptideTable sorts it out:
meta.ordered <- meta
meta.ordered$sample <- factor(meta.ordered$sample, levels=meta.ordered$sample)
meta.ordered$condition <- factor(meta.ordered$condition)
context("Casting evidence into peptide table")
test_that("Test makePeptide unlabelled", {
pep <- makePeptideTable(evi, meta, ncores=1)
expect_equal(pep$tab, tab.1)
expect_equal(pep$metadata[,c("experiment", "measure", "sample", "condition", "batch")], meta.ordered)
expect_equal(pep$content, "peptide")
expect_equal(pep$measures, "Intensity")
expect_equal(pep$peptides, row.names(tab.1))
expect_equal(pep$proteins, c("A", "B"))
expect_true(is(pep, "proteusData"))
})
meta$measure <- "Ratio"
test_that("Test makePeptide SILAC", {
pep <- makePeptideTable(evi, meta, aggregate.fun = aggregateMedian, measure.cols=c(ratio="Ratio"), experiment.type="SILAC", ncores=1)
expect_equal(pep$tab, tab.2)
expect_equal(pep$metadata$batch, c(1, 2, 2))
expect_equal(pep$measures, "Ratio")
expect_equal(pep$peptides, row.names(tab.2))
expect_equal(pep$proteins, c("A", "B"))
expect_true(is(pep, "proteusData"))
})
| 4e8e7024f41e65c8ab54a2cbbfd4ea9a917c07b8 | 979747f6a1623c4be3f86fb2a57b9e9f58c0ca57 | /R_Code/R - Module 17/P8130_SLR_Inf.R | f07d4b2221048eddd887194bd21245c657836d12 | [] | no_license | zl2974/latexhypothesis.github.io | be20b9e3a0d5ed726ea14df6f98f36b5b2c529e4 | e878e815d7fcd8be28e2ba0c6a696f5e8d6b087a | refs/heads/main | 2023-01-23T08:22:20.120166 | 2020-12-10T06:19:18 | 2020-12-10T06:19:18 | 308,181,142 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,474 | r | P8130_SLR_Inf.R | (content below) |
################################################################
# Biostatistical Methods I #
# Simple Linear Regression - Inferences #
################################################################
rm(list = ls())
# Load libraries
library(faraway)
library(broom)
library(dplyr)
library(ggplot2) # needed for the ggplot() calls below
# Read data 'Hospitals'
data_hosp<-read.csv("Hospital.csv")
names(data_hosp)
# Look at data structure
str(data_hosp)
# Scatter plot (Y) vs (X)
# LOS: length of stay(Y)
# BEDS: number of beds(X)
data_hosp %>%
ggplot(aes(BEDS, LOS)) + geom_point(color='blue') + theme_bw(base_size=20) +
labs(x="Number of beds", y="Length of stay (days)")
# Simple linear regression
# fit with a formula and data= so that predict(..., newdata=) works below
reg_hos <- lm(LOS ~ BEDS, data = data_hosp)
# Analyze the regression results
summary(reg_hos)
# Get the ANOVA table
anova(reg_hos)
# Residual st error: MSE=sigma^2
glance(reg_hos)$sigma
# Scatter plot with regression line overlaid
data_hosp %>%
ggplot(aes(BEDS, LOS)) + geom_point(color='blue') + theme_bw(base_size=20) +
geom_smooth(method='lm', se=FALSE, color='red') +
labs(x="Number of beds", y="Length of stay (days)")
# Scatter plot with regression line overlaid and 95% confidence bands
data_hosp %>%
ggplot(aes(BEDS, LOS)) + geom_point(color='blue') + theme_bw(base_size=20) +
geom_smooth(method='lm', se=TRUE, color='red') +
labs(x="Number of beds", y="Length of stay (days)")
# How do we calculate the 95% CI for the slope?
# Interpretation: 95% CI for the expected/mean difference in LOS for a 1-bed difference
# Get the critical t value for alpha=0.05 and n-2 df
qt(0.975,111) # In data hospital, df=n-2=113-2=111
coef<-summary(reg_hos)$coefficients[2,1]
err<-summary(reg_hos)$coefficients[2,2]
slope_int<-coef + c(-1,1)*err*qt(0.975, 111)
# CIs for both slope and intercept
confint(reg_hos)
confint(reg_hos,level=0.95)
# How do we calculate the 95% CI for 100 beds difference?
coef<-summary(reg_hos)$coefficients[2,1]
err<-summary(reg_hos)$coefficients[2,2]
slope_int100<-100*coef + c(-1,1)*(100*err)*qt(0.975, 111)
slope_int100
#############################################################################
# Calculate 95% CIs using predict function
# If 'newdata' is omitted the predictions are based on the data used for the fit, like in the case below.
pred.clim <- predict.lm(reg_hos, interval="confidence")
datapred <- data.frame(cbind(data_hosp$BEDS, data_hosp$LOS, pred.clim))
plot(datapred[,1],datapred[,2],xlab="Number of Beds", ylab="Length of stay (days)")
#abline(reg_hos,lwd=2,col=2)
lines(datapred[,1],datapred[,3], lwd=2)
lines(datapred[,1],datapred[,5], lty=1, col=3, type='l')
lines(datapred[,1],datapred[,4], lty=1, col=3,type='l')
# Calculate 95% PIs for fitted values using predict function
# Compare to prediction intervals: of course that the PIs are wider than CIs
pred.plim <- predict.lm(reg_hos, interval="prediction")
datapred1 <- data.frame(cbind(data_hosp$BEDS, data_hosp$LOS, pred.plim))
#abline(reg_hos,lwd=2,col=2)
lines(datapred1[,1],datapred1[,3], lwd=2)
lines(datapred1[,1],datapred1[,5], lty=1, col=2, type='l')
lines(datapred1[,1],datapred1[,4], lty=1, col=2,type='l')
##############################################################
# Calculate the correlation coefficient between LOS and BEDS
cor(data_hosp$LOS, data_hosp$BEDS)
# Look at the R_squared. How does it compare to the correlation? Same value, but only for SLR.
cor(data_hosp$LOS, data_hosp$BEDS)^2
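# Hedged example: CI vs PI at a single new X value (400 beds is an arbitrary
# choice); this relies on reg_hos being fit with a formula plus data= above
new_obs <- data.frame(BEDS = 400)
predict(reg_hos, newdata = new_obs, interval = "confidence")
predict(reg_hos, newdata = new_obs, interval = "prediction") # wider than the CI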
| 26610980bd7eae823b5979b2bc7e87dc5326b476 | 945f4bec180f1f77feee0e8a66b5c80bb3278340 | /modelling.R | 70918bd12657de5aa0099918bca9079d21dd5000 | [] | no_license | yuand23/wearable | 5dabd2114a9132ac9288c7cf95eb47bed331ed8b | cbfebd2aa43c571499d5de97b87000e3bcc772b6 | refs/heads/master | 2023-08-13T17:31:49.189254 | 2021-10-11T03:52:50 | 2021-10-11T03:52:50 | 236,018,788 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,827 | r | modelling.R | (content below) |
train1 <- read.csv("train1.csv",header=T,stringsAsFactors = F)
train2 <- read.csv("train2_5.csv",header=T,stringsAsFactors = F)
train3 <- read.csv("train2_6.csv",header=T,stringsAsFactors = F)
train4 <- read.csv("train3_1.csv",header=T,stringsAsFactors = F)
train5 <- read.csv("train3_2.csv",header=T,stringsAsFactors = F)
train6 <- read.csv("train4_1.csv",header=T,stringsAsFactors = F)
train7 <- read.csv("train4_2.csv",header=T,stringsAsFactors = F)
train8 <- read.csv("train5_1.csv",header=T,stringsAsFactors = F)
train9 <- read.csv("train5_2.csv",header=T,stringsAsFactors = F)
train10 <- read.csv("train6_1.csv",header=T,stringsAsFactors = F)
train11 <- read.csv("train6_2.csv",header=T,stringsAsFactors = F)
train <- rbind(train1,train2,train3,train4,train5,train6,train7,train8,train9,train10,train11)
test <- read.csv("test_multi_freq10.csv",header=T,stringsAsFactors = F)
#random forest model
set.seed(754)
rf_model <- randomForest::randomForest(factor(type) ~ angv_meanx + angv_meany + angv_meanz + angv_meannorm +
angv_stdx+ angv_stdy+ angv_stdz+ angv_stdnorm+ grava_meanx+ grava_meany +
grava_meanz + grava_meannorm+ grava_stdx+ grava_stdy+ grava_stdz+ grava_stdnorm,
data = train)
dev.new()
par(mfrow=c(1,1))
par(mar=c(2,2,2,2))
plot(rf_model)
importance <- randomForest::importance(rf_model)
varImportance <- data.frame(Variables = row.names(importance),
Importance = round(importance[ ,'MeanDecreaseGini'],2))
library(dplyr)
rankImportance <- varImportance %>%
mutate(Rank = paste0('#',dense_rank(desc(Importance))))
prediction <- predict(rf_model,test)
result <- data.frame(test$time,prediction)
colnames(result) <- c("time","prediction")
write.csv(result,file='prediction.csv',row.names=F)
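# Optional in-sample sanity check (assumes 'type' is the label column in train):
table(predicted = predict(rf_model, train), actual = train$type)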
| e2f37e61458bbacce3af7a0f35b2018440fec42f | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/1780_7/rinput.R | d085b1f4ab131986590dc31b8e653f0c195aec7d | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | (content below) |
library(ape)
testtree <- read.tree("1780_7.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1780_7_unrooted.txt")
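# Self-contained illustration of unroot() with a simulated tree (no input file
# needed; rtree() draws a random rooted tree):
tr <- rtree(5)
is.rooted(tr)         # TRUE
is.rooted(unroot(tr)) # FALSE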
| 326f5f9265b74fdcc97671d0fb4b675138061242 | 184a3015c5a682e64e3e506528a44af3aaba918b | /R/01-featsels.R | b4a27766ecfa864d7af667ff39cdbb3cf7576c91 | [] | no_license | pfnaibert/it-featsel | 526e584e4164e39878a2a8b647b0d436d9dff0da | 79f8d785c5d09d67a0732ce183ddf4518cfe20d4 | refs/heads/master | 2022-11-27T04:02:49.484297 | 2020-08-04T17:53:45 | 2020-08-04T17:53:45 | 271,356,393 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 631 | r | 01-featsels.R | (content below) |
#!/usr/bin/env Rscript
###########################################
library(leaps)
library(glmnet)
source("./funs-ports-IT.R")
#####################################
# import
spy5 <- rets.sp("./data/spy5-rets.rds")
ibov <- rets.ibov("./data/ibov-rets.rds")
#####################################
# SP500
spy5.coefs <- featsels.spy5(spy5, J=500)
saveRDS(spy5.coefs, "./data/spy5-coefs.rds")
#####################################
# IBOV
ibov.coefs <- featsels.ibov(ibov, J=250)
saveRDS(ibov.coefs, "./data/ibov-coefs.rds")
#################################################
cat(" \n ***** FIM DO SCRIPT ****** \n")
print(Sys.time())
| f999e227b604e3df4f595c3ff375801d9cd76e9d | 66a2afd9c0dab1d55e6d236f3d85bc1b61a11a66 | /man/combine_parent_and_child_resultsets.Rd | 3ce097dff7062e17d337985f63f96de5fb800689 | ["MIT"] | permissive | StevenMMortimer/salesforcer | 833b09465925fb3f1be8da3179e648d4009c69a9 | a1e1e9cd0aa4e4fe99c7acd3fcde566076dac732 | refs/heads/main | 2023-07-23T16:39:15.632082 | 2022-03-02T15:52:59 | 2022-03-02T15:52:59 | 94,126,513 | 91 | 19 | NOASSERTION | 2023-07-14T05:19:53 | 2017-06-12T18:14:00 | R | UTF-8 | R | false | true | 1,064 | rd | combine_parent_and_child_resultsets.Rd | (content below) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-query.R
\name{combine_parent_and_child_resultsets}
\alias{combine_parent_and_child_resultsets}
\title{Bind the records from nested parent-to-child queries}
\usage{
combine_parent_and_child_resultsets(parents_df, child_df_list)
}
\arguments{
\item{parents_df}{\code{tbl_df}; a dataset with 1 row per parent record from
the query recordset, that can be joined with its corresponding child records.}
\item{child_df_list}{\code{list} of \code{tbl_df}; a list of child records that
is the same length as the number of rows in the parent_df.}
}
\value{
\code{tbl_df}; a data frame of parent data replicated for each child
record in the corresponding list.
}
\description{
This function accepts a \code{data.frame} with one row representing each
parent record returned by a query with a corresponding list element in the
list of child record results stored as \code{tbl_df} in a list.
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\keyword{internal}
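% Hedged sketch of the input shapes described above (the IDs are made up, and
% the function is internal, hence \code{:::}):
\examples{
\dontrun{
library(dplyr)
parents_df <- tibble(Id = c("001A", "001B"), Name = c("Acme", "Globex"))
child_df_list <- list(
  tibble(ContactId = c("003X", "003Y")),
  tibble(ContactId = "003Z")
)
# each parent row is replicated once per corresponding child row
salesforcer:::combine_parent_and_child_resultsets(parents_df, child_df_list)
}
}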
| 8af7e330f2d73680ddf02f3902667ef14174ed51 | 1da341f341c35d89da5752dc39ef74aa5f87413e | /project2/plot2.R | d2709cfc3d8428c2d9bc0699f0d97f9c4b936d17 | [] | no_license | kscraja/ExData_Plotting1 | 2f85c57081971e19b63700dbcda1149824f22789 | d82511c8fc29e579b6094c0def02eb5fbc8f2fa7 | refs/heads/master | 2021-01-16T20:30:33.539240 | 2014-06-22T04:38:05 | 2014-06-22T04:38:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 857 | r | plot2.R | (content below) |
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
NEI$fips <- as.factor(NEI$fips)
NEI$SCC <- as.factor(NEI$SCC)
NEI$Pollutant <- as.factor(NEI$Pollutant)
NEI$type <- as.factor(NEI$type)
# 2. Have total emissions from PM2.5 decreased in the
# Baltimore City, Maryland (fips == "24510")
# from 1999 to 2008? Use the base
# plotting system to make a plot answering this question.
# selecting only baltmore data
bmData <- subset(NEI, fips == "24510")
yearlySplit <- split(bmData, bmData$year)
yearlyEmissionTotals <- sapply(yearlySplit, function(d) {
sum(d$Emissions)
})
png(file="plot2.png")
plot(names(yearlyEmissionTotals), yearlyEmissionTotals, pch=19, type="l",
xaxt="n", xlab="Year", ylab="Total Emission", main="Yearly Total Emissions - Baltimore")
axis(1, at=names(yearlyEmissionTotals))
dev.off()
|
21349d027ca8eece5cd1d3e0c870c23bf2149970
|
c2b457692980edf3760d9209b977bcaeb49ca160
|
/man/virtualRepo-funs.Rd
|
6a392768fba8875f805b7972ac9ac00e8c73bd31
|
[] |
no_license
|
arturochian/gRAN
|
9d19e2990a45c6fb20ff16bda9993465d4c39a0f
|
8a736cc4f030274871c1c69a8d096e854889661e
|
refs/heads/master
| 2021-01-17T21:40:39.228675
| 2015-02-11T18:12:33
| 2015-02-11T18:12:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,080
|
rd
|
virtualRepo-funs.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/virtualRepo.R
\name{sessionRepo}
\alias{getPkgVersions}
\alias{getSessionPackages}
\alias{makeVirtualRepo}
\alias{sessionRepo}
\title{Create a virtual repository containing only the specified package versions.
Packages are located via the \code{getPkgVersions} function, which will look in the following places:
\enumerate{
\item{The \code{repo} repository and associated notrack directory}
\item{The current CRAN repository}
\item{The CRAN archives of previous source packages}
\item{The current Bioconductor repository}
\item{The Bioconductor SVN history}
\item{The SCM (SVN/git) history for a GRAN package}
}
When found, package versions not already in the GRAN repository proper or notrack directory are built into the \code{repo}'s associated notrack directory.
The repository is then constructed as a sibling to \code{repo}'s repository using only symbolic links. This allows many virtual repositories to contain the same versions of packages without physical file duplication.}
\usage{
sessionRepo(sinfo = sessionInfo(), repo_dir, doi = NULL, dir, name = NULL,
replace = FALSE, stoponfail = TRUE, GRepo = GRANRepo$repo,
install = FALSE, libloc = NULL)
getSessionPackages(sinfo, dir, GRepo = NULL, stoponfail = FALSE)
getPkgVersions(pkgs, dir, GRepo = NULL, stoponfail = FALSE,
pkgcol = "Package", verscol = "Version")
makeVirtualRepo(pkgdf, repo_dir, doi, dir, name = NULL, replace = FALSE,
stoponfail = TRUE, GRepo, install = FALSE, libloc = NULL, Rvers = "",
pkgcol = "Package", verscol = "Version")
}
\arguments{
\item{sinfo}{A sessionInfo object or character vector containing the text from printing such an object}
\item{repo_dir}{The base directory to create the virtual repository under.}
\item{doi}{A DOI associated with the session info. If specified when name is NULL, the repository name is set to the doi with "/" replaced with "_".}
\item{dir}{The directory to download/build package tarballs into during the search process}
\item{name}{The name of the repository to create. Defaults to a 32 character hash generated from \code{sinfo}}
\item{replace}{logical. Indicates whether the newly created virtual repository should overwrite any existing virtual repositories of the same name}
\item{stoponfail}{logical. Indicates whether the function should throw an error if it is unable to retrieve one or more of the specified package versions. Defaults to \code{TRUE}}
\item{GRepo}{(optional) a \code{GRANRepository} to act as a parent to the
virtual repository. If specified, this is used to: search for pkg versions, determine where to download/build newly located pkg versions, and set the parent directory of the virtual repo.}
\item{install}{should the packages be immediately installed into
\code{libloc}. Defaults to FALSE}
\item{libloc}{If packages are being installed, a library location to
contain only the packages for this set of package versions. In general this should
*not* be your standard library location.}
\item{pkgs}{A data.frame of package versions to locate and/or build}
\item{pkgcol}{The column in \code{pkgs} or \code{pkgsdf} containing the package names}
\item{verscol}{The column in \code{pkgs} or \code{pkgsdf} containing the package versions}
\item{pkgdf}{A data.frame containing the package names and versions to populate the repository with}
\item{Rvers}{The R version to build into the repository structure, if desired. Defaults to no specific version (suitable for src packages).}
\item{pkgcol}{Column in the data frame that contains the package name}
\item{verscol}{Column in the data frame that contains the package version}
}
\value{
for \code{getPkgVersions} and \code{getSessionPackages}, a character vector with the full path to each downloaded/built tar.gz file.
for \code{makeVirtualRepo} and \code{sessionRepo}, the path to the created virtual repository
}
\description{
makeVirtualRepo
}
\details{
Create a virtual repository which contains only the exact packages specified
}
\author{
Gabriel Becker
}
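\examples{
\dontrun{
## A hypothetical sketch (paths and the repository name are illustrative):
## rebuild the package environment of the current R session as a virtual
## repository under /repos/gran.
repo <- sessionRepo(sinfo = sessionInfo(), repo_dir = "/repos/gran",
                    dir = tempdir(), name = "my-session-repo")
}
}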
|
37319f3ae07dcd66ee16123ca24b4da9464622a8
|
4855e806d6a5b65643c49ed3b602db276fe76d30
|
/library/shinyapps/R/servers.R
|
d8dcbdbf41e4d812801e934ac11c91622956dd2f
|
[] |
no_license
|
Cococatty/InteractiveMap
|
5701a607a7605a4958c037b6b5559841c67126eb
|
698b173ab0393cc38fdfd69f09b169dd87fd9f3d
|
refs/heads/master
| 2021-01-10T18:14:56.274796
| 2016-02-17T09:02:23
| 2016-02-17T09:02:45
| 47,664,845
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,160
|
r
|
servers.R
|
#' Server Management Functions
#'
#' Functions to manage the list of known servers to which
#' \pkg{rsconnect} can deploy and manage applications.
#'
#' Register a server with \code{addServer} or \code{discoverServers} (the latter
#' is useful only if your administrator has configured server autodiscovery).
#' Once a server is registered, you can connect to an account on the server
#' using \code{\link{connectUser}}.
#'
#' The \code{servers} and \code{serverInfo} functions are provided for viewing
#' previously registered servers.
#'
#' There is always at least one server registered (the \code{shinyapps.io}
#' server).
#'
#' @param name Optional nickname for the server. If none is given, the nickname
#' is inferred from the server's hostname.
#' @param url Server's URL. Should look like \code{http://servername/} or
#' \code{http://servername:port/}.
#' @param local Return only local servers (i.e. not \code{shinyapps.io})
#' @param quiet Suppress output and prompts where possible.
#' @return
#' \code{servers} returns a data frame with registered server names and URLs.
#' \code{serverInfo} returns a list with details for a particular server.
#' @rdname servers
#' @examples
#' \dontrun{
#'
#' # register a local server
#' addServer("http://myrsconnect/", "myserver")
#'
#' # list servers
#' servers(local = TRUE)
#'
#' # connect to an account on the server
#' connectUser(server = "myserver")
#' }
#' @export
servers <- function(local = FALSE) {
configFiles <- list.files(serverConfigDir(), pattern=glob2rx("*.dcf"),
full.names = TRUE)
parsed <- lapply(configFiles, read.dcf)
locals <- do.call(rbind, parsed)
if (local) {
locals
} else {
rbind(locals, as.data.frame(shinyappsServerInfo(), stringsAsFactors = FALSE))
}
}
serverConfigDir <- function() {
rsconnectConfigDir("servers")
}
serverConfigFile <- function(name) {
normalizePath(file.path(serverConfigDir(), paste(name, ".dcf", sep="")),
mustWork = FALSE)
}
shinyappsServerInfo <- function() {
list(name = "shinyapps.io",
url = getOption("shinyapps.shinyapps_url", "https://api.shinyapps.io/v1"))
}
#' @rdname servers
#' @export
discoverServers <- function(quiet = FALSE) {
# TODO: Better discovery mechanism?
discovered <- getOption("rsconnect.local_servers", "http://localhost:3939/__api__")
# get the URLs of the known servers, and silently add any that aren't yet
# present
existing <- servers()[,"url"]
introduced <- setdiff(discovered, existing)
lapply(introduced, function(url) { addServer(url, quiet = TRUE) })
if (!quiet && length(introduced) > 0) {
message("Discovered ", length(introduced),
(if (length(introduced) == 1) "server" else "servers"), ":")
lapply(introduced, message)
} else if (!quiet) {
message("No new servers found.")
}
invisible(introduced)
}
getDefaultServer <- function(local = FALSE, prompt = TRUE) {
existing <- servers(local)
# if there are no existing servers, silently try to discover one to work with
if (length(existing) == 0 || nrow(existing) == 0) {
discoverServers(quiet = TRUE)
existing <- servers(local)
}
# if exactly one server exists, return it
if (nrow(existing) == 1) {
return(list(name = as.character(existing[,"name"]),
url = as.character(existing[,"url"])))
}
# no default server, prompt if there are multiple choices
if (nrow(existing) > 1 && prompt && interactive()) {
name <- as.character(existing[1,"name"])
message("Registered servers: ", paste(existing[,"name"], collapse = ", "))
input <- readline(paste0(
"Which server (default '", name ,"')? "))
if (nchar(input) > 0) {
name <- input
}
return(serverInfo(name))
}
}
#' @rdname servers
#' @export
addConnectServer <- function(url, name = NULL, quiet = FALSE) {
addServer(ensureConnectServerUrl(url), name, quiet)
}
#' @rdname servers
#' @export
addServer <- function(url, name = NULL, quiet = FALSE) {
if (!isStringParam(url))
stop(stringParamErrorMessage("url"))
serverUrl <- parseHttpUrl(url)
# TODO: test server by hitting URL and getting config?
# if no name is supplied for the server, make one up based on the host portion
# of its URL
if (is.null(name)) {
name <- serverUrl$host
if (!quiet && interactive()) {
input <- readline(paste0(
"Enter a nickname for this server (default '", name, "'): "))
if (nchar(input) > 0) {
name <- input
}
}
}
# write the server info
configFile <- serverConfigFile(name)
write.dcf(list(name = name,
url = url),
configFile)
if (!quiet) {
message("Server '", name, "' added successfully: ", url)
}
}
#' @rdname servers
#' @export
removeServer <- function(name) {
if (!isStringParam(name))
stop(stringParamErrorMessage("name"))
configFile <- serverConfigFile(name)
if (file.exists(configFile))
unlink(configFile)
else
warning("The server '", name,"' is not currently registered.")
}
#' @rdname servers
#' @export
serverInfo <- function(name) {
if (!isStringParam(name))
stop(stringParamErrorMessage("name"))
# there's no config file for shinyapps.io
if (identical(name, "shinyapps.io")) {
return(shinyappsServerInfo())
}
configFile <- serverConfigFile(name)
if (!file.exists(configFile))
stop(missingServerErrorMessage(name))
serverDcf <- readDcf(serverConfigFile(name), all=TRUE)
info <- as.list(serverDcf)
info
}
missingServerErrorMessage <- function(name) {
paste0("server named '", name, "' does not exist")
}
clientForAccount <- function(account) {
if (account$server == shinyappsServerInfo()$name)
lucidClient(shinyappsServerInfo()$url, account)
else {
server <- serverInfo(account$server)
connectClient(server$url, account)
}
}
ensureConnectServerUrl <- function(url) {
# ensure 'url' ends with '/__api__'
if (!grepl("/__api__$", url))
url <- paste(url, "/__api__", sep = "")
# collapse any duplicated slashes before '__api__' (e.g. "http://host//__api__" -> "http://host/__api__")
url <- gsub("(/+__api__)$", "/__api__", url)
url
}
|
0c938d25017d36ff9e2ebaa07a12670eafab35fc
|
99e582a518557a05d4f162325cf956d3edefca28
|
/plot5.R
|
6434833aa4ab2c8b20a9ba48f5ef94a28c5469a1
|
[] |
no_license
|
kandsar/Plotting_in_R
|
53ea5dfec6f4def667fb9afaaf6c1afff5e529b7
|
fe8e1285b77f996d0a4529f22c13e339e67258f3
|
refs/heads/master
| 2021-01-09T20:32:11.895389
| 2016-07-01T23:18:57
| 2016-07-01T23:18:57
| 62,424,440
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 618
|
r
|
plot5.R
|
plot5 <- function(){
file2 <- "Source_Classification_Code.rds"
file1 <- "summarySCC_PM25.rds"
library(ggplot2)
dataset1 <- readRDS(file1)
dataset2 <- readRDS(file2)
dataset <- merge(dataset1, dataset2, by="SCC")
dataset <- dataset[dataset$fips == "24510",]
road_temp <- grepl("ON-ROAD", dataset$type, ignore.case=T)
dataset <- dataset[road_temp, ]
Emission_sum <- tapply(dataset$Emissions, dataset$year, sum)
years <- c(1999,2002,2005,2008)
png("plot5.png", height=480, width=480)
barplot(Emission_sum, col=years, main="Emission due to motor vehicles in USA", xlab="year", ylab="Emission")
dev.off()
}
|
ec28b0187de662b810e7f4880fc061aaea6da89f
|
ab9f6a62532a0e79698fe5171732c610b3eb4c73
|
/report_ward_ranking/ward table.R
|
54cf5571b065369a7fa92c37ba35561980657752
|
[
"MIT"
] |
permissive
|
Chicago/census2020_ward_rpt
|
75430679edb6a07a5b95a5cc41efe0ffc4eea76a
|
6672fb1949d2e1cf804cf53de483df71a6effa3d
|
refs/heads/master
| 2021-01-06T17:42:15.150071
| 2021-01-04T21:58:52
| 2021-01-04T21:58:52
| 241,420,345
| 2
| 1
|
MIT
| 2020-04-10T21:41:23
| 2020-02-18T17:11:47
|
HTML
|
UTF-8
|
R
| false
| false
| 2,324
|
r
|
ward table.R
|
##
## Quick thing to get ward stats into excel
##
rm(list=ls())
library(civis)
library(data.table) # copy(), setkey(), setnames(), setcolorder() are used below
library(geneorama)
library(colorspace)
sourceDir("functions")
## Steps to read civis data
## For key setup, see https://civisanalytics.github.io/civis-r/
source("config/setkey.R")
civis_ward_table <- read_civis_query("select * from cic.ward_visualization_table")
d <- copy(civis_ward_table)
setkey(d, ward)
d$hover_text <- NULL
d$rank <- 50 - rank(d$percent_to_target) + 1
colnames(d)
setnames(d, "civis_2020_target", "civis_2020_prediction")
setnames(d, "pct_spanish_speaking", "percent_spanish_speaking")
setcolorder(d,c("ward", "current_response_rate", "civis_2020_prediction",
"adjusted_civis_2020_target", "percent_to_target", "rank",
"low_response_score",
"mail_return_rate_cen_2010", "return_rate_cen_2020",
"tot_occp_units_acs_13_17", "counted_households", "uncounted_households",
"percent_counted", "percent_uncounted", "percent_spanish_speaking"))
setnames(d, gsub("_", " ", colnames(d)))
for(l in letters){
setnames(d,
gsub(paste0(" ", l),
paste0(" ", toupper(l)),
colnames(d)))
setnames(d,
gsub(paste0("^", l),
paste0(toupper(l)),
colnames(d)))
}
setnames(d, gsub("Acs", "ACS", colnames(d)))
setnames(d, gsub("Cen ", "Census ", colnames(d)))
pcts <- c('Current Response Rate', 'Civis 2020 Prediction', 'Adjusted Civis 2020 Target',
'Mail Return Rate Census 2010', 'Return Rate Census 2020',
'Low Response Score', 'Percent Counted', 'Percent Uncounted', 'Percent To Target',
'Percent Spanish Speaking')
cmmas <- c('Tot Occp Units ACS 13 17', 'Counted Households', 'Uncounted Households')
d[ , eval(pcts) := lapply(.SD, function(x) x / 100), .SDcols = pcts]
d[ , eval(cmmas) := lapply(.SD, prettyNum, big.mark = ","), .SDcols = cmmas]
# clipper(d)
pal <- colorRampPalette(c("red", "darkorange", "gold", "limegreen","forestgreen"))
cols <- pal(100)
plot(1:100,col=cols, pch = 16)
v <- d$`Percent To Target`
# # v <-
# v <- sort(v)
# (max(v) -v)/diff(range(v))
# intervals::
# v
library(leaflet)
pal <- colorNumeric(palette = cols, domain = v)
pal(v)
plot(1:50,col=pal(v), pch=16)
d$col <- pal(v)
# clipper(d)
|
07338b82c02ed1f24cd912acfa05c657bef1b1b4
|
ec4880500b824b243beefcfcb514d679d45ad4c6
|
/man/registerFilter.Rd
|
623eeba21d2eab4424f5f1842c16ddbb42b0a98a
|
[] |
no_license
|
cran/gridSVG
|
7091d0a05a6195312f1333a270950c4e92fbda85
|
7b88fca2e80694d20393bcd8bf9424e84b5673bb
|
refs/heads/master
| 2023-03-15T21:02:22.077160
| 2023-03-09T22:20:02
| 2023-03-09T22:20:02
| 17,696,511
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 814
|
rd
|
registerFilter.Rd
|
\name{registerFilter}
\alias{registerFilter}
\title{
Create the definition of a filter effect.
}
\description{
A feature of SVG is that elements can be filtered using filter effects
defined somewhere in the document. The purpose of this function is to
create the definition of a filter effect so that it can be referred to
by grobs drawn by gridSVG.
}
\usage{
registerFilter(label, filter)
}
\arguments{
\item{label}{
A character identifier for the definition.
}
\item{filter}{
A \code{filter} object, produced by the \code{\link{filterEffect}}
function.
}
}
\details{
When registering a filter, all locations and dimensions that filter
effects refer to become fixed.
}
\value{
None.
}
\author{
Simon Potter
}
\seealso{
\code{\link{grid.filter}}, \code{\link{filterEffect}}.
}
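\examples{
\dontrun{
## A minimal hypothetical sketch: build a Gaussian blur filter effect and
## register it under a label so that grobs drawn by gridSVG can refer to it
## later, e.g. via grid.filter().
fe <- filterEffect(feGaussianBlur(sd = 2))
registerFilter("blurEffect", fe)
}
}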
|
942134e396e21a20fe428190b8e1a1a1858af1ca
|
c1bc5902762dfb9e37629e7d169713210d031487
|
/17.聚类分析.R
|
701be73628db59a05dc88434b9d3e4202d08b996
|
[] |
no_license
|
lzuh/Machine-Learning-and-R-Language-Applications-by-Qiang-Chen-
|
61b532ab843d55ec6aae6fa14d479b9a3134e06d
|
d7e0dddddeba4840cc3815cb2c365909a3b907f0
|
refs/heads/main
| 2023-02-14T15:02:13.450656
| 2021-01-17T03:38:45
| 2021-01-17T03:38:45
| 457,287,734
| 1
| 0
| null | 2022-02-09T09:18:05
| 2022-02-09T09:18:04
| null |
UTF-8
|
R
| false
| false
| 2,027
|
r
|
17.聚类分析.R
|
# Chapter 17 homework
rm(list=ls())
# 17.1 K-means analysis on iris
data(iris)
kfit <- kmeans(iris[,-5],3,nstart = 20)
(table <- table(Predicted=kfit$cluster,Actual=iris$Species))
(accuracy <- sum(diag(table))/sum(table))
# 17.2 Cluster the faithful data
data(faithful)
# (1) Draw a scatter plot
plot(faithful,main="Data of Old Faithful Geyser")
par(mar=c(5,5,5,5))
# (2) Cluster with K=2 and plot the result
fit <- kmeans(faithful,2)
plot(faithful,col=fit$cluster,main="Estimated Clusters(K=2)")
# (3) Cluster with K=3 and plot the result
fit <- kmeans(faithful,3)
plot(faithful,col=fit$cluster,main="Estimated Clusters(K=3)")
# (4) Choose K by the elbow method
SSE <- numeric(15)
set.seed(1)
for (k in 1:15){
fit <- kmeans(faithful,k,nstart=20)
SSE[k] <- fit$tot.withinss
}
plot(1:15,SSE,xlab="K",type="b",main = "K-means Clustering")
abline(v=which.min(SSE),lty=2)
# (5) Choose K by the BIC information criterion
BIC <- SSE+2*log(272)*(1:15)
plot(1:15,BIC,xlab="K",type="b",main = "K-means Clustering")
abline(v=which.min(BIC),lty=2)
# 17.3 Hierarchical clustering of faithful
# (1) Complete linkage, with dendrogram
hc.complete <- hclust(dist(faithful),method = "complete")
par(mar=c(1,5,3,1))
plot(hc.complete,main = "Complete Linkage",cex=0.9,xlab="",sub="")
# (2) Average linkage, with dendrogram
hc.average <- hclust(dist(faithful),method = "average")
plot(hc.average,main = "Average Linkage",cex=0.9,xlab="",sub="")
# (3) Single linkage, with dendrogram
hc.single <- hclust(dist(faithful),method = "single")
plot(hc.single,main = "Single Linkage",cex=0.9,xlab="",sub="")
# (4) Centroid linkage, with dendrogram
hc.centroid <- hclust(dist(faithful),method = "centroid")
plot(hc.centroid,main = "Centroid Linkage",cex=0.9,xlab="",sub="")
# (5) Judging from the dendrograms, which linkage works worst?
# I think it is single linkage
# (6) For complete linkage, assume K=2 and show the clusters in a scatter plot
fit <- cutree(hc.complete, k=2)
plot(faithful,col=fit,main="Data of Old Faithful Geyser")
par(mar=c(5,5,5,5))
|
a26b31abd624f532ec353bfc66072be9fd4f087d
|
c36842d81ca5df57da61b263dd639fb8ac9ae096
|
/src/main/R/thesis/chapter-3/model/Ant Library.r
|
97cd1444ef33c1cfc8d26b8ecc7dc09dea97c06f
|
[] |
no_license
|
jimbarritt/bugsim
|
ebbc7ee7fb10df678b6c3e6107bf90169c01dfec
|
7f9a83770fff9bac0d9e07c560cd0b604eb1c937
|
refs/heads/master
| 2016-09-06T08:32:19.941440
| 2010-03-13T10:13:50
| 2010-03-13T10:13:50
| 32,143,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,207
|
r
|
Ant Library.r
|
cat("Ant Library (Version 1.0)\n")
library(lattice)
AntLibrary.loadBaitCabbages<-function(layoutFileName, experimentPlan, iterationNumber=1) {
baitLayout.df<-read.csv(layoutFileName)
baits.df<-experimentPlan@iterations[[iterationNumber]]@cabbages.df
merged.df<-merge(baitLayout.df, baits.df, by.x="Plant.ID", by.y="Id")
merged.df$Simulation.Egg.Count<-merged.df$Egg.Count
merged.df$Egg.Count<-NULL
return (merged.df)
}
AntLibrary.outputBaitMatrix<-function(merged.df) {
rowCount<-max(merged.df$row)
colCount<-max(merged.df$col)
dimnames<-list(1:rowCount, 1:colCount)
mx<-matrix(0, nrow=rowCount, ncol=colCount, dimnames=dimnames)
for (irow in 1:rowCount) {
row<-subset(merged.df, merged.df$row==irow)
for (icol in 1:colCount) {
cat(row$Simulation.Egg.Count[[icol]])
if (icol < colCount) {
cat("\t")
}
mx[irow, icol]<-row$Simulation.Egg.Count[[icol]]
}
cat ("\n")
}
return (mx)
}
AntLibrary.plotSurface<-function(mx, zmax=15, levels=1) {
x<-1:length(mx[,1])
y<-1:length(mx[1,])
filled.contour(x, y,mx,zlim=c(0,zmax),col=topo.colors(zmax), nlevels=zmax/levels,
xlab="X",
ylab="Y",
plot.axes = {
axis(1, c(1, 7, 14))
axis(2, c(1, 7, 14))
}
)
}
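# Usage sketch (hypothetical inputs -- the layout CSV and the experiment plan
# object come from your own experiment setup):
# merged.df <- AntLibrary.loadBaitCabbages("bait-layout.csv", experimentPlan)
# mx <- AntLibrary.outputBaitMatrix(merged.df)
# AntLibrary.plotSurface(mx, zmax = 15)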
|
d5be7d996a2ceb89cf891be86641241b367734f6
|
81f49f2828dd48350528bb5b17077b7af486461b
|
/R/randomCoefplot.R
|
2b052b26d569a7f1d0d173ba5b4618b4fcd9ccff
|
[] |
no_license
|
JenniNiku/gllvm
|
497a9a0b6be94080a47803afcf7948a53d33125c
|
9eaf0f66605e32f70ffd050b2c0c3bb051866b81
|
refs/heads/master
| 2023-08-03T11:15:33.087171
| 2023-08-01T06:45:42
| 2023-08-01T06:45:42
| 91,061,910
| 39
| 16
| null | 2023-06-13T09:58:56
| 2017-05-12T07:06:17
|
R
|
UTF-8
|
R
| false
| false
| 5,667
|
r
|
randomCoefplot.R
|
#' @title Plot random slope coefficients
#' @description Plots random slopes and their prediction intervals.
#'
#' @param object an object of class 'gllvm'.
#' @param y.label logical, if \code{TRUE} (default) colnames of y with respect to coefficients are added to plot.
#' @param which.Xcoef vector indicating which covariate coefficients will be plotted. Can be a vector of covariate names or numbers. Defaults to NULL, in which case all covariate coefficients are plotted.
#' @param cex.ylab the magnification to be used for axis annotation relative to the current setting of cex.
#' @param mfrow same as \code{mfrow} in \code{par}. If \code{NULL} (default) it is determined automatically.
#' @param mar vector of length 4, which defines the margin sizes: \code{c(bottom, left, top, right)}. Defaults to \code{c(4,5,2,1)}.
#' @param xlim.list list of vectors of length two defining the x-axis interval in each covariate plot. Defaults to NULL, in which case the interval is set by the range of the point estimates and confidence intervals.
#' @param order logical, if \code{TRUE} (default), coefficients are sorted according to the point estimates
#' @param ... additional graphical arguments.
#'
#' @author Jenni Niku <jenni.m.e.niku@@jyu.fi>, Francis K.C. Hui, Bert van der Veen, Sara Taskinen
#'
#' @examples
#' \dontrun{
#'## Load a dataset from the mvabund package
#'data(antTraits)
#'y <- as.matrix(antTraits$abund)
#'X <- as.matrix(antTraits$env)
#'TR <- antTraits$traits
#'# Fit model with random slopes
#'fitF <- gllvm(y = y, X = X, TR = TR,
#' formula = ~ Bare.ground + Bare.ground : Webers.length,
#' family = poisson(), randomX = ~ Bare.ground)
#'randomCoefplot(fitF)
#'}
#'
#'@aliases randomCoefplot randomCoefplot.gllvm
#'@export
#'@export randomCoefplot.gllvm
randomCoefplot.gllvm <- function(object, y.label = TRUE, which.Xcoef = NULL, cex.ylab = 0.5, mfrow = NULL, mar = c(4,6,2,1), xlim.list = NULL, order = FALSE, ...)
{
if (any(class(object) != "gllvm"))
stop("Class of the object isn't 'gllvm'.")
if ((is.null(object$Xrandom) || is.null(object$randomX)) && isFALSE(object$randomB))
stop("No random covariates in the model.")
if((object$num.lv.c+object$num.RR)==0){
if(is.null(which.Xcoef))which.Xcoef <- c(1:NROW(object$params$Br))
Xcoef <- as.matrix(t(object$params$Br)[,which.Xcoef,drop=F])
cnames <- colnames(object$Xr[,which.Xcoef,drop=F])
k <- length(cnames)
if(is.null(colnames(object$y)))
colnames(object$y) <- paste("Y",1:NCOL(object$y), sep = "")
m <- ncol(object$y)
Xc <- Xcoef
if((object$method %in% c("VA", "EVA"))){
object$Ab <- object$Ab+CMSEPf(object)$Ab
# object$Ab <- object$Ab+sdB(object)
sdXcoef <- t(sqrt(apply(object$Ab,1,diag)))
} else {
sdXcoef <- t(sqrt(object$predict$Br))
}
sdXcoef <- sdXcoef[,which.Xcoef,drop=F]
if (is.null(mfrow) && k > 1)
mfrow <- c(1, k)
if (!is.null(mfrow))
par(mfrow = mfrow, mar = mar)
if (is.null(mfrow))
par(mar = mar)
for (i in 1:k) {
Xc <- Xcoef[, i]
lower <- Xc - 1.96 * sdXcoef[, i]
upper <- Xc + 1.96 * sdXcoef[, i]
if(order){
Xc <- sort(Xc)
lower <- lower[names(Xc)]
upper <- upper[names(Xc)]
}
col.seq <- rep("black", m)
col.seq[lower < 0 & upper > 0] <- "grey"
At.y <- seq(1, m)
if (!is.null(xlim.list[[i]])) {
plot( x = Xc, y = At.y, yaxt = "n", ylab = "", col = col.seq, xlab = cnames[i], xlim = xlim.list[[i]], pch = "x", cex.lab = 1.3, ... )
} else {
plot( x = Xc, y = At.y, yaxt = "n", ylab = "", col = col.seq, xlab = cnames[i], xlim = c(min(lower), max(upper)), pch = "x", cex.lab = 1.3, ... )
}
segments( x0 = lower, y0 = At.y, x1 = upper, y1 = At.y, col = col.seq )
abline(v = 0, lty = 1)
if (y.label)
axis( 2, at = At.y, labels = names(Xc), las = 1, cex.axis = cex.ylab)
}
}else{
if(is.null(which.Xcoef))which.Xcoef <- c(1:NROW(object$params$LvXcoef))
Xcoef <- as.matrix(object$params$theta[,1:(object$num.RR+object$num.lv.c),drop=F]%*%t(object$params$LvXcoef))[,which.Xcoef,drop=F]
cnames <- colnames(object$lv.X[,which.Xcoef,drop=F])
k <- length(cnames)
if(is.null(colnames(object$y)))
colnames(object$y) <- paste("Y",1:NCOL(object$y), sep = "")
labely <- colnames(object$y)
m <- length(labely)
Xc <- Xcoef
sdXcoef <- RRse(object)[,which.Xcoef,drop=F]
if (is.null(mfrow) && k > 1)
mfrow <- c(1, k)
if (!is.null(mfrow))
par(mfrow = mfrow, mar = mar)
if (is.null(mfrow))
par(mar = mar)
for (i in 1:k) {
Xc <- Xcoef[, i]
lower <- Xc - 1.96 * sdXcoef[, i]
upper <- Xc + 1.96 * sdXcoef[, i]
if(order){
Xc <- sort(Xc)
lower <- lower[names(Xc)]
upper <- upper[names(Xc)]
}
col.seq <- rep("black", m)
col.seq[lower < 0 & upper > 0] <- "grey"
At.y <- seq(1, m)
if (!is.null(xlim.list[[i]])) {
plot( x = Xc, y = At.y, yaxt = "n", ylab = "", col = col.seq, xlab = cnames[i], xlim = xlim.list[[i]], pch = "x", cex.lab = 1.3, ... )
} else {
plot( x = Xc, y = At.y, yaxt = "n", ylab = "", col = col.seq, xlab = cnames[i], xlim = c(min(lower), max(upper)), pch = "x", cex.lab = 1.3, ... )
}
segments( x0 = lower, y0 = At.y, x1 = upper, y1 = At.y, col = col.seq )
abline(v = 0, lty = 1)
if (y.label)
axis( 2, at = At.y, labels = names(Xc), las = 1, cex.axis = cex.ylab)
}
}
}
#'@export
randomCoefplot <- function(object, ...)
{
UseMethod(generic="randomCoefplot")
}
|
87d2cb2ef4fe954d69140f050b6ca0006727064b
|
13fd537c59bf51ebc44b384d2b5a5d4d8b4e41da
|
/R/h2oRClient-package/demo/h2o.prcomp.R
|
072b2c7d2ed89ace558b65e37ba099d6af582fe9
|
[
"Apache-2.0"
] |
permissive
|
hardikk/h2o
|
8bd76994a77a27a84eb222a29fd2c1d1c3f37735
|
10810480518d43dd720690e729d2f3b9a0f8eba7
|
refs/heads/master
| 2020-12-25T23:56:29.463807
| 2013-11-28T19:14:17
| 2013-11-28T19:14:17
| 14,797,021
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 635
|
r
|
h2o.prcomp.R
|
# This is a demo of H2O's PCA function
# It imports a data set, parses it, and prints a summary
# Then, it runs PCA on a subset of the features
library(h2o)
h2o.installDepPkgs()
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, silentUpgrade = TRUE, promptUpgrade = FALSE)
australia.hex = h2o.importFile(localH2O, system.file("extdata", "australia.csv", package="h2oRClient"), "australia.hex")
summary(australia.hex)
australia.pca = h2o.prcomp(australia.hex)
print(australia.pca)
plot(australia.pca)
australia.pca2 = h2o.prcomp(australia.hex, tol = 0.5, standardize = FALSE)
print(australia.pca2)
|
9791093d801d89153792d7616b4af3b118a1b78f
|
23941d709a5e833cc0b92fd676b2766a911baead
|
/R/carbon.R
|
d44df6dd47b349be4512d2f7a02ac4586825707a
|
[
"MIT"
] |
permissive
|
lilyraye/Intro2R-COVID
|
fd530f686c45b347c45898265550c3b0deb9ddc9
|
b1e1c0b8b2015e53cc7f267479d920bbcf746e3e
|
refs/heads/master
| 2023-07-17T13:20:50.350065
| 2021-06-15T12:45:45
| 2021-06-15T12:45:45
| 398,911,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 551
|
r
|
carbon.R
|
#' @title Iron making process.
#'
#' @description A data set concerning a new innovative iron making process.
#'
#' @details This is a standard data set from the course MATH 4753 taken from the data sets provided by the text book.
#'
#' @format A data frame with 25 rows and 2 variables:
#' \describe{
#' \item{PilotPlant}{Carbon Content \%, y}
#' \item{LabFurnace}{Carbon Content \%, x}
#' }
#'
#' @source \url{https://www.routledge.com/Statistics-for-Engineering-and-the-Sciences-Sixth-Edition/Mendenhall-Sincich/p/book/9781498728850}
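#'
#' @examples
#' # A quick illustrative look at the data, using the variable names
#' # documented in the format above:
#' data(carbon)
#' plot(PilotPlant ~ LabFurnace, data = carbon)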
"carbon"
|
454bd298d646899610b0e8d4b6151872d7324771
|
b91b4171df45b7e12067dee4ba86a5742f4a1cbc
|
/Homework 2/No 2/R/No 2.R
|
6915537d5922cffbb20678b95341b44191075f28
|
[] |
no_license
|
ishakdavidk/Deep-Learning
|
62f5b38a3b7bfd991e411a67fe7f3b1a9e33660c
|
3bd29a0c4329b3bf988dc43fec2b1c19dae02f8b
|
refs/heads/master
| 2021-05-17T02:06:26.714392
| 2020-06-28T13:48:41
| 2020-06-28T13:48:41
| 250,568,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 217
|
r
|
No 2.R
|
# Read the TV sales data (whitespace-delimited, no header row)
mullin_dataset<-read.table("/cloud/project/No 2/tvsales.dat", header=FALSE, skip=0)
print(mullin_dataset)
# Regress V2 on the remaining predictors V3..V10
mullin_model<-lm(V2 ~ V3+V4+V5+V6+V7+V8+V9+V10, data=mullin_dataset)
print(mullin_model)
summary(mullin_model)
|
b99b8c0253b2a31a4fbe345c5b23859b8768f95f
|
97c3caf1990e0d959581ac4a19705d968ccf1f8f
|
/man/ExpectMat.Rd
|
2d165538746ff1e496e7da9e951186107a45b799
|
[] |
no_license
|
cran/hscovar
|
702d71c2eab3d541fb64c6adf9481815e40be6f5
|
4d08292cdd561dc3d176b8c22400e9abd5654359
|
refs/heads/master
| 2021-07-15T18:04:20.951118
| 2021-04-13T06:20:06
| 2021-04-13T06:20:06
| 245,389,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 575
|
rd
|
ExpectMat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{ExpectMat}
\alias{ExpectMat}
\title{Expected value of paternally inherited allele}
\usage{
ExpectMat(inMat)
}
\arguments{
\item{inMat}{[MATRIX] The paternal genotype matrix}
}
\value{
\describe{
\item{\code{ExP.Fa}}{(N x p) matrix of expected values}
}
}
\description{
The expected value is +0.5 or -0.5 if the sire is homozygous for the
reference or alternate allele, respectively, and 0 if the sire is heterozygous
at the investigated marker
}
\examples{
data(testdata)
G <- Haplo2Geno(H.sire)
E <- ExpectMat(G)
}
|
bbd39e87fb0a27d152d57000f184b2b96824be50
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt19_302_313/stmt19_302_313.R
|
d882275190a9c87a87d40dae4d2ef6a290b99444
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 720
|
r
|
stmt19_302_313.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 16048
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 16047
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 16047
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt19_302_313.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4839
c no.of clauses 16048
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 16047
c
c QBFLIB/Basler/terminator/stmt19_302_313.qdimacs 4839 16048 E1 [1] 0 235 4603 16047 RED
|
252571778290339f22d543696c5d6a795aade31c
|
42e34dd5956efe9c1236a7d283cfa340e50ac3a1
|
/R_Time_Series_Data_Analysis/Ch03_Graphics_of_Time_Series_data/07_Fraction_Mean.R
|
7e7c49b46387e4c0269aad8e53a7d22147210dea
|
[] |
no_license
|
Fintecuriosity11/Time_Series_Analysis
|
2e8720fd76c2ed8bb3b0e234fd243f4890636fa4
|
7d3c813ec55c61339c4c4acea0f36ac534e056a9
|
refs/heads/master
| 2022-12-24T20:16:56.180854
| 2020-10-08T06:21:11
| 2020-10-08T06:21:11
| 277,093,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,119
|
r
|
07_Fraction_Mean.R
|
##########################################################################################################################################
# (Note) -> running the code sequentially (top to bottom) is recommended!
# If an error occurs, please open an issue under the GitHub path Time_Series_Analysis/R_Time_Series_Data_Analysis and it will be checked
##########################################################################################################################################
### Band-plot example using fraction means
# Smooth a time series by splitting it into several parts and computing the mean of each part (the fraction mean).
# The band plot also computes the standard deviation and draws the resulting band; the program description and an analysis example are given below.
# The example generates 1,000 random numbers from a normal distribution; the options for partitioning and evaluating the time series are as follows.
# x = time values, y = time series values.
# add = add to an existing plot, sd = which standard-deviation intervals to draw.
# sd.lwd = line width of the standard-deviation lines, sd.col = colour of the standard-deviation lines
# method = fraction : split the series into a given fraction of the data, compute and apply the partial means
#        = nobs     : split by a fixed number of observations, compute and apply the partial means
#        = width    : split by a fixed window width, compute and apply the partial means
#        = range    : split by a fixed range of values, compute and apply the partial means
# width = 1/5, the partition width; if unspecified, the default is 1/5, i.e. five parts.
# n = 50, the number of estimates to compute; if unspecified, 50 estimates are used
# bandplot: function that computes the means and standard-deviation bands and draws the plot.
.libPaths("C://Users//yjang//R") # set the save directory and library path.
install.packages("gplots")
library(gplots)
x<-1:1000
y<- rnorm(1000, mean = 1, sd = 1 + x/1000)
bandplot(x,y, main="Band Plot by Fraction")
legend("bottomleft",c("m +/- 2d", "m +/- d", "Mean"), col=c("magenta","blue","red"), lwd = c(2,2,2), cex = 0.6)
graphics.off() # close the graphics device.
# Data: x = 1 to 1000
#       y = 1000 random numbers from a normal distribution
# Fraction mean: 1000 time-series observations,
#                analysed with width = 1/5
# run the code yourself to see the mean +/- 2sd and mean +/- sd bands.
############################################################ Output (print) #################################################################
# # > > x<-1:1000
# >
# > y<- rnorm(1000, mean = 1, sd = 1 + x/1000)
# > bandplot(x,y, main="Band Plot by Fraction")
# > legend("bottomleft",c("m +/- 2d", "m +/- d", "Mean"), col=c("magenta","blue","red"), lwd = c(2,2,2), cex = 0.6)
# > graphics.off()
##########################################################################################################################################
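# A variant sketch following the method options described above (assumes the
# x and y from the example are still in the workspace; parameter handling is
# per the gplots documentation summarised above, so treat this as untested):
# bandplot(x, y, method = "width", width = 100, main = "Band Plot by Width")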
|
b0e50cc29c861d67c5c870f83c75b851f3c2b71a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rnoaa/examples/meteo_tidy_ghcnd.Rd.R
|
4faf0f81d9d95e078f314bc4798155fa545399bc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
meteo_tidy_ghcnd.Rd.R
|
library(rnoaa)
### Name: meteo_tidy_ghcnd
### Title: Create a tidy GHCND dataset from a single monitor
### Aliases: meteo_tidy_ghcnd
### ** Examples
## Not run:
##D # One station in Australia is ASN00003003
##D meteo_tidy_ghcnd(stationid = "ASN00003003")
##D meteo_tidy_ghcnd(stationid = "ASN00003003", var = "tavg")
##D meteo_tidy_ghcnd(stationid = "ASN00003003", date_min = "1950-01-01")
## End(Not run)
|
ee2aef5a9a265762b63e67ea7e966e503046e428
|
da52ecac35cfca350fca0d51c30e531057ad0878
|
/man/obama.Rd
|
41a3e9067192b9cf01491eccbe90ccb1c5c5b290
|
[] |
no_license
|
dselivanov/cleanNLP
|
51e623d3a845fe9607fcd12c3d840b79551cf0aa
|
9d30b3a8ca806d51ea3162a397c260346b0fe05a
|
refs/heads/master
| 2021-01-19T11:21:57.760937
| 2017-04-11T16:52:21
| 2017-04-11T16:52:21
| 87,958,242
| 2
| 0
| null | 2017-04-11T16:51:37
| 2017-04-11T16:51:37
| null |
UTF-8
|
R
| false
| true
| 363
|
rd
|
obama.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{obama}
\alias{obama}
\title{Annotation of Barack Obama's State of the Union Addresses}
\description{
Parsed text from all eight State of the Union addresses given by Barack Obama.
}
\references{
\url{http://www.presidency.ucsb.edu/sou.php}
}
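\examples{
\dontrun{
# A minimal sketch: load the annotation object and inspect its structure
data(obama)
str(obama, max.level = 1)
}
}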
\keyword{data}
|
c04cbcd31142742d8d09d9beaa8d1d8d2016dc16
|
6d820299dc77330537078d31d27bd7291d7204c8
|
/Data science/hw7 2/hw7_490IDS_17.R
|
8574a604719f7ca1190649a912f661558501255a
|
[
"MIT"
] |
permissive
|
zyhhhhhhh/info490-intro-to-data-science
|
4fdd29a5bb5df4d4d07883ee4b2b2ad73972a02d
|
32eee71679def276296abf08c9606b8378179da6
|
refs/heads/master
| 2021-01-02T22:49:09.965708
| 2017-08-05T05:37:58
| 2017-08-05T05:37:58
| 99,401,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,856
|
r
|
hw7_490IDS_17.R
|
myText <- readLines("stateoftheunion1790-2012.txt")
stars= unlist(regmatches(myText, gregexpr("\\*\\*\\*", myText)))
locations = grep("^\\*\\*\\*$", myText)
dates = myText[locations[]+4]
years = regexpr("\\<\\d\\d\\d\\d\\>",dates)
years_list = unlist(regmatches(dates, gregexpr("\\<\\d\\d\\d\\d\\>", dates)))
months = regexpr("^[[:upper:]][[:lower:]]*\\>",dates)
months_list = unlist(regmatches(dates, gregexpr("^[[:upper:]][[:lower:]]*\\>", dates)))
presidents = myText[locations[]+3]
num_speech = length(presidents)
print("number of speeches")
num_speech
president_list = as.data.frame(table(presidents))
president_list = unlist(c(president_list["presidents"]))
num_president = length(president_list)
print("number of presidents")
num_president
speeches = c()
for (i in 1:(length(locations)-1)){
a = c(unlist(myText[(locations[i]+1):(locations[i+1]-1)], recursive = TRUE))
a = paste(a, collapse = " ")
speeches[i] =a
}
# the last speech runs from the final marker to the end of the file
# (parenthesise carefully: `a + 1:b` parses as `a + (1:b)`)
a = c(unlist(myText[(locations[length(locations)]+1):length(myText)], recursive = TRUE))
a = paste(a, collapse = " ")
speeches[length(locations)] =a
length(speeches)
print("Yes, there's 222 elements.")
newspeeches = speeches
for (i in 1:length(newspeeches)){
newspeeches[i] = gsub("\\<\\S+'\\S+\\>", "", newspeeches[i])
}
for (i in 1:length(newspeeches)){
newspeeches[i] = gsub("[[:digit:]]", "", newspeeches[i])
}
for (i in 1:length(newspeeches)){
newspeeches[i] = gsub("\\<.*Applause.*\\>", "", newspeeches[i])
}
newspeeches <- lapply(newspeeches, FUN = tolower)
words = c()
for (i in 1:length(newspeeches)){
words = c(words,unlist(strsplit(as.character(newspeeches[i]), "[[:blank:]]|[[:punct:]]",perl = TRUE,useBytes = TRUE)))
}
print(length(words))
words = words[words != ""]
print(length(words))
words_vec = list()
for (i in 1:length(newspeeches)){
a =unlist(strsplit(as.character(newspeeches[i]), "[[:blank:]]|[[:punct:]]",perl = TRUE,useBytes = TRUE))
a = a[a!=""]
a = a[a!= "na"]
a = as.vector(a)
words_vec[[i]] = a
}
frequencies = words_vec
for (i in 1:length(frequencies)){
a = as.vector(frequencies[[i]])
l = length(a)
b = table(a)
for (j in 1:length(frequencies[[i]])){
frequencies[[i]][j] = as.numeric(b[a[j]])/l
}
}
a = table(words_vec[[216]])
mean(a)
sd(a)
c = names(a)
d = as.numeric(sapply(c,nchar))
b = a[which(d>mean(d))]
b = b[b>mean(a)+0.2*sd(a)]
plot(b)
text(b, lab=row.names(b))
a = table(words_vec[[217]])
mean(a)
sd(a)
c = names(a)
d = as.numeric(sapply(c,nchar))
b = a[which(d>mean(d))]
b = b[b>mean(a)+0.2*sd(a)]
plot(b)
text(b, lab=row.names(b))
a = table(words_vec[[221]])
mean(a)
sd(a)
c = names(a)
d = as.numeric(sapply(c,nchar))
b = a[which(d>mean(d))]
b = b[b>mean(a)+0.2*sd(a)]
plot(b)
text(b, lab=row.names(b))
a = table(words_vec[[222]])
mean(a)
sd(a)
c = names(a)
d = as.numeric(sapply(c,nchar))
b = a[which(d>mean(d))]
b = b[b>mean(a)+0.2*sd(a)]
plot(b)
text(b, lab=row.names(b))
print("Above was 4 graphs on frequent words in the speech by J.W Bush and Obama. I filtered only long words and the frequency has to be large. The first two are by Bush; the latter 2 are by Obama. Despite from America and American, we can easily tell that there's a difference in their focus. J.W Bush has been using military, security and terrorists frequently. Obama has been using education, business, innovation, technology and companies more frequently.
The reason could be Bush has a main focus on anti-terrorists. History also proved that this stat is correct, Bush vote for the war and Obama against it. When Bush was in duty, 9/11 happened. When Obama was in duty, he had to deal with the consequences of financial crisis, so his main focus will be on business, thus the word business appears more.")
|
f63323a649921689dc95003d768003fbe31c174c
|
feb134cf8cc19d3f5deb820a2bba8e4f68f48ba7
|
/icews/code/brazil2013targets.R
|
0d0414253d24e97601412f4c1c87dffd6bd663f1
|
[] |
no_license
|
lizmckenna/protests
|
7f268a2d83043a9571b8c35cc62fcaed215809bb
|
0b80e6805bdf7915be2421e341af720698761784
|
refs/heads/master
| 2020-03-08T05:20:30.009452
| 2019-03-10T23:40:02
| 2019-03-10T23:40:02
| 127,945,742
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,099
|
r
|
brazil2013targets.R
|
rm(list=ls())
# Load libraries
library(dplyr)
library(ggplot2)
library(broom)
library(purrr)
library(tidyr)
library(tibble)
library(tidyverse)
library(plotly)
devtools::install_github('hadley/ggplot2')
b <- read.csv(file="/Users/lizmckenna/Desktop/brazil2013targets.csv", header=TRUE,sep=",")
# Fix date class
b$Event.Date <- as.Date(b$Event.Date, format = "%m/%d/%y")
head(b$Event.Date)
class(b$Event.Date)
# Make initial geom_point
b1 <- ggplot(data=b, aes(x=Event.Date, y=daily.count, group=target.recode, colour=target.recode)) +
geom_point()
b1
ggplotly(b1)
# Make initial histogram
b2 <- ggplot(data=b, aes(x=Event.Date, group=target.recode, fill=target.recode)) +
geom_histogram()
b2
ggplotly(b2)
# Make histogram but remove NAs (I need to learn the subset function...)
narm <- read.csv(file="/Users/lizmckenna/Desktop/brazil2013targets.narm.csv", header=TRUE,sep=",")
# Fix date class
narm$Event.Date <- as.Date(narm$Event.Date, format = "%m/%d/%y")
c1 <- ggplot(data=narm, aes(x=Event.Date, group=target.recode, fill=target.recode)) +
geom_histogram()
c1
ggplotly(c1)
|
c04b5305d5e6f5ef4b210abc7e093d7166bcdd55
|
9ea74e7e088c6351ac4d29522529c0e79b51dcc4
|
/tests/testthat/test_treeInfo.R
|
449db853ddd3f14db5f19e2952efdfe8ef6fa43d
|
[] |
no_license
|
imbs-hl/ranger
|
0ccd500db3cd97e5fda5d3b150517c71ed55ade6
|
5a04d9355a1a96315973d88492160bc00a91e399
|
refs/heads/master
| 2023-08-17T01:09:34.455853
| 2023-08-16T09:26:19
| 2023-08-16T09:26:19
| 41,377,351
| 739
| 245
| null | 2023-09-12T20:10:46
| 2015-08-25T17:18:39
|
C++
|
UTF-8
|
R
| false
| false
| 17,726
|
r
|
test_treeInfo.R
|
library(ranger)
library(survival)
context("ranger_treeInfo")
## Classification
rf.class.formula <- ranger(Species ~ ., iris, num.trees = 5)
rf.class.first <- ranger(dependent.variable.name = "Species", data = iris[, c(5, 1:4)], num.trees = 5)
rf.class.mid <- ranger(dependent.variable.name = "Species", data = iris[, c(1:2, 5, 3:4)], num.trees = 5)
rf.class.last <- ranger(dependent.variable.name = "Species", data = iris, num.trees = 5)
ti.class.formula <- treeInfo(rf.class.formula)
ti.class.first <- treeInfo(rf.class.first)
ti.class.mid <- treeInfo(rf.class.mid)
ti.class.last <- treeInfo(rf.class.last)
test_that("Terminal nodes have only prediction, non-terminal nodes all others, classification formula", {
expect_true(all(is.na(ti.class.formula[ti.class.formula$terminal, 2:6])))
expect_true(all(!is.na(ti.class.formula[ti.class.formula$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.class.formula[!ti.class.formula$terminal, -8])))
expect_true(all(is.na(ti.class.formula[!ti.class.formula$terminal, 8])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, classification depvarname first", {
expect_true(all(is.na(ti.class.first[ti.class.first$terminal, 2:6])))
expect_true(all(!is.na(ti.class.first[ti.class.first$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.class.first[!ti.class.first$terminal, -8])))
expect_true(all(is.na(ti.class.first[!ti.class.first$terminal, 8])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, classification depvarname mid", {
expect_true(all(is.na(ti.class.mid[ti.class.mid$terminal, 2:6])))
expect_true(all(!is.na(ti.class.mid[ti.class.mid$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.class.mid[!ti.class.mid$terminal, -8])))
expect_true(all(is.na(ti.class.mid[!ti.class.mid$terminal, 8])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, classification depvarname last", {
expect_true(all(is.na(ti.class.last[ti.class.last$terminal, 2:6])))
expect_true(all(!is.na(ti.class.last[ti.class.last$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.class.last[!ti.class.last$terminal, -8])))
expect_true(all(is.na(ti.class.last[!ti.class.last$terminal, 8])))
})
test_that("Names in treeInfo match, classification", {
varnames <- colnames(iris)[1:4]
expect_true(all(is.na(ti.class.formula$splitvarName) | ti.class.formula$splitvarName %in% varnames))
expect_true(all(is.na(ti.class.first$splitvarName) | ti.class.first$splitvarName %in% varnames))
expect_true(all(is.na(ti.class.mid$splitvarName) | ti.class.mid$splitvarName %in% varnames))
expect_true(all(is.na(ti.class.last$splitvarName) | ti.class.last$splitvarName %in% varnames))
})
test_that("Prediction for classification is factor with correct levels", {
expect_is(ti.class.formula$prediction, "factor")
expect_equal(levels(ti.class.formula$prediction), levels(iris$Species))
})
test_that("Prediction for classification is same as class prediction", {
dat <- iris[sample(nrow(iris)), ]
rf <- ranger(dependent.variable.name = "Species", data = dat, num.trees = 1,
replace = FALSE, sample.fraction = 1)
pred_class <- predict(rf, dat)$predictions
nodes <- predict(rf, dat, type = "terminalNodes")$predictions[, 1]
ti <- treeInfo(rf, 1)
pred_ti <- sapply(nodes, function(x) {
ti[ti$nodeID == x, "prediction"]
})
expect_equal(pred_ti, pred_class)
})
test_that("Prediction for classification is same as class prediction, new factor", {
dat <- iris[sample(nrow(iris)), ]
dat$Species <- factor(dat$Species, levels = sample(levels(dat$Species)))
rf <- ranger(dependent.variable.name = "Species", data = dat, num.trees = 1,
replace = FALSE, sample.fraction = 1)
pred_class <- predict(rf, dat)$predictions
nodes <- predict(rf, dat, type = "terminalNodes")$predictions[, 1]
ti <- treeInfo(rf, 1)
pred_ti <- sapply(nodes, function(x) {
ti[ti$nodeID == x, "prediction"]
})
expect_equal(pred_ti, pred_class)
})
test_that("Prediction for classification is same as class prediction, unused factor levels", {
dat <- iris[c(101:150, 51:100), ]
expect_warning(rf <- ranger(dependent.variable.name = "Species", data = dat, num.trees = 1,
replace = FALSE, sample.fraction = 1))
pred_class <- predict(rf, dat)$predictions
nodes <- predict(rf, dat, type = "terminalNodes")$predictions[, 1]
ti <- treeInfo(rf, 1)
pred_ti <- sapply(nodes, function(x) {
ti[ti$nodeID == x, "prediction"]
})
expect_equal(pred_ti, pred_class)
})
test_that("Prediction for probability is same as probability prediction", {
dat <- iris[sample(nrow(iris)), ]
rf <- ranger(dependent.variable.name = "Species", data = dat, num.trees = 1,
sample.fraction = 1, replace = FALSE, probability = TRUE)
ti <- treeInfo(rf)
pred_prob <- predict(rf, dat)$predictions
nodes <- predict(rf, dat, type = "terminalNodes")$predictions[, 1]
pred_ti <- t(sapply(nodes, function(x) {
as.matrix(ti[ti$nodeID == x, 8:10])
}))
colnames(pred_ti) <- gsub("pred\\.", "", colnames(ti)[8:10])
expect_equal(pred_prob, pred_ti)
})
test_that("Prediction for probability is same as probability prediction, new factor", {
dat <- iris[sample(nrow(iris)), ]
dat$Species <- factor(dat$Species, levels = sample(levels(dat$Species)))
rf <- ranger(dependent.variable.name = "Species", data = dat, num.trees = 1,
sample.fraction = 1, replace = FALSE, probability = TRUE)
ti <- treeInfo(rf)
pred_prob <- predict(rf, dat)$predictions
nodes <- predict(rf, dat, type = "terminalNodes")$predictions[, 1]
pred_ti <- t(sapply(nodes, function(x) {
as.matrix(ti[ti$nodeID == x, 8:10])
}))
colnames(pred_ti) <- gsub("pred\\.", "", colnames(ti)[8:10])
expect_equal(pred_prob, pred_ti)
})
test_that("Prediction for probability is same as probability prediction, unused factor levels", {
dat <- iris[c(101:150, 51:100), ]
dat$Species <- factor(dat$Species, levels = sample(levels(dat$Species)))
expect_warning(rf <- ranger(dependent.variable.name = "Species", data = dat, num.trees = 1,
sample.fraction = 1, replace = FALSE, probability = TRUE))
ti <- treeInfo(rf)
pred_prob <- predict(rf, dat)$predictions
nodes <- predict(rf, dat, type = "terminalNodes")$predictions[, 1]
pred_ti <- t(sapply(nodes, function(x) {
as.matrix(ti[ti$nodeID == x, 8:9])
}))
colnames(pred_ti) <- gsub("pred\\.", "", colnames(ti)[8:9])
expect_equal(pred_prob, pred_ti)
})
test_that("Prediction for matrix classification is integer with correct values", {
rf <- ranger(dependent.variable.name = "Species", data = data.matrix(iris),
num.trees = 5, classification = TRUE)
ti <- treeInfo(rf, 1)
expect_is(ti$prediction, "numeric")
expect_equal(sort(unique(ti$prediction)), 1:3)
})
## Regression
n <- 20
dat <- data.frame(y = rnorm(n),
replicate(2, runif(n)),
replicate(2, rbinom(n, size = 1, prob = .5)))
rf.regr.formula <- ranger(y ~ ., dat, num.trees = 5)
rf.regr.first <- ranger(dependent.variable.name = "y", data = dat, num.trees = 5)
rf.regr.mid <- ranger(dependent.variable.name = "y", data = dat[, c(2:3, 1, 4:5)], num.trees = 5)
rf.regr.last <- ranger(dependent.variable.name = "y", data = dat[, c(2:5, 1)], num.trees = 5)
ti.regr.formula <- treeInfo(rf.regr.formula)
ti.regr.first <- treeInfo(rf.regr.first)
ti.regr.mid <- treeInfo(rf.regr.mid)
ti.regr.last <- treeInfo(rf.regr.last)
test_that("Terminal nodes have only prediction, non-terminal nodes all others, regression formula", {
expect_true(all(is.na(ti.regr.formula[ti.regr.formula$terminal, 2:6])))
expect_true(all(!is.na(ti.regr.formula[ti.regr.formula$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.regr.formula[!ti.regr.formula$terminal, -8])))
expect_true(all(is.na(ti.regr.formula[!ti.regr.formula$terminal, 8])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, regression depvarname first", {
expect_true(all(is.na(ti.regr.first[ti.regr.first$terminal, 2:6])))
expect_true(all(!is.na(ti.regr.first[ti.regr.first$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.regr.first[!ti.regr.first$terminal, -8])))
expect_true(all(is.na(ti.regr.first[!ti.regr.first$terminal, 8])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, regression depvarname mid", {
expect_true(all(is.na(ti.regr.mid[ti.regr.mid$terminal, 2:6])))
expect_true(all(!is.na(ti.regr.mid[ti.regr.mid$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.regr.mid[!ti.regr.mid$terminal, -8])))
expect_true(all(is.na(ti.regr.mid[!ti.regr.mid$terminal, 8])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, regression depvarname last", {
expect_true(all(is.na(ti.regr.last[ti.regr.last$terminal, 2:6])))
expect_true(all(!is.na(ti.regr.last[ti.regr.last$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.regr.last[!ti.regr.last$terminal, -8])))
expect_true(all(is.na(ti.regr.last[!ti.regr.last$terminal, 8])))
})
test_that("Names in treeInfo match, regression", {
varnames <- c("X1", "X2", "X1.1", "X2.1")
expect_true(all(is.na(ti.regr.formula$splitvarName) | ti.regr.formula$splitvarName %in% varnames))
expect_true(all(is.na(ti.regr.first$splitvarName) | ti.regr.first$splitvarName %in% varnames))
expect_true(all(is.na(ti.regr.mid$splitvarName) | ti.regr.mid$splitvarName %in% varnames))
expect_true(all(is.na(ti.regr.last$splitvarName) | ti.regr.last$splitvarName %in% varnames))
})
test_that("Prediction for regression is numeric in correct range", {
expect_is(ti.regr.formula$prediction, "numeric")
expect_true(all(is.na(ti.regr.formula$prediction) | ti.regr.formula$prediction >= min(dat$y)))
expect_true(all(is.na(ti.regr.formula$prediction) | ti.regr.formula$prediction <= max(dat$y)))
})
## Probability estimation
rf.prob.formula <- ranger(Species ~ ., iris, num.trees = 5, probability = TRUE)
rf.prob.first <- ranger(dependent.variable.name = "Species", data = iris[, c(5, 1:4)], num.trees = 5, probability = TRUE)
rf.prob.mid <- ranger(dependent.variable.name = "Species", data = iris[, c(1:2, 5, 3:4)], num.trees = 5, probability = TRUE)
rf.prob.last <- ranger(dependent.variable.name = "Species", data = iris, num.trees = 5, probability = TRUE)
ti.prob.formula <- treeInfo(rf.prob.formula)
ti.prob.first <- treeInfo(rf.prob.first)
ti.prob.mid <- treeInfo(rf.prob.mid)
ti.prob.last <- treeInfo(rf.prob.last)
test_that("Terminal nodes have only prediction, non-terminal nodes all others, probability formula", {
expect_true(all(is.na(ti.prob.formula[ti.prob.formula$terminal, 2:6])))
expect_true(all(!is.na(ti.prob.formula[ti.prob.formula$terminal, c(1, 7:10)])))
expect_true(all(!is.na(ti.prob.formula[!ti.prob.formula$terminal, c(-8, -9, -10)])))
expect_true(all(is.na(ti.prob.formula[!ti.prob.formula$terminal, 8:10])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, probability depvarname first", {
expect_true(all(is.na(ti.prob.first[ti.prob.first$terminal, 2:6])))
expect_true(all(!is.na(ti.prob.first[ti.prob.first$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.prob.first[!ti.prob.first$terminal, c(-8, -9, -10)])))
expect_true(all(is.na(ti.prob.first[!ti.prob.first$terminal, 8:10])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, probability depvarname mid", {
expect_true(all(is.na(ti.prob.mid[ti.prob.mid$terminal, 2:6])))
expect_true(all(!is.na(ti.prob.mid[ti.prob.mid$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.prob.mid[!ti.prob.mid$terminal, c(-8, -9, -10)])))
expect_true(all(is.na(ti.prob.mid[!ti.prob.mid$terminal, 8:10])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, probability depvarname last", {
expect_true(all(is.na(ti.prob.last[ti.prob.last$terminal, 2:6])))
expect_true(all(!is.na(ti.prob.last[ti.prob.last$terminal, c(1, 7:8)])))
expect_true(all(!is.na(ti.prob.last[!ti.prob.last$terminal, c(-8, -9, -10)])))
expect_true(all(is.na(ti.prob.last[!ti.prob.last$terminal, 8:10])))
})
test_that("Names in treeInfo match, probability", {
varnames <- colnames(iris)[1:4]
expect_true(all(is.na(ti.prob.formula$splitvarName) | ti.prob.formula$splitvarName %in% varnames))
expect_true(all(is.na(ti.prob.first$splitvarName) | ti.prob.first$splitvarName %in% varnames))
expect_true(all(is.na(ti.prob.mid$splitvarName) | ti.prob.mid$splitvarName %in% varnames))
expect_true(all(is.na(ti.prob.last$splitvarName) | ti.prob.last$splitvarName %in% varnames))
})
test_that("Prediction for probability is one probability per class, sum to 1", {
expect_equal(ncol(ti.prob.formula), 10)
expect_is(ti.prob.formula$pred.setosa, "numeric")
expect_true(all(!ti.prob.formula$terminal | rowSums(ti.prob.formula[, 8:10]) == 1))
})
test_that("Prediction for probability has correct factor levels", {
dat <- iris[c(101:150, 1:100), ]
rf <- ranger(dependent.variable.name = "Species", data = dat, num.trees = 5, probability = TRUE)
# Predict
pred_rf <- predict(rf, dat, num.trees = 1)$predictions
# Predict with treeInfo
ti <- treeInfo(rf)
terminal_nodes <- predict(rf, dat, type = "terminalNodes")$predictions[, 1]
pred_ti <- as.matrix(ti[terminal_nodes + 1, grep("pred", colnames(ti))])
colnames(pred_ti) <- gsub("pred\\.", "", colnames(pred_ti))
rownames(pred_ti) <- NULL
expect_equal(pred_rf, pred_ti)
})
## Survival
rf.surv.formula <- ranger(Surv(time, status) ~ ., veteran, num.trees = 5)
rf.surv.first <- ranger(dependent.variable.name = "time", status.variable.name = "status", data = veteran[, c(3:4, 1:2, 5:8)], num.trees = 5)
rf.surv.mid <- ranger(dependent.variable.name = "time", status.variable.name = "status", data = veteran, num.trees = 5)
rf.surv.last <- ranger(dependent.variable.name = "time", status.variable.name = "status", data = veteran[, c(2, 1, 5:8, 3:4)], num.trees = 5)
ti.surv.formula <- treeInfo(rf.surv.formula)
ti.surv.first <- treeInfo(rf.surv.first)
ti.surv.mid <- treeInfo(rf.surv.mid)
ti.surv.last <- treeInfo(rf.surv.last)
test_that("Terminal nodes have only nodeID, non-terminal nodes all, survival formula", {
expect_true(all(is.na(ti.surv.formula[ti.surv.formula$terminal, 2:6])))
expect_true(all(!is.na(ti.surv.formula[ti.surv.formula$terminal, c(1, 7)])))
expect_true(all(!is.na(ti.surv.formula[!ti.surv.formula$terminal, ])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, survival depvarname first", {
expect_true(all(is.na(ti.surv.first[ti.surv.first$terminal, 2:6])))
expect_true(all(!is.na(ti.surv.first[ti.surv.first$terminal, c(1, 7)])))
expect_true(all(!is.na(ti.surv.first[!ti.surv.first$terminal, ])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, survival depvarname mid", {
expect_true(all(is.na(ti.surv.mid[ti.surv.mid$terminal, 2:6])))
expect_true(all(!is.na(ti.surv.mid[ti.surv.mid$terminal, c(1, 7)])))
expect_true(all(!is.na(ti.surv.mid[!ti.surv.mid$terminal, ])))
})
test_that("Terminal nodes have only prediction, non-terminal nodes all others, survival depvarname last", {
expect_true(all(is.na(ti.surv.last[ti.surv.last$terminal, 2:6])))
expect_true(all(!is.na(ti.surv.last[ti.surv.last$terminal, c(1, 7)])))
expect_true(all(!is.na(ti.surv.last[!ti.surv.last$terminal, ])))
})
test_that("Names in treeInfo match, survival", {
varnames <- colnames(veteran)[c(1:2, 5:8)]
expect_true(all(is.na(ti.surv.formula$splitvarName) | ti.surv.formula$splitvarName %in% varnames))
expect_true(all(is.na(ti.surv.first$splitvarName) | ti.surv.first$splitvarName %in% varnames))
expect_true(all(is.na(ti.surv.mid$splitvarName) | ti.surv.mid$splitvarName %in% varnames))
expect_true(all(is.na(ti.surv.last$splitvarName) | ti.surv.last$splitvarName %in% varnames))
})
test_that("No prediction for Survival", {
expect_equal(ncol(ti.surv.formula), 7)
})
## General
test_that("Error if no saved forest", {
expect_error(treeInfo(ranger(Species ~ ., iris, write.forest = FALSE)),
"Error\\: No saved forest in ranger object\\. Please set write.forest to TRUE when calling ranger\\.")
})
## Unordered splitting
test_that("Spitting value is comma separated list for partition splitting", {
n <- 50
dat <- data.frame(x = sample(c("A", "B", "C", "D", "E"), n, replace = TRUE),
y = rbinom(n, 1, 0.5),
stringsAsFactors = FALSE)
rf.partition <- ranger(y ~ ., dat, num.trees = 5, respect.unordered.factors = "partition")
ti.partition <- treeInfo(rf.partition)
expect_is(ti.partition$splitval, "character")
expect_true(all(is.na(ti.partition$splitval) | grepl("^\\d+(?:,\\d+)*$", ti.partition$splitval)))
})
test_that("Spitting value is numeric for order splitting", {
set.seed(100)
rf.order <- ranger(Sepal.Length ~ ., iris, num.trees = 5, respect.unordered.factors = "order")
ti.order <- treeInfo(rf.order)
expect_is(ti.order$splitval[!ti.order$terminal & ti.order$splitvarName == "Species"], "numeric")
})
test_that("treeInfo works for 31 unordered factor levels but not for 32", {
n <- 31
dt <- data.frame(x = factor(1:n, ordered = FALSE),
y = rbinom(n, 1, 0.5))
rf <- ranger(y ~ ., data = dt, num.trees = 10, splitrule = "extratrees")
expect_silent(treeInfo(rf))
n <- 32
dt <- data.frame(x = factor(1:n, ordered = FALSE),
y = rbinom(n, 1, 0.5))
rf <- ranger(y ~ ., data = dt, num.trees = 10, splitrule = "extratrees")
expect_warning(treeInfo(rf), "Unordered splitting levels can only be shown for up to 31 levels.")
})
|
c88eae596715c060f75c2d63bdcdb31b11e69816
|
f25f19454371c545fb69ccb7da1a4ef0baf6acb8
|
/man/dmgain.Rd
|
e29639f31c4fa83a2fd422dee5ad91b739348970
|
[] |
no_license
|
Sandy4321/npdr
|
00e6ab9fd7db2a6465b39bb320afeee76db5f7ce
|
b02e08577c58a6fddb0b157f9870740795747b04
|
refs/heads/master
| 2020-12-06T07:34:02.878253
| 2020-01-06T04:54:47
| 2020-01-06T04:54:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,093
|
rd
|
dmgain.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inbixGAIN.R
\name{dmgain}
\alias{dmgain}
\title{Differential modularity genetic association network (dmGAIN) algorithm.}
\usage{
dmgain(labelledDataFrame)
}
\arguments{
\item{labelledDataFrame}{\code{data.frame} with samples in rows, variables in columns
and classLabel in the last column.}
}
\value{
results \code{matrix} of variable by variable differential modularity values.
}
\description{
\code{dmgain} computes a variable-by-variable matrix of differential
modularity values from a labelled data set.
}
\examples{
data(testdata10)
rinbixDmgain <- dmgain(testdata10)
}
\references{
\itemize{
\item \href{https://github.com/hexhead/inbix}{C++ inbix on github}
}
}
\seealso{
Other GAIN functions: \code{\link{dcgain}},
\code{\link{fitInteractionModel}},
\code{\link{fitMainEffectModel}},
\code{\link{gainToSimpleSIF}},
\code{\link{getInteractionEffects}},
\code{\link{getMainEffects}},
\code{\link{regainParallel}}, \code{\link{regain}}
}
\concept{GAIN functions}
\concept{inbix synonym functions}
\keyword{array}
\keyword{models}
|
bf57f386bc38291c39d71a50da38a2e8f08a4e0e
|
c241109ba5d2f9cb6e74caceca787e795d136750
|
/plot1.R
|
9d424df0afb8768a54efb7ffb6d524c1c2e2e6c4
|
[] |
no_license
|
pradiptasahaR/ExData_Plotting1
|
472bafd4834aa173b9adf305673a098424a6e4ec
|
43c774b3d8ca11b7e4086d700c35dcb4e0233cf2
|
refs/heads/master
| 2021-01-21T16:11:13.796728
| 2016-03-30T14:49:24
| 2016-03-30T14:49:24
| 55,019,261
| 0
| 0
| null | 2016-03-30T00:14:23
| 2016-03-30T00:14:23
| null |
UTF-8
|
R
| false
| false
| 425
|
r
|
plot1.R
|
file <- "./EDA/household_power_consumption.txt"
pc <- read.table(file,header=TRUE,sep=";",stringsAsFactors=FALSE)
subpc <- pc[pc$Date %in% c("1/2/2007","2/2/2007"),]
subpc$DateTime <- strptime(paste(subpc$Date, subpc$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("plot1.png", width=480, height=480)
hist(as.numeric(subpc$Global_active_power),xlab="Global Active Power (kilowatts)",main="Global Active Power",col = "red")
dev.off()
|
33b9144dd67e8c2257111b49011dade23f5ec496
|
fbc7b084e91c08b50b655ffa133837f995cc99e6
|
/R/rxtractogon.R
|
44840f09c2522d34e0f83e849129205311cd22c0
|
[] |
no_license
|
MarieAugerMethe/rerddapXtracto
|
d814561fe81b93a0f43ae54b08819dfc3d3fd4f9
|
1616fd7d1566fdf65220259dbec6d1d6bedf0b42
|
refs/heads/master
| 2023-08-06T20:45:38.750343
| 2021-09-27T00:28:08
| 2021-09-27T00:28:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,933
|
r
|
rxtractogon.R
|
#' Extract environmental data in a polygon using 'ERDDAP' and 'rerddap'.
#'
#' \code{rxtractogon} uses the R package 'rerddap' to extract environmental data
#' from an 'ERDDAP' server within a polygon through time.
#' @export
#' @param dataInfo - the return from an 'rerddap::info' call to an 'ERDDAP' server
#' @param parameter - character string containing the name of the parameter to extract
#' @param xcoord - array giving longitudes (in decimal
#' degrees East, either 0-360 or -180 to 180) of polygon
#' @param ycoord - array giving latitudes (in decimal
#' degrees N; -90 to 90) of polygon
#' @param tcoord - 2-array of minimum and maximum times as 'YYYY-MM-DD'
#' @param zcoord - a real number with the z-coordinate (usually altitude or depth)
#' @param xName - character string with name of the xcoord in the 'ERDDAP' dataset (default "longitude")
#' @param yName - character string with name of the ycoord in the 'ERDDAP' dataset (default "latitude")
#' @param zName - character string with name of the zcoord in the 'ERDDAP' dataset (default "altitude")
#' @param tName - character string with name of the tcoord in the 'ERDDAP' dataset (default "time")
#' @param verbose - logical variable (default FALSE) if the URL request should be verbose
#' @param cache_remove - logical variable (default TRUE) whether to delete 'rerddap' cache
#' @return If successful, a structure with data and dimensions:
#' \itemize{
#' \item extract$data - the masked data array dimensioned (lon,lat,time)
#' \item extract$varname - the name of the parameter extracted
#' \item extract$datasetname - ERDDAP dataset name
#' \item extract$longitude - the longitudes on the same scale as the request
#' \item extract$latitude - the latitudes always going south to north
#' \item extract$time - the times of the extracts
#' }
#' else an error string
#' @examples
#' # toy example to show use
#' # and keep execution time low
#' \donttest{
#' dataInfo <- rerddap::info('erdHadISST')
#' parameter <- 'sst'
#' tcoord <- c("2016-06-15")
#' xcoord <- mbnms$Longitude[1:3]
#' ycoord <- mbnms$Latitude[1:3]
#' sanctSST <- rxtractogon(dataInfo, parameter = parameter, xcoord = xcoord,
#'                         ycoord = ycoord, tcoord = tcoord)
#' }
#' \donttest{
#' xcoord <- mbnms$Longitude
#' ycoord <- mbnms$Latitude
#' dataInfo <- rerddap::info('etopo180')
#' parameter = 'altitude'
#' xName <- 'longitude'
#' yName <- 'latitude'
#' bathy <- rxtractogon (dataInfo, parameter = parameter, xcoord = xcoord,
#' ycoord = ycoord)
#' }
#' @section Details:
#' rxtractogon extracts the data from the smallest bounding box that contains
#' the polygon, and then uses the function "point.in.polygon" from the "sp"
#' package to mask out the areas outside of the polygon.
#' rxtractogon only works with datasets defined
#' on a latitude and longitude grid.
rxtractogon <- function(dataInfo, parameter, xcoord = NULL, ycoord = NULL,
zcoord = NULL, tcoord = NULL, xName = 'longitude',
yName = 'latitude', zName = 'altitude', tName = 'time',
verbose = FALSE, cache_remove = TRUE) {
# check that a valid rerddap info structure is being passed
rerddap::cache_setup(temp_dir = TRUE)
if (!(methods::is(dataInfo, "info"))) {
print("error - dataInfo is not a valid info structure from rerddap")
return("bad info structure")
}
# check that the dataset is a grid
if (!("Grid" %in% dataInfo$alldata$NC_GLOBAL$value)) {
print("error - dataset is not a Grid")
return("dataset not a grid")
}
if (length(xcoord) != length(ycoord)) {
print('xcoord and ycoord are not of the same length')
return('bad xcoord, ycoord values')
}
#extend out tpos to be length 2 if not
tcoord1 <- tcoord
if (length(tcoord1) == 1) {
tcoord1 <- rep(tcoord1, 2)
}
mypoly <- data.frame(xcoord, ycoord)
colnames(mypoly) <- c('x', 'y')
xcoord1 <- c(min(xcoord), max(xcoord))
ycoord1 <- c(min(ycoord), max(ycoord))
# call xtracto to get data
extract <- rxtracto_3D(dataInfo, parameter = parameter, xcoord = xcoord1,
ycoord = ycoord1, zcoord = zcoord, tcoord = tcoord1,
xName = xName, yName = yName, zName = zName,
verbose = verbose, cache_remove = cache_remove)
# extract <- xtracto_3D(xcoord1,ycoord1,tpos1,dtype, verbose)
if (!is.list(extract)) {
print('error in call to rxtracto_3D')
print('see messages above')
return("rxtracto_3D error")
}
if (length(dim(extract[[1]])) == 2) {
extract[[1]] <- array(extract[[1]], c(dim(extract[[1]]), 1))
}
if (length(dim(extract[[1]])) == 4) {
extract[[1]] <- abind::adrop(extract[[1]], drop = 3)
}
# make sure polygon is closed; if not, close it.
if ((mypoly[length(mypoly[, 1]), 1] != mypoly[1, 1]) |
(mypoly[length(mypoly[, 2]), 2] != mypoly[1, 2])) {
mypoly <- rbind(mypoly, c(mypoly[1, 1], mypoly[1, 2]))
}
#Parse grid lats and longs
x.vals <- matrix(rep(extract$longitude, length(extract$latitude)),
ncol = length(extract$latitude))
y.vals <- sort(rep(extract$latitude, length(extract$longitude)))
y.vals <- matrix(y.vals, nrow = length(extract$latitude),
ncol = length(extract$longitude))
# deal with polygon crossing 180
ew.sign <- sign(mypoly$x)
if (length(unique(ew.sign)) > 1) {
mypoly$x[mypoly$x < 0] <- mypoly$x[mypoly$x < 0] + 360
x.vals[x.vals < 0] <- x.vals[x.vals < 0] + 360
print("Polygon data cross 180. Converted to E longitudes")
}
# create new array masked by polygon
in.poly <- matrix(sp::point.in.polygon(x.vals, y.vals, mypoly$x, mypoly$y),
ncol = length(extract$longitude))
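  # sp::point.in.polygon codes: 0 = strictly outside, 1 = strictly inside,
  # 2 = on an edge, 3 = at a vertex; count edge/vertex hits as inside,
  # and mark outside cells NA so they drop out of the masked product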
in.poly[in.poly > 1] <- 1
in.poly[in.poly == 0] <- NA
dim(in.poly) <- dim(extract[[1]][, , 1])
extract.in.poly <- apply(extract[[1]], 3, "*", in.poly)
dim(extract.in.poly) <- dim(extract[[1]])
extract[[1]] <- extract.in.poly
return(extract)
}
|
974a2cb54ee259a838b54d240d323344d9070c54
|
2be4b043e6cfbfa4cf3869e22a22a127669755f7
|
/benchmark/STARmap_AllenVISp/Seurat/impute_starmap.R
|
dfb1c86f2c7ec3b7dbe459005b08485b2f5fdf0f
|
[
"MIT"
] |
permissive
|
c4chow/SpaGE
|
ffa3a4d7e358796426aa218d67f3b8d4514f38a6
|
bda1036660ab01f8bf44993e52392a37145794a3
|
refs/heads/master
| 2023-02-27T08:35:43.333816
| 2021-02-06T23:44:13
| 2021-02-06T23:44:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
impute_starmap.R
|
setwd("STARmap_AllenVISp/")
library(Seurat)
starmap <- readRDS("data/seurat_objects/20180505_BY3_1kgenes.rds")
allen <- readRDS("data/seurat_objects/allen_brain.rds")
# remove HPC from starmap
class_labels <- read.table(
file = "data/Starmap/visual_1020/20180505_BY3_1kgenes/class_labels.csv",
sep = ",",
header = TRUE,
stringsAsFactors = FALSE
)
class_labels$cellname <- paste0('starmap', rownames(class_labels))
class_labels$ClusterName <- ifelse(is.na(class_labels$ClusterName), 'Other', class_labels$ClusterName)
hpc <- class_labels[class_labels$ClusterName == 'HPC', ]$cellname
accept.cells <- setdiff(colnames(starmap), hpc)
starmap <- starmap[, accept.cells]
starmap@misc$spatial <- starmap@misc$spatial[starmap@misc$spatial$cell %in% accept.cells, ]
# Find transfer anchors between the Allen reference and the STARmap query
i2 <- FindTransferAnchors(
reference = allen,
query = starmap,
features = rownames(starmap),
reduction = 'cca',
reference.assay = 'RNA',
query.assay = 'RNA'
)
refdata <- GetAssayData(
object = allen,
assay = 'RNA',
slot = 'data'
)
imputation <- TransferData(
anchorset = i2,
refdata = refdata,
weight.reduction = 'pca'
)
starmap[['ss2']] <- imputation
saveRDS(starmap, 'data/seurat_objects/20180505_BY3_1kgenes_imputed.rds')
|
397de5ee2c3450ccc7ca3dbf3e9c99527b0f3077
|
aa0e6b5c88fb5351d207aebf04dc96d6870f73d4
|
/functions/func_avalanche.R
|
1a64ab02400b1c2c6423f94fa9ad46a208eaacfd
|
[] |
no_license
|
pohleric/mass_balance_model
|
119d96164389002a8689fb4feef3bedc1b791d1d
|
de1e641bb7639542d36f0a8d262b4833ae24507d
|
refs/heads/main
| 2023-08-05T05:56:30.270151
| 2021-09-22T13:44:06
| 2021-09-22T13:44:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,027
|
r
|
func_avalanche.R
|
###################################################################################################
# Author: Enrico Mattea (@unifr.ch) #
# Description: this program models the distributed mass balance of a glacier at daily #
# resolution, optimizing model parameters towards the best fit with point #
# mass balance measurements. #
# This file implements the mass transport algorithm by Gruber (2007), #
# to compute avalanche redistribution of snow. #
# Input: raster grids of elevation, slope, aspect and initial mass. #
# NOTE: we compute here a (heavily parametrized/simplified) *movable* mass #
# which for each cell is a fraction of the initial mass. #
# NOTE-2: the 4 directions from a cell are indexed as 1 = up, 2 = left, #
# 3 = right, 4 = bottom. We use lists of rasters for the processing, #
# list[[1]] is the raster related to the up direction. #
###################################################################################################
# The deposition_max_multiplier can be used to enable consistent modelling on different input grids
# (e.g., the normalized snow distribution grid, which has cell values close to 1; an actual snow cover grid,
# with cell values of maybe 1000 (kg m-2); and a seasonal sum (if we choose to have a single avalanche over
# a whole year), with cell values of maybe 5000 (kg m-2)).
# This addresses the discussion of Section 4.4 in Gruber (2007).
# The preserve_edges switch makes the function put back the mass_initial value
# at the edges, so that the output of func_avalanche has no NAs.
func_avalanche <- function(run_params, grids_avalanche_cur, mass_initial_values, deposition_max_multiplier = 1.0, preserve_edges = TRUE) {
deposition <- setValues(grids_avalanche_cur$elevation_proc, 0.0)
mass_movable <- mass_initial_values * grids_avalanche_cur$movable_frac
mass_fixed <- mass_initial_values - mass_movable # Mass which stays in place no matter what.
# The snow transport loop is implemented in C++
# for performance (about 5000 times faster than pure R).
# An R version is in file "func_avalanche_gruber.R",
# to use it set run_params$avalanche_routine_cpp to FALSE.
  # NOTE: ifelse() cannot return closures, so select the routine with if/else.
  transport_deposit_mass_chosen <- if (isTRUE(run_params$avalanche_routine_cpp)) transport_deposit_mass else transport_deposit_mass_R
deposition <- setValues(deposition, transport_deposit_mass_chosen(grids_avalanche_cur$elevation_sorted_ids,
run_params$grid_ncol,
getValues(deposition),
getValues(mass_movable),
getValues(grids_avalanche_cur$deposition_max) * deposition_max_multiplier,
getValues(grids_avalanche_cur$draining_fraction[[1]]),
getValues(grids_avalanche_cur$draining_fraction[[2]]),
getValues(grids_avalanche_cur$draining_fraction[[3]]),
getValues(grids_avalanche_cur$draining_fraction[[4]])))
mass_final_values <- getValues(mass_fixed + deposition)
if (preserve_edges) {
ids_na_logi <- is.na(mass_final_values)
mass_final_values[ids_na_logi] <- mass_initial_values[ids_na_logi]
}
return(mass_final_values)
}
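## Illustrative call (hypothetical inputs: run_params, grids_avalanche_cur and
## snow_cover_grid come from the surrounding model setup, not from this file):
# mass_after <- func_avalanche(run_params, grids_avalanche_cur,
#                              getValues(snow_cover_grid),
#                              deposition_max_multiplier = 1.0)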
|
51c262b6cdee78326d5394996ba3e9df4705470b
|
0308926c21defd15a8fc0cd1c6f7c1ce491f0c05
|
/R/dynamicUIStock.R
|
0e00edba89aacd522fd9bc4117778a1cf0d73bb2
|
[] |
no_license
|
Jiramew/stockCn
|
ffd1e84f49a20c181af5c2b55e013c2fa311e848
|
527e31ed587507924250b3258bd9c3877e2b3bc1
|
refs/heads/master
| 2021-01-01T03:49:40.062487
| 2016-05-18T13:16:36
| 2016-05-18T13:16:36
| 59,115,483
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 413
|
r
|
dynamicUIStock.R
|
#' Refresh stock data continuously.
#'
#' @param symbol a character string giving the stock symbol
#' @keywords dynamicUIStock
#' @export
dynamicUIStock <- function(symbol) {
  par(bty = "n", xaxt = "n", yaxt = "n", mar = c(3, 0.1, 3, 0.1))
  while (TRUE) {
stockData <- getData(symbol)
setBasicUI()
setData(stockData)
flush.console()
Sys.sleep(5)
}
}
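## Illustrative call (hypothetical symbol; the format depends on the data source):
# dynamicUIStock("600000")  # redraws every 5 seconds until interrupted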
|
6ed434ae6c3ca1a4f708ffc6915f7565a7802de0
|
9a521fbc31a81d52504520c8f8a9f173fb76e5fb
|
/ESAME/Scripts/gare/query_gara_per_gara.R
|
7f37124ec4adbc8d0ea1e1e89ddf36667a0440db
|
[] |
no_license
|
Vegaz10/Esame-scienza-di-dati-
|
52292adb2151cfb0b9d23f330716430f36a16492
|
b54dd51254867638d801de372f1214d4477c11b6
|
refs/heads/main
| 2023-07-11T17:19:53.506088
| 2021-08-28T14:34:20
| 2021-08-28T14:34:20
| 400,813,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,661
|
r
|
query_gara_per_gara.R
|
# Helper: results for one round of the 2018 season
risultati_round <- function(r) {
  risultati %>%
    inner_join(drivers, by = "driverId") %>%
    inner_join(races, by = "raceId") %>%
    filter(year == 2018, round == r) %>%
    select(driverId, driverRef, position, points)
}
australia      <- risultati_round(1)
bahrain        <- risultati_round(2)
shanghai       <- risultati_round(3)
baku           <- risultati_round(4)
montmelo       <- risultati_round(5)
monaco         <- risultati_round(6)
canada         <- risultati_round(7)
francia        <- risultati_round(8)
redbullring    <- risultati_round(9)
silverstone    <- risultati_round(10)
hockenheimring <- risultati_round(11)
hungaroring    <- risultati_round(12)
spa            <- risultati_round(13)
monza          <- risultati_round(14)
|
d913656e005a1da561a4f83b0641f9008fea4dee
|
0c2aa81635e454ed18a94e8f052214d07eeb6738
|
/R/unpackUserVariable.R
|
b8e3961c0c9946ece80114f4b7f2d86da52f527e
|
[] |
no_license
|
Dectech/DectechR
|
66872e50e1ceb0c86693a266facc44a2eaf82501
|
5bd6e773f0ae0e0952ab2ec74719d9afdc27f23a
|
refs/heads/master
| 2023-06-02T18:08:25.964801
| 2021-10-13T11:13:49
| 2021-10-13T13:10:02
| 92,304,517
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,615
|
r
|
unpackUserVariable.R
|
unpackUserVariable <- function(rawText, maxCols = NULL, emptyReplacement = -99, numeric = FALSE, delimiter = "*"){
#######################################################
### Function to extract data from user defined ###
### ...variables from questback custom questions ###
#######################################################
  # Make sure that the input object is one-dimensional...
if (is.null(dim(rawText)) == FALSE) {
if (min(dim(rawText)) > 1) {
stop("object has more than one column. Please enter a single column of data.")
}
}
# For each item...
thisMatrix <- t(sapply(rawText, FUN = function(x) {
# split the elements...
y <- strsplit(x, delimiter, fixed = TRUE)[[1]]
# if a max number of columns defined, then fill the rest with emptyReplacement...
if (is.null(maxCols) == FALSE){
excess = maxCols - length(y)
y <- c(y, matrix(emptyReplacement, nrow = 1, ncol = excess))
}
# if specified, convert to numeric
if (numeric == TRUE){
y <- as.numeric(y)
}
return(y)
}))
# clean the row names...
rownames(thisMatrix) <- 1:nrow(thisMatrix)
# check that all rows have same number of items
rowLengths <- sapply(thisMatrix, length)
if (length(unique(rowLengths)) > 1) {
# ...if not give a warning about how to fix it...
warning(paste0("Not all rows have same number of items. Longest row has ", max(rowLengths) ," items. Try setting maxCols = ", max(rowLengths)))
}
return(thisMatrix)
}
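## Illustrative example (hypothetical input):
# unpackUserVariable(c("1*2*3", "4*5"), maxCols = 3, numeric = TRUE)
# #> a 2 x 3 numeric matrix; the short row is padded with -99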
|
b174f8b4d50ac423835e63b7c848360816961368
|
f7f6aba4f80acb1d0938ce894bdb694c8d0a296d
|
/plot1.R
|
77bc4f824baa622aa441910742617af40549b29b
|
[] |
no_license
|
andrej-mihalik/ExData_Plotting1
|
47f29496f9ef4f009bb64429bac683db2da6abcc
|
7ad75fad6fcbe1cb66e73114396710b8090e696b
|
refs/heads/master
| 2021-01-09T09:34:59.812150
| 2014-12-06T14:06:12
| 2014-12-06T14:06:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 520
|
r
|
plot1.R
|
consumption <- read.csv('household_power_consumption.txt',sep=";",header=TRUE,quote="",na.strings="?")
consumption.subset <- consumption[consumption$Date == '1/2/2007' | consumption$Date == '2/2/2007',]
consumption.subset$datetime<-strptime(paste(consumption.subset$Date,consumption.subset$Time),'%d/%m/%Y %H:%M:%S')
png('plot1.png',width=480,height=480)
par(mfrow=c(1,1),mar=c(4,4,4,4))
hist(consumption.subset$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
d366fa8b875112b4f4c43ca6761d5c6cb8ef0df6
|
1b494e164bf619370655eea371727371f5aaff2f
|
/man/encode_string.Rd
|
1a06bc707d3999e07917697531be4956595926b8
|
[] |
no_license
|
gmbecker/gRAN
|
d4d1886a7b490de5f9b3bf53f2b2d7976ce6a177
|
d243dc799759d491a3d0fe8eb35295e4848132e3
|
refs/heads/master
| 2023-06-29T19:23:31.405858
| 2023-06-13T15:40:52
| 2023-06-13T15:40:52
| 20,597,782
| 17
| 13
| null | 2023-06-13T15:40:54
| 2014-06-07T16:52:25
|
R
|
UTF-8
|
R
| false
| true
| 411
|
rd
|
encode_string.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utilities.R
\name{encode_string}
\alias{encode_string}
\title{Convert string to numeric representation}
\usage{
encode_string(x)
}
\arguments{
\item{x}{String}
}
\value{
Numeric representation of string
}
\description{
Convert string to numeric representation
}
\note{
This function is not intended for direct use by the end user.
}
|
a3b9f31ec44fb9ffadc710be5b6c1ecaae2f8330
|
53ca633e4eb38a591ed13fb3962c9ffc63791085
|
/R/predictMarkov.R
|
ea7a6df6e8bd66ef1f199f7ea38c1dad8f6aa483
|
[] |
no_license
|
Phil1337/predictMarkov
|
8875ca30a49b9c896983b3e22e8e3fc30f80b67c
|
43e5f7689238f6677c247c7182ea3e3b014c9cb7
|
refs/heads/master
| 2021-09-05T12:03:13.979683
| 2018-01-27T08:18:30
| 2018-01-27T08:18:30
| 112,086,339
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,890
|
r
|
predictMarkov.R
|
#' Predict the next page from a sequence of pageviews with a Markov model
#'
#' @param pageview_names A character vector of pageview names
#'
#' @return A list with the predicted page and its probability
#' @import clickstream
#' @export
predictMarkov <- function(pageview_names) {
## filter pageview_names
# remove string after and including ?
pageview_names <- sub("\\?.*","", pageview_names)
#remove Language-Prefixes ("/de-CH", "fr-CH", "en-US")
pageview_names <- sub(".*fr-CH/","", pageview_names)
pageview_names <- sub(".*de-CH/","", pageview_names)
pageview_names <- sub(".*de-ch/","", pageview_names)
pageview_names <- sub(".*en-US/","", pageview_names)
  # map the language-specific start pages ("/fr-CH.aspx", "/de-CH.aspx", "/en-US.aspx") to "/"
pageview_names <- sub("/fr-CH.aspx","/", pageview_names)
pageview_names <- sub("/de-CH.aspx","/", pageview_names)
pageview_names <- sub("/en-US.aspx","/", pageview_names)
pageview_names[grepl("Ticket-Shop.aspx", pageview_names) == TRUE] <- "/Ticket-Shop.aspx"
## end filter
## mc loaded on package load
states <- invisible(clickstream::states(model))
# check if pageview_name exists in model-states
pv_n <- pageview_names[pageview_names %in% states]
startPattern <- new("Pattern", sequence = pv_n)
  prediction <- predict(model, startPattern, dist = 1)
  list(page = prediction@sequence,
       probability = prediction@probability)
}
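## Illustrative call (assumes the package's `model` clickstream object is loaded;
## the page paths below are hypothetical):
# predictMarkov(c("/de-CH/Ticket-Shop.aspx", "/"))
# #> list(page = <next page>, probability = <numeric>)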
#' Predict next page model 2
#'
#' @param current_url the url to predict from
#' @export
#' @import markovchain
predictNextPage <- function(current_url){
current_url <- current_url[!grepl("undefined", current_url)]
message("Predicting next page for ", current_url)
markovList <- mcfL$estimate
out <- try(predict(markovList, newdata = current_url), silent = TRUE)
if(inherits(out, "try-error")){
## try just with last page
ll <- length(current_url)
retry_urls <- current_url[ll]
out <- try(predict(markovList, newdata = retry_urls), silent = TRUE)
if(inherits(out, "try-error")){
message("No prediction available")
return(NULL)
}
}
out
}
#' Replace a string with substitutions
#'
#' @param string_vector vector of (URL) strings
#' @param findme Regex or string to find to replace
#' @param replace What to replace with. If NULL, uses findme string
#' @param fixed IF FALSE, findme is regex, if not a fixed match
#'
#' @return string_vector with replacements if required
#' @export
cleanURL <- function(string_vector, findme, replace=NULL, fixed=TRUE){
if(is.null(replace)) {
replace <- findme
} else {
replace <- replace
}
if(fixed) findme <- stringr::fixed(findme)
string_vector[stringr::str_detect(string_vector, findme)] <- replace
string_vector
}
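## Illustrative example:
# cleanURL(c("/a/search/result?q=1", "/b"), "search/result", "search_result")
# #> [1] "search_result" "/b"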
#' Specific client aggregation
#'
#' @param string_vector URLs to clean
#'
#' @return The string_vector with substitutions made
#' @export
aggregateVD <- function(string_vector){
string_vector <- as.character(string_vector)
# string_vector[str_detect(string_vector[,pagePath_name], "[0-9]$"),"aggregation"] <- "holiday_listing"
string_vector <- cleanURL(string_vector, "[0-9]$", replace = "holiday_listing", fixed=FALSE)
string_vector <- cleanURL(string_vector, "search/result", replace = "search_result")
string_vector <- cleanURL(string_vector, "?", "site_search")
string_vector <- cleanURL(string_vector, "blog")
string_vector <- cleanURL(string_vector, "employees-list", replace = "employees")
string_vector <- cleanURL(string_vector, "booking")
string_vector <- cleanURL(string_vector, "geography")
string_vector <- cleanURL(string_vector, "interest")
string_vector <- cleanURL(string_vector, "dragoer")
string_vector <- cleanURL(string_vector, "aktiviteter")
string_vector <- cleanURL(string_vector, "product")
string_vector <- cleanURL(string_vector, "feriecentre")
string_vector <- cleanURL(string_vector, "gastronomi")
string_vector <- cleanURL(string_vector, "aktiviteter")
string_vector <- cleanURL(string_vector, "natur")
string_vector <- cleanURL(string_vector, "fur")
string_vector <- cleanURL(string_vector, "begivenheder")
string_vector <- cleanURL(string_vector, "season")
string_vector <- cleanURL(string_vector, "sydjylland")
string_vector <- cleanURL(string_vector, "vestjylland")
string_vector <- cleanURL(string_vector, "sydsjaelland")
string_vector <- cleanURL(string_vector, "publikationer")
string_vector <- cleanURL(string_vector, "oestjylland")
string_vector <- cleanURL(string_vector, "nordjylland")
string_vector <- cleanURL(string_vector, "koebenhavn")
string_vector <- cleanURL(string_vector, "tyskland")
string_vector <- cleanURL(string_vector, "sverige")
string_vector <- cleanURL(string_vector, "sport")
string_vector <- cleanURL(string_vector, "norge")
string_vector <- cleanURL(string_vector, "historie")
string_vector <- cleanURL(string_vector, "england")
string_vector
}
|
e219de2e7e2e44c8f031222f72b5ed3e77832113
|
030429449293c7e715f62508e469d839e6460937
|
/ui.R
|
0bb3e1004a0d835cf22778a8f4e950a305aeee79
|
[] |
no_license
|
la11soccer/RShinyPlasmidAnnotations
|
ba497623a9edfab616a198636520a59ade55edbd
|
adc38656e7752c089dd2ccd389796939baea8e94
|
refs/heads/main
| 2023-07-15T15:45:56.679993
| 2021-08-23T02:50:48
| 2021-08-23T02:50:48
| 307,770,292
| 1
| 0
| null | 2021-05-10T17:21:53
| 2020-10-27T16:59:15
|
R
|
UTF-8
|
R
| false
| false
| 1,161
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for the plasmid annotation application
shinyUI(fluidPage(
#downloadUI = FALSE,
# Application title
titlePanel("Plasmid Gene Annotation"),
# Sidebar with file upload and annotation controls
sidebarLayout(
sidebarPanel(
      # function for file upload
#checkboxInput("text1", "file1"),
#conditionalPanel(
#condition = "input.file1 == true",
fileInput("file", label = "Choose file", accept = c(".fasta", ".fsa")),
#),
textInput("outputFile", label = "Enter folder you would like annotations to go to", value = Sys.Date(), placeholder = "prokkaOutput"),
actionButton("button","Annotate"),
downloadButton("downloadData", "Download")
#)
),
  # Main panel shows the annotation progress
mainPanel(
textOutput("percentage")
#checkboxInput("text1", "file1")
)
)
))
|
871509ee859c6a102791da937d6cdeac09224692
|
5ced19b14ae8492943461060264ed99808cc57b9
|
/tests/testthat/test-docxtractr.R
|
f02caadbc2ea45bf5f7994f23bfeb50e70dd8de9
|
[] |
no_license
|
markdly/docxtractr
|
b59ee9303602770579b4e372bd796cd282de769d
|
d632e81dc83e82d1814dc769a15cd4cb4ce2b529
|
refs/heads/master
| 2021-08-24T03:35:56.807285
| 2017-12-06T19:51:20
| 2017-12-06T19:51:20
| 112,971,996
| 0
| 0
| null | 2017-12-03T23:14:29
| 2017-12-03T23:14:29
| null |
UTF-8
|
R
| false
| false
| 750
|
r
|
test-docxtractr.R
|
context("docx extraction works")
test_that("we can do something", {
doc <- read_docx(system.file("examples/data.docx", package="docxtractr"))
expect_that(doc, is_a("docx"))
expect_that(docx_tbl_count(doc), equals(1))
expect_that(docx_extract_tbl(doc, 1), is_a("tbl"))
complx <- read_docx(system.file("examples/complex.docx", package="docxtractr"))
expect_that(docx_tbl_count(complx), equals(5))
tmp_3 <- docx_extract_tbl(complx, 3)
tmp_4 <- docx_extract_tbl(complx, 4)
tmp_5 <- docx_extract_tbl(complx, 5)
expect_that(tmp_3, is_a("tbl"))
expect_that(tmp_4, is_a("tbl"))
expect_that(tmp_5, is_a("tbl"))
expect_that(nrow(tmp_3), equals(6))
expect_that(ncol(tmp_4), equals(3))
expect_that(nrow(tmp_5), equals(6))
})
|
5cb9b1067026438516b6263a6f1fdd34e358542c
|
9bd25546a073b8fe0fb2e8070990f6d554c95d05
|
/neural-networks.R
|
1b2ef484f0ba2b02c89881f49f31eb0b740afed1
|
[] |
no_license
|
AdithyaSan/optic-recognition
|
63a6334658f462a0cb5173392e754f1d98a12801
|
3e229a898d450f1a4b27c335adb4bf9a80fbf159
|
refs/heads/master
| 2021-01-20T15:58:21.061990
| 2017-05-10T17:41:42
| 2017-05-10T17:41:42
| 90,805,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,574
|
r
|
neural-networks.R
|
#Loading the required libraries
library(dplyr)
library(neuralnet)
library(nnet)
#Reading the files train and test data
digitsdata <- read.csv("Train data location", header=FALSE) %>% as.data.frame()
testdata <- read.csv("Test data location", header=FALSE) %>% as.data.frame()
#View the data imported
View(digitsdata)
#Vectorize the output label into 10 different label vectors
train <- cbind(digitsdata[, 1:64], class.ind(as.factor(digitsdata$V65)))
names(train) <- c(names(digitsdata)[1:64],"l0","l1","l2","l3","l4","l5","l6","l7","l8","l9")
test <- cbind(testdata[, 1:64], class.ind(as.factor(testdata$V65)))
names(test) <- c(names(testdata)[1:64],"l0","l1","l2","l3","l4","l5","l6","l7","l8","l9")
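# class.ind (from nnet) one-hot encodes the label: each digit becomes its own
# 0/1 indicator column, e.g. class.ind(factor(c(0, 2, 1))) has columns 0, 1, 2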
#View the processed train and test data
View(train)
View(test)
#Construct the formula for the neuralnet
n <- names(train)
formula <- as.formula(paste("l0+ l1 + l2 + l3 + l4 + l5 + l6 + l7 + l8 + l9 ~",
paste(n[!n %in% c("l0","l1","l2","l3","l4","l5","l6","l7","l8","l9")], collapse = " + ")))
formula
#MODEL 1 ------------------------------------ 3 HIDDEN LAYERS --------------------------
#TRAIN MODEL 1
nn1 <- neuralnet(formula,
data = train,
hidden = c(32, 16, 8),
act.fct = "logistic",
linear.output = FALSE,
lifesign = "minimal")
plot(nn1)
predict1 <- compute(nn1, test[,1:64])
predicted1result <- predict1$net.result
#Accuracy of the model - Model 1
original_values <- max.col(test[, 65:74])
predicted1extracted <- max.col(predicted1result)
mean(predicted1extracted == original_values)
#MODEL 2 ------------------------------------ 2 HIDDEN LAYERS --------------------------
#TRAIN MODEL 2
nn2 <- neuralnet(formula,
data = train,
hidden = c(32, 16),
act.fct = "logistic",
linear.output = FALSE,
lifesign = "minimal")
predict2 <- compute(nn2, test[,1:64])
predicted2result <- predict2$net.result
original_values <- max.col(test[, 65:74])
predicted2extracted <- max.col(predicted2result)
mean(predicted2extracted == original_values)
#MODEL 3 ------------------------------------ 1 HIDDEN LAYER --------------------------
#TRAIN MODEL 3
nn3 <- neuralnet(formula,
data = train,
hidden = c(32),
act.fct = "logistic",
linear.output = FALSE,
lifesign = "minimal")
predict3 <- compute(nn3, test[,1:64])
predicted3result <- predict3$net.result
original_values <- max.col(test[, 65:74])
predicted3extracted <- max.col(predicted3result)
mean(predicted3extracted == original_values)
#VERIFICATION FOR MODEL 2
# Seed for reproducibility purposes
set.seed(500)
# t = 10 repetitions of the validation loop
t <- 10
outs <- NULL
# Train-test split proportions
proportion <- 0.995 # Set to 0.995 for LOOCV
# Crossvalidate, go!
for(i in 1:t)
{
index <- sample(1:nrow(train), round(proportion*nrow(train)))
train_v <- train[index, ]
test_v <- train[-index, ]
nn_v <- neuralnet(formula,
data = train_v,
hidden = c(32, 16),
act.fct = "logistic",
linear.output = FALSE)
# Compute predictions
predictednn <- compute(nn_v, test_v[, 1:64])
# Extracting results
predictedResults <- predictednn$net.result
# Accuracy
original_values <- max.col(test_v[, 65:74])
predictedResultExtracted <- max.col(predictedResults)
outs[i] <- mean(predictedResultExtracted == original_values)
}
mean(outs)
|
32376bf8c6a7af8d41549b6242d9b5720263c56a
|
54eb8c2e10ffa500a111c791120fc81aa4bed2db
|
/R/app_global.R
|
107d18041a421e7f1286696dc6dfde10063d985b
|
[
"MIT"
] |
permissive
|
nyujwc331/golemShinyApp
|
f6ac69a435ed57597bff01692bc1a58fdc14c177
|
e14816cfa8181c5618b2c62794811bbc3ee0fda5
|
refs/heads/master
| 2020-12-27T21:20:32.861223
| 2020-02-05T00:57:35
| 2020-02-05T00:57:35
| 238,061,390
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
app_global.R
|
plotUI <- function(id, label = "Select a Species to filter the plot") {
ns <- NS(id)
tagList(
selectInput(ns("dropDown"), label = label,
choices = c(iris %>% distinct (Species)),
selected = NULL),
plotOutput(ns('plotz'))
)
}
plotz <- function(input, output, session){
df <- reactive({
iris %>% filter (Species == input$dropDown)
})
output$plotz <-
renderPlot({
ggplot(df(), aes(x=Sepal.Length, y= Sepal.Width)) +
geom_point()
})
}
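## Illustrative wiring (assumes shiny, dplyr and ggplot2 are attached):
# ui <- fluidPage(plotUI("iris1"))
# server <- function(input, output, session) callModule(plotz, "iris1")
# shinyApp(ui, server)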
|
ff76207944b5bc477d9c3ac857934516bac6b4a0
|
31d82e5179c51b906069a79dc8e89f6a35925d2b
|
/Codes/Twitter Analysis.R
|
7e4508522fa5c2c32e9569c0ebd97258dc776365
|
[] |
no_license
|
DSO545projectR1/DSO_545_MyLA311
|
0156f085c5f9760932cb2646d1c2aec9caf0be51
|
2bd9e7ab39a58a08eb007153ce823512f5912d08
|
refs/heads/master
| 2020-06-12T15:11:24.407302
| 2016-12-07T22:58:12
| 2016-12-07T22:58:12
| 75,809,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,389
|
r
|
Twitter Analysis.R
|
library(twitteR)
install.packages("twitteR")
install.packages("tm")
library(tm)
install.packages("syuzhet")
library(syuzhet)
library(ggplot2)
library(dplyr)
library(lubridate)
### Authenticate our app
# Get these details from application you created at app.twitter.com
api_key = "sie16JKQ2W4cee5NtZg31dDI9"
api_secret = "mebdb9GUQGosu0yEiImZMD13L9kM1nKpAuCdts3YW8JXgEU3as"
access_token = "798241688791576576-BJC96c0gNHAm9TI5BBmhzkKHcE04ZaQ"
access_token_secret = "jDqv3CrJpj8F79DbiZSXv4TrGFX9mP9uAW0XWinlTyAhV"
## Setting up connection
setup_twitter_oauth(api_key,api_secret,access_token, access_token_secret)
LA311Tweet = searchTwitter("MyLA311",n = 100, since = '2010-01-01')
myLA=userTimeline('myLA311',n=3600)
myLA311.df=twListToDF(LA311Tweet)
tweets = myLA311.df
### \w represents a word character
### \w+ represents one character or more
### we need the escape \ in order to include \w in the pattern
nohandles = str_replace_all(tweets$text, pattern = "@\\w+",
replacement = "")
# deleting the usernames and saving tweets in nohandles
# nohandles will be a vector
wordCorpus <- Corpus(VectorSource(nohandles)) %>%
tm_map(removePunctuation) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords, stopwords("english")) %>%
tm_map(stripWhitespace)
install.packages("wordcloud")
library(wordcloud)
## get the colors
pal = brewer.pal(9, "YlGnBu")
?brewer.pal
display.brewer.pal(9, "YlGnBu")
### Drop the first 4 colors from the palette (not easy to see)
pal = pal[-c(1:4)]
wordcloud(words = wordCorpus, colors = pal)
## Sentiment Analysis
analytics_sentiment = get_nrc_sentiment(tweets$text)
tweets = cbind(tweets, analytics_sentiment)
sentimentTotals = data.frame(colSums(analytics_sentiment))
## clean the dataframe
sentimentTotals$Sentiments = rownames(sentimentTotals)
colnames(sentimentTotals) = c("Count", "Sentiment")
rownames(sentimentTotals) = NULL
ggplot(sentimentTotals, aes(reorder(Sentiment,Count),Count)) +
geom_bar(stat = "identity", aes(fill = Sentiment)) +
theme(legend.position = "none") +
xlab("Sentiment") +
ylab("Total Count")
### Tweets over time
tweets$created = ymd_hms(tweets$created)
ggplot(tweets, aes(x = month(created,label = TRUE))) + geom_bar()
ggplot(tweets, aes(x = wday(created,label = TRUE))) + geom_bar()
ggplot(tweets, aes(x = factor(hour(created)))) + geom_bar()
|
6bb815f7ce613aaf6d9de1a6770141f04c1128a6
|
8c1620266dd6153048f5dec3079fc71836eea681
|
/00_project_settings.R
|
b1539a3a912f636ee683a68042eceaba62714543
|
[] |
no_license
|
NutriNet/NN_Data_Review_2019
|
cd2bac68cda4219527b3f58e3b287632a7f90870
|
a688afdf0f466367e5dc4ddd7a42414d972878a9
|
refs/heads/master
| 2020-08-29T17:05:18.268834
| 2019-10-28T17:17:02
| 2019-10-28T17:17:02
| 218,104,673
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,073
|
r
|
00_project_settings.R
|
# Load the main packages
library(readxl)
library(tidyverse)
library(lubridate)
library(ggforce)
library(googlesheets)
library(cowplot)
# download from the google drive and save in the site folder
# this function checks if you have an updated version of Google Sheet before downloading one
DownloadGoogleSheet <-
function(GOOGLESHEET, TYPE = 'Data') {
if (TYPE %in% c('Data', 'Metadata')) {
if (TYPE == "Data") {
pass <- 'Original_Data/key_data.txt'
subfolder <- word(GOOGLESHEET)
folder <- paste0('Original_Data/', subfolder, '/')
} else {
pass <- 'Original_Data/Metadata/key_metadata.txt'
folder <- 'Original_Data/Metadata/'
}
suppressMessages(read_csv(pass)) -> metadata
metadata %>%
filter(sheet_title == GOOGLESHEET) -> sheet_metadata
if (nrow(sheet_metadata) == 1) {
sheet_key <- gs_key(sheet_metadata$sheet_key)
if (sheet_key$updated == sheet_metadata$updated) {
print('You have the latest version')
} else {
# downloads file and assigns download date
sheet_key %>%
gs_download(
to = paste0(folder, GOOGLESHEET, '_', Sys.Date(), '.xlsx'),
overwrite = TRUE
)
# updates file that stores info about version dates
new_update <- sheet_key$updated
metadata %>%
mutate(
updated = ifelse(sheet_title == GOOGLESHEET, new_update, updated),
updated = as_datetime(updated)
) %>%
write_csv(pass)
}
} else {
print('No matching Google Sheet found')
}
} else {
print('Invalid entry for TYPE')
}
}
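## Illustrative call: fetch the latest copy of one site's water data
# DownloadGoogleSheet('Water Data - DOUGLAS', TYPE = 'Data')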
# Download Google Sheets to update data
UpdateData <- function(DATA_TYPE) {
SITES <- c("DOUGLAS", "DUDLEY", "KELLEY", "MUDS2",
"NWRF", "SUBSURF", "WQFS", "ONT_4R", "TRO")
DATA_TYPES <- c("Crop", "Soil", "Water", "GHG", "Weather")
if (DATA_TYPE %in% DATA_TYPES) {
for (i in seq_along(SITES)) {
sheet <- paste0(DATA_TYPE, ' Data - ', SITES[i])
print(paste0('Looking for ', sheet, '...'))
DownloadGoogleSheet(GOOGLESHEET = sheet)
}
} else if (DATA_TYPE %in% c('All', 'ALL', 'all')) {
for (j in DATA_TYPES) {
for (i in seq_along(SITES)) {
sheet <- paste0(j, ' Data - ', SITES[i])
print(paste0('Looking for ', sheet, '...'))
DownloadGoogleSheet(GOOGLESHEET = sheet)
}
}
print('good boy')
} else {
print('Please enter one of the options: Crop, Soil, Water, GHG, Weather, All')
}
}
# Download Google Sheets with Metadata of interest for the FIRST time
GetMetadata <-
function(OVERWRITE = FALSE) {
# get keys and names of Google Sheets of interest
gs_ls("^[1-3]. ") %>%
filter(
sheet_title %in% c(
"1. Site Information",
"2. One-Time Plot Data",
"3. Yearly Treatment Data"
)
) -> sheet_info
keys <- sheet_info$sheet_key
titles <- sheet_info$sheet_title
for (i in seq_along(keys)) {
pass <- paste0('Original_Data/Metadata/', titles[i], '.xlsx')
# check if files exist and overwrite is allowed
if (OVERWRITE == FALSE && file.exists(pass)) {
text <- paste(titles[i], '- file exists and overwrite is FALSE')
print(text)
} else {
      text <- paste(titles[i], '- file is downloading ...')
print(text)
gs_key(keys[i]) %>%
gs_download(
to = pass,
overwrite = OVERWRITE
)
}
}
}
# Download Google Sheets with Research Data of interest for the FIRST time
GetResearchData <-
function(OVERWRITE = FALSE) {
# Creat list of site IDs
read_excel('Original_Data/Metadata/1. Site Information.xlsx',
range = cell_cols('A')) %>%
filter(!is.na(SiteID)) %>%
pull() -> SITES
# get keys and names of Google Sheets of interest
gs_ls(" Data - ") %>%
filter(str_detect(sheet_title, paste(SITES, collapse = '|'))) -> sheet_info
keys <- sheet_info$sheet_key
titles <- sheet_info$sheet_title
for (i in seq_along(keys)) {
subfolder <- word(titles[i])
pass <- paste0('Original_Data/', subfolder, '/', titles[i], '_', Sys.Date(), '.xlsx')
# check if files exist and overwrite is allowed
if (OVERWRITE == FALSE && file.exists(pass)) {
text <- paste(titles[i], '- file exists and overwrite is FALSE')
print(text)
} else {
      text <- paste(titles[i], '- file is downloading ...')
print(text)
gs_key(keys[i]) %>%
gs_download(
to = pass,
overwrite = OVERWRITE
)
}
}
}
# read local copy of Google Sheets
ReadExcelSheets <-
function(PATH, GUESS = 10000){
sheets <- excel_sheets(PATH)
dl <- vector('list', length = length(sheets))
for (i in seq_along(sheets)){
column_names <- read_excel(path = PATH, sheet = i, n_max = 2) %>%
names()
dl[[i]] <- read_excel(path = PATH,
sheet = i,
col_names = column_names,
cell_limits(c(3, 1), c(NA, length(column_names))),
guess_max = GUESS,
na = c('n/a', 'NA', 'did not collect')) %>%
mutate(sheet = sheets[i])
}
return(dl)
}
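## Illustrative call (hypothetical path following the naming scheme used below):
# dl <- ReadExcelSheets('Original_Data/Water/Water Data - DOUGLAS_2019-10-01.xlsx')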
# reads data from the last version of files in a folder
ReadLatestData <-
function(DATA_TYPE) {
if (DATA_TYPE %in% c("Crop", "Soil", "Water", "GHG", "Weather")) {
folder <- paste0('Original_Data/', DATA_TYPE, '/')
# get list of all files in the folder
files_all <- dir(folder)
# get list of the latest versions of files
files_all %>%
tibble('file_name' = .) %>%
separate(file_name,
into = c('type', 'file'),
sep = ' - ',
extra = 'merge',
remove = FALSE) %>%
mutate(file = str_remove(file, '.xlsx'),
date = str_sub(file, -10, -1) %>% ymd()) %>%
separate(file, into = 'siteid', sep = "_20", extra = 'drop') %>%
group_by(siteid) %>%
filter(date == max(date)) %>%
ungroup() %>%
pull(file_name) ->
files_latest
# aggregate all data into one list object
dl <- vector('list', length = length(files_latest))
for (i in seq_along(dl)) {
path <- paste0(folder, files_latest[i])
dl[i] <- ReadExcelSheets(PATH = path)
}
return(dl)
} else {
ANSWER <- readline('>>> Enter data type to read (Crop, Soil, Water, GHG, Weather): ')
if (ANSWER %in% c("Crop", "Soil", "Water", "GHG", "Weather")) {
ReadLatestData(DATA_TYPE = ANSWER)
} else {
writeLines('You have entered incorrect DATA TYPE.\nTry again!')
}
}
}
# Combining Soil data
ReadSoilData <-
function(){
ReadLatestData('Soil') %>%
# make sure that SiteID is entered in all rows
map(~ .x %>% fill(SiteID)) %>%
# define type of Key variables & gather all non-key variables
map(~ .x %>%
mutate(SiteID = as.character(SiteID),
PlotID = as.character(PlotID),
SoilSampleDate = as.Date(SoilSampleDate),
Laboratory = as.character(Laboratory),
SoilDepth = as.character(SoilDepth),
Subsample = as.character(Subsample)) %>%
gather(key, value, -(SiteID:Subsample))) %>%
bind_rows() %>%
# added in 2019-04-24 to resolve duplicate problems when spreading data
filter(!is.na(SoilDepth)) %>%
spread(key, value) %>%
mutate(BulkDensity = as.numeric(BulkDensity),
PercentSand = as.numeric(PercentSand),
PercentSilt = as.numeric(PercentSilt),
PercentClay = as.numeric(PercentClay),
SoilTexture = as.character(SoilTexture),
SoilpH = as.numeric(SoilpH),
FallSOC = as.numeric(FallSOC),
FallSoilTN = as.numeric(FallSoilTN),
FallSoilNO3 = as.numeric(FallSoilNO3),
FallSoilNH4 = as.numeric(FallSoilNH4),
FallSOC_calc = as.numeric(FallSOC_calc),
FallSoilTN_calc = as.numeric(FallSoilTN_calc),
FallSoilNO3_calc = as.numeric(FallSoilNO3_calc),
FallSoilNH4_calc = as.numeric(FallSoilNH4_calc),
SpringSoilNO3 = as.numeric(SpringSoilNO3),
SpringSoilNH4 = as.numeric(SpringSoilNH4), # NEW
SoilCa = as.numeric(SoilCa),
SoilMg = as.numeric(SoilMg),
SoilP = as.numeric(SoilP),
SoilK = as.numeric(SoilK),
CommentsSoilSamples = as.character(CommentsSoilSamples)) %>%
select(SiteID:Subsample,
BulkDensity,
PercentSand,
PercentSilt,
PercentClay,
SoilTexture,
SoilpH,
FallSOC,
FallSoilTN,
FallSoilNO3,
FallSoilNH4,
FallSOC_calc,
FallSoilTN_calc,
FallSoilNO3_calc,
FallSoilNH4_calc,
SpringSoilNO3,
SpringSoilNH4, # NEW
SoilCa,
SoilMg,
SoilP,
SoilK,
everything()) %>%
# remove the ATEMP before outputting
filter(SiteID != 'ATEMP') -> df
return(df)
}
# Combining Crop data
ReadCropData <-
function() {
ReadLatestData('Crop') %>%
# make sure that SiteID is entered in all rows
map(~ .x %>% fill(SiteID)) %>%
# define type of Key variables & gather all non-key variables
map(~ .x %>%
mutate(SiteID = as.character(SiteID),
PlotID = as.character(PlotID),
Subsample = as.character(Subsample),
ExperimentYear = as.numeric(ExperimentYear),
Crop = as.character(Crop)) %>%
gather(key, value, -(SiteID:Crop))) %>%
bind_rows() %>%
spread(key, value) %>%
mutate(CoverCropBiomass = as.numeric(CoverCropBiomass),
CoverCropStubbleHeight = as.numeric(CoverCropStubbleHeight),
CoverCropBiomassRemoved = as.numeric(CoverCropBiomassRemoved),
CoverCropN = as.numeric(CoverCropN),
CoverCropP = as.numeric(CoverCropP),
CoverCropK = as.numeric(CoverCropK),
EmergenceDate = as.Date(as_datetime(as.numeric(EmergenceDate))),
CornLSNTBiomass = as.numeric(CornLSNTBiomass),
CornLSNT_N = as.numeric(CornLSNT_N),
SilkingDate = as.Date(as_datetime(as.numeric(SilkingDate))),
SilkingEarLeafN = as.numeric(SilkingEarLeafN),
SilkingEarLeafP = as.numeric(SilkingEarLeafP),
SilkingEarLeafK = as.numeric(SilkingEarLeafK),
SPAD = as.numeric(SPAD),
SoybeanR6Biomass = as.numeric(SoybeanR6Biomass),
SoybeanR6N = as.numeric(SoybeanR6N),
SoybeanR6P = as.numeric(SoybeanR6P),
SoybeanR6K = as.numeric(SoybeanR6K),
`CornR6Biomass_non-grain` = as.numeric(`CornR6Biomass_non-grain`),
`CornPlantN_non-grain` = as.numeric(`CornPlantN_non-grain`),
`CornPlantP_non-grain` = as.numeric(`CornPlantP_non-grain`),
`CornPlantK_non-grain` = as.numeric(`CornPlantK_non-grain`),
CornGrainYield_Subsample = as.numeric(CornGrainYield_Subsample),
StalkNitrate = as.numeric(StalkNitrate),
SeasonEndPopulation = as.numeric(SeasonEndPopulation),
CropYield = as.numeric(CropYield),
MoistureAtHarvest = as.numeric(MoistureAtHarvest),
CornGrainN_Subsample = as.numeric(CornGrainN_Subsample),
CornGrainP_Subsample = as.numeric(CornGrainP_Subsample),
CornGrainK_Subsample = as.numeric(CornGrainK_Subsample),
CornGrainN_Combine = as.numeric(CornGrainN_Combine),
CornGrainP_Combine = as.numeric(CornGrainP_Combine),
CornGrainK_Combine = as.numeric(CornGrainK_Combine),
# SoybeanR8Biomass = as.numeric(SoybeanR8Biomass),
SoybeanGrainN_Subsample = as.numeric(SoybeanGrainN_Subsample),
SoybeanGrainP_Subsample = as.numeric(SoybeanGrainP_Subsample),
SoybeanGrainK_Subsample = as.numeric(SoybeanGrainK_Subsample),
SoybeanGrainN_Combine = as.numeric(SoybeanGrainN_Combine),
SoybeanGrainP_Combine = as.numeric(SoybeanGrainP_Combine),
SoybeanGrainK_Combine = as.numeric(SoybeanGrainK_Combine),
StoverRemoved = as.numeric(StoverRemoved)) %>%
select(SiteID:Crop,
CoverCropBiomass,
CoverCropStubbleHeight,
CoverCropBiomassRemoved,
CoverCropN,
CoverCropP,
CoverCropK,
EmergenceDate,
CornLSNTBiomass,
CornLSNT_N,
SilkingDate,
SilkingEarLeafN,
SilkingEarLeafP,
SilkingEarLeafK,
SPAD,
SoybeanR6Biomass,
SoybeanR6N,
SoybeanR6P,
SoybeanR6K,
CornR6Biomass_NonGrain = `CornR6Biomass_non-grain`,
CornPlantN_NonGrain = `CornPlantN_non-grain`,
CornPlantP_NonGrain = `CornPlantP_non-grain`,
CornPlantK_NonGrain = `CornPlantK_non-grain`,
CornGrainYield_Subsample,
StalkNitrate,
SeasonEndPopulation,
CropYield,
MoistureAtHarvest,
CornGrainN_Subsample,
CornGrainP_Subsample,
CornGrainK_Subsample,
CornGrainN_Combine,
CornGrainP_Combine,
CornGrainK_Combine,
# SoybeanR8Biomass,
SoybeanGrainN_Subsample,
SoybeanGrainP_Subsample,
SoybeanGrainK_Subsample,
SoybeanGrainN_Combine,
SoybeanGrainP_Combine,
SoybeanGrainK_Combine,
everything()) %>%
# remove the ATEMP before outputting
filter(SiteID != 'ATEMP') -> df
return(df)
}
# Combining Water data
ReadWaterData <-
function() {
ReadLatestData('Water') %>%
# make sure that SiteID is entered in all rows
map(~ .x %>% fill(SiteID)) %>%
# make sure that there are no empty dates (they create problems with spread)
map(~ .x %>% filter(!is.na(MeasurementDate))) %>%
# define type of Key variables & gather all non-key variables
map(~ .x %>%
mutate(SiteID = as.character(SiteID),
PlotID = as.character(PlotID),
MeasurementDate = as.Date(MeasurementDate)) %>%
gather(key, value, -(SiteID:MeasurementDate))) %>%
bind_rows() %>%
spread(key, value) %>%
mutate(DailyDrainage = as.numeric(DailyDrainage),
CommentsDailyDrainage = as.character(CommentsDailyDrainage),
DailyRunoff = as.numeric(DailyRunoff),
DrainageTotalNConc = as.numeric(DrainageTotalNConc),
CommentsTotalN = as.character(CommentsTotalN),
DrainageNO3Conc = as.numeric(DrainageNO3Conc),
CommentsNO3 = as.character(CommentsNO3),
DrainageNH3Conc = as.numeric(DrainageNH3Conc),
CommentsNH3 = as.character(CommentsNH3),
DrainageTotalPConc = as.numeric(DrainageTotalPConc),
CommentsTotalP = as.character(CommentsTotalP),
DrainageReactivePConc = as.numeric(DrainageReactivePConc),
CommentsReactiveP = as.character(CommentsReactiveP),
DrainageTotalKConc = as.numeric(DrainageTotalKConc),
CommentsTotalK = as.character(CommentsTotalK),
DrainageDissolvedKConc = as.numeric(DrainageDissolvedKConc),
CommentsDissolvedK = as.character(CommentsDissolvedK),
RunoffTotalNConc = as.numeric(RunoffTotalNConc),
RunoffNO3Conc = as.numeric(RunoffNO3Conc),
RunoffTotalPConc = as.numeric(RunoffTotalPConc),
RunoffReactivePConc = as.numeric(RunoffReactivePConc)) %>%
select(SiteID:MeasurementDate,
DailyDrainage,
DrainageTotalNConc,
DrainageNO3Conc,
DrainageNH3Conc,
DrainageTotalPConc,
DrainageReactivePConc,
DrainageTotalKConc,
DrainageDissolvedKConc,
CommentsDailyDrainage,
CommentsTotalN,
CommentsNO3,
CommentsNH3,
CommentsTotalP,
CommentsReactiveP,
CommentsTotalK,
CommentsDissolvedK,
DailyRunoff,
RunoffTotalNConc,
RunoffNO3Conc,
RunoffTotalPConc,
RunoffReactivePConc,
everything()) %>%
# remove the ATEMP before outputting
filter(SiteID != 'ATEMP') -> df
return(df)
}
# Combining GHG data
ReadGHGData <-
function() {
ReadLatestData('GHG') %>%
# make sure that SiteID is entered in all rows
map(~ .x %>% fill(SiteID)) %>%
# define type of Key variables & gather all non-key variables
map(~ .x %>%
mutate(SiteID = as.character(SiteID),
PlotID = as.character(PlotID),
MeasurementDate = as.Date(MeasurementDate),
TimeOfSample = as_datetime(TimeOfSample)) %>%
gather(key, value, -(SiteID:TimeOfSample))) %>%
bind_rows() %>%
spread(key, value) %>%
mutate(TimeOfSample = update(MeasurementDate,
hour = hour(TimeOfSample),
minute = minute(TimeOfSample)),
NH3Emissions = as.numeric(NH3Emissions),
N2OEmissions = as.numeric(N2OEmissions),
CommentsEmissions = as.character(CommentsEmissions),
SamplingTemperature = as.numeric(SamplingTemperature),
SamplingSoilMoisture = as.numeric(SamplingSoilMoisture),
SamplingSoilNO3 = as.numeric(SamplingSoilNO3),
SamplingSoilNH4 = as.numeric(SamplingSoilNH4),
SamplingBulkDensity = as.numeric(SamplingBulkDensity),
CommentsGHGSampling = as.character(CommentsGHGSampling)) %>%
select(SiteID:TimeOfSample,
NH3Emissions,
N2OEmissions,
CommentsEmissions,
SamplingTemperature,
SamplingSoilMoisture,
SamplingSoilNO3,
SamplingSoilNH4,
SamplingBulkDensity,
everything()) %>%
# remove the ATEMP before outputting
filter(SiteID != 'ATEMP') -> df
return(df)
}
# Combining Weather data
ReadWeatherData <-
function() {
ReadLatestData('Weather') %>%
# make sure that SiteID is entered in all rows
map(~ .x %>% fill(SiteID)) %>%
# define type of Key variables & gather all non-key variables
map(~ .x %>%
mutate(SiteID = as.character(SiteID),
MeasurementDate = as.Date(MeasurementDate)) %>%
gather(key, value, -(SiteID:MeasurementDate))) %>%
bind_rows() %>%
spread(key, value) %>%
mutate(MeasurementDate = as.Date(MeasurementDate),
DailyPrecipitation = as.numeric(DailyPrecipitation),
DailySnow = as.numeric(DailySnow),
RelativeHumidity = as.numeric(RelativeHumidity),
SolarRadiation = as.numeric(SolarRadiation),
MaxAirTemperature = as.numeric(MaxAirTemperature),
MinAirTemperature = as.numeric(MinAirTemperature),
AveAirTemperature = as.numeric(AveAirTemperature),
SoilTemperature = as.numeric(SoilTemperature),
WindSpeed = as.numeric(WindSpeed),
WindDirection = as.numeric(WindDirection),
CommentsWeather = as.character(CommentsWeather)) %>%
select(SiteID,
MeasurementDate,
DailyPrecipitation,
DailySnow,
RelativeHumidity,
SolarRadiation,
MaxAirTemperature,
MinAirTemperature,
AveAirTemperature,
SoilTemperature,
WindSpeed,
WindDirection,
everything()) %>%
# remove the ATEMP before outputting
filter(SiteID != 'ATEMP') -> df
return(df)
}
# Setting ggplot theme -----------------------------------
# Set up a theme for plotting
theme_gio <-
theme_light() +
theme(text = element_text(family = "sans"),
plot.title = element_text(colour = "#666666", size = rel(2), hjust = 0.5, face = "bold"),
plot.subtitle = element_text(colour = "#666666", size = rel(1.5), hjust = 0.5, face = "plain", lineheight = rel(1.1)),
plot.caption = element_text(colour = "#666666", size = rel(1.2), hjust = 0.5, face = "plain"),
axis.title = element_text(colour = "#666666", size = rel(1.2)),
axis.text.x = element_text(colour = "#757575", size = rel(1)),
axis.text.y = element_text(colour = "#757575", size = rel(0.9)),
legend.title = element_text(colour = "#757575", size = rel(1.2)),
legend.text = element_text(colour = "#757575", size = rel(1)),
strip.text = element_text(colour = "#666666", hjust = 0.5, face = "bold", size = rel(1)),
strip.background = element_rect(colour = NA, fill = NA),
panel.grid.minor = element_blank())
theme_gio2 <-
theme_light() +
theme(text = element_text(family = "sans"),
plot.title = element_text(colour = "#666666", size = rel(2), hjust = 0.5, face = "bold"),
plot.subtitle = element_text(colour = "#666666", size = rel(1.5), hjust = 0.5, face = "plain", lineheight = rel(1.1)),
plot.caption = element_text(colour = "#666666", size = rel(1.2), hjust = 0.5, face = "plain"),
axis.title = element_text(colour = "#666666", size = rel(1.2)),
axis.text = element_text(colour = "#757575", size = rel(1)),
legend.title = element_text(colour = "#757575", size = rel(1.2)),
legend.text = element_text(colour = "#757575", size = rel(1)),
strip.text = element_text(colour = "#666666", hjust = 0.5, face = "bold", size = rel(1)),
panel.grid.minor = element_blank())
|
df389aac9348499ee7ab9d25ebda10cd8146fbef
|
3a882c3eb6867a5ce5081747c9c538aec0d08705
|
/man/aggregate.Rd
|
f4962e0c0c893b98b70d97bce10a522902a62c2b
|
[] |
no_license
|
cran/hyperSpec
|
02c327c0ea66014936de3af2cb188e9e30a4e6f7
|
4fc1e239f548e98f3a295e0521a2f99a5b84316d
|
refs/heads/master
| 2021-09-22T07:57:28.497828
| 2021-09-13T12:00:02
| 2021-09-13T12:00:02
| 17,696,713
| 3
| 10
| null | 2016-10-31T16:36:46
| 2014-03-13T05:00:53
|
R
|
UTF-8
|
R
| false
| true
| 3,461
|
rd
|
aggregate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregate.R
\docType{methods}
\name{aggregate}
\alias{aggregate}
\alias{aggregate,hyperSpec-method}
\alias{ave,hyperSpec-method}
\title{aggregate hyperSpec objects}
\usage{
\S4method{aggregate}{hyperSpec}(
x,
by = stop("by is needed"),
FUN = stop("FUN is needed."),
...,
out.rows = NULL,
append.rows = NULL,
by.isindex = FALSE
)
}
\arguments{
\item{x}{a \code{hyperSpec} object}
\item{by}{grouping for the rows of \code{x@data}.
Either a list containing an index vector for each of the subgroups or a
vector that can be \code{split} in such a list.}
\item{FUN}{function to compute the summary statistics}
\item{...}{further arguments passed to \code{FUN}}
\item{out.rows}{number of rows in the resulting \code{hyperSpec} object,
for memory preallocation.}
\item{append.rows}{If more rows are needed, how many should be appended?
Defaults to 100 or an estimate based on the percentage of groups that are
still to be done, whatever is larger.}
\item{by.isindex}{If a list is given in \code{by}: does the list already
contain the row indices of the groups? If \code{FALSE}, the list in
\code{by} is computed first (as in \code{\link[stats]{aggregate}}).}
}
\value{
A \code{hyperSpec} object with an additional column
\code{@data$.aggregate} tracing which group the rows belong to.
}
\description{
Compute summary statistics for subsets of a \code{hyperSpec} object.
}
\details{
\code{aggregate} applies \code{FUN} to each of the subgroups given by
\code{by}. It combines the functionality of \code{\link[stats]{aggregate}},
\code{\link[base]{tapply}}, and \code{\link[stats]{ave}} for hyperSpec
objects.
\code{aggregate} avoids splitting \code{x@data}.
\code{FUN} does not need to return exactly one value. The number of
returned values needs to be the same for all wavelengths (otherwise the
result could not be a matrix), see the examples.
If the initially preallocated \code{data.frame} turns out to be too small,
more rows are appended and a warning is issued.
}
\examples{
cluster.means <- aggregate (chondro, chondro$clusters, mean_pm_sd)
plot(cluster.means, stacked = ".aggregate", fill = ".aggregate",
col = matlab.dark.palette (3))
## make some "spectra"
spc <- new ("hyperSpec", spc = sweep (matrix (rnorm (10*20), ncol = 20), 1, (1:10)*5, "+"))
## 3 groups
color <- c("red", "blue", "black")
by <- as.factor (c (1, 1, 1, 1, 1, 1, 5, 1, 2, 2))
by
plot (spc, "spc", col = color[by])
## Example 1: plot the mean of the groups
plot (aggregate (spc, by, mean), "spc", col = color, add = TRUE,
lines.args = list(lwd = 3, lty = 2))
## Example 2: FUN may return more than one value (here: 3)
plot (aggregate (spc, by, mean_pm_sd), "spc",
col = rep(color, each = 3), lines.args = list(lwd = 3, lty = 2))
## Example 3: aggregate even takes FUN that return different numbers of
## values for different groups
plot (spc, "spc", col = color[by])
weird.function <- function (x){
if (length (x) == 1)
x + 1 : 10
else if (length (x) == 2)
NULL
else
x [1]
}
agg <- aggregate (spc, by, weird.function)
agg$.aggregate
plot (agg, "spc", add = TRUE, col = color[agg$.aggregate],
lines.args = list (lwd = 3, lty = 2))
}
\seealso{
\code{\link[base]{tapply}}, \code{\link[stats]{aggregate}},
\code{\link[stats]{ave}}
}
\author{
C. Beleites
}
\keyword{array}
\keyword{category}
\keyword{methods}
|
bafd38cd4e2e0351e5f3807ee5904a01fa0465dd
|
315af6191046d18fa8856566add85b1586b052f4
|
/Code/Fertilizer and Yield/C_EEF_fert_yield.R
|
afb210368f4b66e89c7f96bd45160114309336e7
|
[] |
no_license
|
twilli2/n2oflux
|
40e1fbf12919b33d366800eacec62049d0347f97
|
73f0e143bfab81f458f7e1c2d02d9df228ed4226
|
refs/heads/master
| 2021-01-03T14:43:57.948139
| 2020-02-12T21:01:23
| 2020-02-12T21:01:23
| 240,113,520
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,690
|
r
|
C_EEF_fert_yield.R
|
library(tidyverse)
library(dplyr)
library(ggplot2)
library(readxl)
#C:/Users/twilli2/
fert_data <- read_excel("C:/Users/twilli2/Dropbox/Lab data/S Willamette GWMA Dropbox/Fert-Yield data/Fertilizer Plans.xlsx",
sheet = "total fert", col_types = c("date","text", "text", "numeric", "numeric",
"numeric", "numeric", "numeric"))
#based on samples and estimates
cpyield <- yield_data %>%
  filter(plot == "Conv" | plot == "EEF", season == 2) %>%
group_by(field,plot) %>%
summarise(mean_seed_kg_ha = mean(seed_kg_ha))
ggplot(cpyield) +
geom_col(aes(plot, mean_seed_kg_ha),color = "black", fill = "lightblue")+
#geom_col(aes(plot, average_yield_lb), alpha = 0.0,
# color = "black", linetype = "dashed", size = .5) +
labs(x = "Treatment", y = "Yield (kgs/ha)")+
theme(panel.background = element_rect(fill='white', colour='white'),
panel.grid = element_line(color = "lightgray"),
panel.grid.minor = element_line(color = NA),
panel.border = element_rect(fill = NA, color = "black"),
strip.background = element_blank(),
axis.text.x = element_text(size=14, color = "black", face = "bold"),
axis.title.x = element_text(size = 14, vjust=-1, face = "bold"),
axis.text.y = element_text(size=14, color = "black"),
axis.title.y = element_text(vjust=1.8, size = 14, face = "bold"),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
strip.text.x = element_text(size = 14, color = "black", face = "bold", angle = 0)) +
facet_grid(season~field)
weigh_wagon_yield_data <- read_excel("C:/Users/twilli2/Dropbox/Lab data/S Willamette GWMA Dropbox/Fert-Yield data/Yield Data.xlsx",
sheet = "Sheet4")
weigh_wagon_yield_data$season[weigh_wagon_yield_data$season == 1] <- "S1"
weigh_wagon_yield_data$season[weigh_wagon_yield_data$season == 2] <- "S2"
seasonnames <- c("S1" = "Season 1", "S2" = "Season 2", "1" = "Field 1","2" = "Field 2","3" = "Field 3","4" = "Field 4")
ggplot(weigh_wagon_yield_data) +
geom_col(aes(plot, seed_lbs_ac, fill = plot), color = "black")+
scale_fill_manual(values = c("#e7298a","#1b9e77"),
labels = c("Conventional","Enhanced Efficiency"))+
#geom_col(aes(plot, average_yield_lb), alpha = 0.0,
# color = "black", linetype = "dashed", size = .5) +
labs(x = "Treatment", y = "Dirt Seed Yield (lbs/ac)", fill = "")+
theme_bw()+
theme(axis.text.x = element_text(size=14, colour="black", angle = 0),
axis.title.x = element_text(size = 16, vjust=-0.1, face = "bold"),
axis.text.y = element_text(size=16, colour="black"),
axis.title.y = element_text(vjust=1.8, size = 16, face = "bold"),
legend.text = element_text(size = 16),
legend.title = element_text("Field",size = 16),
legend.position = "bottom",
strip.text.y = element_text(size = 14, color = "black", face = "plain", angle = -90),
strip.text.x = element_text(size = 14, color = "black", face = "plain", angle = 0))+
facet_grid(season~field, labeller = as_labeller(seasonnames))
fert_total1 <- filter(fert_data, date <= "2018-04-09")
fert_total1$season <- 1
fert_total2 <- filter(fert_data, date > "2018-04-09")
fert_total2$season <- 2
fert_total <- bind_rows(fert_total1, fert_total2)
fert_total$season <- as.factor(fert_total$season)
fert_total2 %>%
filter(plot == "C") %>%
group_by(field) %>%
summarise(total_urea = sum(urea_ac)*1.12085, total_ammonium = sum(nh4_ac)*1.12085, t_n = sum(tn_ac)*1.12085)
fert_applied <- fert_total %>%
filter(plot == "C") %>%
group_by(season,field,plot) %>%
gather(compound, value = conc,nh4_ac:urea_ac)
seasonnames2 <- c("1" = "Season 1", "2" = "Season 2")
ggplot(data = fert_applied) +
  geom_col(aes(field, conc, fill = compound), color = "black")+
labs(y = expression("N application rate (lbs/acre)"))+
labs(x = expression("Field"), fill = "")+
scale_fill_brewer(palette = "Dark2",
breaks = c("nh4_ac","urea_ac"),
labels = c("Ammonium","Urea"))+
theme_bw()+
theme(axis.text.x = element_text(size=14, colour="black", angle = 0),
axis.title.x = element_text(size = 16, vjust=-0.1, face = "bold"),
axis.text.y = element_text(size=16, colour="black"),
axis.title.y = element_text(vjust=1.8, size = 16, face = "bold"),
legend.text = element_text(size = 16),
legend.title = element_text("Field",size = 16),
legend.position = "bottom",
strip.text.x = element_text(size = 14, color = "black", face = "plain", angle = 0))+
facet_grid(~season, labeller = as_labeller(seasonnames2))
|
7ec7e42857595e31de4bf5f80821adb2d36f7d71
|
6278ae94cd7fdca1846b0dbacd93f62cbde4b278
|
/src/financial/01_getacs.R
|
3a9a1d89210d5e0426dc76732aa5df75be5c7b9d
|
[
"MIT"
] |
permissive
|
uva-bi-sdad/capitals
|
2a0cc445231558534b18a40c05a17a46b155f34f
|
4913d8fc79a362016bdb258d04e2a22029e93593
|
refs/heads/master
| 2023-05-04T20:05:11.266666
| 2021-05-29T16:48:19
| 2021-05-29T16:48:19
| 290,484,318
| 1
| 4
| null | 2020-11-10T19:59:54
| 2020-08-26T12:01:20
|
R
|
UTF-8
|
R
| false
| false
| 2,931
|
r
|
01_getacs.R
|
library(tidycensus)
library(tidyverse)
#
# API key ------------------------------------------------------------------------
#
# installed census api key
readRenviron("~/.Renviron")
Sys.getenv("CENSUS_API_KEY")
#
# Select variables ------------------------------------------------------------------------
#
acsvars <- c(
# Employed in agriculture, forestry, fishing and hunting, mining industry
"C24050_001", "C24050_002",
# Gini index of income inequality
"B19083_001",
# Income below poverty level
"B17001_002", "B17001_001",
# Public assistance or snap in past 12 months
"B19058_002", "B19058_001",
# Supplemental security income
"B19056_002", "B19056_001",
# Median household income
"B19013_001",
# Without BA
"B15003_002", "B15003_003", "B15003_004", "B15003_005", "B15003_006", "B15003_007",
"B15003_008", "B15003_009", "B15003_010", "B15003_011", "B15003_012", "B15003_013",
"B15003_014", "B15003_015", "B15003_016", "B15003_017", "B15003_018", "B15003_019",
"B15003_020", "B15003_021", "B15003_001",
# In labor force
"B23025_002", "B23025_001",
# Travel time to work 30+
"B08303_008", "B08303_009", "B08303_010", "B08303_011", "B08303_012", "B08303_013",
"B08303_001"
)
#
# Get data ------------------------------------------------------------------------
#
# Get data from 2014/18 5-year estimates for counties
data <- get_acs(geography = "county", state = c(19, 41, 51),
variables = acsvars,
year = 2018, survey = "acs5",
cache_table = TRUE, output = "wide", geometry = TRUE,
keep_geo_vars = TRUE)
#
# Calculate ------------------------------------------------------------------------
#
acsdata <- data %>% transmute(
STATEFP = STATEFP,
COUNTYFP = COUNTYFP,
COUNTYNS = COUNTYNS,
AFFGEOID = AFFGEOID,
GEOID = GEOID,
LSAD = LSAD,
NAME.x = NAME.x,
NAME.y = NAME.y,
geometry = geometry,
fin_pctemplagri = C24050_002E / C24050_001E * 100,
fin_gini = B19083_001E,
fin_pctinpov = B17001_002E / B17001_001E * 100,
fin_pctassist = B19058_002E / B19058_001E * 100,
fin_pctssi = B19056_002E / B19056_001E * 100,
fin_medinc = B19013_001E,
fin_pctlessba = (B15003_002E + B15003_003E + B15003_004E + B15003_005E + B15003_006E + B15003_007E +
B15003_008E + B15003_009E + B15003_010E + B15003_011E + B15003_012E + B15003_013E +
B15003_014E + B15003_015E + B15003_016E + B15003_017E + B15003_018E + B15003_019E +
B15003_020E + B15003_021E) / B15003_001E * 100,
fin_pctcommute = (B08303_008E + B08303_009E + B08303_010E + B08303_011E + B08303_012E + B08303_013E) /
B08303_001E * 100,
fin_pctlabforce = B23025_002E / B23025_001E * 100
)
#
# Write ------------------------------------------------------------------------
#
write_rds(acsdata, "./rivanna_data/financial/fin_acs_2018.Rds")
|
fe4e8ad2a5953fbfded9d5f7dcc045830c1ef971
|
05f1b9b7010b9558ef0d77477071b584ed59617d
|
/theses/sarah-thesis/Charts/charts.R
|
957c406a67deb30c95c4d55fa654066e46f38c2e
|
[] |
no_license
|
sfu-cl-lab/our-papers
|
5194920eea72b4d82a5c8f942dc203a2973b584b
|
65e366218e85ce49e6a03682e172f91065ab8b0f
|
refs/heads/master
| 2023-07-07T11:13:53.496207
| 2023-06-29T22:01:58
| 2023-06-29T22:01:58
| 74,601,085
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,619
|
r
|
charts.R
|
pdf(file='/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/lowcorrelation.pdf')
data<- read.table("/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/lowcorrelation", header=TRUE, sep="\t")
barplot(as.matrix(data), beside=TRUE, main="AUC value of different methods - Low Correlation Synthetic Data",names.arg=c("Unigram-TF","Bigram-TF","Bigram-IDF","MLN-TF","MLN-IDF"),
density=c(20,70,10),angle=c(70,90,90),ylim=c(0.15,1.2),xpd=FALSE)
abline(h=0.97, lty=20)
text(1.4, 0.34,cex=0.6, "OutRank")
text(2.4, 0.46,cex=0.6, "KNN")
text(3.4, 0.54,cex=0.6, "LOF")
text(5.4, 0.71,cex=0.6, "OutRank")
text(6.4, 0.64,cex=0.6, "KNN")
text(7.4, 0.43,cex=0.6, "LOF")
text(9.4, 0.70,cex=0.6, "OutRank")
text(10.4, 0.81,cex=0.6, "KNN")
text(11.4, 0.41,cex=0.6, "LOF")
text(13.4, 0.90,cex=0.6, "OutRank")
text(14.4, 0.98,cex=0.6, "KNN")
text(15.4, 0.59,cex=0.6, "LOF")
text(17.4, 0.91,cex=0.6, "OutRank")
text(18.4, 0.90,cex=0.6, "KNN")
text(19.4, 0.45,cex=0.6, "LOF")
box(bty="O")
legend("topright",c("OutRank", "KNN","LOF"), cex=1.2, bty="n",density=c(20,70,10),angle=c(70,90,90))
dev.off()
pdf(file='/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/highcorrelation.pdf')
data<- read.table("/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/highcorrelation", header=TRUE, sep="\t")
barplot(as.matrix(data), beside=TRUE, main="AUC value of different methods - High Correlation Synthetic Data",names.arg=c("Unigram-TF","Bigram-TF","Bigram-IDF","MLN-TF","MLN-IDF"),
density=c(20,70,10),angle=c(70,90,90),ylim=c(0.15,1.2),xpd=FALSE)
abline(h=0.99, lty=20)
text(1.4, 0.96,cex=0.6, "OutRank")
text(2.4, 0.95,cex=0.6, "KNN")
text(3.4, 0.88,cex=0.6, "LOF")
text(5.4, 0.99,cex=0.6, "OutRank")
text(6.4, 1,cex=0.6, "KNN")
text(7.4, 0.55,cex=0.6, "LOF")
text(9.4, 1,cex=0.6, "OutRank")
text(10.4, 0.94,cex=0.6, "KNN")
text(11.4, 0.5,cex=0.6, "LOF")
text(13.4, 1,cex=0.6, "OutRank")
text(14.4, 0.98,cex=0.6, "KNN")
text(15.4, 0.69,cex=0.6, "LOF")
text(17.4, 1,cex=0.6, "OutRank")
text(18.4, 0.98,cex=0.6, "KNN")
text(19.4, 0.52,cex=0.6, "LOF")
box(bty="O")
legend("topright",c("OutRank", "KNN","LOF"), cex=1.2, bty="n",density=c(20,70,10),angle=c(70,90,90))
dev.off()
pdf(file='/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/SingleAttribute.pdf')
data<- read.table("/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/singleAttribute", header=TRUE, sep="\t")
barplot(as.matrix(data), beside=TRUE, main="AUC value of different methods - Single Attribute Synthetic Data",names.arg=c("Unigram-TF","Bigram-TF","Bigram-IDF","MLN-TF","MLN-IDF"),
density=c(20,70,10),angle=c(70,90,90),ylim=c(0.15,1.2),xpd=FALSE)
abline(h=0.96, lty=20)
text(1, 0.97,cex=0.6, "OutRank")
text(2.45, 0.97,cex=0.6, "KNN")
text(3.4, 0.58,cex=0.6, "LOF")
text(5.4, 0.88,cex=0.6, "OutRank")
text(6.4, 0.82,cex=0.6, "KNN")
text(7.4, 0.54,cex=0.6, "LOF")
text(9.4, 0.55,cex=0.6, "OutRank")
text(10.4, 0.51,cex=0.6, "KNN")
text(11.4, 0.53,cex=0.6, "LOF")
text(13.4, 0.89,cex=0.6, "OutRank")
text(14.4, 0.87,cex=0.6, "KNN")
text(15.4, 0.64,cex=0.6, "LOF")
text(17.4,0.9 ,cex=0.6, "OutRank")
text(18.4, 0.79,cex=0.6, "KNN")
text(19.4, 0.58,cex=0.6, "LOF")
box(bty="O")
legend("topright",c("OutRank", "KNN","LOF"), cex=1.2, bty="n",density=c(20,70,10),angle=c(70,90,90))
dev.off()
pdf(file='/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/strikers.pdf')
data<- read.table("/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/strikers", header=TRUE, sep="\t")
barplot(as.matrix(data), beside=TRUE, main="AUC value of different methods - Strikers vs Goalies Real-World Data",names.arg=c("Unigram-TF","Bigram-TF","Bigram-IDF","MLN-TF","MLN-IDF"),
density=c(20,70,10),angle=c(70,90,90),ylim=c(0.15,1),xpd=FALSE)
abline(h=0.70, lty=20)
text(1, 0.59,cex=0.6, "OutRank")
text(2.45, 0.52,cex=0.6, "KNN")
text(3.4, 0.69,cex=0.6, "LOF")
text(5.4, 0.64,cex=0.6, "OutRank")
text(6.4, 0.53,cex=0.6, "KNN")
text(7.4, 0.58,cex=0.6, "LOF")
text(9.4, 0.63,cex=0.6, "OutRank")
text(10.4, 0.53,cex=0.6, "KNN")
text(11.4, 0.71,cex=0.6, "LOF")
text(13.4, 0.61,cex=0.6, "OutRank")
text(14.4, 0.64,cex=0.6, "KNN")
text(15.4, 0.62,cex=0.6, "LOF")
text(17.4,0.57,cex=0.6, "OutRank")
text(18.4, 0.51,cex=0.6, "KNN")
text(19.4, 0.54,cex=0.6, "LOF")
box(bty="O")
legend("topright",c("OutRank", "KNN","LOF"), cex=1.2, bty="n",density=c(20,70,10),angle=c(70,90,90))
dev.off()
pdf(file='/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/midfielder.pdf')
data<- read.table("/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/midfielder", header=TRUE, sep="\t")
barplot(as.matrix(data), beside=TRUE, main="AUC value of different methods - Midfielders vs Strikers Real-World Data",names.arg=c("Unigram-TF","Bigram-TF","Bigram-IDF","MLN-TF","MLN-IDF"),
density=c(20,70,10),angle=c(70,90,90),ylim=c(0.15,1),xpd=FALSE)
abline(h=0.79, lty=20)
text(1, 0.55,cex=0.6, "OutRank")
text(2.45, 0.59,cex=0.6, "KNN")
text(3.4, 0.56,cex=0.6, "LOF")
text(5.4, 0.72,cex=0.6, "OutRank")
text(6.4, 0.59,cex=0.6, "KNN")
text(7.4, 0.63,cex=0.6, "LOF")
text(9.4, 0.72,cex=0.6, "OutRank")
text(10.4, 0.59,cex=0.6, "KNN")
text(11.4, 0.74,cex=0.6, "LOF")
text(13.4, 0.72,cex=0.6, "OutRank")
text(14.4, 0.59,cex=0.6, "KNN")
text(15.4, 0.77,cex=0.6, "LOF")
text(17.4,0.66,cex=0.6, "OutRank")
text(18.4, 0.59,cex=0.6, "KNN")
text(19.4, 0.80,cex=0.6, "LOF")
box(bty="O")
legend("topright",c("OutRank", "KNN","LOF"), cex=1.2, bty="n",density=c(20,70,10),angle=c(70,90,90))
dev.off()
pdf(file='/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/imdb.pdf')
data<- read.table("/local-scratch/SRiahi/iii/newPropose/propositionalization/Charts/imdb", header=TRUE, sep="\t")
barplot(as.matrix(data), beside=TRUE, main="AUC value of different methods - Drama vs Comedy Real-World Data",names.arg=c("Unigram-TF","Bigram-TF","Bigram-IDF","MLN-TF","MLN-IDF"),
density=c(20,70,10),angle=c(70,90,90),ylim=c(0.15,1),xpd=FALSE)
abline(h=0.69, lty=20)
text(1, 0.52,cex=0.6, "OutRank")
text(2.45, 0.62,cex=0.6, "KNN")
text(3.4, 0.47,cex=0.6, "LOF")
text(5.4, 0.68,cex=0.6, "OutRank")
text(6.4, 0.70,cex=0.6, "KNN")
text(7.4, 0.45,cex=0.6, "LOF")
text(9.4, 0.69,cex=0.6, "OutRank")
text(10.4, 0.70,cex=0.6, "KNN")
text(11.4, 0.49,cex=0.6, "LOF")
text(13.4, 0.69,cex=0.6, "OutRank")
text(14.4, 0.69,cex=0.6, "KNN")
text(15.4, 0.52,cex=0.6, "LOF")
text(17.4,0.64,cex=0.6, "OutRank")
text(18.4, 0.52,cex=0.6, "KNN")
text(19.4, 0.46,cex=0.6, "LOF")
box(bty="O")
legend("topright",c("OutRank", "KNN","LOF"), cex=1.2, bty="n",density=c(20,70,10),angle=c(70,90,90))
dev.off()
|
1b7aae14cbd479b27331837dfc1627c3a923c94d
|
09c775e8b6e5768ec7cd4a221cc7c63ddbf8f10f
|
/week11/scraping_example_r_only.R
|
2d07903578989d1e03920858a5681d40fafe86f0
|
[] |
no_license
|
lse-my472/lse-my472.github.io
|
f1fda05e16f29f59e43d410415d6a00a4bbf4fa4
|
ac4426a0837a60e71658e2808d13c816023385c2
|
refs/heads/master
| 2022-12-15T10:54:50.025616
| 2022-12-08T13:08:59
| 2022-12-08T13:08:59
| 144,271,243
| 45
| 128
| null | 2021-11-29T11:45:43
| 2018-08-10T10:13:46
|
HTML
|
UTF-8
|
R
| false
| false
| 3,202
|
r
|
scraping_example_r_only.R
|
##
## Continuous scraping: Approach 1
##
# This script illustrates continuous scraping with a cloud instance by
# collecting the Wikipedia featured article of the day once every 24 hours.
#
# The example is helpful for us to discuss a simple case of continuous scraping;
# however, note that the code would require the computer to run continuously.
# That could be suitable e.g. when scraping in minute intervals, or when
# downloading tweets continuously using the Twitter streaming API. When just
# collecting data once per day like here, it would of course be most energy and
# cost efficient to only briefly spin up and shut down a cloud instance every
# day or to use FaaS, and to store the collected data outside of the instance,
# e.g. via S3 or Dropbox.
# The script illustrates two ways in which the scraped data can be saved:
# i) by appending it to a table in an SQLite database
# ii) by appending it to a .csv file
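# As a sketch of the "store the collected data outside of the instance" idea
# mentioned above (assuming the rdrop2 package is installed and already
# authenticated via drop_auth()), the csv could be pushed to Dropbox after
# each scrape:
# library("rdrop2")
# drop_upload("featured_articles.csv", path = "scraper-output")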
# Loading packages
library("RSQLite")
library("DBI")
library("rvest")
library("lubridate")
library("tidyverse")
# Connecting to/creating SQLite database
db <- dbConnect(RSQLite::SQLite(), "wikipedia.sqlite")
# If no .csv file exists, creating one with the header
if (file.exists("featured_articles.csv") == FALSE)
  {writeLines("date,summary,link", "featured_articles.csv")}
# Scraping around the following hours 0 - 23
target_hours <- c(9) # check system time zone with Sys.time()
# Target items
n <- 10000
# While loop
i <- 0
while(i < n) {
# Sleep for one hour
Sys.sleep(60 * 60)
# Check whether current hour is in target hours
if (hour(Sys.time()) %in% target_hours) {
# Creating a data frame with one row
df <- tibble(date = as_datetime(Sys.time()), summary = "", link = "")
# Reading the HTML code
wikipedia_main_page <- read_html("https://en.wikipedia.org/wiki/Main_Page")
# Article summary
df[1,"summary"] <- wikipedia_main_page %>% html_nodes(css = "#mp-tfa > p") %>% html_text()
# Link to full article
all_links <- wikipedia_main_page %>% html_nodes(css = "a")
tfa_partial_link <- all_links[grepl("Full", all_links) & grepl("article", all_links)] %>%
html_attr("href")
df[1,"link"] <- paste0("https://en.wikipedia.org", tfa_partial_link)
    # Now the df is either appended to the table within the database or to a
    # csv file
    # Option i: Add to table in SQLite database
dbWriteTable(db, "featured_articles", df, append = TRUE)
# Option ii: Append as row to csv
write.table(df, file="featured_articles.csv",
append = TRUE,
row.names = FALSE,
col.names = FALSE,
sep=',')
# Can also create a backup of the csv every 5 days, e.g. with
if (i %% 5 == 0) {file.copy("featured_articles.csv", "featured_articles_backup.csv")}
# Increment item count
i <- i + 1
# Status update
print(paste("Article scraped at", Sys.time()))
}
}
# Check the database
dbGetQuery(db, 'SELECT * FROM featured_articles') %>% as_tibble()
# Check the csv
read_csv("featured_articles.csv") %>% as_tibble()
# Disconnect from database
dbDisconnect(db)
|
5df41f192a1b6fe27782976f3df5104d6a860906
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlegamesv1.auto/man/turnBasedMatches.rematch.Rd
|
e08d260ead2fcf6ce42b98431b2e9e1d72a78494
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,407
|
rd
|
turnBasedMatches.rematch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/games_functions.R
\name{turnBasedMatches.rematch}
\alias{turnBasedMatches.rematch}
\title{Create a rematch of a match that was previously completed, with the same participants. This can be called by only one player on a match still in their list; the player must have called Finish first. Returns the newly created match; it will be the caller's turn.}
\usage{
turnBasedMatches.rematch(matchId, consistencyToken = NULL, language = NULL,
requestId = NULL)
}
\arguments{
\item{matchId}{The ID of the match}
\item{consistencyToken}{The last-seen mutation timestamp}
\item{language}{The preferred language to use for strings returned by this method}
\item{requestId}{A randomly generated numeric ID for each request specified by the caller}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/games
\item https://www.googleapis.com/auth/plus.login
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/games", "https://www.googleapis.com/auth/plus.login"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
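\examples{
\dontrun{
# A minimal sketch; the match ID is hypothetical and must refer to a
# completed match in the caller's match list.
turnBasedMatches.rematch(matchId = "some-match-id")
}
}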
\seealso{
\href{https://developers.google.com/games/services/}{Google Documentation}
}
|
11fa81d1a938ad5ea2b75d8ff8f995ad36a8a78c
|
951cadf0d812951ab1b933302ec992269b86586a
|
/run_analysis.R
|
ec433f0f01e86b91a5d13a789c7eda47084564ea
|
[] |
no_license
|
ASabate/CourseraGettingAndCleaningData
|
ba30966c68b1c939b7eb18de9b171e607aa6d51b
|
b7cf87d6d95bc9b960f56a58be334f18fc2de9ca
|
refs/heads/master
| 2021-01-13T00:42:32.919991
| 2015-11-28T21:27:22
| 2015-11-28T21:27:22
| 46,680,088
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,591
|
r
|
run_analysis.R
|
####################################################################
# Getting and Cleaning Data Course Project, from Coursera
# Date: 22/11/2015
####################################################################
library(plyr)
# Load activity labels
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
# Load features labels
features <- read.table("UCI HAR Dataset/features.txt")
# search mean and Std measures
featmeanStdPos<-grep(".*mean.*|.*std.*", features$V2);
xtrain<-read.table("UCI HAR Dataset/train/X_train.txt")
xtrainmeanstd<-xtrain[featmeanStdPos]
ytrain<-read.table("UCI HAR Dataset/train/Y_train.txt")
subjecttrain<-read.table("UCI HAR Dataset/train/subject_train.txt")
trainjoin<-cbind(subjecttrain, ytrain,xtrainmeanstd)
xtest<-read.table("UCI HAR Dataset/test/X_test.txt")
xtestmeanstd<-xtest[featmeanStdPos]
ytest<-read.table("UCI HAR Dataset/test/Y_test.txt")
subjecttest<-read.table("UCI HAR Dataset/test/subject_test.txt")
testjoin<-cbind(subjecttest, ytest, xtestmeanstd)
traintestjoin<-rbind(trainjoin, testjoin)
colnames(traintestjoin)<-c("subject","activity",as.character(features$V2[featmeanStdPos]))
# Convert activity codes to descriptive labels and subject IDs to factors
traintestjoin$activity<-factor(traintestjoin$activity, levels=activity_labels$V1, labels=activity_labels$V2)
traintestjoin$subject<-as.factor(traintestjoin$subject)
# Output result into a text file
write.table(traintestjoin, "tidy.txt", row.names = FALSE, quote = FALSE)
|
b757c5ce0266f6c1919c1951327e3cf52f7fd229
|
cdd4279fbbf16d407cff257bcb28f770a699912c
|
/utility_based_caching/scenario_generation/graph_gen/barabasi.r
|
8f3a2b67ee99c3474e6269a65a54bd9bfdf5e4ef
|
[] |
no_license
|
andreaaraldo/araldo-phd-code
|
45aca2281c54318cb2df664f8ac1c30980ebd710
|
2d6538bae0a6df30a0a428b5142be34077f0e38e
|
refs/heads/master
| 2020-04-06T06:19:35.960188
| 2019-07-05T12:57:13
| 2019-07-05T12:57:13
| 33,357,328
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,014
|
r
|
barabasi.r
|
#!/usr/bin/Rscript
args <- commandArgs(TRUE);
if(length( args )!= 4 )
stop("correct usage:\n\t barabasi.r <size> <tier3_cardinality> <capacity> <topology_seed>")
size = as.numeric(args[1] ); # number of nodes
tier3_cardinality = as.numeric(args[2] );
capacity = as.numeric(args[3] ); # link capacity
topology_seed = as.numeric(args[4] ); # seed for the random topology
suppressPackageStartupMessages(library("igraph") )
set.seed(topology_seed);
g <- barabasi.game(size, directed=FALSE);
V(g)$name <- V(g);
sortedDegrees <- sort(degree(g) );
sortedVertexIDs <- attr(sortedDegrees, "names", exact=TRUE);
tier3_nodes = sortedVertexIDs[1:tier3_cardinality];
#core_nodes = sortedVertexIDs[(size-core_cardinality+1):size] #useless
cat(tier3_nodes,"\n")
link_str = "{";
for (e in E(g))
{
verts = ends(g, e);
link_str = sprintf("%s,<%d,%d,%d>, <%d,%d,%d>", link_str, verts[1],verts[2], capacity,verts[2],verts[1], capacity );
}
link_str = sprintf("%s };", link_str);
link_str <- sub('\\{,', "{ ", link_str);
cat(link_str,"\n")
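# Example invocation (sketch): generate a 50-node topology with 10 tier-3
# nodes, link capacity 100 and topology seed 1:
#   ./barabasi.r 50 10 100 1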
|
e7e4c133c1e90fb01575dea6d8b99d9da0e789e6
|
d9736711c9c01c91218f9bb06b5a81498014cf0b
|
/R/strand_from_vcf.R
|
c87158027e043a5d78a4bde97100f85b5e1c84f2
|
[
"MIT"
] |
permissive
|
Biocodings/MutationalPatterns
|
6c3819ecca88b1e51f6d41510504c3960e60f217
|
5698fb9abb7c61e54c05b37df4e7f131c1ba5c28
|
refs/heads/master
| 2021-01-20T07:13:39.640056
| 2017-05-01T12:08:32
| 2017-05-01T12:08:42
| 89,982,167
| 2
| 0
| null | 2017-05-02T02:16:44
| 2017-05-02T02:16:44
| null |
UTF-8
|
R
| false
| false
| 4,861
|
r
|
strand_from_vcf.R
|
#' Find transcriptional strand of base substitutions in vcf
#'
#' For the positions that are within gene bodies it is determined whether
#' the "C" or "T" base is on the same strand as the gene definition. (Since
#' by convention we regard base substitutions as C>X or T>X.)
#'
#' Base substitions on the same strand as the gene definitions are considered
#' untranscribed, and on the opposite strand of gene bodies as transcribed,
#' since the gene definitions report the coding or sense strand, which is
#' untranscribed.
#'
#' No strand information "-" is returned for base substitutions outside gene
#' bodies, or base substitutions that overlap with more than one gene body.
#'
#' @param vcf GRanges containing the VCF object
#' @param genes GRanges with gene bodies definitions including strand
#' information
#'
#' @return Character vector with transcriptional strand information with
#' length of vcf: "-" for positions outside gene bodies, "U" for
#' untranscribed/sense/coding strand, "T" for
#' transcribed/anti-sense/non-coding strand.
#'
#' @examples
#' ## For this example we need our variants from the VCF samples, and
#' ## a known genes dataset. See the 'read_vcfs_as_granges()' example
#' ## for how to load the VCF samples.
#' vcfs <- readRDS(system.file("states/read_vcfs_as_granges_output.rds",
#' package="MutationalPatterns"))
#'
#' # Exclude mitochondrial and allosomal chromosomes.
#' autosomal = extractSeqlevelsByGroup(species="Homo_sapiens",
#' style="UCSC",
#' group="auto")
#'
#' vcfs = lapply(vcfs, function(x) keepSeqlevels(x, autosomal))
#'
#' ## You can obtain the known genes from the UCSC hg19 dataset using
#' ## Bioconductor:
#' # source("https://bioconductor.org/biocLite.R")
#' # biocLite("TxDb.Hsapiens.UCSC.hg19.knownGene")
#' # library("TxDb.Hsapiens.UCSC.hg19.knownGene")
#'
#' ## For this example, we preloaded the data for you:
#' genes_hg19 <- readRDS(system.file("states/genes_hg19.rds",
#' package="MutationalPatterns"))
#'
#' strand_from_vcf(vcfs[[1]], genes_hg19)
#'
#' @seealso
#' \code{\link{read_vcfs_as_granges}},
#'
#' @export
strand_from_vcf = function(vcf, genes)
{
# Check consistency of chromosome names.
if (!(all(seqlevels(vcf) %in% seqlevels(genes))))
stop(paste( "Chromosome names (seqlevels) of vcf and genes Granges",
"object do not match. Use the seqlevelsStyle() function",
"to rename chromosome names.") )
# Determine overlap between vcf positions and genes.
overlap = findOverlaps(vcf, genes)
overlap = as.data.frame(as.matrix(overlap))
colnames(overlap) = c('vcf_id', 'gene_body_id')
    # Remove mutations that overlap with multiple genes, for which it cannot
    # be determined whether they lie on the transcribed or untranscribed
    # strand; such positions show up as duplicated vcf_ids.
dup_pos = overlap$vcf_id[duplicated(overlap$vcf_id)]
# Index of duplicated mutations
dup_idx = which(overlap$vcf_id %in% dup_pos)
# Remove all duplicated (non-unique mapping) mutations.
if (length(dup_idx) > 0)
overlap = overlap[-dup_idx,]
# Subset of mutations in genes
vcf_overlap = vcf[overlap$vcf_id]
# Find reference allele of mutations (and strand of reference genome is
# reported in vcf file).
ref = vcf_overlap$REF
# Find the strand of C or T (since we regard base substitutions as
# C>X or T>X) which mutations have ref allele C or T.
i = which(ref == "C" | ref == "T")
# Store mutation strand info in vector.
strand_muts = rep(0, nrow(overlap))
strand_muts[i] = "+"
strand_muts[-i] = "-"
# Find strand of gene bodies of overlaps.
strand_genebodies = as.character(strand(genes)[overlap$gene_body_id])
# Find if mut and gene_bodies are on the same strand.
same_strand = (strand_muts == strand_genebodies)
# Subset vcf object for both untranscribed and transcribed
# gene definition represents the untranscribed/sense/coding strand
    # If the mutation is on the same strand as the gene, then it is untranscribed.
U_index = which(same_strand == TRUE)
    # If the mutation is on a different strand than the gene, then it is transcribed.
T_index = which(same_strand == FALSE)
strand = rep(0, nrow(overlap))
strand[U_index] = "U"
strand[T_index] = "T"
# Make vector with all positions in input vcf for positions that do
# not overlap with gene bodies, report "-".
strand2 = rep("-", length(vcf))
strand2[overlap$vcf_id] = strand
return(strand2)
}
##
## Deprecated variants
##
get_strand <- function(vcf, genes)
{
.Defunct("strand_from_vcf", package="MutationalPatterns",
msg=paste("This function has been removed. Use",
"'strand_from_vcf' instead."))
}
|
b2eac8ad061b4493d44163786863cd5165b5d78e
|
8d36791af234243d9cbed6d8c5091b03dda8b608
|
/secondscript.R
|
3ced1ee14cf410704b7901e0ba82421d2b9b2b30
|
[] |
no_license
|
acannis/gitDemo
|
d63d4c44d381b311ff7f9cfa606183c13f4754cc
|
d2931ce32ae18f6a86af30ad221b39775e66c097
|
refs/heads/main
| 2022-12-28T23:35:01.658986
| 2020-10-13T21:17:54
| 2020-10-13T21:17:54
| 303,831,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 46
|
r
|
secondscript.R
|
# this is the second file
x=(pi-3.14)
print(x)
|
0a38372221a6d0dc135ed73cd0dc728458553343
|
417c893217ff9daea613f878599030cbda775658
|
/SU_kinematics_dataload.R
|
eb14c873dd5487092c4cdadff9109a9ca8597dc3
|
[] |
no_license
|
lionwarriorjr/Operator
|
edf09497b97b6771b9952253fcc54f4b1d9976b8
|
929a7978d949792774bff758c02a9a198362eb88
|
refs/heads/master
| 2021-01-24T08:18:38.091684
| 2016-09-25T14:30:28
| 2016-09-25T14:30:28
| 69,141,640
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,886
|
r
|
SU_kinematics_dataload.R
|
require(plyr)
require(dplyr)
require(MASS)
require(ISLR)
require(leaps)
require(glmnet)
require(caret)
require(e1071)
require(depmixS4)
# NOTE: setup() below loads the data and launches the analysis; it is
# invoked at the end of this file, after all functions are defined.
setup <- function() {
#Script to load Suturing Kinematics dataset, metadata and its transcriptions
setwd("/R Projects/temp/SU_kinematics/Suturing/kinematics/AllGestures")
remove(SU_kinematics_dataset)
file_list <- list.files()
file_list
for (file in file_list) {
  # if the merged dataset doesn't exist, create it
  if(!exists("SU_kinematics_dataset")){
    SU_kinematics_dataset <- read.table(file, header=FALSE)
    SU_kinematics_dataset$filename <- file
  } else {
    # otherwise append to it
    temp_dataset <- read.table(file, header=FALSE)
    temp_dataset$filename <- file
    SU_kinematics_dataset <- rbind(SU_kinematics_dataset, temp_dataset)
    rm(temp_dataset)
  }
}
#Loaded 138506 observations and 76 columns
#Create column heading for dataframe based on readme.txt provided
colnames(SU_kinematics_dataset) <- c("Master_left_tooltip_x","Master_left_tooltip_y", "Master_left_tooltip_z","Master_left_tooltip_R1","Master_left_tooltip_R2","Master_left_tooltip_R3","Master_left_tooltip_R4", "Master_left_tooltip_R5","Master_left_tooltip_R6", "Master_left_tooltip_R7", "Master_left_tooltip_R8",
"Master_left_tooltip_R9","Master_left_tooltip_trans_vel_x'","Master_left_tooltip_trans_vel_y'","Master_left_tooltip_trans_vel_z'","Master_left_tooltip_rot_vel_1","Master_left_tooltip_rot_vel_2",
"Master_left_tooltip_rot_vel_3","Master_left_gripper_angle","Master_right1","Master_right2","Master_right3","Master_right4","Master_right5","Master_right6","Master_right7","Master_right8","Master_right9",
"Master_right10","Master_right11","Master_right12","Master_right13","Master_right14","Master_right15","Master_right16","Master_right17","Master_right18","Master_right19","Slave_left_tooptip_x",
"Slave_left_tooptip_y","Slave_left_tooptip_z","Slave_left_tooltip_R1","Slave_left_tooltip_R2","Slave_left_tooltip_R3","Slave_left_tooltip_R4","Slave_left_tooltip_R5","Slave_left_tooltip_R6",
"Slave_left_tooltip_R7","Slave_left_tooltip_R8","Slave_left_tooltip_R9","Slave_left_tooltip_trans_vel_x'","Slave_left_tooltip_trans_vel_y'","Slave_left_tooltip_trans_vel_z'",
"Slave_left_tooltip_rot_vel_1","Slave_left_tooltip_rot_vel_2","Slave_left_tooltip_rot_vel_3","Slave_left_gripper_angle","Slave_right1","Slave_right2","Slave_right3","Slave_right4",
"Slave_right5","Slave_right6","Slave_right7","Slave_right8","Slave_right9","Slave_right10","Slave_right11","Slave_right12","Slave_right13","Slave_right14","Slave_right15","Slave_right16",
"Slave_right17","Slave_right18","Slave_right19","filename")
#############
remove(SU_kinematics_metafile)
SU_kinematics_metafile<-read.table("/R Projects/temp/SU_kinematics/Suturing/meta_file_Suturing.txt",header=F)
colnames(SU_kinematics_metafile) <-c("filename","skill_level_self_proclaimed","skill_level_GRS","Global_Rating_Score1","Global_Rating_Score2","Global_Rating_Score3","Global_Rating_Score4","Global_Rating_Score5","Global_Rating_Score6")
#############
remove(SU_kinematics_transcriptions)
setwd("/R Projects/temp/SU_kinematics/Suturing/transcriptions")
file_list <- list.files()
for (file in file_list){
  # if the merged dataset doesn't exist, create it
  if (!exists("SU_kinematics_transcriptions")){
    SU_kinematics_transcriptions <- read.table(file, header=FALSE)
    SU_kinematics_transcriptions$filename <- file
  } else {
    # otherwise append to it
    temp_dataset <- read.table(file, header=FALSE)
    temp_dataset$filename <- file
    SU_kinematics_transcriptions <- rbind(SU_kinematics_transcriptions, temp_dataset)
    rm(temp_dataset)
  }
}
colnames(SU_kinematics_transcriptions ) <- c("start_frame","end_frame","gesture_id","file")
#Loaded
#Create tasks for Gesture
Gesture_Suturing_TaskID <- c(1, 2, 3, 4, 5, 6, 8, 9, 10, 11)
Gesture_Knot_Trying_TaskID<-c(1 , 11 , 12 , 13 , 14 , 15)
Gesture_NeedlePassing_TaskID<- c( 1, 2, 3, 4, 5, 6, 8, 11)
GestureID<-c("G1","G2","G3","G4","G5","G6","G7","G8","G9","G10","G11","G12","G13","G14","G15")
GestureDesc <-
c("Reaching for needle with right hand",
"Positioning needle",
"Pushing needle through tissue",
"Transferring needle from left to right",
"Moving to center with needle in grip",
"Pulling suture with left hand",
"Pulling suture with right hand",
"Orienting needle",
"Using right hand to help tighten suture",
"Loosening more suture",
"Dropping suture at end and moving to end points",
"Reaching for needle with left hand",
"Making C loop around right hand",
"Reaching for suture with right hand",
"Pulling suture with both hands")
head(SU_kinematics_dataset)
GestureDF<-data.frame(GestureID,GestureDesc)
Gesture_SuturingTasks<-data.frame(Gesture_Suturing_TaskID)
Gesture_Knot_TryingTasks<-data.frame(Gesture_Knot_Trying_TaskID)
Gesture_NeedlePassingTasks<-data.frame(Gesture_NeedlePassing_TaskID)
classify()
}
classify <- function() {
#Generate Classifications on GestureID
dvvs <- SU_kinematics_dataset
dvvs[,"filename"] <- NULL
labels <- SU_kinematics_transcriptions
offset = ncol(labels)
for(i in 1:3) {
labels[offset+i] <- rowShift(labels$gesture_id,i)
}
colnames(labels)[[offset+1]] <- "next.id"
colnames(labels)[[offset+2]] <- "next.next.id"
colnames(labels)[[offset+3]] <- "next.next.next.id"
starts <- labels[which(labels$gesture_id == "G2"),]
starts$classe <- ifelse(starts$next.id == "G3" & starts$next.next.id == "G6"
& starts$next.next.next.id == "G4", 1, 0)
starts$classe <- as.factor(starts$classe)
starts <- na.omit(starts)
  # build a list of frame-index ranges, one per candidate gesture sequence
  ind <- Map(function(s, e) s:e, starts$start_frame, starts$end_frame)
  lLength = length(ind)
  growDF <- subset(dvvs, FALSE)  # zero-row frame with the columns of dvvs
for(i in 1:lLength) {
    indexes <- ind[[i]]
subDF <- dvvs[indexes,]
retVec <- apply(subDF,2,mean)
growDF <- rbind(growDF,retVec)
}
colnames(growDF) <- colnames(dvvs)
dvvs <- growDF
dvvs$classe <- starts$classe
#for(i in 1:length(ind)) { dvvs[c(ind[[i]][[1]]),"classe"] <- as.integer(starts[[i,"classe"]]) }
dvvs <- dvvs[!is.na(dvvs$classe),]
dvvs$classe <- as.numeric(dvvs$classe)
dvvs$classe <- dvvs$classe - 1
dvvs$classe <- as.factor(dvvs$classe)
#Setup
inTrain <- sample(1:nrow(dvvs),as.integer(0.5 * nrow(dvvs)),FALSE)
trainDF <- dvvs[inTrain,]
testDF <- dvvs[-inTrain,]
# PCA on input features => Feature Engineering
nzv <- nearZeroVar(trainDF,saveMetrics = T)
zeros <- nzv$zeroVar
trainDF <- trainDF[,which(zeros == F)]
testDF <- testDF[,which(zeros == F)]
# remove zero variance features
ytrain <- trainDF$classe
ytest <- testDF$classe
trainDF$classe <- NULL
testDF$classe <- NULL
l <- sapply(testDF,function(x) is.factor(x))
ind <- which(sapply(trainDF,function(x) is.factor(x)))
levels(testDF[,ind]) <- levels(trainDF[,ind])
pcaTrain <- model.matrix(~.,data=trainDF)
pcaTest <- model.matrix(~.,data=testDF)
pcaTrain <- pcaTrain[,2:ncol(pcaTrain)]
pcaTest <- pcaTest[,2:ncol(pcaTest)]
pca <- prcomp(pcaTrain,scale=T,center=T)
# Feature Selection by PCA Loadings
summary(pca)
pcaTrain <- pca$x[,1:3] # Extract Principal Components
pcaTest <- predict(pca,newdata = pcaTest)[,1:3] #PCA transformation onto test set
pcaTrain <- data.frame(pcaTrain)
pcaTest <- data.frame(pcaTest)
ytrain <- as.factor(ytrain)
ytest <- as.factor(ytest)
pcaTrain$classe <- ytrain
pcaTest$classe <- ytest
# predict "lift up" classe using SVM (1)
costs <- array(dim = 20)
for(i in 1:20) {
svm.dvvs <- svm(classe~.,data=pcaTrain,kernel="radial",cost=i,scale=F)
svm.predict <- predict(svm.dvvs,newdata=pcaTest)
costs[i] = round(mean(svm.predict == pcaTest$classe),5)
}
best.cost = which(costs == max(costs))[[1]]
svm.best <- svm(classe~.,data=pcaTrain,kernel="radial",cost=best.cost,scale=F,probability=T)
}
rowShift <- function(x, shiftLen = 1L) {
r <- (1L + shiftLen):(length(x) + shiftLen)
r[r<1] <- NA
return(x[r])
}
predict.classe <- function(x) {
predDF <- trainDF
predDF <- subset(trainDF,FALSE)
names(x) <- colnames(trainDF)
predDF <- rbind(predDF,x); predDF <- rbind(predDF,x)
colnames(predDF) <- colnames(trainDF)
predPCA <- model.matrix(~.,data=predDF)[,-1]
predTest <- predict(pca,newdata=predPCA)[,1:3]
svm.pred <- predict(svm.best,newdata=predTest,probability=T)
probs <- attr(svm.pred,"probabilities")[1,]
return (probs[1]);
}
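# Kick off the pipeline; setup() loads the data and transcriptions and ends
# by calling classify() (both defined above).
setup()
# Usage sketch for the fitted model (hypothetical; assumes the objects built
# inside classify() -- trainDF, pca, svm.best -- are available in scope and
# that the row carries the same columns as the training data):
# predict.classe(trainDF[1, ])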
|
706d3996ae430fd1e2090303e80dcf7feb9ed6fc
|
ed6b8a6f4a8a7dcf38d3632451ca01419fd7bebf
|
/man/str.len.Rd
|
ae0c64f63d8fc1387eb0ca910b0b16bf5db0ca1f
|
[] |
no_license
|
skranz/stringtools
|
8ab0ee094e3edf837a5b95cb81ba6f5b084f6505
|
8a37e75f057c7af3a0b115bd1e82ae66c91a0579
|
refs/heads/master
| 2022-05-22T02:36:16.012223
| 2022-05-04T20:28:27
| 2022-05-04T20:28:27
| 12,137,116
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 182
|
rd
|
str.len.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{str.len}
\alias{str.len}
\title{a synonym for nchar}
\usage{
str.len(str)
}
\description{
a synonym for nchar
}
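\examples{
## a small sketch: str.len behaves like nchar
str.len("hello") # 5
}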
|
8036024570f196a1c60d1a5909a21735a7f621ae
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/economics/project-bid/proj-bidding.R
|
19444999e7d066806b3f7ec4b135e2ba7ea0e78f
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,753
|
r
|
proj-bidding.R
|
#
# proj-bidding.R, 3 Jun 14
#
# Data from:
# An Empirical Study of Software Project Bidding
# Magne Jørgensen and Gunnar J. Carelius
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
a_pre=read.csv(paste0(ESEUR_dir, "economics/project-bid/project-bidding.a_pre"), as.is=TRUE, strip.white=TRUE)
a_post=read.csv(paste0(ESEUR_dir, "economics/project-bid/project-bidding.a_post"), as.is=TRUE, strip.white=TRUE)
b=read.csv(paste0(ESEUR_dir, "economics/project-bid/project-bidding.b"), as.is=TRUE, strip.white=TRUE)
c_pre=read.csv(paste0(ESEUR_dir, "economics/project-bid/project-workhours.c_pre"), as.is=TRUE, strip.white=TRUE)
c_post=read.csv(paste0(ESEUR_dir, "economics/project-bid/project-workhours.c_post"), as.is=TRUE, strip.white=TRUE)
d=read.csv(paste0(ESEUR_dir, "economics/project-bid/project-workhours.d"), as.is=TRUE, strip.white=TRUE)
plot(sort(a_pre[,1]), col="red", ylab="Amounts bid", xlab="")
plot(sort(a_post[,1]), col="red", ylab="Amounts bid", xlab="")
plot(sort(b[,1]), col="red", ylab="Amounts bid", xlab="")
# Are the samples drawn from a population having a normal distribution?
shapiro.test(a_pre[,1])
shapiro.test(a_post[,1])
shapiro.test(b[,1])
# Do the samples have the same variance?
ansari.test(a_pre[,1], a_post[,1])
ansari.test(a_pre[,1], b[,1])
ansari.test(b[,1], a_post[,1])
# Two sets of bids from the same subjects
wilcox.test(a_pre[,1], a_post[,1], conf.int=TRUE, paired=TRUE)
# Two sets of bids from different subjects
wilcox.test(a_pre[,1], b[,1], conf.int=TRUE) # First bids from A and B
wilcox.test(b[,1], a_post[,1], conf.int=TRUE) # Only bid from B and second from A
# Compare second bids from Small/Large companies
wilcox.test(a_post[a_post[,2]=="Small",1], a_post[a_post[,2]=="Large",1], conf.int=TRUE)
# Estimates of work-hours
# Are the samples drawn from a population having a normal distribution?
shapiro.test(c_pre[,1])
shapiro.test(c_post[,1])
shapiro.test(d[,1])
# Do the samples have the same variance?
ansari.test(c_pre[,1], c_post[,1])
ansari.test(c_pre[,1], d[,1])
ansari.test(d[,1], c_post[,1])
wilcox.test(c_pre[,1], c_post[,1], conf.int=TRUE, paired=TRUE) # Two estimates from C
wilcox.test(c_pre[,1], d[,1], conf.int=TRUE) # First estimate from C and D
wilcox.test(d[,1], c_post[,1], conf.int=TRUE) # Only estimate from D and second from C
# Data from: Variability and Reproducibility in Software
# Engineering: A Study of Four Companies that Developed the Same System
# By Bente C.D. Anda, Dag I.K. Sjøberg and Audris Mockus.
# Note: Bids are in Euro not Norwegian Krona here.
# Those bidders who also submitted an estimate of the number of days
estimate.days=c(14, 28, 18, 94, 77, 91, 30, 49, 45, 77, 42, 77, 63, 49)
bid.amount=c(2630, 4970, 8750, 11880, 12190, 18510, 20000, 33250, 26880,
28700, 28950, 33880, 38360, 69060)
plot(estimate.days, bid.amount)
# Are sample subsets each drawn from a population having a normal distribution?
shapiro.test(estimate.days[estimate.days < 65])
shapiro.test(bid.amount[estimate.days < 65])
cor.test(estimate.days[estimate.days < 65], bid.amount[estimate.days < 65])
# Is the complete sample drawn from a population having a normal distribution?
shapiro.test(estimate.days)
t.test(estimate.days, conf.int=TRUE)
wilcox.test(estimate.days, conf.int=TRUE)
min(estimate.days)
max(estimate.days)
# Four bids accepted
devel_info=read.csv(paste0(ESEUR_dir, "economics/project-bid/project-develop.txt.xz"), as.is=TRUE, strip.white=TRUE)
wilcox.test(devel_info$scheduled.days, conf.int=TRUE)
# Actual days
wilcox.test(devel_info$actual.days, conf.int=TRUE)
plot(devel_info$agreed.price, devel_info$LOC)
plot(devel_info$actual.days, devel_info$LOC)
|
cf2a3d33465146ce5efc286a60561c8d753b079c
|
246ebd6fce5f1ea3d51fbdecb4b397af734b3d45
|
/server.R
|
b51ae861525c079c32e8f3b505102d61f70603b6
|
[] |
no_license
|
Frank-Sw/website-building-in-rmarkdown-and-Shiny
|
fa6ec8810d82135d58ea3d2fbfab7db585e379ea
|
0348b77224d18ea67862e123fd9face3158c6142
|
refs/heads/master
| 2020-04-07T02:48:45.976803
| 2017-07-03T09:45:22
| 2017-07-03T09:45:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,093
|
r
|
server.R
|
#R group
require(shiny)
shinyServer <- function(input, output) {
res=eventReactive(input$submit,{
name=as.character(input$name)
minHp=input$hp[1]
maxHp=input$hp[2]
res=mtcars
if (name!="") {
res=mtcars[name,]
}
res=res[res$hp<maxHp & res$hp>minHp,]
validate(need(nrow(res)>0, "No matches found"))
return(res)
})
output$res=renderDataTable({
res=res()
},options=list(hover = T, bordered = T, align="c", colnames = T, rownames = T, na="NA"))
output$resTable=downloadHandler(filename="data_table.csv",
content = function(file) {
write.csv(res,file,row.names = F)
}, contentType = "text/csv")
output$priceList=downloadHandler(filename = "cars_prices.csv",
content=function(file){
file.copy("price_list.csv")
},contentType = "text/csv")
}
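# Minimal way to run this app locally (sketch; assumes a matching ui object,
# e.g. defined in a ui.R file alongside this server.R):
# shiny::runApp()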
|
44529338a7e414a9b8a139f92a77a9f3df55bf05
|
64098b83f218221064dacb4307f9b844e9a70373
|
/R/mext-contract.R
|
29840cbd267d29af0e5d2dfe5f9ace2c01e479b9
|
[
"MIT"
] |
permissive
|
takuizum/irtfun2
|
07800c5e6abeb9eb1892724582be7b9ed2202387
|
def9eac15a1150804f3702cf3f84df1c638a1c38
|
refs/heads/master
| 2021-07-19T00:29:21.794826
| 2020-05-06T09:28:05
| 2020-05-06T09:28:05
| 151,583,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,375
|
r
|
mext-contract.R
|
#' A function that generates the conditional score distribution for a fixed ability under the 3-parameter logistic model
#'
#' @param trait a single ability (theta) value
#' @param a a slope parameter
#' @param b a location parameter
#' @param c a lower asymptote parameter
#' @param D a factor constant
#' @author The original Fortran77 program was developed by Inoue,S., December 1990., extended by Shibayama,T., January 1991., translated into R by Shibayama,T. September 2008., functionalized by Itamiya, C., & Shibuya. T., June 2018.
#' @references Kolen, M. J., & Brennan, R. L. (2014). Test Equating, Scaling, and Linking. Springer.
#' @export
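#' @examples
#' # A minimal sketch with made-up item parameters: the probabilities of raw
#' # scores 0..3 on a 3-item test for an examinee at theta = 0.
#' probability(0, a = c(1, 1.2, 0.8), b = c(-0.5, 0, 0.5), c = c(0, 0, 0))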
probability <- function(trait,a,b,c,D=1.702){
m <- a %>% as.vector() %>% length()
m1 <- m+1
ptheta <- matrix(0,m,1)
qtheta <- matrix(0,m,1)
prb <- matrix(0,m1,1)
#
ptheta<- c+(1-c)/(1+exp(-D*a*(trait-b)))
qtheta<- 1-ptheta
#
prb[1] <- qtheta[1]
prb[2] <- ptheta[1]
# recursive formula
for(j in 2:m){
l <- j -1 # item index of previous one
    j1 <- j+1 # position in the probability vector of the maximum score attainable through item j
    l1 <- l+1 # position in the probability vector of the maximum score attainable through item l
prb[j1] <- prb[l1]*ptheta[j]
for(i in l:1){
k <- i -1
i1 <- i+1
k1 <- k+1
prb[i1]<-prb[k1]*ptheta[j]+prb[i1]*qtheta[j]
}
prb[1] <- prb[1]*qtheta[j]
}
  # return the vector of score probabilities Pr(X = 0), ..., Pr(X = m)
  prb
}
#' A function that calculates the IRT observed-score distribution using the recursion formula.
#'
#' @param theta a vector of theta estimates (EAP, MAP, MLE, ...)
#' @param a a slope parameter.
#' @param b a location parameter
#' @param c a lower asymptote parameter
#' @param D a factor constant
#' @param output int. If 1, return the observed-score vector; if 2, draw a histogram of the score distribution.
#' @param name a plot title
#' @param color a plot color.
#' @export
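#' @examples
#' # A sketch with made-up abilities and item parameters (5 items); output = 2
#' # draws the histogram:
#' # obscore_dist(theta = rnorm(100), a = rep(1, 5), b = seq(-1, 1, 0.5),
#' #              c = rep(0, 5), output = 2)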
obscore_dist <- function(theta,a,b,c,D=1.702,name="test",color="cyan", output=1){
a <- as.matrix(a)
b <- as.matrix(b)
c <- as.matrix(c)
theta <- as.matrix(theta)
#--------------------------------------------------------------#
# Number of Items and Subjects
#--------------------------------------------------------------#
m <- nrow(a)
m1 <- m+1
n <- nrow(theta)
#--------------------------------------------------------------#
# Distribution of Ability
#--------------------------------------------------------------#
i <- sort.list(theta)
theta <- theta[i]
i <- matrix(1:n,1,n)
#--------------------------------------------------------------#
# Conditional Probabilities of Test Scores
#--------------------------------------------------------------#
prbtestscore<-matrix(0,n,m1)
for(i in 1:n){
prbtestscore[i,]<- t(probability(theta[i],a,b,c,D=D))
}
#--------------------------------------------------------------#
# Marginal Probabilities of Test Score
#--------------------------------------------------------------#
freq <- t(prbtestscore) %*% matrix(1,n,1)
freq <- cbind(matrix(0:m,m1,1),freq)
temp <- round(freq[,2])
score <- rep(freq[,1],temp)
if(output == 1){
return(score)
}else if(output==2){
mx <- max(score)
graphics::hist(score,freq=FALSE,ylim=c(0,0.15),breaks=seq(-0.5,(mx+0.5),1),col=color,main=name,cex.main=1.5)
}
}
# Comditional Probabilities of True Scores
truescore<-function(trait,a,b,c,D){
ptheta<- c+(1-c)/(1+exp(-D*a*(trait-b)))
truescore <- sum(ptheta, na.rm = T)
}
#' A function that calculates IRT true scores.
#'
#' @param theta a vector of theta estimates (EAP, MAP, MLE, ...)
#' @param a a slope parameter.
#' @param b a location parameter
#' @param c a lower asymptote parameter
#' @param D a factor constant
#' @export
tscore_dist <- function(theta,a,b,c,D=1.702){
#--------------------------------------------------------------#
a <- as.matrix(a)
b <- as.matrix(b)
c <- as.matrix(c)
theta <- as.matrix(theta)
# Number of Items and Subjects
m <- length(a) #n of items
m1 <- m+1
n <- length(theta) #n of subjects
# Distribution of Ability
i <- sort.list(theta)
theta <- theta[i]
i <- matrix(1:n,1,n)
tscore <- matrix(0,n,1)
for(i in 1:n){
tscore[i,] <- t(truescore(theta[i],a,b,c,D=D))
}
return(as.vector(round(tscore)))
}
dist_f <- function(x, mxc = NULL, mnc = NULL){
x <- as.vector(x) # vectorization
if(is.null(mnc)) mnc <- 0
if(is.null(mxc)) mxc <- max(x)
score <- mnc:mxc
m <- length(score)
  if(min(x) < mnc || max(x) > mxc) stop("max or min of score is incorrect!")
res <- data.frame(score = score, freq = rep(0,m))
f <- numeric(m)
for(i in score){
f[i+1] <- sum(x == i)
}
res$freq <- f
res <- res %>%
dplyr::mutate(cum_freq = cumsum(res$freq)) %>%
dplyr::mutate(percent = res$freq/sum(res$freq)*100) %>%
dplyr::mutate(cum_percent = cumsum(percent)) %>%
dplyr::mutate(ddf = res$freq/length(x)) %>%
dplyr::mutate(cddf = cumsum(ddf))
res
}
prf <- function(q,table){
xast <- round(q)
x1 <- q+1 #
x1ast <- round(x1)
cddf <- table$cddf
  # compute the percentile rank
  if(x1ast == 1){ # no frequency mass exists below a score of 0, so the lower cumulative frequency is taken as 0
    prf <- (0+(q-xast+0.5)*(cddf[x1ast]-0))*100
  } else if(x1ast >= 2){
    prf <- (cddf[x1ast-1]+(q-xast+0.5)*(cddf[x1ast]-cddf[x1ast-1]))*100
  } else if(x1ast == 0){ # scores of -0.5 or less get a percentile rank of 0
    prf <- 0
  }
  # Scores at or above the maximum get a percentile rank of 100; this relies
  # on prf evaluating to NA for inputs of the maximum + 0.5 or more.
if (is.na(prf)) prf <- 100
prf
}
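# Usage sketch (hypothetical scores): the percentile rank of a raw score of 2
# in a small distribution table built by dist_f().
# prf(2, dist_f(c(0, 1, 1, 2, 3, 3, 4)))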
pfU <- function(p,tabley2){
if(0 <= p && p < 100){
a <- tabley2[tabley2$cddf > (p/100),]
yU <- a$score[1]
FyU <- a$cddf[1]
b <- tabley2[tabley2$cddf <= (p/100),]
FyU1 <- b$cddf[yU]
if(nrow(b) == 0) FyU1 <- 0
pfU <- (p/100 - FyU1)/(FyU-FyU1) + yU - 0.5
}else{
pfU <- tabley2$score %>% max() %>% magrittr::add(0.5)
}
pfU
}
# The inverse percentile rank function, which uses the largest integer score with a cumulative percent that is less than `p`.
pfL <- function(p,tabley2){
if(0 < p && p <= 100){
a <- tabley2[tabley2$cddf >= (p/100),]
yL1 <- a[1,1]
FyL1 <- a$cddf[1]
b <- tabley2[tabley2$cddf < (p/100),]
FyL <- b$cddf[yL1]
if(nrow(b) == 0) FyL <- 0
pfL <-(p/100 - FyL)/(FyL1-FyL) + (yL1 - 1) + 0.5
}else if(p == 0){
pfL <- -0.5
}
pfL
}
#' Equipercentile equating function for raw scores.
#' This function generates the Form Y equipercentile equivalent of score x on Form X, eY(x).
#' @param x integer vector. test score of Form X
#' @param y integer vector. test score of Form Y
#' @param type character. if "U", result percentile is calculated by the smmallest integer score with a cum_percent that is greater then p,
#' "L", the largest score that is less than p or "both" output both of them.
#' @author Takumi Shibuya.
#' @examples
#' set.seed(0204)
#' X <- round(rnorm(1000) * 10 + 50)
#' Y <- round(rnorm(900) * 9 + 40)
#' res <- epe(x = X, y = Y)
#'
#' set.seed(0507)
#' X <- round(rnorm(1000) * 10 + 40)
#' X[X < 0] <- 0
#' Y <- round(rnorm(900) * 10 + 40)
#' res2 <- epe(x = X, y = Y)
#' @export
epe <- function(x, y, type = "both"){
# frequency distribution table
tablex <- dist_f(x)
tabley <- dist_f(y)
#n of item on Form X and Y
nx <- max(tablex$score)
ny <- max(tabley$score)
resultx <- matrix(0, nrow(tablex), 1)
resulty <- matrix(0, nrow(tabley), 1)
for (i in 0:nx) resultx[i+1, 1] <- prf(i,tablex)
for (i in 0:ny) resulty[i+1, 1] <- prf(i,tabley)
tabley2 <- data.frame(score = tabley$score, cddf = tabley$cddf, prf = resulty)
eYx <- matrix(0, nrow(tablex), 2)
for (i in 1:nrow(tablex)){
eYx[i,1] <- pfU(resultx[i],tabley2)
eYx[i,2] <- pfL(resultx[i],tabley2)
}
result <- cbind(tablex$score, eYx)
if(type == "both"){
result <- data.frame(x = tablex$score, ey_U = eYx[,1], ey_L = eYx[,2])
} else if(type == "L"){
result <- data.frame(x = tablex$score, ey = eYx[,2])
} else if(type == "U"){
result <- data.frame(x = tablex$score, ey = eYx[,1])
}
list(type = type, table = result, freq_x = tablex, freq_y = tabley, pr = tabley2)
}
|
6d674ec22196f566126d1faa30a83825fbf61ab9
|
c7a7e02bfe49d5195cda8cf973b09c24e3094b15
|
/NAM_250m_ensemble_4classes.R
|
7a49027948ad0fa8a1fe266f605714d63741dc57
|
[] |
no_license
|
iSDAgri/AlexVerlinden
|
836af00bae78d90da2e3fb52c442ac5eb8f983d3
|
867fbbf57c06a9a4b0347e2df398fa127c76315a
|
refs/heads/master
| 2021-05-04T13:07:43.265314
| 2017-05-07T13:35:24
| 2017-05-07T13:35:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,091
|
r
|
NAM_250m_ensemble_4classes.R
|
#' Prediction and local elastic net stacking of Namibia 10 k GeoSurvey woody cover classes predictions with additional Namibia 30 k GeoSurvey test data.
#' Modified by Alex Verlinden April 2016 after M.Walsh & J.Chen, April 2015
# This code models multiclasses of woody cover A=0 , B=0-10%, C= 10-30%, D=30-60%, E>60%
#+ Required packages
# install.packages(c("downloader","raster","rgdal","dismo","caret","glmnet", "DoMC")), dependencies=TRUE)
require(downloader)
require(raster)
require(rgdal)
require(dismo)
require(caret)
require(glmnet)
require(randomForest)
require(foreach)
#+ Data downloads ----------------------------------------------------------
# Create a "Data" folder in your current working directory
dir.create("NAM_bush", showWarnings=F)
dat_dir <- "./NAM_bush"
# download GeoSurvey Namibia data
download.file("https://www.dropbox.com/s/fopvazaeathk941/NAM_woody_class2015_4cl.csv?dl=0", "./NAM_bush/NAM_woody_class2015_4cl.csv", mode="wb")
bushclass <- read.table(paste(dat_dir, "/NAM_woody_class2015_4cl.csv", sep=""), header=T, sep=",")
bushclass <- na.omit(bushclass)
# download Namibia 250 m Gtifs (~ Mb) and stack in raster
download.file("https://www.dropbox.com/s/8mpat9mohyst1fj/NAMGRIDS_250.zip?dl=0", "./NAM_bush/NAMGRIDS_250.zip", mode="wb")
unzip("./NAM_bush/NAMGRIDS_250.zip", exdir="./NAM_bush", overwrite=T)
glist <- list.files(path="./NAM_bush", pattern="tif", full.names=T)
grid <- stack(glist)
#scale the grids (note: 't' masks the base::t transpose function in this session)
t=scale(grid, center=TRUE, scale=TRUE)
#+ Data setup --------------------------------------------------------------
# Project GeoSurvey coords to grid CRS
bush.proj <- as.data.frame(project(cbind(bushclass$Longitude, bushclass$Latitude), "+proj=laea +ellps=WGS84 +lon_0=20 +lat_0=5 +units=m +no_defs"))
colnames(bush.proj) <- c("x","y")
coordinates(bush.proj) <- ~x+y
projection(bush.proj) <- projection(grid)
# Extract gridded variables at GeoSurvey test data locations (n~26k)
bushgrid=extract(t, bush.proj)
# Assemble dataframes
# woody plant classes
bush0=bushclass$CLASS
bush0dat <- cbind.data.frame(bush0, bushgrid)
bush0dat <- na.omit(bush0dat)
# set train/test set randomization seed
seed <- 1385321
set.seed(seed)
#+ Split data into train and test sets ------------------------------------
bush0Index=createDataPartition(bush0dat$bush0, p = 0.75, list = FALSE, times = 1)
bush0Train <- bush0dat[ bush0Index,]
bush0Test <- bush0dat[-bush0Index,]
#+ Random forests <randomForest> -------------------------------------------
# out-of-bag predictions
oob <- trainControl(method = "oob")
# use all cores (workers)
library(doMC)
registerDoMC(cores=4)
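# If the core count of the machine is unknown, one option (sketch) is:
# registerDoMC(cores = max(1, parallel::detectCores() - 1))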
# random Forest
bush.rf <- train(bush0 ~ ., data = bush0Train,
method = "rf", metric= "Accuracy", importance=T,
trControl = oob)
bushrf.test <- predict(bush.rf, bush0Test) ## predict test-set
confusionMatrix(bushrf.test, bush0Test$bush0) ## print validation summaries
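# Optionally keep the confusion matrix object to extract single metrics (sketch):
# cm.rf <- confusionMatrix(bushrf.test, bush0Test$bush0)
# cm.rf$overall["Accuracy"] ## overall accuracy of the random forest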
bushrf.pred <- predict(t, bush.rf) ## spatial predictions
plot(bushrf.pred)
#importance of variables to RF
imprf=varImp(bush.rf)
plot(imprf, main = "Variables contribution to the RF regression, cover classes")
#+ Gradient boosting <gbm> ------------------------------------------
# CV for training gbm's
gbm <- trainControl(method = "repeatedcv", number = 10, repeats = 5, classProbs = TRUE)
# gradient boosting
bush.gbm <- train(bush0 ~ ., data = bush0Train,
method = "gbm", metric="Accuracy",
trControl = gbm )
bushgbm.test <- predict(bush.gbm, bush0Test) ## predict test-set
confusionMatrix(bushgbm.test, bush0Test$bush0) ## print validation summaries
bushgbm.pred=predict(t, bush.gbm)
#variable importance
impgbm=varImp(bush.gbm)
plot(impgbm, main = "Variables contribution to GBM cover classes")
#deepnet
tc <- trainControl(method = "cv", number = 5)
bush.dnn <- train(bush0 ~ ., data = bush0Train,
method = "dnn",
metric="Accuracy",
trControl = tc,
tuneGrid = expand.grid(layer1 = 0:12,
layer2 = 0:3,
layer3 = 0:3,
hidden_dropout = 0,
visible_dropout = 0))
bushdnn.test <- predict(bush.dnn, bush0Test) ## predict test-set
confusionMatrix(bushdnn.test, bush0Test$bush0) ## print validation summaries
bushdnn.pred=predict(t, bush.dnn)
impdnn=varImp(bush.dnn)
plot(impgbm, main = "Variables contribution to DNN cover classes")
#+ Plot predictions by GeoSurvey variables ---------------------------------
# bush classes
bush.preds <- stack(bushrf.pred, bushgbm.pred, bushdnn.pred)
names(bush.preds) <- c("randomForest","gradientboosting","deepnet")
plot(bush.preds, axes = F)
bushpred=extract(bush.preds, bush.proj)
#
bushens <- cbind.data.frame(bush0, bushpred)
bushens <- na.omit(bushens)
bushensTest <- bushens[-bush0Index,] ## replicate previous test set
# Regularized ensemble weighting on the test set
# 5-fold CV
ens <- trainControl(method = "cv", number = 5)
# ensemble of the 3 classifiers' predictions
# (the 'family' argument below is likely a leftover from a glmnet-based version
# and is not used by method = "rf")
bush.ens <- train(bush0 ~ randomForest + gradientboosting + deepnet, data = bushensTest,
                  family="binomial",
method = "rf",
trControl = ens)
bushens.pred <- predict(bush.preds, bush.ens) ## spatial prediction
plot(bushens.pred, main= "Ensemble of low,medium and high woody plant cover", cex.main=0.8)
bushens.test <- predict(bush.ens, bushensTest)
confusionMatrix(bushens.test, bushensTest$bush0) ## print validation summaries
impens=varImp(bush.ens)
plot(impens, main = "Regression contribution to Ensemble, four classes")
#+ Write spatial predictions -----------------------------------------------
# Create a "Results" folder in current working directory
dir.create("NAM_results", showWarnings=F)
# Export Gtif's to "./NAM_results"
#write tiff
rf=writeRaster(bushrf.pred,filename="./NAM_results/bushrf4cl.tif", format= "GTiff", overwrite = TRUE)
ens=writeRaster(bushens.pred,filename="./NAM_results/bushens4cl.tif", format= "GTiff", overwrite = TRUE)
|
18a6c117fa75775a822abb326e90ccd520c0b937
|
63f41cb145d21943bb010538becdfab5767a8714
|
/R/gbm_plots.R
|
a6fe54accb0a27836354501aaa0ee490708a472f
|
[] |
no_license
|
Daniel-Fuckner/Consulting
|
69e55f345c892eaf49718b97120ec698b1168161
|
8e095dfeaacdc92fb4717222b42671338d08a70f
|
refs/heads/master
| 2016-09-05T15:14:04.700223
| 2014-08-22T14:17:43
| 2014-08-22T14:17:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,433
|
r
|
gbm_plots.R
|
rm(list=ls())
#load packages
library(data.table)
library(gbm)
library(ggplot2)
library(scales)
library(reshape)
#load results
# load("C:/Users/Markus/Desktop/plotsData/gbm5_8.RData")
load("Z:/consulting/r_results/gbm5_8.RData")
trainIdx = res[[1]]
model = res[[2]]
pred = res[[3]]
res1 = res[[1]]
res2 = res[[2]]
res3 = res[[3]]
res4 = res[[4]]
model = res[[2]]
model1 = model[[4]]
# ?gbm.object
sum(res1$train.error < res1$valid.error) # = n.trees, as expected, but:
sum(res2$train.error < res2$valid.error) # = 0
# ?pretty.gbm.tree
pretty.gbm.tree(res2, 1000)
# ?gbm.perf
# check performance using an out-of-bag estimator
# OOB underestimates the optimal number of iterations
bestIter = gbm.perf(model1, method = "OOB")
print(bestIter)
# check performance using a 50% heldout test set
best.iter <- gbm.perf(res2, method="test")
print(best.iter)
# check performance using 5-fold cross-validation
best.iter <- gbm.perf(res2,method="cv")
print(best.iter)
# plot the performance # plot variable influence
summary.gbm(model1, n.trees=1) # based on the first tree
summary.gbm(model1, n.trees=bestIter) # based on the estimated best number of trees
# compactly print the first and last trees for curiosity
print(pretty.gbm.tree(res2,1))
print(pretty.gbm.tree(res2,res2$n.trees))
plot.gbm(res2, 3)
#########
###ROC###
#########
pdf(file="/zpool1/s10859017/consulting/r_results/roc.pdf")
pred = res[[3]]
for(j in 1 : length(pred)){ # for each Position
pred1 = as.data.table(pred[[j]])
grid = seq(min(pred1$p), max(pred1$p), length.out = 1000) # grid from the minimum to the maximum predicted probability
roc = data.table(sensi = numeric(1000), spezi = numeric(1000))
for(i in 1:1000){
p1 = as.numeric(pred1$p > grid[i]) # if above the cutoff, predict a transaction
table = as.data.table(cbind(y = pred1$Transaction, prediction = p1))
table1 = table[table$y == 1, ]
table0 = table[table$y == 0, ]
roc$sensi[i] = sum(table1$prediction == 1) / nrow(table1) # true positive rate (sensitivity)
roc$spezi[i] = sum(table0$prediction == 0) / nrow(table0) # true negative rate (specificity)
}
  p = ggplot(roc, aes(x = (1 - spezi), y = sensi)) +
    geom_line() +
    geom_segment(aes(x = 0, y = 0, xend = 1, yend = 1), color = "red") +
    labs(x = "1 - Specificity", y = "Sensitivity", title = paste("Position", j, sep = " ")) +
    scale_x_continuous(limits = c(0,1), oob=squish) +
    scale_y_continuous(limits = c(0,1), oob=squish)
  print(p) # ggplot objects inside a for loop must be printed explicitly to reach the PDF device
}
dev.off()
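# Possible extension (not in the original script): approximate the AUC of the
# last Position's ROC curve with the trapezoidal rule on the 'roc' table above.
# fpr = (1 - roc$spezi)[order(1 - roc$spezi)]
# tpr = roc$sensi[order(1 - roc$spezi)]
# sum(diff(fpr) * (head(tpr, -1) + tail(tpr, -1)) / 2) # trapezoidal AUC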
|
88e9306d40bf0567a05d03dba1e665712127143e
|
66f368728041f6f234f74bd2882f48693f190c3f
|
/conditional.R
|
da98679034c401da9502a9323a5d641012d4ced5
|
[] |
no_license
|
williamfried/Joint-Probability-Distribution-of-Atmospheric-Gravity-Wave-Parameters
|
c97d9670bbabff333078f44721a441f6fa3ae986
|
1a05ef47cb98f0a460d3ba554b577bf02a1a4c7d
|
refs/heads/master
| 2021-08-27T20:54:40.825311
| 2021-08-21T22:07:32
| 2021-08-21T22:07:32
| 177,266,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,367
|
r
|
conditional.R
|
setwd(path_to_data)
source('functions.R')
library('mixR')
library('stringr')
library('dplyr')
MLE_list = list(normal_MLE, lognormal_MLE, gamma_MLE, weibull_MLE, rayleigh_MLE, gumbel_MLE, burr_MLE, dagum_MLE, invgamma_MLE, genray_MLE, generalized_normal_MLE, polynomial_MLE)
names(MLE_list) = c('norm', 'lnorm', 'gamma', 'weibull', 'rayleigh', 'gumbel', 'burr', 'dagum', 'invgamma', 'genray', 'generalized_normal', 'polynomial')
cond_dist_both = function(U_parameter, V_parameter, frequency_train, U_intervals, V_intervals, task)
{
if (task == 'cross-validation')
{
distributions = c('gamma', 'weibull', 'rayleigh', 'lnorm')
#distributions = c('polynomial')
}
else if (task == 'modeling')
{
#distributions = c('gamma', 'weibull', 'rayleigh', 'lnorm', 'gumbel', 'burr', 'dagum', 'invgamma', 'mixture model')#, 'polynomial')
distributions = c('polynomial')
}
else
{
stop("'task' must be either 'cross-validation' or 'modeling'")
}
conditional_list = list()
for (i in 1:(length(U_intervals)-1))
{
for (j in 1:(length(V_intervals)-1))
{
segment = frequency_train[between(U_parameter, U_intervals[i], U_intervals[i+1]) & between(V_parameter, V_intervals[j], V_intervals[j+1])]
if (length(distributions) == 1)
{
dist = distributions
}
else
{
fit = fit_dist(segment, F, F, F, distributions, 'freq')
dist = names(fit)
}
if (grepl('mixture model', dist))
{
conditional_list[[paste(toString(U_intervals[i]), toString(V_intervals[j]))]] = list('dist' = 'mixture model', 'parameters' = mixture_model_MLE(segment, word(dist, 1, 1)))
}
else
{
conditional_list[[paste(toString(U_intervals[i]), toString(V_intervals[j]))]] = list('dist' = dist, 'parameters' = MLE_list[[dist]](segment))
}
}
}
return(conditional_list)
}
# split the data into a specified number of intervals that all contain the same number of data points
render_bins = function(bin_num, type)
{
if (type == 'U')
{
U_wavelength_intervals = unname(quantile(U_wavelength, seq(0,1,length=bin_num+1)))
U_wavelength_intervals[1] = 0
U_wavelength_intervals[bin_num+1] = U_wavelength_intervals[bin_num] + 10000
return(U_wavelength_intervals)
}
else if (type == 'V')
{
V_wavelength_intervals = unname(quantile(V_wavelength, seq(0,1,length=bin_num+1)))
V_wavelength_intervals[1] = 0
V_wavelength_intervals[bin_num+1] = V_wavelength_intervals[bin_num] + 10000
return(V_wavelength_intervals)
}
else
{
stop("'type' must be either 'U' or 'V'")
}
}
# determine which interval a given zonal/meridional wavelength value falls into
category = function(x, intervals)
{
for (i in 1:(length(intervals)-1))
{
if (x >= intervals[i] & x < intervals[i+1])
{
return(toString(intervals[i]))
}
}
}
# total number of parameters associated with maximum likelihood estimates of the conditional frequency distribution for each interval
conditional_list_param_num = function(cond_list)
{
tot = 0
for (i in 1:length(cond_list))
{
if (cond_list[[i]]$dist == 'polynomial')
{
add = 2
}
else if (cond_list[[i]]$dist == 'mixture model')
{
add = 3*length(cond_list[[i]]$parameters$fit[[1]])-1
}
else
{
add = length(cond_list[[i]]$parameters)
}
tot = tot + add
}
return(tot)
}
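# Sketch of a typical call (assumes U_wavelength, V_wavelength and
# frequency_train are available from the sourced functions/data):
# U_int = render_bins(4, 'U')
# V_int = render_bins(4, 'V')
# cond = cond_dist_both(U_wavelength, V_wavelength, frequency_train,
#                       U_int, V_int, task = 'modeling')
# conditional_list_param_num(cond) # total parameter count across all cells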
|
3c95f53745e1bf4f824bf108b0b477cb23e37603
|
aab676f026945eceba5b5cda9c783654f4f01b01
|
/download_data.R
|
8e661817bd05b659fed2e6ef64adad0033308b2c
|
[] |
no_license
|
shudengnyc/human_methylation_data
|
237c194cccf082647c53638998fe689d9acdc460
|
e8d2f847c375a946efd88f201e0a4fe87ce69b88
|
refs/heads/master
| 2020-05-18T22:08:35.282813
| 2019-05-09T16:01:38
| 2019-05-09T16:01:38
| 184,683,293
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 85
|
r
|
download_data.R
|
library(MethylMix)
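# download the TCGA ovarian cancer ("OV") DNA methylation data into the working directory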
Download_DNAmethylation(CancerSite = "OV", TargetDirectory = ".")
|
5ea4de7cbe41acc997679e472423d4b8201e66ba
|
124df74bd27893e5d0de7f6ea48f5b2d7ac34c4f
|
/Chapter13/R/03-score-dataset-using-tidymodels-model.R
|
266ec070e99c0cefff0ad2797a843507bc006f92
|
[
"MIT"
] |
permissive
|
Micseb/Extending-Power-BI-with-Python-and-R
|
5dc3cf4051312e4d0e5bc915c17c8bb735e0a39b
|
12791b8f1499f70c9c7b0c9aeb2016d12e41f845
|
refs/heads/main
| 2023-08-24T07:08:43.000627
| 2021-11-01T07:50:24
| 2021-11-01T07:50:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 402
|
r
|
03-score-dataset-using-tidymodels-model.R
|
library(readr)
library(tidymodels)
project_folder <- r'{C:\<your-path>\Chapter13\}'
titanic_testing <- read_csv(file.path(project_folder, 'titanic-test.csv'))
# Unserialize the model previously trained
rf_final <- readRDS(file.path(project_folder, r'{R\titanic-model.RDS}'))
# Get model predictions for the input dataframe
pred <- predict(rf_final, new_data = titanic_testing, type = 'prob')
pred
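# For hard class labels instead of probabilities, the same parsnip API supports (sketch):
# predict(rf_final, new_data = titanic_testing, type = 'class')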
|
dff853c8b762d642c799db26d1ff16842af06f2a
|
bb9604e7a0be12fb3e259e990695f6e1d222e57d
|
/man/GTEX_LIVER_CRUDE.Rd
|
2daa3efbf68e95adb224748f8860873f2310c4f0
|
[] |
no_license
|
ManuelGoepferich/LINC_justlinc
|
0323374a04cc8a86f69b7a7c0f081ad7ec3a834d
|
4d672fac3b4d4ef718f5853918638ad4e2d36ecc
|
refs/heads/master
| 2020-12-03T07:41:27.495279
| 2016-08-17T13:50:34
| 2016-08-17T13:50:34
| 66,368,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 516
|
rd
|
GTEX_LIVER_CRUDE.Rd
|
\name{GTEX_LIVER_CRUDE}
\alias{GTEX_LIVER_CRUDE}
\alias{LIVER_EXPR}
\docType{data}
\title{
mRNA Expression Of Normal LIVER From GTEx
}
\description{
mRNA expression in LIVER
}
\usage{data(LIVER_EXPR)}
\format{
A matrix
}
\value{
gene expression matrix
}
\source{
http://www.gtexportal.org
Genotype-Tissue Expression (GTEx)
}
\references{
Carithers et al.
Biopreservation and Biobanking. October 2015, 13(5): 311-319. doi:10.1089/bio.2015.0032.
PMID: 26484571.
}
\examples{
data(LIVER_EXPR)
}
\keyword{datasets}
|
bd909468de16f494c65dfb03a9a69e1c2e67e25f
|
789dd3039ae8c7a1b29582e563c66f2f3b573e9b
|
/VIZ/raymat.r
|
1aabbb7737e17f5848a04a111868d652d4cbcb7f
|
[] |
no_license
|
Aurametrix/R
|
44ecb2969e0eb39120176692761304adae7a3539
|
affb2b2e06b94ff8a1c8d552aa3b996b0158911f
|
refs/heads/master
| 2023-01-31T22:28:15.893079
| 2023-01-27T01:17:57
| 2023-01-27T01:17:57
| 16,440,534
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 867
|
r
|
raymat.r
|
library(rayshader)
#Here, I load a map with the raster package.
loadzip = tempfile()
download.file("https://tylermw.com/data/dem_01.tif.zip", loadzip)
localtif = raster::raster(unzip(loadzip, "dem_01.tif"))
unlink(loadzip)
#And convert it to a matrix:
elmat = matrix(raster::extract(localtif,raster::extent(localtif),buffer=1000),
nrow=ncol(localtif),ncol=nrow(localtif))
#We use another one of rayshader's built-in textures:
elmat %>%
sphere_shade(texture = "desert") %>%
plot_map()
#sphere_shade can shift the sun direction:
elmat %>%
sphere_shade(sunangle = 45, texture = "desert") %>%
plot_map()
raymat = ray_shade(elmat)
#And we can add a raytraced layer from that sun direction as well:
elmat %>%
sphere_shade(texture = "desert") %>%
add_water(detect_water(elmat), color="desert") %>%
add_shadow(raymat) %>%
plot_map()
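#The same pipeline can also be rendered in 3D (sketch; opens an rgl window):
#elmat %>%
#  sphere_shade(texture = "desert") %>%
#  add_water(detect_water(elmat), color = "desert") %>%
#  add_shadow(raymat) %>%
#  plot_3d(elmat, zscale = 10)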
|
21606bb4d45e3863c269edd398a8ef6abb0fc29d
|
f464a388b87c7c9d0af4e25d693c7fb9879ebd29
|
/PH125.4 Inference and modeling/R code/POLL DATA AND POLLSTER BIAS.R
|
e3600b56a3d99a244706742b6c7eac549d905fb9
|
[] |
no_license
|
alvarobarbera/HarvardX-PH125-DataScience
|
5e57e8d5d36bc57992fbf9a6477a51465ef5fc02
|
1a152f47b20131d71bc61ac94282867843f5a3ae
|
refs/heads/master
| 2022-07-12T09:21:19.496692
| 2020-05-15T08:37:51
| 2020-05-15T08:37:51
| 260,189,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,751
|
r
|
POLL DATA AND POLLSTER BIAS.R
|
data("polls_us_election_2016")
# keep only national ("U.S.") polls ending after 31 Oct with top grades or no grade
polls <- polls_us_election_2016 %>%
filter(state == "U.S." & enddate >= "2016-10-31" &
(grade %in% c("A+","A","A-","B+") | is.na(grade)))
# include a column for spread
polls <- polls %>%
mutate(spread = rawpoll_clinton/100 - rawpoll_trump/100)
# we call p for Clinton and (1-p) for Trump
# we are interested in the difference d = 2p - 1
# there are 49 estimates of the spread
# we know they follow approx. a normal function with
# expected value d and standard error 2*sqrt(p*(1-p)/N)
d_hat <- polls %>%
summarise(d_hat = sum(spread*samplesize)/sum(samplesize)) %>%
pull(d_hat)
# to calculate the standard error, we know that p_hat is (d+1)/2
# and N is polls$samplesize
p_hat <- (d_hat+1)/2
moe <- 1.96 * 2 * sqrt(p_hat*(1-p_hat)/sum(polls$samplesize))
# We report an expected spread (difference) of d_hat ~ 1.43%
# and a margin of error moe ~ 0.66%
d_hat
moe
# So we report a spread of 1.43% with a margin of error of 0.66%.
# On election night, we discover that the actual percentage was 2.1%,
# which is outside a 95% confidence interval. What happened?
# A histogram of the reported spreads shows a problem:
polls %>%
ggplot(aes(spread)) + geom_histogram(binwidth = .01,col="black")
# The distribution does not appear to be normal, and the standard error
# is larger than 0.0066
################################# POLLSTER BIAS #######################################
# Note that various pollsters are involved and some are taking several polls a week
polls %>% group_by(pollster) %>% summarise(n())
polls %>% group_by(pollster) %>%
filter(n()>=6) %>%
ggplot(aes(spread,pollster)) + geom_point()
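# (extension, sketch) quantify pollster-to-pollster variability: average spread
# and its standard deviation per pollster, for pollsters with at least 6 polls
polls %>% group_by(pollster) %>%
  filter(n() >= 6) %>%
  summarise(avg = mean(spread), sd = sd(spread))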
|
98244f6395d3432dfb0682c964fc56632154bb80
|
05ffda84f992287c02bb55c82029bc995d57be37
|
/R/rollDbn.R
|
202929fbca949cc6d8080d5e067fe77c0f1c553b
|
[] |
no_license
|
sap01/TGS
|
b933928a291d3cc2c3d1d2a6b825971dc758d64f
|
c639074ec5d4e734ad00bbec1d713e60ae75b181
|
refs/heads/master
| 2021-07-13T21:07:53.655812
| 2020-05-22T14:04:41
| 2020-05-22T14:04:41
| 139,746,838
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,499
|
r
|
rollDbn.R
|
#' Convert a given unrolled Dynamic Bayesian Network (DBN) into a rolled DBN using different rolling methods
#'
#' Rolls time-varying networks into a single time-invariant network.
#' This function is compatible with the time-varying networks learnt through
#' learnDbnStruct3dParDeg1.R::learnDbnStructMo1Layer3dParDeg1().
#'
#' @param num.nodes Number of the desired nodes in the rolled DBN
#' @param node.names Names of the desired nodes in the rolled DBN
#' @param num.timepts Number of time points in the unrolled DBN
#' @param unrolled.DBN.adj.matrix Given unrolled DBN adjacency matrix. It is a 2D matrix of dimension ((num.nodes X num.timepts) X (num.nodes X num.timepts)).
#' @param roll.method Which rolling method to use from {'any', 'all', or some real number in (0, 1), like - 0.5}.
#' @param allow.self.loop Boolean to decide whether to allow self loop or not in the rolled DBN
#'
#' @return rolled.DBN.adj.matrix Return the rolled DBN adjacency matrix. It is a 2D matrix of dimension (num.nodes * num.nodes).
#'
#' @keywords internal
#' @noRd
rollDbn <- function(num.nodes, node.names, num.timepts, unrolled.DBN.adj.matrix, roll.method, allow.self.loop) {
if(!base::is.matrix(unrolled.DBN.adj.matrix))
{
base::stop("Error in rollDbn unrolled.DBN.adj.matrix is not a matrix")
}
## Initialize rolled DBN adj matrix as a zero matrix
rolled.DBN.adj.matrix <- base::matrix(0, nrow = num.nodes, ncol = num.nodes,
dimnames = base::c(base::list(node.names), base::list(node.names)))
num.time.trans <- num.timepts - 1 # Num of time transitions
# todo: replace with foreach dopar
for (tgt.node.idx in 1:ncol(rolled.DBN.adj.matrix))
{
tgt.node.name <- base::colnames(rolled.DBN.adj.matrix)[tgt.node.idx]
# grep('^G1', tmpvec, fixed = FALSE) returns the indices of the elements in 'tmpvec' whose values start with 'G1'.
# '^G1' is the given pattern.
# 'fixed = FALSE' represents that the given pattern is a regular expression.
unrolled.DBN.tgt.node.indices <- base::grep(base::paste('^', tgt.node.name, sep = ''),
base::colnames(unrolled.DBN.adj.matrix),
fixed = FALSE)
unrolled.DBN.adj.matrix.tgt.node <- unrolled.DBN.adj.matrix[, unrolled.DBN.tgt.node.indices]
# If the value corr. to a row in 'unrolled.DBN.adj.matrix.tgt.node.single.col'
# is greater than zero, then the node corr. to the row name is a parent of the target node
unrolled.DBN.adj.matrix.tgt.node.single.col <-
base::matrix(rowSums(unrolled.DBN.adj.matrix.tgt.node),
nrow = base::nrow(unrolled.DBN.adj.matrix.tgt.node), ncol = 1,
dimnames = base::c(base::list(base::rownames(unrolled.DBN.adj.matrix.tgt.node)),
tgt.node.name))
# After execution of this for loop,
# rolled.DBN.adj.matrix[src.node.name, tgt.node.name] represents how many times there is an edge from
# the src node to the tgt node in the unrolled DBN. The value is an integer in the interval [0, num.time.trans].
for (src.node.name in node.names)
{
unrolled.DBN.src.node.indices <- base::grep(paste('^', src.node.name, sep = ''),
base::rownames(unrolled.DBN.adj.matrix.tgt.node.single.col),
fixed = FALSE)
rolled.DBN.adj.matrix[src.node.name, tgt.node.name] <-
base::sum(unrolled.DBN.adj.matrix.tgt.node.single.col[unrolled.DBN.src.node.indices, tgt.node.name])
}
}
roll.threshold <- NULL
if(base::is.character(roll.method))
{
if (roll.method == 'any') # Insert an edge in rolled DBN if it is present at least for one time transition
{
roll.threshold <- 1
}
else if (roll.method == 'all') # Insert an edge in rolled DBN if it is present at every time transition
{
roll.threshold <- num.time.trans
}
}
else if (base::is.numeric(roll.method)) # Insert an edge in rolled DBN if it is present in at least (roll.method * num.time.trans) time transitions
{
if ((roll.method > 0) & (roll.method < 1))
{
roll.threshold <- num.time.trans * roll.method
}
else
{
# print('\'roll.method\' accepts numeric values in the interval (0,1)')
base::stop('\'roll.method\' accepts numeric values in the interval (0,1)')
}
  }
  if (base::is.null(roll.threshold))
    base::stop("'roll.method' must be 'any', 'all', or a numeric value in (0,1)")
# writeLines('\n rolled.DBN.adj.matrix = \n')
# print(rolled.DBN.adj.matrix)
for (tgt.node.idx in 1:ncol(rolled.DBN.adj.matrix))
{
for (src.node.idx in 1:nrow(rolled.DBN.adj.matrix))
{
if (rolled.DBN.adj.matrix[src.node.idx, tgt.node.idx] >= roll.threshold)
{
rolled.DBN.adj.matrix[src.node.idx, tgt.node.idx] <- 1
}
else
{
rolled.DBN.adj.matrix[src.node.idx, tgt.node.idx] <- 0
}
}
}
# Remove self loops if 'allow.self.loop' = FALSE
if (!allow.self.loop)
{
base::diag(rolled.DBN.adj.matrix) <- 0
}
return(rolled.DBN.adj.matrix)
}
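## Minimal sketch (not part of the package API) of how rollDbn() could be called
## on a toy unrolled network; all names and values below are illustrative.
# nn <- c('G1', 'G2')
# cn <- paste(rep(nn, 3), rep(1:3, each = 2), sep = '_t') # 'G1_t1', 'G2_t1', ...
# unrolled <- matrix(0, 6, 6, dimnames = list(cn, cn))
# unrolled['G1_t1', 'G2_t2'] <- 1 # edge G1 -> G2 in the first time transition
# rollDbn(num.nodes = 2, node.names = nn, num.timepts = 3,
#         unrolled.DBN.adj.matrix = unrolled, roll.method = 'any',
#         allow.self.loop = FALSE) # yields a 2 x 2 matrix with entry [G1, G2] = 1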
#' Convert a given unrolled Dynamic Bayesian Network (DBN) into a rolled DBN using different rolling methods
#'
#' Rolls time-varying networks into a single time-invariant network.
#' This function is compatible with the time-varying networks learnt through
#' learnDbnStruct3dParDeg1.R::learnDbnStructMo1Layer3dParDeg1_v2().
#'
#' @param num.nodes Number of the desired nodes in the rolled DBN
#' @param node.names Names of the desired nodes in the rolled DBN
#' @param num.timepts Number of time points in the unrolled DBN
#' @param unrolled.DBN.adj.matrix.list Given time-varying network adjacency list. Its length =
#' num.time.trans = (num.timepts - 1). The t^{th} element of the list represents the predicted
#' network adjacency matrix of the t^{th} time transition. This matrix is of dimension
#' (num.nodes x num.nodes).
#' @param roll.method Which rolling method to use from {'any', 'all', or some real number in (0, 1), like - 0.5}.
#' @param allow.self.loop Boolean to decide whether to allow self loop or not in the rolled DBN
#'
#' @return rolled.DBN.adj.matrix Return the rolled DBN adjacency matrix. It is a 2D matrix of dimension (num.nodes * num.nodes).
#'
#' @keywords internal
#' @noRd
rollDbn_v2 <- function(num.nodes, node.names, num.timepts, unrolled.DBN.adj.matrix.list,
roll.method, allow.self.loop) {
if(!base::is.list(unrolled.DBN.adj.matrix.list))
{
base::stop("Error in rollDbn_v2 unrolled.DBN.adj.matrix.list is not a list")
}
## Initialize rolled DBN adj matrix as a zero matrix
rolled.DBN.adj.matrix <- base::matrix(0, nrow = num.nodes, ncol = num.nodes,
dimnames = base::c(base::list(node.names), base::list(node.names)))
num.time.trans <- num.timepts - 1 # Num of time transitions
for (list.idx in 1:num.time.trans)
{
rolled.DBN.adj.matrix <- rolled.DBN.adj.matrix + unrolled.DBN.adj.matrix.list[[list.idx]]
}
base::rm(list.idx)
roll.threshold <- NULL
if(base::is.character(roll.method))
{
if (roll.method == 'any') # Insert an edge in rolled DBN if it is present at least for one time transition
{
roll.threshold <- 1
}
else if (roll.method == 'all') # Insert an edge in rolled DBN if it is present at every time transition
{
roll.threshold <- num.time.trans
}
}
else if (base::is.numeric(roll.method)) # Insert an edge in rolled DBN if it is present in at least (roll.method * num.time.trans) time transitions
{
if ((roll.method > 0) & (roll.method < 1))
{
roll.threshold <- num.time.trans * roll.method
}
else
{
# print('\'roll.method\' accepts numeric values in the interval (0,1)')
base::stop('\'roll.method\' accepts numeric values in the interval (0,1)')
}
  }
  if (base::is.null(roll.threshold))
    base::stop("'roll.method' must be 'any', 'all', or a numeric value in (0,1)")
# writeLines('\n rolled.DBN.adj.matrix = \n')
# print(rolled.DBN.adj.matrix)
for (tgt.node.idx in 1:ncol(rolled.DBN.adj.matrix))
{
for (src.node.idx in 1:nrow(rolled.DBN.adj.matrix))
{
if (rolled.DBN.adj.matrix[src.node.idx, tgt.node.idx] >= roll.threshold)
{
rolled.DBN.adj.matrix[src.node.idx, tgt.node.idx] <- 1
}
else
{
rolled.DBN.adj.matrix[src.node.idx, tgt.node.idx] <- 0
}
}
}
base::rm(tgt.node.idx)
# Remove self loops if 'allow.self.loop' = FALSE
if (!allow.self.loop)
{
base::diag(rolled.DBN.adj.matrix) <- 0
}
return(rolled.DBN.adj.matrix)
}
|
9889afe530d1284313b5ae5efc9edbe05aeb91d6
|
1ff5957bcdea934cd13460d578227a7d9adc5910
|
/man/mapToDiscreteTime.Rd
|
2875292d6cdc84b096e1817352de72f096c27d5f
|
[
"MIT"
] |
permissive
|
jarrod-dalton/discretetime
|
40c0c91da79c4cda096a1fdca509bb46df8f20f0
|
9c073e8f5cd5e74fe5b01cc605be65e34bd9296b
|
refs/heads/master
| 2021-05-11T14:11:49.280770
| 2018-01-16T17:54:59
| 2018-01-16T17:54:59
| 117,697,769
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,379
|
rd
|
mapToDiscreteTime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapToDiscreteTime.R
\name{mapToDiscreteTime}
\alias{mapToDiscreteTime}
\title{Map Irregular Data to a Discrete Time Grid via Interpolation}
\usage{
mapToDiscreteTime(data, xvar = "value", timevar = "t", start, end, dt,
method = "constant", rule = 2, ...)
}
\arguments{
\item{data}{A data frame}
\item{xvar}{Character string indicating the column name in \code{data}
that contains the measurements}
\item{timevar}{Character string indicating the column name in \code{data}
that contains the (numeric) time values associated with the measurements
in \code{xvar}.}
\item{start}{Numeric value of the starting time over which the discrete time data
will be generated. Defaults to \code{min(data[[timevar]])}.}
\item{end}{Numeric value of the ending time over which the discrete time data
will be generated. Defaults to \code{max(data[[timevar]])}.}
\item{dt}{Numeric value representing the time increment of measurements over the
time interval given by \code{start} and \code{end}. Must satisfy
\code{dt <= (end - start)}.}
\item{method}{Interpolation method passed to \code{approx()}. Defaults to
\code{"constant"}. See \code{help(approx)} for details.}
\item{rule}{Rule by which observations outside the interval
\code{[min(data[[timevar]]), max(data[[timevar]])]} are handled. Defaults to \code{rule=2};
see \code{help(approx)} for details.}
\item{...}{Other parameters passed to \code{approx()}.}
}
\value{
A data frame containing equally-spaced, interpolated values of \code{xvar}
over the specified time grid. Columns in \code{data} other than \code{xvar}
and \code{timevar} are ignored (see Details).
}
\description{
This function takes irregularly-spaced observations in time and,
through a call to \code{approx}, interpolates the data at a user-specified,
fixed grid of equally-spaced time points. The function is essentially a
wrapper to \code{approx} that allows data frames as input (as opposed to
individual vectors for the x variable and time variable). As such, it
provides a useful framework for grouped data operations such as
\code{plyr::ddply}.
}
\details{
The function ignores columns in \code{data} other than \code{xvar} and
\code{timevar}.
}
\examples{
irregularData <- data.frame(value=1:10, t=c(1,4,5,7,8,10,11,13,17,21))
mapToDiscreteTime(irregularData, end=24, dt=2)
mapToDiscreteTime(irregularData, end=24, dt=0.5)
}
|
a4c893601e77bb78da985c894456679afde0abed
|
d97e2169ce9cd893920a54cffa3e754d1e309e6f
|
/man/calc_pls_args.Rd
|
6df3c372aaa68686ef967cd3af8eda2326eee2fe
|
[] |
no_license
|
bpollner/aquap2
|
5ccef0ba4423413e56df77a1d2d83967bffd1d04
|
7f7e2cf84056aad4c8a66f55d099b7bdaa42c0be
|
refs/heads/master
| 2021-07-22T15:07:22.912086
| 2021-05-27T12:50:22
| 2021-05-27T12:50:22
| 30,932,899
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,348
|
rd
|
calc_pls_args.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_plsr.r
\name{calc_pls_args}
\alias{calc_pls_args}
\title{Calculate PLSR - Arguments}
\arguments{
\item{do.pls}{Logical. If used in \code{getap}, indicates whether PLSR models
should be calculated with a given dataset.}
\item{pls.regOn}{NULL or character vector. Which variables should be used
to regress on. Set to NULL for using all numerical variables to regress on, or
provide a character vector with the column names of numerical variables to use
those for regression in the PLSR.}
\item{pls.ncomp}{NULL or integer length one. The number of components used in
PLSR. Set to NULL for automatic detection, or provide an integer to use this
number of components in the PLSR.}
\item{pls.valid}{Character. Which crossvalidation to use. Possible values are:
\itemize{
\item "def" Read in the default value from settings.r (parameter
\code{plsr_calc_typeOfCrossvalid})
\item A numeric length one for this n-fold crossvalidation. The default is
to always exclude resp. include consecutive scans together.
\item A valid name of a class variable for performing a crossvalidation
based on the grouping defined by this variable. For a class variable
containing e.g. four different levels, a 4-fold crossvalidation with always
all members of one group being excluded is performed.
This is overruling any grouping that would result from the consecutive
scans, please see below.
\item "LOO" for a leave-one-out crossvalidation
}
If a vector with the same length as the vector in \code{pls.regOn} is
provided, each element of \code{pls.valid} is used for crossvalidating the
corresponding element in \code{pls.regOn}. Any of the above mentioned input
types can be mixed, so the input could be e.g.
\code{pls.valid <- c("C_FooBar", 10, "C_BarFoo", 10)}. The corresponding
\code{pls.regOn} input for this would then be e.g.
\code{pls.regOn <- c("Y_FooBar", "Y_FooBar", "Y_BarFoo", "Y_BarFoo")}.
Please note that via the parameter \code{plsr_calc_CV_consecsTogether} in
the settings file you can select if for crossvalidation the
\strong{consecutive scans} (i.e. the scans with the same sample number) should
always be excluded or included together. The default is to always exclude resp.
include the consecutive scans of a single sample together.}
\item{pls.exOut}{Logical. If a plsr-specific box-plot based outlier-detection
algorithm should be used on the data of a first plsr model to determine the
outliers that then will be excluded in the final plsr model. Possible values
are:
\itemize{
\item "def" Read in the default value from settings.r (parameter
\code{plsr_calc_excludePlsrOutliers})
\item TRUE for excluding plsr specific outliers
\item FALSE for not performing the plsr specific outlier exclusion
}
If a vector with the same length as the vector in \code{pls.regOn} is
provided, each element of \code{pls.exOut} is used to perform the
corresponding outlier-detection (or not) for each element in
\code{pls.regOn}.}
}
\description{
The following parameters can be used in the \code{...} argument in
function \code{\link{getap}}, also within function \code{\link{gdmm}}, to
override the values in the analysis procedure file and so to modify the
calculation of PLSR models - see examples.
\describe{
\item{\code{getap(...)}}{ }
\item{\code{gdmm(dataset, ap=getap(...))}}{ }
}
}
\details{
For a list of all parameters that can be used in the \code{...}
argument in \code{\link{getap}} and in the \code{\link{plot}} functions
please see \code{\link{anproc_file}}.
}
\section{Note}{
Calculation of PLSR models is done with the function
\code{\link[pls]{plsr}}.
}
\examples{
\dontrun{
dataset <- gfd()
cube <- gdmm(dataset, ap=getap(pls.regOn="Y_Temp"))
cube <- gdmm(dataset, ap=getap(pls.ncomp=5, pls.regOn="Y_Foo"))
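# mixing crossvalidation types per response variable (illustrative; see 'pls.valid' above)
cube <- gdmm(dataset, ap=getap(pls.regOn=c("Y_Temp","Y_Foo"), pls.valid=c(10,"LOO")))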
}
}
\seealso{
\code{\link{gdmm}}
Other Calc. arguments:
\code{\link{calc_NNET_args}},
\code{\link{calc_SVM_args}},
\code{\link{calc_aqg_args}},
\code{\link{calc_discrimAnalysis_args}},
\code{\link{calc_pca_args}},
\code{\link{calc_randomForest_args}},
\code{\link{calc_sim_args}},
\code{\link{split_dataset}}
Other PLSR documentation:
\code{\link{plot_pls,aquap_cube-method}},
\code{\link{plot_pls_args}},
\code{\link{plot_pls_indepPred}()}
}
\concept{Calc. arguments}
\concept{PLSR documentation}
|
2582835555c6a499d649a80ea3778bd316b4e754
|
c2a6015d964e0a004fa4ac9c59df8aed039cc4fc
|
/R/setCaptionNumberingKnitrHook.R
|
0826594aaf2d5d9ee3667012291154679d55360b
|
[] |
no_license
|
cran/ufs
|
27083e54b6e4c89f802c4de9218dbbd7c7d4260d
|
74bcfb60160bced552d79d301b739bb965d1a156
|
refs/heads/master
| 2023-06-23T09:48:11.331297
| 2023-06-09T15:30:03
| 2023-06-09T15:30:03
| 145,907,951
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,544
|
r
|
setCaptionNumberingKnitrHook.R
|
#' Set a knitr hook for caption numbering
#'
#' Set a knitr hook to automatically number captions for, e.g., figures
#' and tables. `setCaptionNumberingKnitrHook()` is the general purpose
#' function; you normally use `setFigCapNumbering()` or `setTabCapNumbering()`.
#'
#' @param captionName The name of the caption; for example, `fig.cap`
#' or `tab.cap`.
#' @param prefix,suffix The prefix and suffix; any occurrences of
#' `\%s` will be replaced by the number.
#' @param optionName The name to use for the option that keeps track
#' of the numbering.
#' @param resetCounterTo Whether to reset the counter (as stored in the
#' options), and if so, to what value (set to `FALSE` to prevent resetting).
#'
#' @return `NULL`, invisibly.
#' @rdname setCaptionNumberingKnitrHook
#' @export
#'
#' @examples ### To start automatically numbering figure captions
#' setFigCapNumbering();
#'
#' ### To start automatically numbering table captions
#' setTabCapNumbering();
setCaptionNumberingKnitrHook <- function (captionName = "fig.cap",
prefix = "Figure %s: ",
suffix = "",
optionName = paste0("setCaptionNumbering_",
captionName),
resetCounterTo = 1) {
### Store prefix and suffix
do.call(options,
stats::setNames(list(prefix, suffix),
nm = paste0(optionName,
c("_prefix",
"_suffix"))));
### Reset counter here; not in the knitr hook,
### otherwise it's reset every time that's called
if (is.numeric(resetCounterTo)) {
counter <- resetCounterTo;
do.call(options,
stats::setNames(list(counter),
nm = optionName));
}
### Define hook function
hookFunction <-
function(options) {
options[[captionName]] <-
insertNumberedCaption(
captionText = options[[captionName]],
captionName = captionName,
prefix = prefix,
suffix = suffix,
optionName = optionName,
resetCounterTo = NULL
)
return(options);
}
### Set hook function
do.call(knitr::opts_hooks$set,
stats::setNames(list(hookFunction),
captionName));
return(invisible(NULL));
}
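## Sketch of intended use (illustrative, based on the documentation above): call
## setFigCapNumbering() in the setup chunk of an R Markdown document; chunks that
## set fig.cap = "A scatterplot" then render as "Figure 1: A scatterplot",
## "Figure 2: ...", with the counter incremented automatically per chunk.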
|
78a4d53da2d15ec25c1ea9b3a9a5381b141ccc3b
|
512ee36607bbfde0f74429fabee54a004352875c
|
/code/autodetec.R
|
5066b8a999d79b6b765f1b12157bb9efee0e7b66
|
[] |
no_license
|
fsfelix/tcc
|
7d930aef125b7f6ee94f2335be45a9932bbf8c94
|
2394b5de32b36554657ba868be8c29ea0e7e89ee
|
refs/heads/master
| 2021-03-24T12:28:12.855753
| 2017-12-05T14:12:09
| 2017-12-05T14:12:09
| 91,889,044
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,947
|
r
|
autodetec.R
|
#' Automatically detect vocalizations in sound files
#'
#' \code{autodetec} automatically detects the start and end of vocalizations in sound files based
#' on amplitude, duration, and frequency range attributes.
#' @usage autodetec(X = NULL, threshold = 15, envt = "abs", ssmooth = NULL, msmooth = NULL,
#' power = 1, bp = NULL, osci = FALSE, wl = 512, xl = 1, picsize = 1, res = 100,
#' flim = c(0,22), ls = FALSE, sxrow = 10, rows = 10, mindur = NULL, maxdur =
#' NULL, redo = FALSE, img = TRUE, it = "jpeg", set = FALSE, flist = NULL, smadj = NULL,
#' parallel = 1, path = NULL, pb = TRUE, pal = reverse.gray.colors.2,
#' fast.spec = FALSE, ...)
#' @param X Data frame with results from \code{\link{manualoc}} function or any data frame with columns
#' for sound file name (sound.files), selection number (selec), and start and end time of signal
#' (start and end).
#' @param threshold A numeric vector of length 1 specifying the amplitude threshold for detecting
#' signals (in \%).
#' @param envt Character vector of length 1 specifying the type of envelope to
#' be used: "abs" for absolute amplitude envelope or "hil" for Hilbert
#' amplitude envelope. Default is "abs".
#' @param ssmooth A numeric vector of length 1 to smooth the amplitude envelope
#' with a sum smooth function. Default is NULL.
#' @param msmooth A numeric vector of length 2 to smooth the amplitude envelope
#' with a mean sliding window. The first component is the window length and
#' the second is the overlap between successive windows (in \%). Faster than ssmooth but time detection is
#' much less accurate. Will be deprecated in future versions. Default is NULL.
#' @param power A numeric vector of length 1 indicating a power factor applied
#' to the amplitude envelope. Increasing power will reduce low amplitude
#' modulations and increase high amplitude modulations, in order to reduce
#' background noise. Default is 1 (no change).
#' @param bp Numeric vector of length 2 giving the lower and upper limits of a
#' frequency bandpass filter (in kHz). Default is \code{NULL} (no filtering).
#' @param osci Logical argument to add an oscillogram underneath spectrogram, as
#' in \code{\link[seewave]{spectro}}. Default is \code{FALSE}. Not applied if ls is
#' \code{TRUE}.
#' @param wl A numeric vector of length 1 specifying the window length of the spectrogram, default
#' is 512.
#' @param xl Numeric vector of length 1, a constant by which to scale
#' spectrogram width. Default is 1.
#' @param picsize Numeric argument of length 1. Controls the relative size of
#' the spectrogram. Default is 1.
#' @param res Numeric argument of length 1 controling resolution of images.
#' Default is 100 (faster) although 300 - 400 is recommended for publication/
#' presentation quality.
#' @param flim A numeric vector of length 2 for the frequency limit in kHz of
#' the spectrogram, as in \code{\link[seewave]{spectro}}. Default is c(0, 22).
#' @param ls Logical argument. If \code{TRUE}, long spectrograms as in \code{\link{lspec}}
#' are produced.
#' @param sxrow A numeric vector of length 1. Specifies seconds of spectrogram
#' per row when creating long spectrograms. Default is 10. Applied when ls =
#' \code{TRUE} and/or when X is not provided.
#' @param rows A numeric vector of length 1. Specifies number of rows per
#' image file when creating long spectrograms. Default is 10. Applied when ls =
#' \code{TRUE} and/or when X is not provided.
#' @param mindur Numeric vector of length 1 giving the shortest duration (in
#' seconds) of the signals to be detected. It removes signals below that
#' threshold.
#' @param maxdur Numeric vector of length 1 giving the longest duration (in
#' seconds) of the signals to be detected. It removes signals above that
#' threshold.
#' @param redo Logical argument. If \code{TRUE} all selections will be analyzed again
#' when code is rerun. If \code{FALSE} only the selections that do not have an 'autodetec' generated image
#' file in the working directory will be analyzed. Default is \code{FALSE}.
#' @param img Logical argument. If \code{FALSE}, image files are not produced. Default is \code{TRUE}.
#' @param it A character vector of length 1 giving the image type to be used. Currently only
#' "tiff" and "jpeg" are admitted. Default is "jpeg".
#' @param set A logical argument indicating whether the settings of the autodetection
#' process should be included in the image file name. If \code{TRUE}, threshold (th), envelope (envt), bandpass (bp),
#' power (pw), smooth (smo, either mmsooth[1] or ssmooth), maxdur (mxdu), and mindur (midu) are included.
#' @param flist character vector or factor indicating the subset of files that will be analyzed. Ignored
#' if X is provided.
#' @param smadj adjustment for amplitude smoothing. Character vector of length one indicating whether start end
#' values should be adjusted. "start", "end" or "both" are the inputs admitted by this argument. Amplitude
#' smoothing through ssmooth generates a predictable deviation from the actual start and end positions of the signals,
#' determined by the threshold and ssmooth values. This deviation is more obvious (and problematic) when the
#' increase and decrease in amplitude at the start and end of the signal (respectively) is not gradual. Ignored if ssmooth is \code{NULL}.
#' @param parallel Numeric. Controls whether parallel computing is applied.
#' It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing).
#' @param path Character string containing the directory path where the sound files are located.
#' If \code{NULL} (default) then the current working directory is used.
#' @param pb Logical argument to control progress bar. Default is \code{TRUE}. Note that progress bar is only used
#' when parallel = 1.
#' @param pal Color palette function for spectrogram. Default is reverse.gray.colors.2. See
#' \code{\link[seewave]{spectro}} for more palettes. Palettes as \code{\link[monitoR]{gray.2}} may work better when \code{fast.spec = TRUE}.
#' @param fast.spec Logical. If \code{TRUE} then image function is used internally to create spectrograms, which substantially
#' increases performance (much faster), although some options become unavailable, as collevels, and sc (amplitude scale).
#' This option is indicated for signals with high background noise levels. Palette colors \code{\link[monitoR]{gray.1}}, \code{\link[monitoR]{gray.2}},
#' \code{\link[monitoR]{gray.3}}, \code{\link[monitoR]{topo.1}} and \code{\link[monitoR]{rainbow.1}} (which should be imported from the package monitoR) seem
#' to work better with 'fast.spec' spectograms. Palette colors \code{\link[monitoR]{gray.1}}, \code{\link[monitoR]{gray.2}},
#' \code{\link[monitoR]{gray.3}} offer
#' decreasing darkness levels. THIS IS STILL BEING TESTED.
#' @param ... Additional arguments to be passed to a modified version of \code{\link[seewave]{spectro}} for customizing
#' graphical output.
#' @return Image files with spectrograms showing the start and end of the detected signals. It
#' also returns a data frame containing the start and end of each signal by
#' sound file and selection number.
#' @export
#' @name autodetec
#' @details This function determines the start and end of signals in the segments of the sound files listed
#' in the input data frame. Alternatively, if no data frame is provided, the function detects signals across
#' each entire sound file and creates long spectrograms highlighting the start and of the detected
#' signals for all sound files in the working directory. The input data frame should have the following
#' columns: c("sound.files","selec","start","end"). The ouptut of \code{\link{manualoc}} can be used as the
#' input data frame. This function uses a modified version of the \code{\link[seewave]{timer}} function from
#' seewave package to detect signals.
#'
#' @examples
#' \dontrun{
#' # Set temporary working directory
#' setwd(tempdir())
#'
#' data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4"))
#' writeWave(Phae.long1,"Phae.long1.wav")
#' writeWave(Phae.long2,"Phae.long2.wav")
#' writeWave(Phae.long3,"Phae.long3.wav")
#' writeWave(Phae.long4,"Phae.long4.wav")
#'
#' ad <- autodetec(threshold = 5, env = "hil", ssmooth = 300, power=1,
#' bp=c(2,9), xl = 2, picsize = 2, res = 200, flim= c(1,11), osci = TRUE,
#' wl = 300, ls = FALSE, sxrow = 2, rows = 4, mindur = 0.1, maxdur = 1, set = TRUE)
#'
#' #run it with different settings
#' ad <- autodetec(threshold = 90, env = "abs", ssmooth = 300, power = 1, redo = TRUE,
#' bp=c(2,9), xl = 2, picsize = 2, res = 200, flim= c(1,11), osci = TRUE,
#' wl = 300, ls = FALSE, sxrow = 2, rows = 4, mindur=0.1, maxdur=1, set = TRUE)
#'
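#' #detect across entire sound files via 'flist' (no X provided; illustrative sketch)
#' ad <- autodetec(flist = c("Phae.long1.wav", "Phae.long2.wav"), threshold = 10,
#' ssmooth = 300, bp = c(2, 9), ls = TRUE, sxrow = 2, rows = 4,
#' mindur = 0.1, maxdur = 1)
#'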
#' #check this folder!!
#' getwd()
#' }
#'
#' @author Marcelo Araya-Salas (\email{araya-salas@@cornell.edu}). Implements a
#' modified version of the timer function from seewave.
#last modification on jul-5-2016 (MAS)
autodetec<-function(X= NULL, threshold=15, envt="abs", ssmooth = NULL, msmooth = NULL, power = 1,
bp = NULL, osci = FALSE, wl = 512, xl = 1, picsize = 1, res = 100, flim = c(0,22),
ls = FALSE, sxrow = 10, rows = 10, mindur = NULL, maxdur = NULL, redo = FALSE,
img = TRUE, it = "jpeg", set = FALSE, flist = NULL, smadj = NULL, parallel = 1,
path = NULL, pb = TRUE, pal = reverse.gray.colors.2,
fast.spec = FALSE, ...){
# reset working directory
wd <- getwd()
on.exit(setwd(wd))
#check path to working directory
if(is.null(path)) path <- getwd() else {if(!file.exists(path)) stop("'path' provided does not exist") else
setwd(path)
}
#if files not found
if(length(list.files(pattern = "\\.wav$", ignore.case = TRUE)) == 0) if(is.null(path)) stop("No .wav files in working directory") else stop("No .wav files found")
#if bp is not vector or length!=2 stop
if(!is.null(bp))
{if(!is.vector(bp)) stop("'bp' must be a numeric vector of length 2") else{
if(!length(bp) == 2) stop("'bp' must be a numeric vector of length 2")}}
#if flim is not vector or length!=2 stop
if(is.null(flim)) stop("'flim' must be a numeric vector of length 2") else {
if(!is.vector(flim)) stop("'flim' must be a numeric vector of length 2") else{
if(!length(flim) == 2) stop("'flim' must be a numeric vector of length 2")}}
#if msmooth is not vector or length!=2 stop
if(!is.null(msmooth)) {
if(!is.vector(msmooth)) stop("'msmooth' must be a numeric vector of length 2") else {
if(!length(msmooth) == 2) stop("'msmooth' must be a numeric vector of length 2")}}
#if ssmooth is not vector or length!=1 stop
if(!is.null(ssmooth)) {
if(!is.vector(ssmooth)) stop("'ssmooth' must be a numeric vector of length 1") else {
if(!length(ssmooth) == 1) stop("'ssmooth' must be a numeric vector of length 1")}}
#if wl is not vector or length!=1 stop
if(is.null(wl)) stop("'wl' must be a numeric vector of length 1") else {
if(!is.vector(wl)) stop("'wl' must be a numeric vector of length 1") else{
if(!length(wl) == 1) stop("'wl' must be a numeric vector of length 1")}}
#if sxrow is not vector or length!=1 stop
if(is.null(sxrow)) stop("'sxrow' must be a numeric vector of length 1") else {
if(!is.vector(sxrow)) stop("'sxrow' must be a numeric vector of length 1") else{
if(!length(sxrow) == 1) stop("'sxrow' must be a numeric vector of length 1")}}
#if rows is not vector or length!=1 stop
if(is.null(rows)) stop("'rows' must be a numeric vector of length 1") else {
if(!is.vector(rows)) stop("'rows' must be a numeric vector of length 1") else{
if(!length(rows) == 1) stop("'rows' must be a numeric vector of length 1")}}
#if picsize is not vector or length!=1 stop
if(is.null(picsize)) stop("'picsize' must be a numeric vector of length 1") else {
if(!is.vector(picsize)) stop("'picsize' must be a numeric vector of length 1") else{
if(!length(picsize) == 1) stop("'picsize' must be a numeric vector of length 1")}}
#if xl is not vector or length!=1 stop
if(is.null(xl)) stop("'xl' must be a numeric vector of length 1") else {
if(!is.vector(xl)) stop("'xl' must be a numeric vector of length 1") else{
if(!length(xl) == 1) stop("'xl' must be a numeric vector of length 1")}}
#if res is not vector or length!=1 stop
if(is.null(res)) stop("'res' must be a numeric vector of length 1") else {
if(!is.vector(res)) stop("'res' must be a numeric vector of length 1") else{
if(!length(res) == 1) stop("'res' must be a numeric vector of length 1")}}
#if threshold is not vector or length!=1 stop
if(is.null(threshold)) stop("'threshold' must be a numeric vector of length 1") else {
if(!is.vector(threshold)) stop("'threshold' must be a numeric vector of length 1") else{
if(!length(threshold) == 1) stop("'threshold' must be a numeric vector of length 1")}}
#if flist is not character vector
if(!is.null(flist) & is.null(X) & any(!is.character(flist), !is.vector(flist))) stop("'flist' must be a character vector")
#if parallel is not numeric
if(!is.numeric(parallel)) stop("'parallel' must be a numeric vector of length 1")
if(any(!(parallel %% 1 == 0),parallel < 1)) stop("'parallel' should be a positive integer")
#if parallel and pb in windows
if(parallel > 1 & pb & Sys.info()[1] == "Windows") {
message("parallel with progress bar is currently not available for windows OS")
message("running parallel without progress bar")
pb <- FALSE
}
#if it argument is not "jpeg" or "tiff"
if(!any(it == "jpeg", it == "tiff")) stop(paste("Image type", it, "not allowed"))
#wrap img creating function
if(it == "jpeg") imgfun <- jpeg else imgfun <- tiff
#if envt is not vector or length!=1 stop
if(any(envt %in% c("abs", "hil"))){if(!length(envt) == 1) stop("'envt' must be a numeric vector of length 1")
} else stop("'envt' must be either 'abs' or 'hil'" )
if(any(!sapply(list(osci,ls, redo),is.logical)))
stop(paste(paste(c("osci","ls","redo")[!sapply(list(osci,ls, redo),is.logical)],collapse = " "),"not logical"))
#stop if power is 0
if (power == 0)
stop("'power' cannot equal to 0")
if(!is.null(msmooth)) smo <- msmooth[1] else {if(!is.null(ssmooth)) smo <- ssmooth else smo <- 0}
#if smadj argument is not "start" "end" or "both"
if(!is.null(smadj)) if(!any(smadj == "start", smadj == "end", smadj == "both"))
stop(paste("smooth adjustment", smadj, "not allowed"))
if(!is.null(X)){
#check if all columns are found
if(any(!(c("sound.files", "selec", "start", "end") %in% colnames(X))))
stop(paste(paste(c("sound.files", "selec", "start", "end")[!(c("sound.files", "selec",
"start", "end") %in% colnames(X))], collapse=", "), "column(s) not found in data frame"))
if(!class(X) == "data.frame") stop("X is not a data frame")
#if there are NAs in start or end stop
if(any(is.na(c(X$end, X$start)))) stop("NAs found in start and/or end columns")
#if end or start are not numeric stop
if(all(class(X$end) != "numeric" & class(X$start) != "numeric")) stop("'end' and 'selec' must be numeric")
#if any start higher than end stop
if(any(X$end - X$start<0)) stop(paste("The start is higher than the end in", length(which(X$end - X$start<0)), "case(s)"))
#return warning if not all sound files were found
fs <- list.files(pattern = "\\.wav$", ignore.case = TRUE)
if(length(unique(X$sound.files[(X$sound.files %in% fs)])) != length(unique(X$sound.files)))
message(paste(length(unique(X$sound.files))-length(unique(X$sound.files[(X$sound.files %in% fs)])),
".wav file(s) not found"))
#count number of sound files in working directory and if 0 stop
d <- which(X$sound.files %in% fs)
if(length(d) == 0) stop("The .wav files are not in the working directory") else X <- X[d,]
xprov <- TRUE #flag indicating that X was provided
} else {
if(!is.null(flist)) X <- warbleR::wavdur(files = flist) else
X <- warbleR::wavdur()
X$start <- 0
X$selec <- 1
names(X)[2] <- "end"
xprov <- FALSE #flag indicating that X was generated internally (not provided)
if(nrow(X) == 0) stop("Files in 'flist' not in working directory")
}
#redo the ones that have no images in folder
if(!redo) {
imgfs <- list.files(pattern = "\\.jpeg$|\\.tiff$")
done <- sapply(1:nrow(X), function(x){
any(grep(paste(gsub(".wav","", X$sound.files[x]),X$selec[x], sep = "-"), imgfs, invert = FALSE))
})
X <- X[!done, ]
if(nrow(X) == 0) stop("All selections have been analyzed (redo = FALSE)")
}
# if parallel was not called
if(any(parallel == 1, Sys.info()[1] == "Linux") & pb) {if(!ls & img) message("Detecting signals in sound files and producing spectrogram:") else
message("Detecting signals in sound files:")}
#create function to detec signals
adFUN <- function(i, X, flim, wl, bp, envt, msmooth, ssmooth, mindur, maxdur)
{
song <- tuneR::readWave(as.character(X$sound.files[i]),from=X$start[i],to=X$end[i],units="seconds")
if(length(song@left) > wl + 2)
{
f <- song@samp.rate
fl<- flim #in case flim is higher than can be due to sampling rate
if(fl[2] > ceiling(f/2000) - 1) fl[2] <- ceiling(f/2000) - 1
#apply the frequency bandpass filter, if provided
if(!is.null(bp))
f.song<-seewave::ffilter(song, f=f, from = bp[1]*1000, to = bp[2]*1000, bandpass = TRUE, wl = wl, output="Wave") else
f.song<-song
#detect songs based on amplitude (modified from seewave::timer function)
input <- seewave::inputw(wave = f.song, f = f)
wave <- input$w
f <- input$f
rm(input)
n <- length(wave)
thres <- threshold/100
wave1 <- seewave::env(wave = wave, f = f, msmooth = msmooth, ssmooth = ssmooth,
envt = envt, norm = TRUE, plot = FALSE)
n1 <- length(wave1)
f1 <- f * (n1/n)
if (power != 1)
wave1 <- wave1^power
wave2 <- ifelse(wave1 <= thres, yes = 1, no = 2)
n2 <- length(wave2)
wave4 <- apply(as.matrix(1:(n2 - 1)), 1, function(x) wave2[x] +
wave2[x + 1])
n4 <- length(wave4)
wave4[c(1, n4)] <- 3
wave5 <- which(wave4 == 3)
wave5[-1] <- wave5[-1] + 1
f4 <- f * (n4/n)
wave4 <- ts(wave4, start = 0, end = n4/f4, frequency = f4)
positions <- time(wave4)[wave5]
npos <- length(positions)
durations <- apply(as.matrix(1:(npos - 1)), 1, function(x) positions[x +
1] - positions[x])
if (wave2[1] == 1 & npos > 2) {
signal <- durations[seq(2, npos - 1, by = 2)]
start.signal <- positions[seq(2, npos - 1, by = 2)]
} else {
signal <- durations[seq(1, npos - 1, by = 2)]
start.signal <- positions[seq(1, npos - 1, by = 2)]
}
aut.det <- list(s = signal, s.start = start.signal)
#put time of detection in data frame
time.song <- data.frame(sound.files = X$sound.files[i], duration = aut.det$s, selec = NA, start = aut.det$s.start+X$start[i], end = (aut.det$s+aut.det$s.start+X$start[i]))
#remove signals based on duration
if(!is.null(mindur)) time.song <-time.song[time.song$duration > mindur,]
if(!is.null(maxdur)) time.song <-time.song[time.song$duration < maxdur,]
if(nrow(time.song) > 0)
{if(xprov) time.song$selec <- paste(X$selec[i], 1:nrow(time.song), sep = "-") else
time.song$selec <- 1:nrow(time.song)}
#if nothing was detected
if(nrow(time.song)==0)
time.song <- data.frame(sound.files = X$sound.files[i], duration = NA,selec = NA,start = NA, end = NA)
time.song1 <- time.song
time.song$start[is.na(time.song$start)] <- -2
time.song$end[is.na(time.song$end)] <- -1
if(!ls & img & nrow(time.song) > 0) {
if(set)
fna<-paste(substring(X$sound.files[i], first = 1, last = nchar(as.character(X$sound.files[i]))-4),
"-", X$selec[i], "-autodetec","-th" ,threshold , "-env.", envt,"-bp", bp[1],".",bp[2], "-smo", smo, "-midu", mindur,
"-mxdu", maxdur, "-pw", power, sep = "") else
fna<-paste(substring(X$sound.files[i], first = 1, last = nchar(as.character(X$sound.files[i]))-4),
"-", X$selec[i], "-autodetec", sep = "")
imgfun(filename = paste(fna, paste0(".", it), sep = "-"),
width = (10.16) * xl * picsize, height = (10.16) * picsize, units = "cm", res = res)
spectro.INTFUN(song, f = f, wl = wl, collevels=seq(-45,0,1),grid = FALSE, main = as.character(X$sound.files[i]), osc = osci, colwave = "blue4", fast.spec = fast.spec,
scale = FALSE, palette = pal, flim = fl, ...)
rm(song)
if(nrow(time.song)>0)
{sapply(1:nrow(time.song), function(j) abline(v=c(time.song$start[j]-X$start[i], time.song$end[j]-X$start[i]),col="red",lwd=2, lty= "dashed"))
sapply(1:nrow(time.song), function(j) text(time.song$start[j]+time.song$duration[j]/2-X$start[i],
rep(c(((fl[2]-fl[1])*0.85)+fl[1],((fl[2]-fl[1])*0.9)+fl[1],((fl[2]-fl[1])*0.95)+fl[1]),
nrow(time.song))[j],paste(X$selec[i], j, sep = "-"),cex=1))}
dev.off()
}
}
#remove duration column
time.song1 <- time.song1[,grep("duration",colnames(time.song1), invert = TRUE)]
return(time.song1)
on.exit(rm(time.song1))
}
#Apply over each sound file
# Run parallel in windows
if(parallel > 1) {
if(Sys.info()[1] == "Windows") {
i <- NULL #only to avoid non-declared objects
cl <- parallel::makeCluster(parallel)
doParallel::registerDoParallel(cl)
ad <- parallel::parLapply(cl, 1:nrow(X), function(i)
{
adFUN(i, X, flim, wl, bp, envt, msmooth, ssmooth, mindur, maxdur)
})
parallel::stopCluster(cl)
}
if(Sys.info()[1] == "Linux") { # Run parallel in linux
if(pb)
ad <- pbmcapply::pbmclapply(1:nrow(X), mc.cores = parallel, function (i) {
adFUN(i, X, flim, wl, bp, envt, msmooth, ssmooth, mindur, maxdur)
}) else
ad <- parallel::mclapply(1:nrow(X), mc.cores = parallel, function (i) {
adFUN(i, X, flim, wl, bp, envt, msmooth, ssmooth, mindur, maxdur)
})
}
if(!any(Sys.info()[1] == c("Linux", "Windows"))) # parallel in OSX
{
cl <- parallel::makeForkCluster(getOption("cl.cores", parallel))
doParallel::registerDoParallel(cl)
sp <- foreach::foreach(i = 1:nrow(X)) %dopar% {
adFUN(i, X, flim, wl, bp, envt, msmooth, ssmooth, mindur, maxdur)
}
parallel::stopCluster(cl)
}
} else {
if(pb)
ad <- pbapply::pblapply(1:nrow(X), function(i)
{adFUN(i, X, flim, wl, bp, envt, msmooth, ssmooth, mindur, maxdur)
}) else
ad <- lapply(1:nrow(X), function(i)
{adFUN(i, X, flim, wl, bp, envt, msmooth, ssmooth, mindur, maxdur)
})
}
results <- do.call(rbind, ad)
#rename rows
rownames(results) <- 1:nrow(results)
#adjust time coordinates based on known deviance when using ssmooth
if(!is.null(ssmooth) & !is.null(smadj))
{if(smadj == "start" | smadj == "both") results$start <- results$start-((threshold*2.376025e-07)-1.215234e-05)*ssmooth
if(smadj == "end" | smadj == "both") results$end <- results$end-((threshold*-2.369313e-07)+1.215129e-05)*ssmooth }
results1 <- results
#remove NAs so the ones with no detections are printed
results$start[is.na(results$start)] <- -2
results$end[is.na(results$end)] <- -1
# long spectrograms
if(ls & img) {
if(any(parallel == 1, Sys.info()[1] == "Linux") & pb) message("Producing long spectrogram:")
#function for long spectrograms (based on lspec function)
    lspeFUN2 <- function(X, z, fl = flim, sl = sxrow, li = rows, pal, fast.spec) {
#subset for a sound file
Y <- X[!is.na(X$start) & X$sound.files == z, ]
#reset graphic parameters
collev = seq(-40, 0, 1)
gr = FALSE
cex = 1
#loop to print spectros (modified from lspec function)
rec <- tuneR::readWave(as.character(z)) #read wave file
f <- rec@samp.rate #set sampling rate
      frli <- fl # in case flim is higher than the sampling rate allows
if(frli[2] > ceiling(f/2000) - 1) frli[2] <- ceiling(f/2000) - 1
      dur <- seewave::duration(rec)
      if(!length(grep("[^[:digit:]]", as.character(dur/sl)))) #if duration is an exact multiple of sl
        rec <- seewave::cutw(wave = rec, f = f, from = 0, to = dur-0.001, output = "Wave") #trim 1 ms off the end
dur <- seewave::duration(rec) #set duration
#loop over pages
for (j in 1:ceiling(dur/(li*sl))){
if(set) fna<-paste(substring(z, first = 1, last = nchar(as.character(z))-4),
"-autodetec.ls","-th" ,threshold , "-env.", envt, "-bp", bp[1],".",bp[2], "-smo", smo, "-midu", mindur,
"-mxdu", maxdur, "-pw", power, sep = "") else
fna<-paste(substring(z, first = 1, last = nchar(as.character(z))-4), "-autodetec.ls", sep = "")
if(it == "tiff") tiff(filename = paste(fna, "-p", j, ".tiff", sep = ""),
res = 160, units = "in", width = 8.5, height = 11) else
jpeg(filename = paste(fna, "-p", j, ".jpeg", sep = ""),
res = 160, units = "in", width = 8.5, height = 11)
par(mfrow = c(li, 1), cex = 0.6, mar = c(0, 0, 0, 0), oma = c(2, 2, 0.5, 0.5), tcl = -0.25)
#creates spectrogram rows
x <- 0
while(x <= li-1){
x <- x + 1
if(all(((x)*sl+li*(sl)*(j-1))-sl < dur & (x)*sl+li*(sl)*(j-1) < dur)){ #for rows with complete spectro
spectro.INTFUN(rec, f = f, wl = 512, flim = frli, tlim = c(((x)*sl+li*(sl)*(j-1))-sl, (x)*sl+li*(sl)*(j-1)), collevels = collev, grid = gr, scale = FALSE, palette = pal, axisX = TRUE,
fast.spec = fast.spec, ...)
if(x == 1) text((sl-0.01*sl) + (li*sl)*(j - 1), frli[2] - (frli[2]-frli[1])/10, paste(substring(z, first = 1,
last = nchar(as.character(z))-4), "-p", j, sep = ""), pos = 2, font = 2, cex = cex)
if(nrow(Y) > 0)
{
abline(v = c(Y$start, Y$end), col = "red", lty = 2)
text(x = ((Y$start + Y$end)/2), y = frli[2] - 2*((frli[2] - frli[1])/12), labels = Y$selec, font = 4)
}
} else
{ #for rows with incomplete spectro (final row)
if(all(((x)*sl+li*(sl)*(j-1))-sl < dur & (x)*sl+li*(sl)*(j-1) > dur)){
spectro.INTFUN(seewave::pastew(seewave::noisew(f = f, d = (x)*sl+li*(sl)*(j-1)-dur+1, type = "unif",
listen = FALSE, output = "Wave"), seewave::cutw(wave = rec, f = f, from = ((x)*sl+li*(sl)*(j-1))-sl,
to = dur, output = "Wave"), f =f, output = "Wave"), f = f, wl = 512, flim = frli,
tlim = c(0, sl), collevels = collev, grid = gr, scale = FALSE, palette = pal, axisX = FALSE, fast.spec = fast.spec, ...)
if(x == 1) text((sl-0.01*sl) + (li*sl)*(j - 1), frli[2] - (frli[2]-frli[1])/10, paste(substring(z, first = 1, last = nchar(as.character(z))-4), "-p", j, sep = ""), pos = 2, font = 2, cex = cex)
#add axis to last spectro row
axis(1, at = c(0:sl), labels = c((((x)*sl+li*(sl)*(j-1))-sl):((x)*sl+li*(sl)*(j-1))) , tick = TRUE)
if(nrow(Y) > 0)
{
abline(v = c(Y$start, Y$end) - (((x)*sl+li*(sl)*(j-1))-sl), col = "red", lty = 2)
text(x = ((Y$start + Y$end)/2) - (((x)*sl+li*(sl)*(j-1))-sl), frli[2] - 2*((frli[2] - frli[1])/12), labels = Y$selec, font = 4)
}
#add line indicating end of sound file
abline(v = dur-(((x)*sl+li*(sl)*(j-1))-sl), lwd = 2.5)
usr<-par("usr")
polygon(x = rep(c(sl - ((x)*sl+li*(sl)*(j-1)-dur), usr[2]), each = 2), y = c(usr[3], usr[4], usr[4], usr[3]), col = "white")
#add text indicating end of sound files
text(dur-(((x)*sl+li*(sl)*(j-1))-sl), frli[2]-(frli[2]-frli[1])/2, "END OF SOUND FILE", pos = 4, font = 2, cex = 1.1)
} else
{
plot(1, 1, col = "white", col.axis = "white", col.lab = "white",
xaxt = "n", yaxt = "n")
usr<-par("usr")
polygon(x = rep(c(sl - ((x)*sl+li*(sl)*(j-1)-dur), usr[2]), each = 2), y = c(usr[3], usr[4], usr[4], usr[3]), col = "white")
#add text indicating end of sound files
text(dur-(((x)*sl+li*(sl)*(j-1))-sl), frli[2]-(frli[2]-frli[1])/2, "END OF SOUND FILE", pos = 4, font = 2, cex = 1.1)
}
}
}
dev.off() #reset graphic device
}
}
if(parallel > 1) {if(Sys.info()[1] == "Windows")
{
z <- NULL #only to avoid non-declared objects
cl <- parallel::makeCluster(parallel)
doParallel::registerDoParallel(cl)
a1 <- parallel::parLapply(cl, unique(results$sound.files), function(z)
{
lspeFUN2(X = results, z = z, fl = flim, sl = sxrow, li = rows, pal = pal, fast.spec = fast.spec)
})
parallel::stopCluster(cl)
}
if(Sys.info()[1] == "Linux") { # Run parallel in Linux
if(pb)
a1 <- pbmcapply::pbmclapply(unique(results$sound.files), mc.cores = parallel, function(z) {
lspeFUN2(X = results, z = z, fl = flim, sl = sxrow, li = rows, pal = pal, fast.spec = fast.spec)
}) else
a1 <- parallel::mclapply(unique(results$sound.files), mc.cores = parallel, function(z) {
lspeFUN2(X = results, z = z, fl = flim, sl = sxrow, li = rows, pal = pal, fast.spec = fast.spec)
})
}
if(!any(Sys.info()[1] == c("Linux", "Windows"))) # parallel in OSX
{
      cl <- parallel::makeForkCluster(getOption("cl.cores", parallel))
      doParallel::registerDoParallel(cl)
      a1 <- foreach::foreach(z = unique(results$sound.files)) %dopar% {
        lspeFUN2(X = results, z = z, fl = flim, sl = sxrow, li = rows, pal = pal, fast.spec = fast.spec)
}
parallel::stopCluster(cl)
}
} else {
if(pb)
a1 <- pbapply::pblapply(unique(results$sound.files), function(z)
{
lspeFUN2(X = results, z = z, fl = flim, sl = sxrow, li = rows, pal = pal, fast.spec = fast.spec)
}) else a1 <- lapply(unique(results$sound.files), function(z)
{
lspeFUN2(X = results, z = z, fl = flim, sl = sxrow, li = rows, pal = pal, fast.spec = fast.spec)
})
}
}
return(results1)
}
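To make the alternating signal/gap bookkeeping at the top of this block concrete, here is a tiny self-contained sketch (the crossing positions are made up; in the function they are the sample indices where the amplitude envelope crosses the detection threshold):
# toy illustration of the duration/position logic above
positions <- c(10, 25, 40, 70, 90)  # hypothetical threshold-crossing indices
npos <- length(positions)
durations <- diff(positions)        # gap lengths between consecutive crossings
wave2 <- 0                          # pretend the wave starts below the threshold
if (wave2[1] == 1 & npos > 2) {
  signal <- durations[seq(2, npos - 1, by = 2)]
  start.signal <- positions[seq(2, npos - 1, by = 2)]
} else {
  signal <- durations[seq(1, npos - 1, by = 2)]
  start.signal <- positions[seq(1, npos - 1, by = 2)]
}
signal        # 15 30: durations of the candidate signals
start.signal  # 10 40: where they start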
|
fd7c258c7eaa2be3e26377220d79a8facf511b01
|
644b8fb5d5dc672e3f77e0f10793cfd01915f3b1
|
/BMLGrid.Rcheck/tests/testthat/testRunBMLGrid_cpp1.R
|
ca7ac3824caa932cfde219c9f0d4a4d3e303753b
|
[] |
no_license
|
huragok/STA242HW4
|
ff74c279fdd2ed6d2ea92c3faa3904c97fd3601e
|
c82ac2a36fffdbaf42d09b0a2cfd7e9ccbfeb85f
|
refs/heads/master
| 2020-03-29T21:50:23.238586
| 2015-05-12T03:37:30
| 2015-05-12T03:37:30
| 35,847,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,200
|
r
|
testRunBMLGrid_cpp1.R
|
library(BMLGrid)
context("Run BMLGrid simulation in c++")
# Illegal inputs will cause an error directly, due to C++'s strict typing
g <- createBMLGrid(0, 0, c(red = 0, blue = 0))
g.out <- crunBMLGrid1(g, 0)
test_that("Degenerate cases", {
expect_equal(nrow(g.out), 0)
expect_equal(ncol(g.out), 0)
expect_equal(sum(1 == g.out), 0)
expect_equal(sum(2 == g.out), 0)
})
nrep <- 5
numSteps <- 10000
g1.list <- replicate(nrep, createBMLGrid(100, 99, c(red = 1980, blue = 1980)), simplify = FALSE) # The critical case
g1.list.out.c <- sapply(g1.list, crunBMLGrid1, numSteps)
g1.list.out <- sapply(g1.list, runBMLGrid, numSteps)
g2.list <- replicate(nrep, createBMLGrid(100, 99, c(red = 1000, blue = 2000)), simplify = FALSE) # The ordered case
g2.list.out.c <- sapply(g2.list, crunBMLGrid1, numSteps)
g2.list.out <- sapply(g2.list, runBMLGrid, numSteps)
g3.list <- replicate(nrep, createBMLGrid(100, 99, c(red = 3000, blue = 2500)), simplify = FALSE) # The grid lock case
g3.list.out.c <- sapply(g3.list, crunBMLGrid1, numSteps)
g3.list.out <- sapply(g3.list, runBMLGrid, numSteps)
g4.list <- replicate(nrep, createBMLGrid(100, 99, c(red = 0, blue = 2500)), simplify = FALSE) # No red cars and blue cars move smoothly
g4.list.out.c <- sapply(g4.list, crunBMLGrid1, numSteps)
g4.list.out <- sapply(g4.list, runBMLGrid, numSteps)
g5.list <- replicate(nrep, createBMLGrid(100, 99, c(red = 8000, blue = 0)), simplify = FALSE) # No blue cars and red cars move smoothly
g5.list.out.c <- sapply(g5.list, crunBMLGrid1, numSteps)
g5.list.out <- sapply(g5.list, runBMLGrid, numSteps)
g6.list <- replicate(nrep, createBMLGrid(100, 99, c(red = 4950, blue = 4950)), simplify = FALSE) # Total grid lock
g6.list.out.c <- sapply(g6.list, crunBMLGrid1, numSteps)
g6.list.out <- sapply(g6.list, runBMLGrid, numSteps)
test_that("Same result as the original runBMLGrid()", {
expect_equal(all(g1.list.out == g1.list.out.c), TRUE)
expect_equal(all(g2.list.out == g2.list.out.c), TRUE)
expect_equal(all(g3.list.out == g3.list.out.c), TRUE)
expect_equal(all(g4.list.out == g4.list.out.c), TRUE)
expect_equal(all(g5.list.out == g5.list.out.c), TRUE)
expect_equal(all(g6.list.out == g6.list.out.c), TRUE)
})
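Since crunBMLGrid1() exists for speed, a natural companion check (my addition, not part of the original test file) is a quick timing comparison under the same setup:
# timing sketch: C++ vs. reference R implementation on one mid-density grid
g <- createBMLGrid(100, 99, c(red = 1980, blue = 1980))
system.time(crunBMLGrid1(g, 10000))  # C++ implementation
system.time(runBMLGrid(g, 10000))    # reference R implementation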
|
ef1d3a0d896e754a7d21866eef811f925c14b9bf
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleadexchangebuyerv14.auto/man/Buyer.Rd
|
6d00876aee57156675c1a58cf0e53866f63d32b8
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 393
|
rd
|
Buyer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{Buyer}
\alias{Buyer}
\title{Buyer Object}
\usage{
Buyer(accountId = NULL)
}
\arguments{
\item{accountId}{Adx account id of the buyer}
}
\value{
Buyer object
}
\description{
Buyer Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
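A one-line usage sketch (the account id is made up):
library(googleadexchangebuyerv14.auto)  # assumed package name, from the path above
b <- Buyer(accountId = "123456789")     # hypothetical account id
str(b)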
|
15721af3614a89cae071255f79730ea190f40d90
|
5ff2a3097691efac719ac50e3be4eca3627e860e
|
/man/action.Rd
|
b2e86ca4fcac7599f383fc7654299eeebadf0ebf
|
[] |
no_license
|
AlgoSkyNet/rj
|
c0f991041446aee9a00a97a451ab1b09091b7c0e
|
6e000df51ea1ceab28c4e6d78b8ad40d9acb5e7e
|
refs/heads/master
| 2021-04-24T04:23:29.281090
| 2020-03-09T03:05:13
| 2020-03-09T03:05:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 856
|
rd
|
action.Rd
|
\name{update_status}
\alias{accept}
\alias{reject}
\alias{update_status}
\alias{withdraw}
\title{Update the status of an article.}
\usage{
update_status(article, status, comments = "", date = Sys.Date())
reject(article, comments = "", date = Sys.Date())
accept(article, comments = "", date = Sys.Date())
withdraw(article, comments = "", date = Sys.Date())
}
\arguments{
\item{article}{an \code{\link{article}} object, path, or
ID. See \code{\link{as.article}} for more details about
how article is located.}
\item{status}{new status to add}
\item{comments}{any additional comments}
\item{date}{date of status update. If omitted defaults to
today.}
}
\description{
\code{reject}, \code{accept} and \code{withdraw} update the
status, move the file to the correct directory and draft an
email from a template of the corresponding name.
}
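A hedged usage sketch based only on the signatures above (the article id and comments are made up):
library(rj)  # assumed package name, from the repo path above
update_status("2020-13", "major revision", comments = "see referee 2")
accept("2020-13", comments = "minor edits only")  # moves the file and drafts an email
reject("2020-13")
withdraw("2020-13")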
|
dab93945cd543b8931b2095430fb6950be16adaa
|
e59a11834b12ffc260d068b8478416beac8adb5d
|
/man/find_db.Rd
|
84141e7f38754d9f7ff54f8b5d38f9bec565f84e
|
[] |
no_license
|
slevu/garel
|
c9f3020a35f4653695cfd9af1b421aca9a21f758
|
50ef9d6234cc1d0627c2ff1cea55d40af0270094
|
refs/heads/master
| 2020-03-20T02:02:40.739987
| 2019-02-28T11:58:11
| 2019-02-28T11:58:11
| 137,097,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 432
|
rd
|
find_db.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dnabin_functions.R
\name{find_db}
\alias{find_db}
\title{Find BLAST database}
\usage{
find_db(infile = "path.msa", dbdir = "blastDB", verbose = TRUE)
}
\arguments{
\item{infile}{Path to MSA}
\item{dbdir}{Folder of BLAST databases}
\item{verbose}{if \code{TRUE}, print progress messages}
}
\value{
Path to database
}
\description{
Find the subtype from the infile name, then locate the matching BLAST database
}
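A hedged usage sketch (the file and folder names are made up):
db <- find_db(infile = "subtypeB_aln.msa", dbdir = "blastDB", verbose = TRUE)
db  # path to the matching BLAST database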
|
3b0074c91083db64f252b8b289a7c753f254804c
|
f687a4ebaa56b56d1b2930a5c3e37c5ca2ad1fd9
|
/man/myplot_combine.Rd
|
a2d834d97fb6bca058b5d911c9c73b76b02e6f8e
|
[
"MIT"
] |
permissive
|
Telogen/txm
|
48e1b5588d748c48da744d30cc323b03d5984927
|
0b0fece9a16f3f879fa3874a29bf3736a9e3ef39
|
refs/heads/main
| 2023-07-07T06:45:02.752496
| 2023-06-28T08:57:33
| 2023-06-28T08:57:33
| 202,585,480
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 238
|
rd
|
myplot_combine.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myplot_ggplot.R
\name{myplot_combine}
\alias{myplot_combine}
\title{Combine several ggplots}
\usage{
myplot_combine()
}
\description{
Combine several ggplots
}
|
9d4cc3ca5e425aba4b17ebd29ddbc1c2d6d1a63e
|
9bbc486e62be782fa7c46b4339a23763b08a98e2
|
/man/pre_factor_to_logical.Rd
|
e9f3018d42f1815cbfbcf6b97b04ed48ee3169da
|
[] |
no_license
|
cran/emil
|
fd72078935ff24d7de02d0b96a98d196be748002
|
79b73221a96f8cf7930a137939d370c730192111
|
refs/heads/master
| 2021-01-18T22:29:42.902522
| 2018-07-30T11:00:06
| 2018-07-30T11:00:06
| 22,472,758
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,276
|
rd
|
pre_factor_to_logical.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.r
\name{pre_factor_to_logical}
\alias{pre_factor_to_logical}
\title{Convert factors to logical columns}
\usage{
pre_factor_to_logical(data, feature, base = 1L, drop = TRUE)
}
\arguments{
\item{data}{Pre-processed data set, as produced by \code{\link{pre_split}}.}
\item{feature}{Character vector with names of features to convert.
Defaults to all factors in the data set.}
\item{base}{Sent to \code{\link{factor_to_logical}}. To specify different bases for
different columns supply a vector or list with named elements.}
\item{drop}{Sent to \code{\link{factor_to_logical}}. To specify different settings for
different columns supply a vector or list with named elements.}
}
\description{
Factors will be converted to one logical column per level (or one fewer if a
base level is specified).
}
\examples{
x <- mtcars[-1]
x <- transform(x,
cyl = factor(cyl, ordered=TRUE),
vs = factor(vs),
gear = factor(gear)
)
y <- mtcars$mpg
cv <- resample("crossvalidation", y)
data <- pre_split(x, y, cv[[1]]) \%>\%
pre_factor_to_logical(base = c(cyl="4", vs="0"),
drop=c(cyl=FALSE, gear=FALSE))
data$fit$x
}
\author{
Christofer \enc{Bäcklin}{Backlin}
}
|
67dc961f456eb2f79de34d72cd62d4578a55ca49
|
26c9badc0e8b56e041bd77259d3772cea3d4747a
|
/tests/testthat.R
|
9db8afdfa3472338347f4226de32d15c4e395669
|
[
"MIT"
] |
permissive
|
five-dots/rutils
|
067756e4060e39b3795f66d2d4c67eae0859e702
|
03d304b84e8db7c233f0b3715e05d8d0b851e39c
|
refs/heads/master
| 2020-06-09T12:53:17.374791
| 2020-04-08T05:48:54
| 2020-04-08T05:48:54
| 193,440,847
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
library(testthat)
library(rutils)
test_check("rutils")
|
3571ca757b92ba952b8adfc1af49e79b88a0c23e
|
92064c36d8f28bdcd0ef8fb5e4e1ba0f3d028c48
|
/R/factorize.R
|
0c77ac531f90b7c6b55b30f34840e63b31fb0b92
|
[] |
no_license
|
philxiang/arulesCBA
|
02d7413b497b7d4698c245458fd7911da54c2132
|
7751748ee413ff6b769bc81b9dbebb11ce549e30
|
refs/heads/master
| 2021-01-17T13:08:30.481437
| 2017-04-03T19:19:05
| 2017-04-03T19:19:05
| 95,408,274
| 0
| 0
| null | 2017-06-26T04:23:21
| 2017-06-26T04:23:21
| null |
UTF-8
|
R
| false
| false
| 585
|
r
|
factorize.R
|
factorize <- function(formula, data, method = "cluster", categories = 10){
  formula <- as.formula(formula)
  class <- as.character(formula[[2]])
  if(as.character(formula[[3]]) != ".")
    stop("Formula needs to be of the form class ~ .")
  # discretize every numeric column except the class variable
  cols.to.discretize <- (colnames(data) != class & unlist(lapply(data, is.numeric)))
  data[cols.to.discretize] <- lapply(data[cols.to.discretize],
    function(x) discretize(x, method = method, categories = categories))
  # move the class variable to the last column, restoring its name
  cls <- data[[class]]
  data[[class]] <- NULL
  data <- cbind(data, cls)
  colnames(data)[length(data)] <- class
  return(data)
}
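A runnable sketch on iris, assuming an arules version whose discretize() still accepts the categories argument, as this code requires:
library(arules)  # provides discretize()
iris.disc <- factorize(Species ~ ., data = iris)
str(iris.disc)   # the four numeric predictors are now factors; Species is the last column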
|
253e8eb2f2b28d42ebd647bb9230b3af8f3836af
|
a087ca2adce2f03f5706f25c9849737a811dc73e
|
/corsicacombo/ui.R
|
e07f4c9c966ae7e6b03b74e11b1b2df3ad6be654
|
[] |
no_license
|
pbulsink/Apps
|
ecc37ecd8a8740ded552747302020bb6b4a394bf
|
40a13e2fc75ec4d88e18dfd7988ee27922c1dfd1
|
refs/heads/master
| 2021-01-12T05:27:56.883481
| 2016-03-18T22:56:53
| 2016-03-18T22:56:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,935
|
r
|
ui.R
|
########################################################################################################################################################################################################
######################## TESTING #######################################################################################################################################################################
########################################################################################################################################################################################################
# User Interface
# Corsica Combo App
# Last edited 3-16-2016
# Manny
require(shinydashboard)
shinyUI(navbarPage("Combos", id = "tab", inverse = F, windowTitle = "Corsica | Combos",
tabPanel("Lines", value = "line",
dashboardPage(
dashboardHeader(disable = TRUE),
dashboardSidebar(disable = TRUE),
dashboardBody(
# Formatting
tags$head(tags$style(".container-fluid {font-size: 13px; color: #2B547E; background-color: #ECF0F5;}")),
tags$head(tags$style(".box {background-color: #F5F5F5; border-color: #4863A0; border: solid; border-width: 1px;}")),
tags$head(tags$style(".box-header {background-color: #4863A0; color: #FFFFFF;}")),
tags$head(tags$style(".rightAlign{float:right;} .bottom{height:50px}")),
tags$head(tags$style(".center {text-align: center; margin: 23px 0 0 0;}")),
tags$head(tags$style(".center-low {text-align: center; margin: 29px 0 0 0;}")),
tags$style(type = "text/css", ".shiny-output-error {visibility: hidden;}", ".shiny-output-error:before { visibility: hidden;}"),
tags$style(".navbar-default {background-color: #4863A0; border-color: #ffffff;}"),
tags$style(".navbar-default .navbar-nav li a {background-color: #4863A0; color: #ffffff;}"),
tags$style(".navbar-default .navbar-nav .active a {background-color: #ffffff; color: #4863A0;}"),
tags$style(".navbar-default .navbar-brand {background-color: #4863A0; color: #ffffff;}"),
tags$style(".dataTable thead tr {background-color: #4863A0; color: #ffffff;}"),
# Header text
fluidRow(
column(6, h2("Line Stats")),
column(6, tags$div(class = "rightAlign", checked = NA, tags$a(href = "http://www.corsica.hockey/", target = "_parent", tags$h2("Corsica ↩", style = "color: #2B547E;"))))
),
# Help text
fluidRow(
column(6, helpText("Loading the data may take a few seconds. Thanks for your patience.")),
column(6, tags$div(class = "rightAlign", helpText("Confused? Consult the", tags$a(href = "http://www.corsica.hockey/blog/2016/02/03/glossary/", "Glossary"))))
),
# Input row 1
fluidRow(
column(2, uiOutput("l1")),
column(2, selectInput("lstrength", "Strength State", choices = c("All", "5v5", "5v4", "4v5", "4v4", "3v3"), selected = "5v5")),
column(2, selectInput("ladjust", "Adjustment", choices = c("None", "Score and Venue", "Score, Zone and Venue"), selected = "None")),
column(2, selectInput("ltype", "Season Type", choices = c("Regular", "Playoffs", "Both"), selected = "Regular")),
column(2, uiOutput("l3")),
column(2, tags$div(class = "center-low", checkboxInput("laggregate", "Aggregate Seasons", value = TRUE)))
),
# Input row 2
fluidRow(
column(2, uiOutput("l2")),
column(2, selectInput("lreport", "Report", choices = c("On-Ice", "Off-Ice", "Relative", "Individual", "Context", "Counts"), selected = "On-Ice")),
column(3, sliderInput("ltoi", "TOI Minimum", min = 0, max = 5000, value = 50, step = 10)),
column(3, uiOutput("lname")),
column(2, tags$div(class = "center", downloadButton("ldl", "Download File")))
),
# Output
DT::dataTableOutput("t1")
)
)),
tabPanel("Pairings", value = "pair",
dashboardPage(
dashboardHeader(disable = TRUE),
dashboardSidebar(disable = TRUE),
dashboardBody(
# Header text
fluidRow(
column(6, h2("Pairing Stats")),
column(6, tags$div(class = "rightAlign", checked = NA, tags$a(href = "http://www.corsica.hockey/", target = "_parent", tags$h2("Corsica ↩", style = "color: #2B547E;"))))
),
# Help text
fluidRow(
column(6, helpText("Loading the data may take a few seconds. Thanks for your patience.")),
column(6, tags$div(class = "rightAlign", helpText("Confused? Consult the", tags$a(href = "http://www.corsica.hockey/blog/2016/02/03/glossary/", "Glossary"))))
),
# Input row 1
fluidRow(
column(2, uiOutput("p1")),
column(2, selectInput("pstrength", "Strength State", choices = c("All", "5v5", "5v4", "4v5", "4v4", "3v3"), selected = "5v5")),
column(2, selectInput("padjust", "Adjustment", choices = c("None", "Score and Venue", "Score, Zone and Venue"), selected = "None")),
column(2, selectInput("ptype", "Season Type", choices = c("Regular", "Playoffs", "Both"), selected = "Regular")),
column(2, uiOutput("p3")),
column(2, tags$div(class = "center-low", checkboxInput("paggregate", "Aggregate Seasons", value = TRUE)))
),
# Input row 2
fluidRow(
column(2, uiOutput("p2")),
column(2, selectInput("preport", "Report", choices = c("On-Ice", "Off-Ice", "Relative", "Individual", "Context", "Counts"), selected = "On-Ice")),
column(3, sliderInput("ptoi", "TOI Minimum", min = 0, max = 5000, value = 50, step = 10)),
column(3, uiOutput("pname")),
column(2, tags$div(class = "center", downloadButton("pdl", "Download File")))
),
# Output
DT::dataTableOutput("t2")
)
)),
tabPanel("WOWY", value = "wowy",
dashboardPage(
dashboardHeader(disable = TRUE),
dashboardSidebar(disable = TRUE),
dashboardBody(
# Header text
fluidRow(
column(6, h2("With Or Without You")),
column(6, tags$div(class = "rightAlign", checked = NA, tags$a(href = "http://www.corsica.hockey/", target = "_parent", tags$h2("Corsica ↩", style = "color: #2B547E;"))))
),
# Help text
fluidRow(
column(6, helpText("Set your desired parameters, then press the Load button. Try to limit your query to only the desired information in order to hasten the load time.")),
column(6, tags$div(class = "rightAlign", helpText("Confused? Consult the", tags$a(href = "http://www.corsica.hockey/blog/2016/02/03/glossary/", "Glossary"))))
),
# Input row 1
fluidRow(
column(2),
column(8,
column(7, dateRangeInput("wdate", "Date Range", min = "2007-10-01", max = Sys.Date(), start = "2015-10-01", end = Sys.Date(), format = "yyyy-mm-dd")),
column(5, uiOutput("wname"))
),
column(2)
),
# Input row 2
fluidRow(
column(1),
column(10,
column(5, selectInput("wadjust", "Adjustment", choices = c("None", "Score and Venue", "Score, Zone and Venue"), selected = "None")),
column(2, selectInput("wvenue", "Venue", choices = c("Any", "Home", "Away"), selected = "Any")),
column(3, selectInput("wstrength", "Strength State", choices = c("All", "5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3"), selected = "5v5")),
column(2, tags$div(class = "center", actionButton("go", "Load", width = "100%"), style = "border-radius: 4px; box-shadow: 3px 3px 3px;"))
),
column(1)
),
# Dashboard output
uiOutput("dash")
)
)),
tabPanel("Assists", value = "assists",
dashboardPage(
dashboardHeader(disable = TRUE),
dashboardSidebar(disable = TRUE),
dashboardBody(
# Header text
fluidRow(
column(6, h2("Assist Networks")),
column(6, tags$div(class = "rightAlign", checked = NA, tags$a(href = "http://www.corsica.hockey/", target = "_parent", tags$h2("Corsica ↩", style = "color: #2B547E;"))))
),
# Help text
fluidRow(
column(6, helpText("")),
column(6, tags$div(class = "rightAlign", helpText("Confused? Consult the", tags$a(href = "http://www.corsica.hockey/blog/2016/02/03/glossary/", "Glossary"))))
),
# Input row 1
fluidRow(
column(3, uiOutput("ps1")),
column(3, uiOutput("ps2")),
column(3, uiOutput("pteam")),
column(3, selectInput("atype", "Assist Type", choices = c("Any", "Primary", "Secondary"), selected = "Any"))
),
fluidRow(
box(
plotOutput("passplot"),
width = 12,
title = "Assist Network",
solidHeader = TRUE,
collapsible = TRUE
)
)
)
))
))
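For context, each uiOutput() placeholder above expects a matching renderUI() on the server side; a minimal hypothetical sketch for the "l1" control (the season choices are made up, not Corsica's actual ones):
library(shiny)
server <- function(input, output) {
  output$l1 <- renderUI({
    selectInput("lseason", "Season",
                choices = c("2015-2016", "2014-2015"),  # hypothetical choices
                selected = "2015-2016")
  })
}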
|
2e3af8139f47df164094cfe7c83ebe35863bb5d1
|
aade6e3b9887f3b74e4ac75584c007be82f9fb23
|
/man/get_cumulative_sum.Rd
|
1bc9f683181a332ea5505bb1da1c18690d90ee00
|
[
"Apache-2.0"
] |
permissive
|
Dxiaomai/conta
|
da0b02e62a6aaa9167d14e7374670cf847605dce
|
cf17f9010068266ead3176098b714ec84b8f159e
|
refs/heads/master
| 2020-09-08T14:38:58.695204
| 2019-11-07T18:57:20
| 2019-11-08T18:29:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 324
|
rd
|
get_cumulative_sum.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conta_graphics.R
\name{get_cumulative_sum}
\alias{get_cumulative_sum}
\title{Get cumulative sum of a set of numbers}
\usage{
get_cumulative_sum(v)
}
\arguments{
\item{v}{vector of numbers}
}
\description{
Get cumulative sum of a set of numbers
}
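A hedged sketch, inferring the behaviour from the title alone:
get_cumulative_sum(c(1, 2, 3, 4))  # presumably along the lines of cumsum(): 1 3 6 10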
|
4d766a545266039a46c5c6f027bd34e719fd30c1
|
17fdd34b68df267b8262d532adddba733879b0b8
|
/man/predict.pp.Rd
|
dc44388cb23b9916705dc61d2a9fae7ae5c9503a
|
[] |
no_license
|
kevinmhadi/khtools
|
f0b57e0be0014084f2f194465ab4a924fe502268
|
85d64808f8decd71f30510ccd18f38986031be74
|
refs/heads/master
| 2023-07-19T21:50:22.341824
| 2023-07-19T01:46:03
| 2023-07-19T01:46:03
| 235,495,453
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 260
|
rd
|
predict.pp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmnet_utils.R
\name{predict.pp}
\alias{predict.pp}
\title{predict preProcess}
\usage{
\method{predict}{pp}(pp.res, newdat, apply.prefun = TRUE)
}
\description{
predict preProcess
}
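A hedged usage sketch; pp.res is assumed to be a preprocessing object of class "pp" produced elsewhere in the package's glmnet utilities:
# newdat.pp <- predict(pp.res, newdat = test.matrix, apply.prefun = TRUE)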
|
2a17398dddfbc33974003f1ef68c38ce73abc4cf
|
afc1a7a38eb2ba8d2cd6e91032102d0acb2911d1
|
/R/moviefinder.R
|
095c19d77ada7d797285011c27243a7ddae66fa2
|
[] |
no_license
|
Ollie-gnodde/moviefinder
|
b26901abc9105cdbe583a2e88b570655320ee0b3
|
3014b23a2654dd86d4de5c0fb376a7b36e86fe15
|
refs/heads/master
| 2020-12-02T22:14:38.752253
| 2017-07-07T13:26:58
| 2017-07-07T13:26:58
| 96,100,329
| 0
| 2
| null | 2017-07-03T11:22:17
| 2017-07-03T10:40:54
|
R
|
UTF-8
|
R
| false
| false
| 485
|
r
|
moviefinder.R
|
#' The Movie Finder function
#'
#' This package has two functions: Discovr and search.
#'
#' The Discovr function helps you to find movies that you wish to watch, based on 4 inputs.
#' These 4 inputs are genre, minimum IMDb rating, main actor and director.
#' The function is called by movie_disc().
#'
#' The second function is search.
#' This function allows you to see all the important information about a movie.
#' The function is called using MovieFinder().
"_PACKAGE"
#> [1] "_PACKAGE"
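A hedged usage sketch; the argument names are made up, since only the four input concepts are documented above:
# movie_disc(genre = "Comedy", min_rating = 7.5,
#            actor = "Bill Murray", director = "Wes Anderson")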
|
c857163642ef3c16fb944313438c55d3ae4cd78f
|
49a842bbdcc72a4c7a022ecb526e84d926f7dda9
|
/class_3/quiz_2/main_quiz_2.R
|
ae94cc567295a9137a1640f29c741bdf807022df
|
[] |
no_license
|
JAVIS25/JHU-Coursera-Data-Science
|
b3c4f6af82b11bd977daa02e3e18a69d2b91405a
|
cbe19a38548eb33b43be428922bc60eacd169221
|
refs/heads/master
| 2022-02-13T19:41:04.399779
| 2018-01-25T06:28:42
| 2018-01-25T06:28:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
r
|
main_quiz_2.R
|
library(jsonlite)
library(httpuv)
library(httr)
library(XML)
# GitHub OAuth app credentials issued for this exercise
key = "bd807dc7a8261ec392fa"
secret = "f9d6b9cd11555ae9f3e051b96035c4b2488e58c2"
# register the app and obtain an OAuth 2.0 token
myapi <- oauth_app("github",
                   key = key,
                   secret = secret)
github_token <- oauth2.0_token(
  oauth_endpoints('github'), myapi)
# authenticated request: list jtleek's public repositories
req <-
  GET('https://api.github.com/users/jtleek/repos',
      config(token = github_token))
temp <- content(req)
temp1 <- jsonlite::fromJSON(toJSON(temp))
# parse the HTML of the contact page (for the HTML question)
x <- htmlTreeParse('http://biostat.jhsph.edu/~jleek/contact.html',
                   useInternal = TRUE)
# read the fixed-width NOAA SST file; note that this overwrites x from above
x <- read.fwf('https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for',
              widths = c(12, 7, 4, 9, 4, 9, 4, 9, 4), skip = 4)
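The script leaves temp1 (the repo listing) and x (the fixed-width SST table) around; a hedged sketch of how quiz answers are typically pulled from them (my addition, not part of the submission):
length(temp1$name)  # number of repositories returned for jtleek
sum(x[, 4])         # sum of the fourth fixed-width column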
|
5a08d3bcc3f894dcd1d61f2c2c384702750932f8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rstanarm/examples/posterior_vs_prior.Rd.R
|
1e5594145b71b25799c2ad1682fc00a778b31649
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,596
|
r
|
posterior_vs_prior.Rd.R
|
library(rstanarm)
### Name: posterior_vs_prior
### Title: Juxtapose prior and posterior
### Aliases: posterior_vs_prior posterior_vs_prior.stanreg
### ** Examples
## Not run:
##D if (!exists("example_model")) example(example_model)
##D # display non-varying (i.e. not group-level) coefficients
##D posterior_vs_prior(example_model, pars = "beta")
##D
##D # show group-level (varying) parameters and group by parameter
##D posterior_vs_prior(example_model, pars = "varying",
##D group_by_parameter = TRUE, color_by = "vs")
##D
##D # group by parameter and allow axis scales to vary across facets
##D posterior_vs_prior(example_model, regex_pars = "period",
##D group_by_parameter = TRUE, color_by = "none",
##D facet_args = list(scales = "free"))
##D
##D # assign to object and customize with functions from ggplot2
##D (gg <- posterior_vs_prior(example_model, pars = c("beta", "varying"), prob = 0.8))
##D
##D gg +
##D ggplot2::geom_hline(yintercept = 0, size = 0.3, linetype = 3) +
##D ggplot2::coord_flip() +
##D ggplot2::ggtitle("Comparing the prior and posterior")
##D
##D # compare very wide and very narrow priors using roaches example
##D # (see help(roaches, "rstanarm") for info on the dataset)
##D roaches$roach100 <- roaches$roach1 / 100
##D wide_prior <- normal(0, 10)
##D narrow_prior <- normal(0, 0.1)
##D fit_pois_wide_prior <- stan_glm(y ~ treatment + roach100 + senior,
##D offset = log(exposure2),
##D family = "poisson", data = roaches,
##D prior = wide_prior)
##D posterior_vs_prior(fit_pois_wide_prior, pars = "beta", prob = 0.5,
##D group_by_parameter = TRUE, color_by = "vs",
##D facet_args = list(scales = "free"))
##D
##D fit_pois_narrow_prior <- update(fit_pois_wide_prior, prior = narrow_prior)
##D posterior_vs_prior(fit_pois_narrow_prior, pars = "beta", prob = 0.5,
##D group_by_parameter = TRUE, color_by = "vs",
##D facet_args = list(scales = "free"))
##D
##D
##D # look at cutpoints for ordinal model
##D fit_polr <- stan_polr(tobgp ~ agegp, data = esoph, method = "probit",
##D prior = R2(0.2, "mean"), init_r = 0.1)
##D (gg_polr <- posterior_vs_prior(fit_polr, regex_pars = "\\|", color_by = "vs",
##D group_by_parameter = TRUE))
##D # flip the x and y axes
##D gg_polr + ggplot2::coord_flip()
## End(Not run)
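Everything shipped with this example is wrapped in a Not-run block; a minimal sketch that actually runs (tiny chains/iterations purely for speed, not recommended settings):
library(rstanarm)
fit <- stan_glm(mpg ~ wt + am, data = mtcars,
                chains = 1, iter = 500, refresh = 0)
posterior_vs_prior(fit, pars = "beta")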
|