blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b424aa98f6a66a956d31c3cf5b1451b9b0474b5f | 4caefeae8af5018d9eb122b51239ab98deabc443 | /man/GetNums.Rd | f7b626ebf8fa9ed7a20de5b50e013f9a03b10490 | [] | no_license | cran/klassR | d9c20f1ce6bc40a115520bb7bbca5d8e7ef1f458 | 7538934ac4ea9e8e6b9bfcef866652cbd56c7a01 | refs/heads/master | 2023-05-12T02:09:02.944937 | 2023-05-08T06:40:02 | 2023-05-08T06:40:02 | 236,618,178 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 319 | rd | GetNums.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Klass_list.R
\name{GetNums}
\alias{GetNums}
\title{Get target ID numbers from Url}
\usage{
GetNums(x)
}
\arguments{
\item{x}{Url address}
}
\value{
Number
}
\description{
Get target ID numbers from Url
}
\keyword{internal}
|
26102caa1e2c3d28af2b774733a0370f4774a41f | 952616f9632510296d2a13d5cd2efcfc999e5370 | /storeyQ.R | 4387dfa539623ad658329ea40b81d74775f5bfc1 | [] | no_license | vaqm2/Util | 1e0b1674d3e2f5d00ea8cec87b51972414db7299 | f3d65ea060ecc6ccc188e03e8a12a03bccdd4006 | refs/heads/master | 2023-06-08T13:40:33.658005 | 2023-06-05T12:03:38 | 2023-06-05T12:03:38 | 87,819,867 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,983 | r | storeyQ.R | #!/usr/bin/env Rscript
## Per-MAF-class q-value (Storey FDR) computation and QQ plotting for a
## GWAS-style association table.
##
## Usage: storeyQ.R <assoc_file> <output_prefix>
##   <assoc_file>    whitespace-delimited table with at least columns
##                   FREQ (allele frequency) and P (association p-value)
##   <output_prefix> prefix for the two outputs:
##                   <prefix>_QQ.png   QQ plot, common vs rare variants
##                   <prefix>_sFDR.txt input table + MAF/CLASS/Q/Observed/Expected
##
## Variants are split into COMMON (MAF >= 0.05) and RARE classes, and
## Storey q-values are estimated separately within each class
## (stratified FDR).
library(qvalue)      # library() errors immediately on a missing package;
library(dplyr)       # require() would return FALSE and fail later with a
library(data.table)  # much less helpful message
library(ggplot2)

args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2) {
  stop("Usage: storeyQ.R <assoc_file> <output_prefix>", call. = FALSE)
}

assoc <- fread(args[1], header = TRUE)
assoc <- assoc %>%
  mutate(MAF = ifelse(FREQ > 0.5, 1 - FREQ, FREQ)) %>%
  mutate(CLASS = ifelse(MAF >= 0.05, "COMMON", "RARE"))

## Within one MAF class: compute q-values, observed -log10(p) (sorted
## descending), and the matching expected -log10 uniform quantiles
## (ppoints() is ascending, so -log10 of it is descending and pairs
## largest-observed with largest-expected, as in a standard QQ plot).
prepare_class <- function(df) {
  df %>%
    mutate(Q = qvalue(p = P)$qvalues) %>%
    mutate(Observed = -log10(P)) %>%
    arrange(desc(Observed)) %>%
    mutate(Expected = -log10(qunif(ppoints(n()))))
}

common_assoc <- prepare_class(filter(assoc, CLASS == "COMMON"))
rare_assoc   <- prepare_class(filter(assoc, CLASS == "RARE"))
assoc <- rbind(common_assoc, rare_assoc)

## Expected -log10(p) of the least significant variant still passing the
## within-class q <= 0.05 cutoff; a zero-row result means nothing in the
## class is significant and no threshold line is drawn.
sfdr_threshold <- function(df) {
  df %>%
    arrange(Q) %>%
    filter(Q <= 0.05) %>%
    select(Expected) %>%
    tail(1)
}

sFDR_threshold_common <- sfdr_threshold(common_assoc)
sFDR_threshold_rare   <- sfdr_threshold(rare_assoc)

p <- ggplot(assoc, aes(x = Expected,
                       y = Observed,
                       color = CLASS,
                       shape = CLASS)) +
  geom_point() +
  geom_abline(slope = 1) +
  theme_bw() +
  scale_color_manual(values = c("blue", "red")) +
  scale_x_continuous(breaks = seq(0, max(assoc$Expected), 1)) +
  scale_y_continuous(breaks = seq(0, max(assoc$Observed), 1)) +
  theme(legend.title = element_blank())

## Add a dashed vertical threshold line plus rotated label for one class.
add_threshold <- function(plot, threshold, label, color) {
  if (nrow(threshold) == 1) {
    plot <- plot +
      geom_vline(xintercept = threshold$Expected,
                 lty = 2,
                 color = color) +
      annotate("text",
               label = label,
               x = threshold$Expected - 0.1,
               y = 2,
               angle = 90,
               color = color)
  }
  plot
}

p <- add_threshold(p, sFDR_threshold_common, "sFDR Common SNPs = 0.05", "blue")
p <- add_threshold(p, sFDR_threshold_rare, "sFDR Rare SNPs = 0.05", "red")

png(paste0(args[2], "_QQ.png"),
    width = 8,
    height = 8,
    units = "in",
    res = 300)
print(p)  # explicit print(): auto-printing does not happen under source()
dev.off()

write.table(assoc,
            paste0(args[2], "_sFDR.txt"),
            row.names = FALSE,
            quote = FALSE,
            sep = " ")
# ---------------END OF SCRIPT ----------------- # |
5425d54eb68df0ec25ed04fef4a4d3dc25e11d3a | fe1d056f0e844eefed90f565fe22e160b2594110 | /Supplemental Code.R | d6a16c43f1fd10692ef5569648b81b31020b6048 | [] | no_license | stewart6/SRKW-MultiState | 7b41b430ab50a97421069ff5371620ba61d28929 | 8a242c02b5cd1a76535ad983ffe0d85c1fa78c9e | refs/heads/master | 2023-03-13T06:44:20.427033 | 2021-02-23T23:54:21 | 2021-02-23T23:54:21 | 258,288,085 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,685 | r | Supplemental Code.R | # Supplemental code for Stewart et al., 2020
# Survival of the Fattest: Linking body condition to prey availability and survivorship of killer whales
# This code will run body condition multi-state models for Southern Resident killer whale L Pod.
# Required packages:
library(R2jags)
# Required Data Files:
# SRKW Body Condition Classes.RData
# Chinook Salmon Indices.RData
# SRKW_MultiState.jags
#######################
# 1) LOAD DATA FILES
#######################
# Load the Body Condition Matrices for L pod
# (make sure this .RData file is in your working directory)
load("SRKW Body Condition Classes.RData")
# Notes: 2007 is the 'initialization' year. All whales start in condition class 3
# and we disregard the transition probabilities from 2007-2008 and don't include
# salmon data for 2008 so the covariate fits are not influenced by initialization.
# Body condition classes 1-5, mortality is logged at 6, unmeasured whales logged as NA.
# No measurements taken 2009-2012, 2014, but known deaths are included for all years 2008-2019.
# Load the Chinook Salmon abundance indices
load("Chinook Salmon Indices.RData")
# Notes:
# Salmon abundance data are from the Fishery Regulation Assessment Model (FRAM - refer to Methods)
# Fraser, Columbia, and Puget are aggregate abundances of all Chinook stocks returning to
# the Fraser River, Columbia River, and Puget Sound, respectively.
# NOF, SWVI, OR, and Salish are regional indices that include all Chinook salmon from ANY stock
# that are present in the North of Cape Falcon (Washington coast), Southwest Vancouver Island,
# Oregon coast, and Salish Sea regions, respectively.
# Named vectors are Z-scored abundance indices, created by subtracting mean abundance from annual abundance and dividing by the standard deviation, within each region
# The Chinook_Indices object contains all of the Chinook abundance and Z-scored data,
# while each abundance index also has its own named vector object for inclusion in the model loop below.
# The leading NA value in each named abundance vector is to account for the initialization year (see above)
# Load the Age Matrices and Sex matrices for L pod:
load("SRKW AgeClasses.RData")
# Notes:
# 1 = Not yet born (in all models, mortality probability is forced to 0 for age/sex class 1)
# 2 = Calf
# 3 = Juvenile
# 4 = Young Female
# 5 = Old Female
# 6 = Young Male
# 7 = Old Male
#############################
# 2) RUN THE NULL MODEL
#############################
# Choose which pod to run (here: the L pod matrices loaded above)
BC <- as.matrix(LBC)
AgeSex <- as.matrix(LAgeClass)
# Bundle everything the JAGS model needs into one named list
jags.data <- list(n.occasions = dim(BC)[2], #number of time steps
                  n.ind = dim(BC)[1], #number of animals
                  n.bc = 5, #number of condition classes
                  BC = BC, #body condition data
                  Params = 3, #total number of transition parameters (G, S, D)
                  AgeSex = AgeSex) #vector of which whales are male (1 / 0)
# Posterior quantities to monitor
parameters <- c( "G", "S", "D", "M", "Base.M", "Mean.M")
# Set some initial values for mortality probability:
# Base.M has 7 entries (one per age/sex class, see data notes above) and
# M has 5 entries (one per body condition class)
inits <- function(){
  Base.M = runif(7,-10,-5)
  M = runif(5,-10,-5)
  list(Base.M=Base.M,
       M=M)
}
# MCMC settings
nc= 3 #number of chains
ni = 100000 #number of iterations
nb = 50000 #burn-in length
nt = 50 #thinning
# Fit the null model; the model definition lives in SRKW_MultiState_Null.jags
SRKW_LPod_Null <- jags(jags.data, inits=inits, parameters, "SRKW_MultiState_Null.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb, working.directory = getwd())
# attach()-style side effect: puts the posterior draws on the search path
attach.jags(SRKW_LPod_Null)
#############################
# 3) RUN THE TIME-ONLY MODEL
#############################
# R-side setup is intentionally identical to the null model above; the
# time-varying structure is defined inside the .jags model file, not here.
# Choose which pod to run
BC <- as.matrix(LBC)
AgeSex <- as.matrix(LAgeClass)
jags.data <- list(n.occasions = dim(BC)[2], #number of time steps
                  n.ind = dim(BC)[1], #number of animals
                  n.bc = 5, #number of condition classes
                  BC = BC, #body condition data
                  Params = 3, #total number of transition parameters (G, S, D)
                  AgeSex = AgeSex) #vector of which whales are male (1 / 0)
parameters <- c( "G", "S", "D", "M", "Base.M", "Mean.M")
# Set some initial values for mortality probability
inits <- function(){
  Base.M = runif(7,-10,-5)
  M = runif(5,-10,-5)
  list(Base.M=Base.M,
       M=M)
}
nc= 3 #number of chains
ni = 100000 #number of iterations
nb = 50000 #burn-in length
nt = 50 #thinning
SRKW_LPod_Time <- jags(jags.data, inits=inits, parameters, "SRKW_MultiState_Time.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb, working.directory = getwd())
attach.jags(SRKW_LPod_Time)
#############################
# 4) RUN THE COVARIATE MODEL
#############################
BC <- as.matrix(LBC)
# Chinook abundance covariate; swap in another z-scored index
# (Fraser, Columbia, NOF, SWVI, OR, Salish -- see data notes above)
# to fit the model with a different covariate
Cov <- Puget
# NOTE(review): unlike the two models above, AgeSex is not wrapped in
# as.matrix() here -- confirm this difference is intentional
AgeSex <- LAgeClass
jags.data <- list(n.occasions = dim(BC)[2], #number of time steps
                  n.ind = dim(BC)[1], #number of animals
                  n.bc = 5, #number of condition classes
                  BC = BC, #body condition data
                  cov = Cov, #covariate data
                  Params = 3, #total number of transition parameters (EG, G, S, G, ED, M)
                  AgeSex = AgeSex) #vector of which whales are male (1 / 0)
# Additionally monitor the covariate regression terms (slope, intercept)
parameters <- c( "G", "S", "D", "M", "slope", "intercept", "Base.M", "Mean.M")
# Set some initial values for mortality probability
inits <- function(){
  Base.M = runif(7,-10,-5)
  M = runif(5,-10,-5)
  list(Base.M=Base.M,
       M=M)
}
nc= 3 #number of chains
ni = 100000 #number of iterations
nb = 50000 #burn-in length
nt = 50 #thinning
SRKW_LPod_Puget <- jags(jags.data, inits=inits, parameters, "SRKW_MultiState_Cov.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb, working.directory = getwd())
attach.jags(SRKW_LPod_Puget)
|
17b73c8a5eeb663bcaf458f467a49fc7cb8988bc | 81780d7000220293b9cecb54d4def069faa7d649 | /R/extract_icd_codes.R | 8efa4f61ecee9b8a196ba831c67ece80d54f4942 | [
"Apache-2.0"
] | permissive | pwatrick/DrugRepurposingToolKit | 8ef405a602e6e100306365e3c9acf9d4cd56bc2a | 8c0f8c26013b8efec5c89afb68f182e98794bc3c | refs/heads/main | 2023-04-18T10:57:15.211297 | 2022-08-09T11:14:26 | 2022-08-09T11:14:26 | 352,338,175 | 2 | 5 | null | null | null | null | UTF-8 | R | false | false | 4,987 | r | extract_icd_codes.R | #' Function to get ICD data for calculating elixhauser scores
#'
#'
#' @description
#' \code{extract_icd_codes} extracts ICD codes for calculating Elixhauser comorbidity scores.
#'
#' @details
#' This function extracts ICD codes for calculating Elixhauser comorbidity scores.
#'
#' Updated: 2021-10-05
#'
#' @param drug_concept_id A string; OMOP concept id of the ancestor drug concept whose descendants define the index drug exposure
#' @param biomarker_concept_id A string; OMOP concept id of the ancestor measurement concept whose descendants define the biomarker
#' @param table_name A string (currently not used inside this function)
#' @param table_type A string, either c("covariates", "drugs", "biomarkers", "icds") (currently not used inside this function)
#' @param elix_icds A string of comma-separated ICD concept ids, interpolated into the SQL "IN (...)" filter
#' @return A character (glue) string containing the assembled SQL query
#' @export
extract_icd_codes <- function(drug_concept_id, biomarker_concept_id, table_name, table_type, elix_icds) {
sql_query <- glue::glue("
(
WITH pw_codesets AS
(
SELECT s3.codeset_id, s3.concept_id, s3.concept_name
FROM (
(SELECT 1 as codeset_id, s1.concept_id, s1.concept_name
FROM (
SELECT t1.concept_id, t1.concept_name
FROM (SELECT * FROM `concept` WHERE domain_id = 'Drug') t1
INNER JOIN (SELECT * FROM `concept_ancestor` WHERE ancestor_concept_id = {drug_concept_id}) t2
ON (t1.concept_id = t2.descendant_concept_id)
GROUP BY t1.concept_id, t1.concept_name
) s1)
UNION DISTINCT
(SELECT 2 as codeset_id, s2.concept_id, s2.concept_name
FROM (
SELECT t1.concept_id, t1.concept_name
FROM (SELECT * FROM `concept` WHERE domain_id = 'Measurement') t1
INNER JOIN (SELECT * FROM `concept_ancestor` WHERE ancestor_concept_id = {biomarker_concept_id}) t2
ON (t1.concept_id = t2.descendant_concept_id)
GROUP BY t1.concept_id, t1.concept_name
) s2)) s3
),
pw_index_date AS
(
SELECT s1.person_id,
{drug_concept_id} as drug_concept_id,
s1.drug_exposure_start_date
FROM (
(SELECT s2.person_id, s2.drug_concept_id, s3.concept_name as drug_concept_name, s2.drug_exposure_start_date
FROM (SELECT * FROM `drug_exposure`) s2
INNER JOIN (SELECT concept_id, concept_name FROM pw_codesets WHERE codeset_id = 1) s3
ON (s2.drug_concept_id = s3.concept_id))
) s1
),
pw_obsperiod AS
(
SELECT s1.person_id,
s1.drug_concept_id,
min(s1.drug_exposure_start_date) as first_drug_exposure,
max(s1.drug_exposure_start_date) as last_drug_exposure
FROM (
(SELECT s2.person_id, s2.drug_concept_id, s2.drug_exposure_start_date
FROM (SELECT * FROM pw_index_date) s2
INNER JOIN (SELECT * FROM `visit_occurrence` WHERE visit_concept_id = 9202) s3
ON (s2.person_id = s3.person_id AND s2.drug_exposure_start_date = s3.visit_start_date))
) s1
GROUP BY s1.person_id, s1.drug_concept_id
),
pw_start_end AS
(
SELECT s1.person_id,
s1.drug_concept_id,
DATE_SUB(s1.first_drug_exposure, INTERVAL 12 MONTH) as start_date,
DATE_ADD(s1.first_drug_exposure, INTERVAL 12 MONTH) as end_date
FROM (SELECT * FROM pw_obsperiod) s1
),
pw_elixhauser_codes AS
(
SELECT concept_id,
concept_name,
vocabulary_id,
concept_code,
replace(concept_code, '.', '') as concept_code_strip
FROM `concept`
WHERE vocabulary_id IN ('ICD10CM', 'ICD9CM')
AND concept_id IN ({elix_icds})
),
pw_icds AS
(
SELECT s1.person_id, s2.concept_code, s1.condition_date, s2.vocabulary_id
FROM (SELECT person_id,
condition_source_concept_id,
condition_source_value,
condition_start_date as condition_date
FROM `condition_occurrence`
) s1
INNER JOIN (SELECT * FROM pw_elixhauser_codes) s2
ON (s1.condition_source_concept_id = s2.concept_id)
GROUP BY s1.person_id, s2.concept_code, s1.condition_date, s2.vocabulary_id
),
pw_icds_selected AS
(
SELECT s1.person_id, s1.condition_date, s1.concept_code, s1.vocabulary_id
FROM (SELECT * FROM pw_icds) s1
INNER JOIN (SELECT * FROM pw_start_end) s2
ON (s1.person_id = s2.person_id AND
s1.condition_date >= s2.start_date AND
s1.condition_date <= s2.end_date)
GROUP BY s1.person_id, s1.condition_date, s1.concept_code, s1.vocabulary_id
),
pw_icds_op AS
(
SELECT s1.person_id, s1.concept_code, s1.condition_date, s1.vocabulary_id
FROM (
SELECT s2.person_id, s2.concept_code, s2.condition_date, s2.vocabulary_id
FROM (SELECT * FROM pw_icds_selected) s2
INNER JOIN (SELECT * FROM `visit_occurrence` WHERE visit_concept_id = 9202) s3
ON (s2.person_id = s3.person_id AND s2.condition_date = s3.visit_start_date)
GROUP BY s2.person_id, s2.concept_code, s2.condition_date, s2.vocabulary_id
) s1
)
select * from pw_icds_op);")
return(sql_query)
}
|
fa3860b9527bbaeeab8d130685aa2636e17b40c5 | 1f939bb0b5a6c9cae0289e14f668c3d7a817e7f1 | /tests/testthat/test-gnrs_get_counties.R | 2e3d3441cffdd50e2c3066f0a8944ee7e0fb5c03 | [] | no_license | EnquistLab/RGNRS | 5caf8ec94fbb2a9ab02937be001ad952410a7425 | d43eb6b1a56fa9b038aa08a9efec3a9d61761fc4 | refs/heads/master | 2021-11-20T19:58:13.037741 | 2021-10-12T18:41:01 | 2021-10-12T18:41:01 | 170,739,603 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,445 | r | test-gnrs_get_counties.R | context("gnrs_get_counties")
# Row-count helper that tolerates NULL results from the GNRS API:
# returns 0 for NULL input instead of propagating NULL / erroring,
# so expect_gt() comparisons below always receive a number.
safe_nrow <- function(x) {
  if (is.null(x)) {
    return(0)
  }
  nrow(x)
}
# End-to-end check: fetch US states, then their counties, via recorded
# vcr cassettes (no live network needed once cassettes exist).
# NOTE(review): `url` is not defined in this file -- presumably supplied
# by a testthat setup/helper file; verify before running in isolation.
test_that("example works", {
  # skip_if_offline()
  # skip_on_cran()
  #
  vcr::use_cassette("states_for_counties",
                    { states <- GNRS_get_states(url = url) })
  vcr::use_cassette("us_counties",
                    { us_counties <- GNRS_get_counties(state_province_id =
                                                         states$state_province_id[
                                                           which(states$country_iso == "US")],
                                                       url = url) })
  expect_equal(object = class(us_counties), expected = "data.frame")
  expect_gt(object = safe_nrow(us_counties),expected = 100)
})
# Calling GNRS_get_counties() with no state filter should return the
# full (large) county table as a data frame.
test_that("default input returns data.frame", {
  # skip_if_offline()
  # skip_on_cran()
  vcr::use_cassette("all_counties",
                    { counties <- GNRS_get_counties(url = url) })
  expect_equal(object = class(counties), expected = "data.frame")
  expect_gt(object = safe_nrow(counties), expected = 1000)
})
# Invalid ids: a numeric id yields NULL, a nonsense string errors.
test_that("bad input returns error or NULL", {
  vcr::use_cassette("bad_state_id",
                    { bad_state_id <- GNRS_get_counties(1, url = url) })
  expect_null(object = bad_state_id)
  expect_error(object = GNRS_get_counties("Optimus Prime", url = url))
})
|
bab957f8eafe3868a24eac80dd1b8201478fb8c0 | b4650720f3ba8decae7493ffc189e36e5b01ee63 | /Naive_Bayes_method.R | d4b135554d1f6ded7f431e377036f428e7e45348 | [
"MIT"
] | permissive | AYaddaden/machine_learning_R | 0d677b15fde857d3b69b9ddf678c26e65ace4362 | 5a53bf9763b6b6cdb1dd4d098c3589223bbcda8d | refs/heads/master | 2020-04-01T02:33:20.780150 | 2018-10-12T17:24:22 | 2018-10-12T17:24:22 | 152,784,167 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 595 | r | Naive_Bayes_method.R | library(class)
## Naive Bayes classification demo on the built-in iris data (e1071).
library(e1071)

## Quick look at the data.
summary(iris)
dim(iris)

## Fit on the full data set and inspect resubstitution accuracy.
NB <- naiveBayes(iris[, 1:4], iris[, 5])
NB
table(predict(NB, iris[, -5]), iris[, 5])

## Scatterplot matrix of the four predictors, colored by species.
pairs(iris[1:4],main="Iris Data(red=setosa,green=versicolor,blue=virginica)",pch=21,bg=c("red","green3","blue")[unclass(iris$Species)])

## 70/30 train/test split.
## NOTE(review): no set.seed() call, so the split (and the confusion
## matrix below) changes on every run; add set.seed(<n>) first if
## reproducible output is wanted.
n <- nrow(iris)
n
index <- sample(n, 0.7 * n)
index
appren <- iris[index, ]
test <- iris[-index, ]

## Refit on the training rows only and predict the held-out rows.
nb.model <- naiveBayes(Species ~ ., data = appren)
pred <- predict(object = nb.model, newdata = test)
test.mod <- cbind(test, pred)
head(test.mod, 5)

## Confusion matrix: raw counts, then proportions rounded to 2 digits.
confusion <- table(test.mod$Species, test.mod$pred)
confusion
round(prop.table(confusion), 2)
|
7b4911fecfda08f9c61303d3c8b56816a377bcf9 | b91579cd9adf8939cd80c01d72512b9a7f871676 | /man/mk_cond.Rd | 557405446b65f582429fa16d1408f46bd94a69da | [] | no_license | DataXujing/DiagrammeR | fac732998cbd5462c088769b50825654608f3f8c | e83aed30e86b63921deef0d6d54084d12c65d2dc | refs/heads/master | 2021-01-23T04:14:33.941462 | 2017-05-29T15:35:46 | 2017-05-29T15:35:46 | 92,923,116 | 1 | 0 | null | 2017-05-31T08:26:39 | 2017-05-31T08:26:38 | null | UTF-8 | R | false | true | 3,243 | rd | mk_cond.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mk_cond.R
\name{mk_cond}
\alias{mk_cond}
\title{Helper for making conditions for some functions}
\usage{
mk_cond(...)
}
\arguments{
\item{...}{sets of 3 elements for each condition. Each
set of three elements is the: (1) node or edge attribute
name (character value), (2) the conditional operator
(character value), and (3) the non-attribute operand. If
there are multiple conditions to be specified, then a
\code{&} or \code{|} operator must be used between each
condition, specifying an \code{AND} or \code{OR}.}
}
\value{
a string to be used for any \code{conditions}
argument.
}
\description{
Create one or multiple conditions for
all traversal functions (\code{trav_...}) and
certain selection functions (\code{select_nodes()} and
\code{select_edges()}). This helper could be invoked
for these functions' \code{conditions} argument.
}
\examples{
# Create a node data frame (ndf)
ndf <-
create_node_df(
n = 4,
type = "basic",
label = TRUE,
value = c(3.5, 2.6, 9.4, 2.7))
# Create an edge data frame (edf)
edf <-
create_edge_df(
from = c(1, 2, 3),
to = c(4, 3, 1),
rel = c("z", "z", "a"),
value = c(6.4, 2.9, 5.0))
# Create a graph with the ndf and edf
graph <-
create_graph(
nodes_df = ndf,
edges_df = edf)
# Select edges where the `rel` label is `z` using
# the `mk_cond()` helper function
graph_1 <-
graph \%>\%
select_edges(
conditions =
mk_cond(
"rel", "==", "z"))
# Verify that an edge selection has been made; the
# edges corresponding to this condition are the
# `1->4` and 2->3` edges with edge IDs `1` and `2`
get_selection(graph_1)
#> [1] 1 2
# Select edges based on the relationship label
# being `z` and the `value` being < 5.0
graph_2 <-
graph \%>\%
select_edges(
mk_cond(
"rel", "==", "z",
"&",
"value", "<", 5.0))
# Verify that an edge selection has been made; the
# edge corresponding to these conditions is the
# `2->3` edge with ID `2`
get_selection(graph_2)
#> [1] 2
# We can mix condition strings and conditions made
# with the `mk_cond()` helper function
# being `z` and the `value` being < 5.0
graph_2b <-
graph \%>\%
select_edges(
conditions =
c("rel == 'z'",
mk_cond("value", "<", 5.0)))
# This selection will be the same as that previous;
# note that conditions collected as a vector with
# `c()` are AND conditions
get_selection(graph_2) == get_selection(graph_2b)
#> [1] TRUE
# Because we are not specifying conditions as
# single strings, we can use objects from the
# workspace (or function calls) to compose the
# condition(s)
# Create the `rel_select` character vector
rel_select <- "a"
# Use a condition that gets the `rel` operand
# from an object, and, a `value` operand that
# is calculated from the mean of its values
# in the graph's edge data frame (~4.77)
graph_3 <-
graph \%>\%
select_edges(
mk_cond(
"rel", "==", rel_select,
"&",
"value", ">", get_edge_attrs(., "value") \%>\%
mean()))
# Verify that an edge selection has been made; the
# edge corresponding to these conditions is the
# `3->1` edge with ID `3`
get_selection(graph_3)
#> [1] 3
}
|
866e355a78a3ed480268c889ef42ae6382e8f299 | 6d65a534673543684f0a97740e7e7b831f50ea47 | /inst/scripts/hh1/Ch14-apple3.r | fcd3172cd83af7b404ebb334fe7ded81d609b835 | [] | no_license | cran/HH | 91151d240d6ecc1334fd79f1b0dfbbc28ca68df6 | a6ee768cedcebd4477bb9a5b4d0baa3d16e4dca0 | refs/heads/master | 2022-09-01T20:50:52.605306 | 2022-08-09T15:10:07 | 2022-08-09T15:10:07 | 17,691,800 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 10,782 | r | Ch14-apple3.r | ## The names of the components of trellis objects
## are different in R than in S-Plus.
## These data reprinted in \cite{hand:1994} are originally from Pearce,
## S.C., 1983, The Agricultural Field Experiment, Wiley.
##
## The response is crop yield in pounds and the covariable is yield
## in bushels in a prior period under the same growing conditions.
## The treatments are growing conditions, where level 6 is a control.
## There are 4 blocks. Hand implies that treat is significant iff
## the covariable is taken into account.
##
## if.R(r=data(apple),
## s={
## apple <- read.table(hh("datasets/apple.dat"), header=TRUE)
## apple$treat <- factor(apple$treat)
## contrasts(apple$treat) <- contr.treatment(6)
## apple$block <- factor(apple$block)
## })
data(apple)
apple.ancova.1 <- aov(yield ~ block + pre*treat, data=apple)
anova(apple.ancova.1)
apple.ancova.2 <- aov(yield ~ block + pre + treat, data=apple)
anova(apple.ancova.2)
apple.ancova.2b <- aov(yield ~ block + treat + pre, data=apple)
anova(apple.ancova.2b)
apple.ancova.2 <- update(apple.ancova.2, x=TRUE)
apple.ancova.2$x
coef(apple.ancova.2)
predict(apple.ancova.2)
## find and remove block effect from response variable and covariable
yield.block.effect <- fitted(lm(yield ~ block, data=apple))-mean(apple$yield)
pre.block.effect <- fitted(lm(pre ~ block, data=apple))-mean(apple$pre)
yield.block <- apple$yield-yield.block.effect
pre.block <- apple$pre-pre.block.effect
apple <- cbind(apple, yield.block=yield.block, pre.block=pre.block)
## Same sums of squares as apple.ancova.1 and apple.ancova.2
## for pre and treat adjusted for block
## The sum of the pre:treat and residual sum of squares is correct.
## The residual Df includes the block df and is therefore wrong.
## Therefore we suppress the residual Means Square and the F tests
apple.ancova.3 <- ancova(yield.block ~ pre.block*treat, data=apple,
blocks=apple$block)
tmp3 <- anova(apple.ancova.3)[,1:3]
tmp3[4,3] <- NA
tmp3
apple.ancova.3b <- ancova(yield.block ~ treat*pre.block, data=apple,
blocks=apple$block)
tmp3b <- anova(apple.ancova.3b)[,1:3]
tmp3b[4,3] <- NA
tmp3b
## Same sums of squares as apple.ancova.1 and apple.ancova.2
## for pre and treat adjusted for block
## The residual sum of squares is correct.
## The residual Df includes the block df and is therefore wrong.
## Therefore we suppress the residual Means Square and the F tests
apple.ancova.4 <- ancova(yield.block ~ pre.block + treat, data=apple)
tmp4 <- anova(apple.ancova.4)[,1:3]
tmp4[3,3] <- NA
tmp4
apple.ancova.4b <- ancova(yield.block ~ treat + pre.block, data=apple)
tmp4b <- anova(apple.ancova.4b)[,1:3]
tmp4b[3,3] <- NA
tmp4b
apple.ancova.6 <- ancova(yield.block ~ treat, x=pre.block, data=apple)
tmp6 <- anova(apple.ancova.6)[,1:3]
tmp6[2,3] <- NA
tmp6
predict.lm(apple.ancova.4, type="terms")
yield.block.pre <-
apple$yield.block -
predict.lm(apple.ancova.4, type="terms", terms="pre.block")
apple <- cbind(apple, yield.block.pre=as.vector(yield.block.pre))
apple.ancova.5 <- ancova(yield.block.pre ~ treat, x=pre.block, data=apple)
tmp5 <- anova(apple.ancova.5)[,1:2]
tmp5
if.R(r=
attr(apple.ancova.5, "trellis")$y.limits <- attr(apple.ancova.3, "trellis")$y.limits
, s=
attr(apple.ancova.5, "trellis")$ylim <- attr(apple.ancova.3, "trellis")$ylim
)
apple.ancova.7 <- ancova(yield.block ~ pre.block, groups=treat, data=apple)
tmp7 <- anova(apple.ancova.7)[,1:3]
tmp7[2,3] <- NA
tmp7
apple.ancova.8 <- ancova(yield ~ pre * treat, data=apple,
blocks=apple$block)
tmp8 <- anova(apple.ancova.8)[,1:2]
tmp8
## first step at printing all 6 panels together
## Strip the subtitle and the key/legend from a trellis object so that
## several panels can share one page without repeated annotation.
## The component holding the key is named "legend" under R but "key"
## under S-Plus, hence the if.R() branching; exactly one branch runs.
simplify.legend <- function(x) {
  if.R(r = {
    x$sub <- NULL
    x$legend <- NULL
  },
  s = {
    x$sub <- NULL
    x$key <- NULL
  })
  x
}
if.R(r={
bot <- 3
mid <- 2
top <- 1
}, s={
bot <- 1
mid <- 2
top <- 3
})
aa5 <- attr(apple.ancova.5, "trellis")
aa4 <- attr(apple.ancova.4, "trellis")
aa6 <- attr(apple.ancova.6, "trellis")
aa7 <- attr(apple.ancova.7, "trellis")
aa3 <- attr(apple.ancova.3, "trellis")
aa8 <- attr(apple.ancova.8, "trellis")
print(simplify.legend(aa5), split = c(1,bot,1,3), more = TRUE) # bottom of 6
print(simplify.legend(aa4), split = c(1,mid,1,3), more = TRUE) # middle
print(simplify.legend(aa6), split = c(1,top,1,3), more = FALSE)# middle
print(simplify.legend(aa7), split = c(1,bot,1,3), more = TRUE) # middle
print(simplify.legend(aa3), split = c(1,mid,1,3), more = TRUE) # middle
print(simplify.legend(aa8), split = c(1,top,1,3), more = FALSE)# top of 6
## export.eps(hh("dsgntwo/figure/apple.ancova0.eps"))
## modify trellis parameters
## second step at printing all 6 panels together
## Strip the legend/key, subtitle, and axis labels from a trellis object
## and shrink the remaining text (strip labels, tick labels, main title)
## so that six ANCOVA panels fit legibly on a single page.  The R and
## S-Plus branches differ because the two lattice implementations store
## these settings under different component names and need different
## cex calibrations; the R branch rebuilds scales via update(), while
## direct assignment into x$scales is used only under S-Plus.
simplify.legend.labels <- function(x) {
  if.R(r={
    x$legend <- NULL                  # drop the key (R component name)
    x$sub <- NULL                     # drop the subtitle
    x$par.strip.text <- list(cex=.5)  # smaller panel-strip labels
    tmp.scales <- list(alternating=FALSE,
                       x=list(cex=.5),
                       y=list(at=seq(200,375,25), cex=.6))
    x <- update(x, scales=tmp.scales) # apply the new axis scales
    x$main$cex <- 1
    x$xlab <- NULL
    x$ylab <- NULL
  }, s={
    x$key <- NULL                     # drop the key (S-Plus component name)
    x$sub <- NULL
    x$par.strip.text <- list(cex=1)
    tmp.scales <- list(alternating=FALSE,
                       x=list(cex=.8),
                       y=list(at=seq(200,375,25), cex=.9))
    x$scales <- tmp.scales
    x$main$cex <- 1.8
    x$xlab <- NULL
    x$ylab <- NULL
  })
  x
}
print(simplify.legend.labels(aa5), position = c(0, 0.00/1.35, 1, 0.40/1.35), more = TRUE) # bottom of 6
print(simplify.legend.labels(aa4), position = c(0, 0.40/1.35, 1, 0.80/1.35), more = TRUE) # middle
print(simplify.legend.labels(aa6), position = c(0, 0.80/1.35, 1, 1.20/1.35), more = FALSE) # middle
print(simplify.legend.labels(aa7), position = c(0, 0.00/1.35, 1, 0.40/1.35), more = TRUE) # middle
print(simplify.legend.labels(aa3), position = c(0, 0.40/1.35, 1, 0.80/1.35), more = TRUE) # middle
print(simplify.legend.labels(aa8), position = c(0, 0.80/1.35, 1, 1.35/1.35), more = FALSE) # top of 6
## export.eps(hh("dsgntwo/figure/apple.ancova.eps"))
a.xlim <- range(apple$pre, pre.block)
a.ylim <- range(apple$yield, yield.block)
a.y <- if.R(s=
t(bwplot(block ~ yield, data=apple,
main="yield --- observed by block",
par.strip.text=list(cex=1),
xlim=a.ylim,
strip=function(...)
strip.default(..., strip.names = c(TRUE, TRUE))))
,r=
bwplot(yield ~ block, data=apple,
main="yield --- observed by block",
par.strip.text=list(cex=1),
ylim=a.ylim,
strip=function(...)
strip.default(..., strip.names = c(TRUE, TRUE)))
)
a.p <- if.R(s=
t(bwplot(block ~ pre, data=apple,
main="pre --- observed by block",
par.strip.text=list(cex=1),
xlim=a.xlim,
strip=function(...)
strip.default(..., strip.names = c(TRUE, TRUE))))
,r=
bwplot(pre ~ block, data=apple,
main="pre --- observed by block",
par.strip.text=list(cex=1),
ylim=a.xlim,
strip=function(...)
strip.default(..., strip.names = c(TRUE, TRUE)))
)
a.y.b <- if.R(s=
t(bwplot(block ~ yield.block, data=apple,
main="yield --- adjusted for block",
par.strip.text=list(cex=1),
xlim=a.ylim,
strip=function(...)
strip.default(..., strip.names = c(TRUE, TRUE))))
,r=
bwplot(yield.block ~ block, data=apple,
main="yield --- adjusted for block",
par.strip.text=list(cex=1),
ylim=a.ylim,
strip=function(...)
strip.default(..., strip.names = c(TRUE, TRUE)))
)
a.p.b <- if.R(s=
t(bwplot(block ~ pre.block, data=apple,
main="pre --- adjusted for block",
par.strip.text=list(cex=1),
xlim=a.xlim,
strip=function(...)
strip.default(..., strip.names = c(TRUE, TRUE))))
,r=
bwplot(pre.block ~ block, data=apple,
main="pre --- adjusted for block",
par.strip.text=list(cex=1),
ylim=a.xlim,
strip=function(...)
strip.default(..., strip.names = c(TRUE, TRUE)))
)
if.R(r={
bot <- 2
top <- 1
}, s={
bot <- 1
top <- 2
})
print(a.y, split = c(1,top,2,2), more = TRUE) # left top
print(a.p, split = c(2,top,2,2), more = TRUE) # right top
print(a.y.b, split = c(1,bot,2,2), more = TRUE) # left bottom
print(a.p.b, split = c(2,bot,2,2), more = FALSE) # right bottom
## export.eps(hh("dsgntwo/figure/apple.y.p.eps"))
## apple.ancova.2 and apple.ancova.4 have the same Sums of Squares in
## the anova table and the same regression coefficients.
summary.lm(apple.ancova.2, corr=FALSE)
summary.lm(apple.ancova.4, corr=FALSE)
## apple.ancova.2 has the correct residual df, hence Mean Squares and F tests.
## apple.ancova.4 has the wrong residual df, hence Mean Square and F tests.
if.R(r={
## glht must be done with apple.ancova.2
tmp <-
glht(apple.ancova.2, linfct=mcp(treat=contrMat(table(apple$treat), type="Dunnett", base=6)))
confint(tmp)
plot(tmp)
apple.mmc <-
mmc(apple.ancova.2, linfct=mcp(treat=contrMat(table(apple$treat), type="Dunnett", base=6)))
plot(apple.mmc, x.offset=12, ry=c(245,310))
## export.eps(hh("dsgntwo/figure/apple.mmc.eps"))
print(apple.mmc)
plotMatchMMC(apple.mmc$mca)
## export.eps(hh("dsgntwo/figure/apple.multicomp.mca.eps"))
}, s={
## multicomp must be done with apple.ancova.2
tmp <-
multicomp(apple.ancova.2, comparisons="mcc", method="dunnett", valid.check=FALSE,
focus="treat")
tmp
plot(tmp)
## export.eps(hh("dsgntwo/figure/apple.multicomp.eps"))
## find out which rows of lmat we need
zapsmall(tmp$lmat)
## keep just the treatment rows
apple.mmc <-
multicomp.mmc(apple.ancova.2,
comparisons="mcc", method="dunnett", valid.check=FALSE,
focus="treat", lmat.rows=7:12, x.offset=10, plot=FALSE)
plot(apple.mmc, col.iso=16, x.offset=10)
## export.eps(hh("dsgntwo/figure/apple.mmc.eps"))
print(apple.mmc)
plotMatchMMC(apple.mmc$mca)
})
## export.eps(hh("dsgntwo/figure/apple.multicomp.mca.eps"))
|
6a7ad7636c2b96af00f471aaf672baaa3c2be1e6 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.customer.engagement/man/connect_disassociate_routing_profile_queues.Rd | a6d9151f2d2c1f13e8e6875e6ca945e2484da493 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 995 | rd | connect_disassociate_routing_profile_queues.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_disassociate_routing_profile_queues}
\alias{connect_disassociate_routing_profile_queues}
\title{Disassociates a set of queues from a routing profile}
\usage{
connect_disassociate_routing_profile_queues(
InstanceId,
RoutingProfileId,
QueueReferences
)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance. You can \href{https://docs.aws.amazon.com/connect/latest/adminguide/find-instance-arn.html}{find the instance ID}
in the Amazon Resource Name (ARN) of the instance.}
\item{RoutingProfileId}{[required] The identifier of the routing profile.}
\item{QueueReferences}{[required] The queues to disassociate from this routing profile.}
}
\description{
Disassociates a set of queues from a routing profile.
See \url{https://www.paws-r-sdk.com/docs/connect_disassociate_routing_profile_queues/} for full documentation.
}
\keyword{internal}
|
0c49fe1ca319919f09d7371e11a6a4d32fcfe248 | e998914a3408bac5c3df9477c7a147550ed378a7 | /R/individuals.R | b5a7b09b9ca00ea099ed0b57ed97f9e677cd4596 | [] | no_license | stranda/rmetasim | 22316ada7bae2715f76ccf6fe2458165b1c91d30 | 21e93e7f9b9f40b6c5d64ee0e5ce1830e58355ee | refs/heads/master | 2023-08-21T22:30:58.005380 | 2023-08-08T15:28:23 | 2023-08-08T15:28:23 | 31,664,064 | 3 | 4 | null | 2020-01-30T13:51:46 | 2015-03-04T15:27:14 | C++ | UTF-8 | R | false | false | 252 | r | individuals.R | #Allan Strand 9/29/01
#
#returns a vector of populations assignments for each individual in Rland
landscape.populations <- function(Rland)
{
  ## Vector of population assignments (1-based) for every individual in the
  ## landscape: column 1 of Rland$individuals encodes stage-within-population,
  ## so integer division by the number of stages yields the population index.
  ## Returns NULL invisibly when Rland is not a valid landscape object.
  if (is.landscape(Rland)) {
    (Rland$individuals[, 1] %/% Rland$intparam$stages) + 1
  }
}
|
13ada2c5ab3843f62dc9e5d69c803c46c702ddc0 | ab370403614ef5114791eb6aa7d125c932f7461e | /treePDF.r | 339ba2131abeefcea770b9b34010ad46d52108a4 | [] | no_license | jhnwllr/enbBodySize | 43a930d105fb70a0b46f3da3fdc6d05810f79633 | 25059248c0af7c592572d0b97cf1c8e389e218c7 | refs/heads/master | 2020-04-05T12:24:08.826049 | 2018-11-09T15:26:11 | 2018-11-09T15:26:11 | 156,868,274 | 0 | 0 | null | null | null | null | ISO-8859-2 | R | false | false | 3,910 | r | treePDF.r |
By permission of the Faculty of Science, Lund University, Sweden.
To be defended in the Blue Hall, Ecology Building, Sölvegatan 37, Lund Sweden on Friday 26th January, 2018 13.00 - 15.00.
Faculty opponent
Dr. Locke Rowe,
Dean of school of graduate studies and
Vice-provost of graduate research and education.
Distinguished professor of Ecology and Evolutionary Biology
University of Toronto, Canada
List of Papers
I. Waller, J., & Svensson, E. I. (2016). The measurement of selection when detection is imperfect: How good are naïve methods?. Methods in Ecology and Evolution, 7(5), 538-548.
II. Waller, J., Willink B., Tschol M., & Svensson E. I. (2018). The odonate phenotypic database. www.odonatephenotypicdatabase.org. submitted to Scientific Data.
III. Waller, J., & Svensson, E. I. (2017). Body size evolution in an old insect order: No evidence for Cope's Rule in spite of fitness benefits of large size. Evolution.
IV. Waller, J. (2018). Is the blunderbuss a misleading visual metaphor for stasis and punctuated evolution? submitted to American Naturalist.
V. Waller, J., & Svensson, E. I. (2017). Temperature, latitude, and birds: factors influencing geographic body size patterns in an old insect order (Odonata). manuscript.
VI. Waller, J., Kell, A., Ballesta, M., Giraud, A., Abbott, J., & Svensson, E. (2017). Limited genetic variation for male mating success reveals low evolutionary potential for thermal plasticity in Drosophila melanogaster. bioRxiv, 166801. Submitted to Genetical Research.
VII. Waller, J. & Svensson, E. I. (2018). Selection on thermal plasticity in small ectotherms: a study of two small insects species (damselflies of the genus Calopteryx). manuscript.
Papers I & III are reprinted with permission of the publisher.
Other interesting papers published during thesis but not included:
1. Svensson, E. I., & Waller, J. T. (2013). Ecology and sexual selection: evolution of wing pigmentation in calopterygid damselflies in relation to latitude, sexual dimorphism, and speciation. The American Naturalist, 182(5), E174-E195.
2. Gosden, T. P., Waller, J. T., & Svensson, E. I. (2015). Asymmetric isolating barriers between different microclimatic environments caused by low immigrant survival. Proceedings of the Royal Society of London B: Biological Sciences, 282(1802), 20142459.
3. Svensson, E. I., Nordén, A., Waller, J. T., & Runemark, A. (2016). Linking intra-and interspecific assortative mating: Consequences for asymmetric sexual isolation. Evolution, 70(6), 1165-1179.
Author Contributions
I. Waller and Svensson conceived the study. Svensson advised direction of study and assisted in framing manuscript in a broader context. Waller performed simulations and data analysis and writing of the manuscript.
II. Waller, Willink and Svensson conceived, planned, and designed the database. Tschol contributed a significant amount of data to the database. Waller, Willink, Tschol, and Svensson contributed to the manuscript. Waller published the website.
III. Waller and Svensson conceived and planned the study. Waller and Svensson wrote the manuscript. Waller conducted the data analysis and preparation.
IV. Waller conceived the commentary, wrote the manuscript, and conducted the data analysis and simulations.
V. Waller and Svensson conceived and designed the study. Waller and Svensson wrote and contributed to the manuscript. Waller prepared the data and conducted the data analysis.
VI. Waller, Svensson and Abbott conceived and designed the study. Kell, Ballesta, and Giraud conducted experiments. Waller, Svensson, Kell, Ballesta, and Abbott contributed to the manuscript. Waller, Kell, and Ballesta prepared the data and conducted data analysis.
VII. Waller and Svensson conceived and designed the study. Waller conducted the field experiments. Waller prepared and analyzed the data. Waller and Svensson wrote the manuscript.
|
6c099501f953e8ae57ecbcf3be21ba1571327c44 | 3adcb125d650d0dfcb523734136012d788c0bf49 | /man/gen_summary_data.Rd | dfa6c4dde4ee25849ae3f2f0e76aec14e6086c99 | [] | no_license | connor-duffin/stormRainfallExtras | 95c3d3cb8f653038d67afbba293e7f4bd4ed3ce8 | 7bafaacda092ad9ab7e6608185589d494fd78f19 | refs/heads/master | 2020-04-10T20:29:19.056212 | 2018-12-19T08:15:44 | 2018-12-19T08:15:44 | 161,269,256 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 723 | rd | gen_summary_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monthly-plots.R
\name{gen_summary_data}
\alias{gen_summary_data}
\title{Generate the monthly summary data for a given site.}
\usage{
gen_summary_data(data_file, int_width = 0.8, model = 1, samples = 10,
n_cores = 1)
}
\arguments{
\item{data_file}{An output file from \code{parallel_logistic_sampler}.}
\item{int_width}{Width of the posterior probability interval.}
\item{model}{List index of the desired model.}
\item{samples}{Number of samples to take from the given model.}
\item{n_cores}{Number of cores to use in parallel.}
}
\value{
A list of the two summaries.
}
\description{
This is a wrapper for \code{get_monthly_summaries}.
}
|
1fbcba7618a1ba33907314dd8d997d9595ac16cc | ed7e5ec1dedce44eca68d6bb274b02d7904a10b5 | /Script/Analysis_05_Merge-and-Harmonize-Publishers.R | 8d995dbd6338d28e245b66803ea8556c0439f120 | [
"CC0-1.0"
] | permissive | andreaspacher/academic-publishers | 0915316b363153f2711aa5be16371d325592984b | d2e8465198dc6fe7b6ba6196cdb26c174743cdd4 | refs/heads/main | 2023-04-15T20:16:12.457517 | 2022-09-15T07:53:18 | 2022-09-15T07:53:18 | 334,488,019 | 10 | 5 | null | 2022-09-15T07:53:19 | 2021-01-30T18:53:09 | R | UTF-8 | R | false | false | 3,916 | r | Analysis_05_Merge-and-Harmonize-Publishers.R | #
#
# IMPORT DATA
#
library(readr)
# dplyr is required further down for the `%>%` pipe, mutate() and case_when();
# it was previously never attached, which made the pipeline fail at that step.
library(dplyr)

csv_doaj <- read_csv(file = "Output\\Preliminary_Lists\\01_publishers_DOAJ.csv")
csv_publons <- read_csv(file = "Output\\Preliminary_Lists\\02_publishers_Publons.csv")
csv_scopus <- read_csv(file = "Output\\Preliminary_Lists\\03_publishers_Scopus.csv")
csv_romeo <- read_csv(file = "Output\\Preliminary_Lists\\04_publishers_SherpaRomeo.csv")

names(csv_doaj) <- c("Publisher", "Journals")
names(csv_publons) <- c("Publisher", "Journals", "Reviews")
names(csv_scopus) <- c("Publisher", "Journals")
names(csv_romeo) <- c("Publisher", "Journals")

# ============================
# Preliminary Results
# ============================
#
# DOAJ 8101 Publishers
# Publons 5145 Publishers
# Romeo 4273 Publishers
# Scopus 11881 Publishers
#

csv_publons$Reviews <- NULL
alljournals <- list(
  DOAJ = csv_doaj,
  Publons = csv_publons,
  Romeo = csv_romeo,
  Scopus = csv_scopus
)

# How many journals should a publisher have in order to be listed?
# This example shows a threshold of 15 journals per publisher
threshold <- 15
alljournals <- lapply(alljournals, function(x) subset(x, Journals >= threshold))

csv_doaj <- as.data.frame(alljournals[1])
csv_publons <- as.data.frame(alljournals[2])
csv_romeo <- as.data.frame(alljournals[3])
csv_scopus <- as.data.frame(alljournals[4])

names(csv_doaj) <- c("Publisher", "DOAJ_Journals")
names(csv_publons) <- c("Publisher", "Publons_Journals")
names(csv_romeo) <- c("Publisher", "Romeo_Journals")
names(csv_scopus) <- c("Publisher", "Scopus_Journals")

# ============================
# Preliminary Results 2
# ============================
#
# DOAJ 113 Publishers
# Publons 219 Publishers
# Romeo 260 Publishers
# Scopus 162 Publishers
# ALL 568 distinct names! (before harmonization)

# Full outer joins so every publisher from every source is kept.
doaj_publons <- merge(csv_doaj, csv_publons, by = "Publisher", all = TRUE)
romeo_scopus <- merge(csv_romeo, csv_scopus, by = "Publisher", all = TRUE)
alljournaldata <- merge(doaj_publons, romeo_scopus, by = "Publisher", all = TRUE)

# harmonize publisher names
# you can add additional names to harmonize into "03_publishers_harmonization.txt"
# separated by tabs (\t) (first, the deviation, then, second, after the tab, the harmonization)
harmonia <- read.delim(file = "Data\\03_publishers_harmonization.txt", header = TRUE, sep = "\t")
# seq_len() is safe when the harmonization file is empty (1:nrow(x) would yield 1:0).
for (i in seq_len(nrow(harmonia))) {
  alljournaldata$Publisher[alljournaldata$Publisher == harmonia$Deviation[i]] <- harmonia$Harmonization[i]
}

alljournaldata$DOAJ_Journals <- as.numeric(alljournaldata$DOAJ_Journals)
alljournaldata$Publons_Journals <- as.numeric(alljournaldata$Publons_Journals)
alljournaldata$Romeo_Journals <- as.numeric(alljournaldata$Romeo_Journals)
alljournaldata$Scopus_Journals <- as.numeric(alljournaldata$Scopus_Journals)

# manual correction necessary...
# b/c for some reason, the harmonization-step does not work
# with all publishers
alljournaldata <- alljournaldata %>%
  mutate(Publisher = case_when(
    grepl("Hemeroteca", Publisher) ~ "Institut d'Estudis Catalans",
    grepl("Bologna", Publisher) ~ "University of Bologna Press",
    grepl("Mulino", Publisher) ~ "Il Mulino",
    grepl("Johns Hop", Publisher) ~ "John Hopkins University Press",
    TRUE ~ Publisher
  ))

library(data.table)
DT <- data.table(alljournaldata)
# Strip accents so differently-encoded spellings of one publisher collapse
# to the same ASCII name before aggregation.
Encoding(DT$Publisher) <- 'latin1'
DT$Publisher <- stringi::stri_trans_general(DT$Publisher, 'Latin-ASCII')
# Sum the journal counts per (now harmonized) publisher name.
DT <- DT[, lapply(.SD, sum, na.rm = TRUE), by = Publisher]
DT <- DT[order(Publisher)]
DT <- dplyr::mutate(DT, maxjournals = pmax(DOAJ_Journals, Publons_Journals, Romeo_Journals, Scopus_Journals))
DT <- DT[order(-maxjournals)]

currdate <- Sys.Date()
write.csv(DT, paste0("Output\\Preliminary_Lists\\allpublishers-", currdate, ".csv"), row.names = FALSE)

# Next step is manual:
# find all relevant links to journal catalogues,
# and collect CSS selectors etc, for every publisher,
# and save as "Data\04_publishers.xlsx"
3bb0c41b472e9a1120688b92a8bb1d688621b297 | 0235b5f10015e5cf34e757bcbeb6c07558469a84 | /rankall.R | 116bb5c63a78c03e5cabd5a3722e664c779bbf04 | [] | no_license | erinboyle05/PA3 | 456fe27a53eae852d567edbb0b5c3d588ad2e8f9 | 86192fafae867b6840ebea6f3447d42b90e1757c | refs/heads/master | 2016-09-09T21:38:49.376861 | 2015-09-18T02:43:56 | 2015-09-18T02:43:56 | 42,633,382 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,867 | r | rankall.R | ## Function rankall() sorts the
## The read columns are: [2] hospital.name, [7] state,
##[11] MR_Heart Attack, [17] MR_Heart Failure, [23] MR_Pneumonia
rankall <- function(outcome, num = "best") {
file_name <- "outcome-of-care-measures.csv"
if (outcome == "heart attack") {
rawdata <- read.csv(file_name, colClasses = "character")[,c(2,7,11)]
names(rawdata) <- c("hospital", "state", "heart attack")
rawdata[,2] <- as.factor(rawdata[,2])
rawdata[,3] <- suppressWarnings(as.numeric(rawdata[,3]))
}
else if (outcome == "heart failure") {
rawdata <- read.csv(file_name, colClasses = "character")[,c(2,7,17)]
names(rawdata) <- c("hospital", "state", "heart failure")
rawdata[,2] <- as.factor(rawdata[,2])
rawdata[,3] <- suppressWarnings(as.numeric(rawdata[,3]))
}
else if (outcome == "pneumonia") {
rawdata <- read.csv(file_name, colClasses = "character")[,c(2,7,23)]
names(rawdata) <- c("hospital", "state", "pneumonia")
rawdata[,2] <- as.factor(rawdata[,2])
rawdata[,3] <- suppressWarnings(as.numeric(rawdata[,3]))
}
else {
stop('invalid outcome')
}
states <- levels(rawdata[,2])
output <- data.frame()
for (i in 1:length(states)) {
statedata <- rawdata[grep(states[i], rawdata[,2]),]
if (num == "best") {
# num == 1
statedata <- statedata[complete.cases(statedata),]
orderdata <- statedata[order(statedata[,3],
statedata[,1]),]
output <- rbind.data.frame(output, orderdata[1,1:2])
}
else if (num == "worst") {
orderdata <- statedata[order(statedata[,3],
statedata[,1],decreasing = TRUE),]
statedata <- statedata[complete.cases(statedata),]
output <- rbind.data.frame(output, orderdata[1,1:2])
}
else {
statedata <- statedata[complete.cases(statedata),]
orderdata <- statedata[order(statedata[,3],
statedata[,1]),]
output <- rbind.data.frame(output, orderdata[num,1:2])
}
}
# output <- as.data.frame(matrix(output, length(states), 2, byrow = TRUE))
# colnames(output) <- c("hospital", "state")
# rownames(output) <- output[,2]
# return(output)
output
} |
253e564b2dc2c2c09c6f316d5ba32c29ca74ac39 | 40ed3c92c59e477ed44f4096a0ffbf9664284e56 | /getSwitches.R | c8c21a0f07bab2fd285299c9e63fb28400b6314a | [] | no_license | Transipedia/dekupl-annotation | 78ac50892046fe070342b04911756b865b280818 | c26a790495e2b0126c48722213b3808971a8f6ce | refs/heads/legacy | 2021-01-18T17:54:05.370836 | 2018-08-17T10:58:29 | 2018-08-17T10:58:29 | 86,822,205 | 5 | 7 | null | 2018-10-15T15:45:53 | 2017-03-31T13:29:52 | Shell | UTF-8 | R | false | false | 4,750 | r | getSwitches.R | #!/usr/bin/env Rscript
args <- commandArgs(TRUE)
if (length(args)==0){
stop("missing arguments !\nUsage : ./getSwitches.R <output directory> <DiffContigsInfos.tsv> <sample_conditions.tsv> <normalized_counts.tsv>")
}
library(DESeq2)
#### input data ####
home<-args[1]
#DiffContigsInfos from dekupl
all_contigs<-args[2]
#design file from dekupl
sample_conditions<-args[3]
#normalized counts (gene expression) from dekupl (Kallisto)
normalizedGeneCounts<-args[4]
all_contigs<-read.delim(all_contigs,check.names=F)
sample_conditions<-read.delim(sample_conditions,check.names=F)
normalizedGeneCounts<-read.delim(normalizedGeneCounts,check.names=F)
#### process data ####
#retrieve mapped contigs
mapped_DE_contigs<-all_contigs[which(all_contigs$gene%in%normalizedGeneCounts$id),]
#keep in normalized counts only rows in which we have mapped contigs
normalizedGeneCounts<-normalizedGeneCounts[which(normalizedGeneCounts$id%in%mapped_DE_contigs$gene),]
#contigs with their counts : keep only columns contig ID, gene ID and contig counts
#after the 37th column (log2FC), we have the counts
tab_counts_DEkupl<-mapped_DE_contigs[,c(1,22,38:(ncol(mapped_DE_contigs)))]
#intersect KALLISTO gene IDs & DEKUPL gene IDs
tab_counts_Kallisto<-merge(tab_counts_DEkupl,normalizedGeneCounts, by.x="gene", by.y="id", all.x=T, all.y=F)
#reorganize columns in order to have contig ID, gene ID & KALLISTO counts (same order as tab_counts_DEkupl)
tab_counts_Kallisto<-tab_counts_Kallisto[,c(2,1,(ncol(tab_counts_DEkupl)+1):(ncol(tab_counts_Kallisto)))]
#order both tables following the contig ID
tab_counts_Kallisto<-tab_counts_Kallisto[order(tab_counts_Kallisto$ID),]
tab_counts_DEkupl<-tab_counts_DEkupl[order(tab_counts_DEkupl$ID),]
#keep the same header for both tables
names(tab_counts_Kallisto)[3:ncol(tab_counts_Kallisto)]<-names(tab_counts_DEkupl)[3:ncol(tab_counts_DEkupl)]
#prepare contigs with their counts for DESeq2 (row names = contig ID, and we keep only counts without any other columns)
rownames(tab_counts_DEkupl)<-tab_counts_DEkupl$ID
tab_counts_DEkupl[,c(1,2)]<-NULL
#get conditions name
cond1<-as.character(sample_conditions[1,2])
cond2<-unique(as.character(sample_conditions[,2][sample_conditions[,2]!=cond1]))
#get number of samples for each condition
rep_cond1<-nrow(sample_conditions[which(sample_conditions[,2]==cond1),])
rep_cond2<-nrow(sample_conditions[which(sample_conditions[,2]==cond2),])
#set design
samples<-data.frame(row.names=names(tab_counts_DEkupl),condition=c(rep(cond1,rep_cond1),rep(cond2,rep_cond2)))
#create DESeqDataSet object from design & contigs DE
dds<-DESeqDataSetFromMatrix(countData=as.matrix(round(tab_counts_DEkupl)),colData=samples,design=~condition)
#compute normalization factor for each contig at each sample, thanks to their normalized gene counts from Kallisto
normFactors<-as.matrix((tab_counts_Kallisto[,3:ncol(tab_counts_Kallisto)]+1)/exp(rowMeans(log(tab_counts_Kallisto[,3:ncol(tab_counts_Kallisto)]+1))))
#allocation of normalization factors
normalizationFactors(dds)<-normFactors
#estimate overdispersion parameters
#it's possible to have issues with estimateDispersions() if you have a low number of contigs ("dispersion trend not well captured")
#so, we use fitType="mean" instead of the default "parametric"
getDispersions<-function(my_object=""){
dds<-try(estimateDispersions(my_object))
if (class(dds)=="try-error"){
cat("with fitType='parametric', the dispersion trend was not well captured by the function, we will use instead fitType='mean'")
dds<-estimateDispersions(my_object,fitType="mean")
}
return(dds)
}
dds<-getDispersions(dds)
#binomiale negative test
dds<-nbinomWaldTest(dds)
#results
#we turn off all kind of filters to avoid "NA" values for outliers
DESeq2Result<-results(dds,independentFiltering=F,cooksCutoff=F)
#extract padj
DESeq2Result<-DESeq2Result[c("padj","stat")]
#make a custom table with contig ID,mean cond1, mean cond2, log2FC, padj, normalized counts for all libraries
new_result<-data.frame(id=row.names(DESeq2Result),DU_Pvalue=DESeq2Result$padj,DU_stat=DESeq2Result$stat,row.names=NULL)
#merge the initial table of contigs with the result
all_contigs<-merge(all_contigs,new_result, by.x="ID", by.y="id", all.x=T, all.y=F)
#unmapped/intergenic contigs are given the value "NA"
all_contigs$DU_Pvalue[is.na(all_contigs$DU_Pvalue)]<-"NA"
all_contigs$DU_stat[all_contigs$DU_Pvalue=="NA"]<-"NA"
#put column DU_Pvalue & DU_stat just after initial log2FC (before the counts)
all_contigs<-all_contigs[,c(1:37,(ncol(all_contigs))-1,ncol(all_contigs),38:((ncol(all_contigs))-2))]
write.table(all_contigs,file=paste(home,"DiffContigsInfos.tsv",sep=""),sep="\t",row.names=F, col.names=T, quote=F)
|
e4240ee9fd45157f2376fc8f35179f5431f83790 | 72631add74b64fac1e5702f45e6c3daec8c561d4 | /snptest-annotate-pipeline/03-plot.R | a7aaa8535f42bed9010a4dab925ef931e5c51e35 | [] | no_license | Hiuyu/gwas | 4025b10421be931acfa711fbab00b91d806afbcf | 2525ac97d4a929a15f7f84c184821a498783d6f4 | refs/heads/master | 2021-01-17T07:36:26.491721 | 2015-01-15T14:54:59 | 2015-01-15T14:54:59 | 29,854,133 | 0 | 1 | null | 2015-01-26T09:30:46 | 2015-01-26T09:30:46 | null | UTF-8 | R | false | false | 754 | r | 03-plot.R | args = commandArgs(trailingOnly = TRUE)
infile = args[1]
scriptdir = args[2]
outfn = args[3]
print(infile)
source(paste(scriptdir, "qqman.r", sep="/"))
data = read.table(infile, h=T)
print("read table")
dim(data)
# SNP chr position coded_all noncoded_all strand_genome beta SE pval AF_coded_all HWE_pval callrate n_total imputed used_for_imp oevar_imp
print("reformat")
data2 = data.frame(SNP=data$SNP, CHR=data$chr, BP=data$position, P=data$pval, EAF=data$AF_coded_all)
head(data2)
print("MAF filter 10%")
print(nrow(data2))
data2 = data2[which(data2$EAF >= 0.1 & data2$EAF <= 0.9),]
print(nrow(data2))
print(paste("make plot", outfn))
png(outfn, width=2400, height=1400)
manhattan(data2)
dev.off()
|
3e611397ced136bc3209b9a81715824f86a05209 | bb973497edfd6444622506e4cf9364aa3d398188 | /Assignment 1/complete.R | 1afa0fdc88ad5e49329ff3f634afe2964148c9a8 | [] | no_license | johnyu0424/R-Programming | 43083d1e48c0e593e597287ecc6ab422bbc2ec0f | b666d803f355e39214cac5a8aea99f48e15ee246 | refs/heads/master | 2021-01-23T14:52:41.620035 | 2015-01-22T00:53:21 | 2015-01-22T00:53:21 | 29,572,811 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,695 | r | complete.R | ## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
complete <- function(directory, id = 1:332)
{
# get the full path of the directory
fPath <- paste(getwd(), directory, sep = "/")
# find all .csv files in the given directory
files <- list.files(path = fPath, pattern="*.csv")
# add the full path to all the .csv files
files <- paste(fPath, files, sep = "/")
# get the files in question
fiq <- files[id]
# initialize nobs to have some dummy data, will be remove later
nobs <- 0
# loop through all files in question
for(file in fiq)
{
# load the data into pData
pData <- read.csv(file, header = T)
# using complete.cases to find all data that contain complete data
# the function return a logical vector
completeData <- complete.cases(pData)
# from the logical vector count all the elements that are TRUE, which is
# the count of all complete cases.
numCompleteData <- length(completeData[completeData == TRUE])
# add the count into nobs vector
nobs <- c(nobs, numCompleteData)
}
# remove the first dummy data that was added earlier
nobs <- nobs[-1]
# creating the data frame using id and nobs
completeDataFrame <- data.frame(id, nobs)
# return the complete data frame
completeDataFrame
} |
09ed5f9fac25475462b6b1b3d9086a6651be47a0 | 14c63b79db0c2310441f030bb93ab07a0e0fcc0d | /R/solve_deps.R | a64f504af9202c1e38e8714cf51172ca489e5fe0 | [
"Apache-2.0"
] | permissive | tgirke/GEN242 | f8cdc8064f734b8c6a186a673e24aed915a9d729 | 1667230320482a3202b2e3a42a2c7480b7768049 | refs/heads/main | 2023-07-20T10:22:33.358061 | 2023-07-17T18:00:09 | 2023-07-17T18:00:09 | 472,539,483 | 3 | 7 | Apache-2.0 | 2022-04-21T01:03:41 | 2022-03-21T22:57:08 | HTML | UTF-8 | R | false | false | 1,491 | r | solve_deps.R | # setup
options(repos=structure(c(CRAN="http://cran.rstudio.com/")))
FORCE_UPDATE <- FALSE
if(!requireNamespace("remotes") | FORCE_UPDATE) install.packages("remotes")
if(!requireNamespace("yaml") | FORCE_UPDATE) install.packages("yaml")
pkgs <- yaml::yaml.load_file("deps.yaml")
# define func
install_deps <- function(pkgs, type, FORCE_UPDATE){
if(length(pkgs) < 1) return(cat("No ", type, " pkgs to install\n"))
switch(type,
'CRAN' = {
for(pkg in pkgs){
if(!requireNamespace(pkg) | FORCE_UPDATE) install.packages(pkg)
else cat(pkg, " found skip\n")
}
},
"Bioc" = {
if(!requireNamespace("BiocManager") | FORCE_UPDATE) install.packages("BiocManager")
for(pkg in pkgs){
if(!requireNamespace(pkg) | FORCE_UPDATE) BiocManager::install(pkg, update = FORCE_UPDATE)
else cat(pkg, " found skip\n")
}
},
"Github" = {
for(pkg in pkgs){
if(sum(gregexpr("/", pkg, fixed = TRUE)[[1]] > 0) != 1) stop("Invalid Github pkg name: ", pkg, ". e.g. user/repo")
if(!requireNamespace(strsplit(pkg, "/")[[1]][2]) | FORCE_UPDATE) remotes::install_github(pkg)
else cat(pkg, " found skip\n")
}
}
)
cat("Install ", type, " pkgs done.\n")
}
# install
install_deps(pkgs$CRAN, "CRAN", FORCE_UPDATE)
install_deps(pkgs$Bioc, "Bioc", FORCE_UPDATE)
install_deps(pkgs$Github, "Github", FORCE_UPDATE)
dir.create("public", showWarnings = FALSE, recursive = TRUE)
cat("Deps installation done.\n")
|
4eecaa9210545ed8284f1de272ccbb77abb54fdb | 2c381c17bf826631df214c4ee9de13094e5efb5b | /R/biclusterGUI.R | 0e601c47b891d3c8fc1fd486c21a8fdb685463e7 | [] | no_license | jonalim/mfBiclust | 572452e3fd3392e1cf8c3ccd36012297abb41104 | 23225f6ace79a6aa2088926e7cb05e80578fe410 | refs/heads/master | 2020-03-19T08:02:15.453038 | 2019-02-12T17:26:54 | 2019-02-12T17:26:54 | 136,170,076 | 1 | 0 | null | 2019-01-16T17:59:05 | 2018-06-05T11:54:09 | R | UTF-8 | R | false | false | 2,854 | r | biclusterGUI.R | #' @include BiclusterExperiment.R
NULL
#' Explore a BiclusterExperiment
#'
#' Opens a shiny GUI to analyze and visualize bicluster analyses.
#'
#' @param obj a BiclusterExperiment, an ExpressionSet, or an object
#' coercible to a matrix. If missing, the user must import data from a comma-
#' or space-delimited file using the GUI.
#' @param ... Other parameters. \code{dbg = TRUE} Runs the GUI in a debug mode
#' that uses algorithm parameters tweaked to sacrifice accuracy for speed.
#'
#' @return A Shiny app object
#'
#' @examples
#' \dontrun{biclusterGUI()
#' biclusterGUI(yeast_benchmark[[1]])
#' biclusterGUI(BiclusterExperiment(yeast_benchmark[[1]]))
#' }
#'
#' @export
#' @name biclusterGUI
#' @import shiny shinythemes
#' @importFrom shinyjs useShinyjs click enable disable show hide html runjs
# S4 generic: dispatch on the class of `obj`. Concrete methods below handle
# BiclusterExperiment, ExpressionSet, missing, and ANY.
setGeneric("biclusterGUI", signature = "obj",
           function(obj = NULL, ...)
             standardGeneric("biclusterGUI"))

#' @describeIn biclusterGUI Default method
#' @importFrom DT DTOutput renderDT datatable
setMethod("biclusterGUI", c(obj = "BiclusterExperiment"),
          definition = function(obj, ...) {
            ## define UI parameters
            ## NOTE(review): plotHeight/plotHeightSmall/dbg/userBce appear
            ## unused here -- presumably read by ui.R/server.R through this
            ## enclosing environment when sourced with local = TRUE; confirm.
            plotHeight <- 600
            plotHeightSmall <- 300
            dbg <- list(...)$dbg
            if(is.null(dbg)) { dbg <- FALSE }
            # what server.R is expecting, if no good BiclusterExperiment is available
            if(all(is.na(as.matrix(obj)))) { obj <- NULL }
            userBce <- obj # server.R has access to this variable
            # Display infinity as "Inf" in json (without this, Inf values completely
            # missing in datatables)
            tojson_args.old <- getOption("DT.TOJSON_ARGS")
            options("DT.TOJSON_ARGS" = list(na = 'string'))
            # Restore the global DT option when the app exits.
            on.exit(expr = options("DT.TOJSON_ARGS" = tojson_args.old), add = TRUE)
            shinyApp(
              ui = source(system.file("shinyApp", "ui.R", package = "mfBiclust"),
                          local = TRUE)$value,
              server = source(system.file("shinyApp", "server.R", package = "mfBiclust"),
                              local = TRUE)$value,
              options = list(launch.browser = TRUE, fullstacktrace = TRUE)
            )
          })

#' @describeIn biclusterGUI Coerces \code{obj} to a
#'   \code{\link{BiclusterExperiment-class}} object and runs GUI
setMethod("biclusterGUI", c(obj = "ExpressionSet"),
          definition = function(obj, ...) {
            biclusterGUI(as(obj, "BiclusterExperiment"), ...)
          })

#' @describeIn biclusterGUI Runs GUI without pre-loading a dataset. Use the
#'   "Data" tab to load data.
setMethod("biclusterGUI", c(obj = "missing"), function(...) {
  # An all-NA placeholder matrix; the default method turns it into obj = NULL.
  biclusterGUI(obj = BiclusterExperiment(m = matrix()), ...)
})

#' @describeIn biclusterGUI Attempts to encapsulate \code{obj} in a
#'   \code{\link{BiclusterExperiment-class}} object and pass it to the GUI
setMethod("biclusterGUI", c(obj = "ANY"),
          definition = function(obj, ...) {
            biclusterGUI(BiclusterExperiment(obj), ...)
          })
bfca8643fb2288840473225d170f99e2467a148f | a8f863e504b9e61e2ed052d5b5fe55f957c4b47c | /R/KPf.R | 40a7b5965f9bc67b143490ef5292877d6601f48a | [] | no_license | petrkunes/LRA | 8235ecc0bbd5b2b4c9b0a5a9175ae460f29b2235 | 7aa7e5ca81000bbd5b75319b6998b2d63d222404 | refs/heads/master | 2022-12-23T00:04:46.457422 | 2022-12-14T22:07:54 | 2022-12-14T22:07:54 | 28,092,734 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,034 | r | KPf.R | #' Pollen dispersal-deposition coefficient
#'
#' Calculates the taxon-specific pollen dispersal-deposition coefficient required for \code{\link{REVEALS}} function.
#'
#' @param vg fall speed of pollen in m/s
#' @param u wind speed in m/s
#' @param Zmax maximum extent of the region in metres
#' @param model type of the deposition model for the type of sedimentation basin: "peatland" (Prentice, 1985), "lake" (Sugita, 1993)
#' @param dwm type of dispersal model used: "gpm neutral" (Prentice, 1985), "lsm unstable" - from \code{\link{DispersalFactorK}} function in \code{DISQOVER} package (Theuerkauf, et al. 2016)
#'
#'
#' @references Prentice, I.C. 1985. Pollen representation, source area, and basin size: Toward a unified theory of pollen analysis. Quaternary Research 23: 76–86.
#' @references Sugita, S. 1993. A Model of Pollen Source Area for an Entire Lake Surface. Quaternary Research 39: 239–244.
#' @references Sugita, S. 2007. Theory of quantitative reconstruction of vegetation I: pollen from large sites REVEALS regional vegetation composition. Holocene 17: 229–241.
#' @references Theuerkauf, M., Couwenberg, J., Kuparinen, A., & Liebscher, V. 2016. A matter of dispersal: REVEALSinR introduces state-of-the-art dispersal models to quantitative vegetation reconstruction. Vegetation History and Archaeobotany 25: 541–553.
#'
#'
#'
#' @export
KPf <- function(vg, u, Zmax, radius, model, dwm) {
b <- 75.2 * vg / u
#Prentice bog model
if (dwm == "gpm neutral") {
if (model == "peatland") {
KP <-
(exp(-1 * b * radius ^ 0.125) - exp(-1 * b * (Zmax) ^ 0.125))
}
if (model == "lake")
{
#Sugita lake model
xa <- b * (Zmax - radius) ^ (1 / 8)
xb <- b * (Zmax + radius) ^ (1 / 8)
xc <- b * (radius + radius) ^ (1 / 8)
KP <-
(4 * pi * radius / (b ^ 8)) * (Igamma(8, xa) - Igamma(8, xb) + Igamma(8, xc)) ## NEED TO CHECK!
}
}
if (dwm == "lsm unstable") {
KP <- DispersalFactorK(vg, model, radius, dwm, Zmax)
}
return(KP)
}
|
d8a496eb8de1dab22452c404461c40625ac45aab | 939b4981b396f7c62c98e1c00fe9fe2ff7e795b6 | /AdaBoost on dtrees.R | eba8b30aee29385cf7e51e61908ffa17cd0e5ee3 | [] | no_license | olologin/AdaBoost | a9c394ca9aae1b4af11ff3d6a867a2deed40203e | 5bf7697b822a826fa616b16c0859f92bc63c9b85 | refs/heads/master | 2021-01-21T02:37:38.551782 | 2015-06-16T16:27:02 | 2015-06-16T16:27:02 | 35,389,062 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,105 | r | AdaBoost on dtrees.R | library(kernlab)
library(caret)
weightedMode <- function(y, W) {
  # Weighted majority label of y: the label whose summed weight is larger.
  # Ties (and the single-label case) fall through to unique(y)[2], exactly
  # as in the original implementation.
  labels <- unique(y)
  first_weight <- sum(W[which(y == labels[1])])
  rest_weight <- sum(W[which(y != labels[1])])
  if (first_weight > rest_weight) labels[1] else labels[2]
}
entropy <- function(W, y) {
  # Weighted two-class Shannon entropy (in bits) of the label vector y,
  # where each observation contributes its weight from W.
  first_label <- unique(y)[1]
  p <- sum(W[which(y == first_label)]) / sum(W)
  q <- sum(W[which(y != first_label)]) / sum(W)
  bits <- 0
  # Guard the log against p or q being exactly zero (pure node).
  if (p > 0) bits <- bits + p * log2(p)
  if (q > 0) bits <- bits + q * log2(q)
  -bits
}
# Exhaustive best-split search: for every feature column and every midpoint
# between consecutive sorted values, compute the information gain of the
# weighted split; return the best column index, separator value, and gain.
gain <- function(X, W, y) {
  bestColNumber <- 1
  bestSep <- 1
  bestGain <- -100   # sentinel below any achievable gain
  # Entropy of the whole (weighted) node, before splitting.
  H <- entropy(W, y)
  for (colNumber in 1:ncol(X)) {
    column <- X[,colNumber]
    dataSize <- length(column)
    # Sort the column and keep the permutation so W and y can be re-indexed.
    sorted <- sort(column, index.return=TRUE)
    for (sepN in 1:(dataSize-1)) {
      #weightedSum <- (sepN/dataSize) * entropy(W[sorted$ix[1:sepN]], y[sorted$ix[1:sepN]]) +
      #((dataSize-sepN)/dataSize)*entropy(W[sorted$ix[(sepN+1):dataSize]], y[sorted$ix[(sepN+1):dataSize]])
      # When two consecutive feature values are equal we cannot separate
      # them, so skip this candidate split point.
      if(sorted$x[sepN]==sorted$x[sepN+1])
        next
      # Weight-proportional average of the entropies of both halves.
      weightedSum <- (sum(W[sorted$ix[1:sepN]])/sum(W)) * entropy(W[sorted$ix[1:sepN]], y[sorted$ix[1:sepN]]) +
        (sum(W[sorted$ix[(sepN+1):dataSize]])/sum(W))*entropy(W[sorted$ix[(sepN+1):dataSize]], y[sorted$ix[(sepN+1):dataSize]])
      gain <- H-weightedSum
      # Strict ">" keeps the first best split found on ties.
      if (gain > bestGain) {
        bestGain <- gain
        bestColNumber <- colNumber
        # Separate halfway between the two neighbouring values.
        bestSep <- sorted$x[sepN] + ((sorted$x[sepN+1] - sorted$x[sepN])/2)
      }
    }
  }
  list(colNumber=bestColNumber, sep=bestSep, gain=bestGain)
}
# Recursively build a binary decision tree of at most `depth` further levels
# on the weighted sample (X, W, y). Returns a nested list: leaves carry the
# weighted majority class; internal nodes carry the split column/separator
# plus the left (<= sep) and right (> sep) subtrees.
create_dtree <- function(X, W, y, depth) {
  X <- as.matrix(X)
  # Weighted majority class: the prediction should this node become a leaf.
  mValue <- weightedMode(y, W)
  if(length(unique(y))==1 || depth==0) {
    # Pure node or depth budget exhausted -> leaf.
    return (list(isLeaf=TRUE, class=mValue))
  } else {
    partition <- gain(X, W, y)
    leftIdx <- which(X[,partition$colNumber] <= partition$sep)
    rightIdx <- which(X[,partition$colNumber] > partition$sep)
    return (list(isLeaf=FALSE,
                 l=create_dtree(X[leftIdx, ], W[leftIdx], y[leftIdx], depth-1),
                 r=create_dtree(X[rightIdx, ], W[rightIdx], y[rightIdx], depth-1),
                 colNumber=partition$colNumber,
                 sep=partition$sep))
  }
}
# Classify a single observation by walking the tree from the root:
# descend right when the split feature exceeds the separator, else left.
classify_dtree_singleX <- function(dtree, singleX) {
  if(dtree$isLeaf) {
    return (dtree$class)
  }
  if(singleX[dtree$colNumber] > dtree$sep) {
    return (classify_dtree_singleX(dtree$r, singleX))
  } else {
    return (classify_dtree_singleX(dtree$l, singleX))
  }
}

# Classify every row of X with the given decision tree, returning one
# predicted class per row.  The original grew the result with c() inside a
# loop (quadratic copying); build it in one pass instead.  Zero-row input
# still yields NULL, matching the old behaviour.
classify_dtree <- function(dtree, X) {
  unlist(lapply(seq_len(nrow(X)), function(i) {
    classify_dtree_singleX(dtree, X[i, ])
  }))
}
# Loading and normalization of data
# Expects a headerless CSV with two feature columns and a 0/1 class label
# in column 3.
data1 = as.matrix(read.csv("non linear dataset.csv", header = FALSE))
#data1 = as.matrix(read.csv("ex2data1.txt", header = FALSE))
normalizeData <- function(X) {
  # Standardize every column of X to zero mean and unit standard deviation
  # (a plain z-score, computed column by column).
  for (j in seq_len(ncol(X))) {
    centered <- X[, j] - mean(X[, j])
    X[, j] <- centered / sd(centered)
  }
  X
}
#X <- normalizeData(data1[,-3])
X <- data1[,-3]
l <- nrow(X)
y <- data1[,3]
# AdaBoost expects labels in {-1, +1}; recode the 0 class.
y[which(y==0)]=-1
plot(X, pch=y+20)
T <- 20 # How much elementary algs to learn
# vector with objects weights
W <- rep(1/l, l)
# vector with classifiers weights
A <- rep(0, T)
# matrix where each n-th column contains weight of n-th classifier
B <- list()
# Main AdaBoost loop: fit a depth-4 tree on the weighted sample, compute its
# weighted error Eta, derive its vote weight A[t], then up-weight the
# misclassified objects before the next round.
for (t in 1:T) {
  dtree <- create_dtree(X[,], W, y, 4)
  B[[t]] <- dtree
  h <- classify_dtree(dtree, X)
  # Indices of objects the current tree misclassifies (opposite signs).
  idx <- which(h*y<0)
  #Eta <- sum(W*exp(-y*h))
  Eta <- sum(W[idx])
  # Stop early once the weak learner is no better than chance.
  if(Eta > 0.5){
    T <- t-1
    break
  }
  #A[t] <- (1/2)*log(1+((1-Eta)/Eta))
  A[t] <- (1/2)*log(((1-Eta)/Eta))
  #A[t] <- log(((1-Eta)/Eta))+log(1)
  #W <- W*(exp(A[t]*-y*h))#/(sqrt((1-Eta)/Eta))
  # Exponential re-weighting of the misclassified objects, then renormalize.
  W[idx] <- W[idx]*exp(-y[idx]*A[t]*h[idx])
  W <- W/sum(W)
  #print(W)
}
# Weighted vote of the ensemble: sign(s) is the final prediction.
s = rep(0, length(y))
for(t in 1:T) {
  s = s + (A[t]*classify_dtree(B[[t]], X))
}
# Mark misclassified points on the scatter plot.
points(X[which((s<0)!=(y<0)),], pch=35, col="red")
#A <- A/sum(A)
confusionMatrix(s<0, y<0)
000e9a9cc9af5f9ae281ba1226072674108bca67 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/quadmesh/examples/triangulate_quads.Rd.R | 0023529708e0a1baaa791530f2b338741de26b58 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 635 | r | triangulate_quads.Rd.R | library(quadmesh)
### Name: triangulate_quads
### Title: Triangles from quads
### Aliases: triangulate_quads
### ** Examples
triangulate_quads(cbind(c(1, 2, 4, 3), c(3, 4, 6, 5)))
qm <- quadmesh(raster::crop(etopo, raster::extent(140, 160, -50, -30)))
tri <- triangulate_quads(qm$ib)
plot(t(qm$vb))
tri_avg <- colMeans(matrix(qm$vb[3, tri], nrow = 3), na.rm = TRUE)
scl <- function(x) (x - min(x))/diff(range(x))
tri_col <- grey(seq(0, 1, length = 100))[scl(tri_avg) * 99 + 1]
## tri is qm$ib converted to triangles for the same vertex set
polygon(t(qm$vb)[rbind(tri, NA), ])
polygon(t(qm$vb)[rbind(tri, NA), ], col = tri_col)
|
2d593b27440e79dce75533927874222d57c440b8 | 5f40566424b73bdc2e4f663ef60b6668014eb614 | /scripts/PlotFig1.R | 13ed0815faac4ac0dfd02d94a3bc3bc8a1150ee7 | [] | no_license | zfuller5280/MutationSelection | 8a214efc9ba800f81385f72bad6ae428b7f851c6 | 847d659a71a0f8bd04bcd68fa26a18b0b99ad255 | refs/heads/master | 2023-07-31T13:23:15.136650 | 2022-07-07T18:26:48 | 2022-07-07T18:26:48 | 230,666,811 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,946 | r | PlotFig1.R | #Packages
library(tidyr)
library(ggplot2)
library("HistogramTools")
library("ggbeeswarm")
require(gridExtra)
library(dplyr)
library(MASS)
#X Chr Posteriors
avg_x_param_posts<-read.table("../data_files/avg_x.stats.6_1_22.tsv", header=TRUE)
#Autosome Posteriors
exp_posteriors<-read.table("../data_files/autosome_hs.summary_stats.3_30.tsv",header=T)
#Make plot of hs distribution for autosomes
legend.col <- function(col, lev){
opar <- par
n <- length(col)
bx <- par("usr")
box.cx <- c(bx[2] + (bx[2] - bx[1]) / 1000,
bx[2] + (bx[2] - bx[1]) / 1000 + (bx[2] - bx[1]) / 50)
box.cy <- c(bx[3], bx[3])
box.sy <- (bx[4] - bx[3]) / n
xx <- rep(box.cx, each = 2)
par(xpd = TRUE)
for(i in 1:n){
yy <- c(box.cy[1] + (box.sy * (i - 1)),
box.cy[1] + (box.sy * (i)),
box.cy[1] + (box.sy * (i)),
box.cy[1] + (box.sy * (i - 1)))
polygon(xx, yy, col = col[i], border = col[i])
}
par(new = TRUE)
plot(0, 0, type = "n",
ylim = c(min(lev), max(lev)),
yaxt = "n", ylab = "",
xaxt = "n", xlab = "",
frame.plot = FALSE)
axis(side = 4, las = 2, tick = FALSE, line = .25)
par <- opar
}
dev.off()
auto_P<-ecdf(exp_posteriors$log10_map)
exp_posteriors$ecdf<-auto_P(exp_posteriors$log10_map)
#Make plot for autosomes
plot(exp_posteriors$log10_map,exp_posteriors$ecdf,xlim=c(-7,0),cex=0.25,xlab="log10(hs)",ylab="Cumulative Probability",main="Autosomes",ylim=c(0,1))
abline(v=-2,lty=3,col="red")
rbPal <- colorRampPalette(c('blue','cyan'))
exp_posteriors$ci_width<-exp_posteriors$log10_ci_high - exp_posteriors$log10_ci_low
ci_levels<-seq(6,0,-0.1)
exp_posteriors$Col <- rbPal(length(ci_levels))[as.numeric(cut(exp_posteriors$ci_width,breaks=ci_levels))]
segments(exp_posteriors$log10_ci_low,exp_posteriors$ecdf,exp_posteriors$log10_ci_high,lwd=.125,col=exp_posteriors$Col)
legend.col(col = rbPal(length(ci_levels)), lev = ci_levels)
#Distribution of hs for X chromosome
x_P<-ecdf(avg_x_param_posts$avg_hs_log10_map)
avg_x_param_posts$ecdf<-x_P(avg_x_param_posts$avg_hs_log10_map)
min(avg_x_param_posts$ecdf)
avg_x_param_posts$ci_width<-avg_x_param_posts$avg_hs_log10_ci_high - avg_x_param_posts$avg_hs_log10_ci_low
rbPal <- colorRampPalette(c('blue','cyan'))
avg_x_param_posts$Col <- rbPal(length(ci_levels))[as.numeric(cut(avg_x_param_posts$ci_width,breaks=ci_levels))]
#Make plot for X
plot(avg_x_param_posts$avg_hs_log10_map,avg_x_param_posts$ecdf,xlim=c(-7,0),cex=0.5,xlab="log10(hs)",ylab="Cumulative Probability",main="X Chromosome",ylim=c(0,1))
abline(v=-2,lty=3,col="red")
segments(avg_x_param_posts$avg_hs_log10_ci_low,avg_x_param_posts$ecdf,avg_x_param_posts$avg_hs_log10_ci_high,lwd=.35,col=avg_x_param_posts$Col)
legend.col(col = rbPal(length(ci_levels)), lev = ci_levels)
#Distribution of hs for X chromosome
x_P<-ecdf(avg_x_param_posts$hs_log10_map)
avg_x_param_posts$ecdf<-x_P(avg_x_param_posts$hs_log10_map)
min(avg_x_param_posts$ecdf)
avg_x_param_posts$ci_width<-avg_x_param_posts$hs_log10_ci_high - avg_x_param_posts$hs_log10_ci_low
rbPal <- colorRampPalette(c('blue','cyan'))
avg_x_param_posts$Col <- rbPal(length(ci_levels))[as.numeric(cut(avg_x_param_posts$ci_width,breaks=ci_levels))]
#Make plot for X
plot(avg_x_param_posts$hs_log10_map,avg_x_param_posts$ecdf,xlim=c(-7,0),cex=0.5,xlab="log10(hs)",ylab="Cumulative Probability",main="X Chromosome",ylim=c(0,1))
abline(v=-2,lty=3,col="red")
segments(avg_x_param_posts$hs_log10_ci_low,avg_x_param_posts$ecdf,avg_x_param_posts$hs_log10_ci_high,lwd=.35,col=avg_x_param_posts$Col)
legend.col(col = rbPal(length(ci_levels)), lev = ci_levels)
#Prepare jitter plots for different hs in different compartments
x_posteriors_exp_plot<-avg_x_param_posts[!(avg_x_param_posts$Gene %in% par_gene_list),]
x_posteriors_exp_plot$strong_sel<-ifelse(x_posteriors_exp_plot$avg_hs_log10_ci_low>-2,"strong","other")
x_posteriors_exp_plot_total<-x_posteriors_exp_plot
x_posteriors_exp_plot_total$Combined_XCI_status<-"XChrom"
exp_posteriors$strong_sel<-ifelse(exp_posteriors$log10_ci_low>-2,"strong","other")
exp_posteriors$Combined_XCI_status<-"Autosomes"
sampled_subset_posteriors<-exp_posteriors[sample(nrow(exp_posteriors), 1000),]
plot_cols<-c("Gene","strong_sel","Combined_XCI_status","log10_map")
x_posteriors_exp_compare<-rbind(sampled_subset_posteriors[,plot_cols], x_posteriors_exp_compare[,plot_cols])
x_posteriors_exp_compare$Combined_XCI_status<-factor(x_posteriors_exp_compare$Combined_XCI_status, levels=c("Autosomes","XChrom","PAR"),ordered = T)
dev.off()
x_posteriors_exp_compare<-subset(x_posteriors_exp_compare, !(Combined_XCI_status=="PAR"))
#Make jitter plot and show
ggplot(x_posteriors_exp_compare, aes(x=Combined_XCI_status, y=log10_map)) +
geom_boxplot( outlier.shape=NA) +
geom_quasirandom(aes(color=strong_sel),shape=21,bandwidth=0.4,varwidth=TRUE,method="pseudorandom")+
xlab("Gene Status") + ylab("log10(hs) MAP") + scale_color_brewer(palette="Paired") + theme_bw()
|
30a03456116d62cf7f9fb71cce6eeedaab3f5ce8 | a56ff0b3c477baf984a5a74bf889d181961faebe | /R/R/harmonFit.R | 2cf718324eaf3916b1b08d5b4a0bd0cad396b849 | [] | no_license | CONABIO/satmo-analysis | 69a6df82f72a4f2429e3427a9a54417b53b44125 | a2db053d0e84684832d36f8ad12bf12ecfe39912 | refs/heads/master | 2021-01-01T16:04:29.480257 | 2017-07-19T23:18:45 | 2017-07-19T23:18:45 | 97,769,609 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 931 | r | harmonFit.R | library(lubridate)
# Function to produce a df with fitted model from a zoo time-series
harmonFit <- function(x, formula = response ~ trend + harmon, order = 3, fit = TRUE){
df <- data.frame(dates = index(x),
ddates = decimal_date(index(x)),
response = as.vector(x))
df$trend <- as.vector(df$dates - min(df$dates) + 1)
df$season <- yday(df$dates)
# Build harmonic regressors (matrix within data.frame)
harmon <- outer(2 * pi * as.vector(df$ddates), 1:order)
harmon <- cbind(apply(harmon, 2, cos), apply(harmon, 2, sin))
colnames(harmon) <- c(paste("cos", 1:order, sep = ""), paste("sin", 1:order, sep = ""))
# Add matrix of harmonic regressors to df
df$harmon <- harmon
if(fit) {
model <- lm(formula = formula, data = df)
df$prediction <- predict(model, df)
df$residuals <- df$response - df$prediction
}
return(df)
}
|
231de24113b4dc38621c2c8298df2041916dcaa8 | f37271e1da1592da73062c9252e04b1a9a61e5b9 | /R Projects/Billing/RMSModel.R | e95395a33c352bf9f4872fa4f65aaf5e5832141c | [] | no_license | veeraps/RCode | 417b560f3a42355b5e4f265ccc639b46d1b52640 | dde4a5680a9b92928f6771195da8a49f9e21b897 | refs/heads/master | 2021-08-08T09:13:35.761342 | 2017-11-10T02:50:33 | 2017-11-10T02:50:33 | 110,194,460 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 676 | r | RMSModel.R | setwd("C:/RMS Laptop/Billing/RMS")
rms = read.csv("rmsmodel.csv")
str(rms)
#rms$RDate = as.Date(rms$Date,"%d/%m/%y")
#rms$Date = NULL
rmstest = read.csv("rmsmodeltest.csv")
str(rmstest)
RMSModel = lm(Revenue ~ Team.Size, data = rms)
summary(RMSModel)
sse = sum(RMSModel$residuals^2)
sse
RMSModel2 = lm(Revenue ~ Team.Size + Long.Term.Onsite , data = rms)
summary(RMSModel2)
sse2 = sum(RMSModel2$residuals^2)
sse2
RMSModel3 = lm(Revenue ~ Team.Size + Long.Term.Onsite + Short.Term.Onsite + Offshore.Buffer + Onsite.Buffer, data = rms)
summary(RMSModel3)
sse3 = sum(RMSModel3$residuals^2)
sse3
pred = predict ( RMSModel3, newdata = rmstest )
pred
rmstest$Revenue
|
6e35387583d133d671cc79f9b63863767cd71ae3 | a33ed4e8a5014650a65d4d3619fe0a3c215ec096 | /3-visualVariance/main.R | bb7360efa96a0d82b6e67803aebf1d93afcaf253 | [] | no_license | resendedaniel/math | da1ac72dc7599aad1c2ea81885a7e3212ed8104c | be2b6beb0277103d09b8118fac565e3c28c9d36b | refs/heads/master | 2016-09-02T00:26:55.054910 | 2016-01-08T13:41:59 | 2016-01-08T13:41:59 | 29,709,216 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 740 | r | main.R | local_plot <- function(x, y) {
var1 <- sum((x - mean(x)) * (y - mean(y))) / length(x)
var2 <- mean(x * y) - mean(x)*mean(y)
slope <- (mean(x*y)-mean(x)*mean(y))/(mean(x^2)-mean(x)^2)
par(mfrow=c(1,1))
plot(x, y, xlim=c(0, 10), ylim=c(0, 10), main=paste("Variance =", var1), frame=F)
abline(h=mean(y), lty = 3)
abline(v=mean(x), lty = 3)
for(i in seq_along(x)) {
segments(x[i], y[i], x[i], mean(y), lty = 2)
segments(x[i], y[i], mean(x), y[i], lty = 2)
# segments(x[i], y[i], mean(x), mean(y))
}
abline(a=mean(y)-slope*mean(x), b=slope)
}
x <- sample(1:10, 5, replace=T)
y <- sample(1:10, 5, replace=T)
# x <- runif(5) * 10
# y <- runif(5) * 10
local_plot(x, y) |
b2f6d26b7a7b2a9dfe4f88ed4e6526ed8474c24c | ce858042340eb4b26be8bc627f0d0be2226270e7 | /rscript/mylist.r | 5fa7ca19cd2690a6738aa85342c011b037e24dce | [] | no_license | tbilab/Plink-Based-GWAS | 403169df50ea124929a4ee2d76254d16a600dad9 | 04702be59091668a126fe3f52edc3b8a113b1d37 | refs/heads/master | 2021-07-11T20:33:14.702973 | 2021-07-02T19:49:38 | 2021-07-02T19:49:38 | 160,553,756 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,264 | r | mylist.r | #read in phenotype data
#args=commandArgs(TRUE)
#input=args[1]
#load(paste(input,"GRID_DEMOGRAPHICS.Aug2017.RData",sep ="" ))
#selectedPatientsTable<-read.csv(paste(input,"20180514_PatientsIntensityCutoff.csv",sep = ""),header=T,as.is=T,row.names=1)
#sum(selectedPatientsTable[,6]==0)#78 trts, 7052 controls
#datafam=read.table(paste(input,"totaldataqc.fam",sep = ""),header = FALSE,sep = " ")
#phenotype data
#extract ids from datafam
#install.packages("stringr", dependencies = TRUE)
#library(stringr)
#substrRight <- function(x, n){
# substr(x, nchar(x)-n+1, nchar(x))
#}
#datafam[,2]=as.character(datafam[,2])
#datafam[,7]=substrRight(datafam[,2], 10)
#colnames(datafam)[7]="GRID_DEMOGRAPHICS"
#selectedPatientsTable[,7]=row.names(selectedPatientsTable)
#colnames(selectedPatientsTable)[7]="GRID_DEMOGRAPHICS"
#pheno=merge(datafam,selectedPatientsTable,by="GRID_DEMOGRAPHICS")
#pheno=merge(pheno,demographics,by="GRID_DEMOGRAPHICS")
#pheno=pheno[,c(2:3,13,17:19)]
#colnames(pheno)=c("FID","IID","OUTCOME","AGE","SEX","RACE")
#prepare the extract ids mylist.txt
#mylist=data.frame(matrix(NA,nrow(pheno),2))
#colnames(mylist)=c("FID","IID")
#mylist$FID=pheno$FID
#mylist$IID=pheno$IID
#write.table(mylist,file = paste(input,"mylist.txt",sep = ""),row.names=FALSE,col.names=FALSE,quote = FALSE,sep = " ")
#read in phenotype file
args=commandArgs(TRUE)
input=args[1]
rawdatadir=args[2]
load(paste(input,"GRID_DEMOGRAPHICS.Aug2017.RData",sep ="" ))
selectedPatientsTable<-read.csv(paste(input,"20180514_PatientsIntensityCutoff.csv",sep = ""),header=T,as.is=T,row.names=1)
datafam=read.table(paste(rawdatadir,"totaldatanodup.fam",sep = ""),header = FALSE,sep = " ")
selectedPatientsTable[,7]=row.names(selectedPatientsTable)
colnames(selectedPatientsTable)[7]="GRID_DEMOGRAPHICS"
colnames(datafam)[1]="GRID_DEMOGRAPHICS"
pheno=merge(datafam,selectedPatientsTable,by="GRID_DEMOGRAPHICS",sort = FALSE)
pheno=merge(pheno,demographics,by="GRID_DEMOGRAPHICS")
pheno=pheno[which(pheno$SEX!="UNK"),]
#prepare the extract ids mylist.txt
mylist=data.frame(matrix(NA,nrow(pheno),2))
colnames(mylist)=c("FID","IID")
mylist$FID=pheno[,1]
mylist$IID=pheno[,2]
write.table(mylist,file = paste(input,"mylist.txt",sep = ""),row.names=FALSE,col.names=FALSE,quote = FALSE,sep = " ")
|
77f83dea3e8a45ce64f44884159a2c459d0dbb9e | 29585dff702209dd446c0ab52ceea046c58e384e | /ScreenClean/demo/demoGS.R | d3dcb61b492c92f6a9271e3c5a4b31c3cc6672e5 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,032 | r | demoGS.R |
pause <- function() {
cat("Press ENTER/RETURN/NEWLINE to continue.")
readLines(n=1)
invisible()
}
######################################################
#
# This is a fixed design simulation example for the GS/UPS.
#
######################################################
### set up the parameters of the simulation example
p <- 1000
n <- p
### The parameters we used for simulation
v <- 0.4
r <- 3
### The true minimal signal strength
tau <- sqrt(2*r*log(p))
### The true sparsity level
sp <- p^(1-v)
pause()
### use the square root of Omega as the predictor matrix
### where Omega is a tri-diagonal matrix with off-diagonal element rho
rho <- 0.45
ii <- c(1:p,1:(p-1),2:p)
jj <- c(1:p,2:p,1:(p-1))
xx<-c(rep(1,p),rho*rep(1,2*(p-1)))
Omega <- sparseMatrix(ii,jj,x = xx)
eigenOmega <- eigen(Omega)
OmegaVec <- eigenOmega$vectors
OmegaVal <- eigenOmega$values
OmegaRoot <- OmegaVec %*% diag(sqrt(OmegaVal)) %*% t(OmegaVec)
X <- OmegaRoot
pause()
### generate the signals as in Jin and et al (2012)
uun <- rnorm(p)^2 * (runif(p)<0.2)/6
taubeta <- (1+uun)*tau # /diag.scale
signbeta <- sample(c(-1,1),p,replace=T)
supportbeta <- (runif(p)<p^(-v))
signbeta<-supportbeta*signbeta
beta <- signbeta*taubeta
### number of true signals
sum(supportbeta)
pause()
### generate the response variable
noise <- rnorm(n)
Y <- X %*% beta + noise
pause()
### threshold the gram matrix
gram<-t(X)%*%X
delta <- 0.01/log(p)
gram.gram<-ThresholdGram(gram,delta)
gram<-gram.gram$gram
gram.bias<-gram.gram$gram.bias
pause()
### search for all the connected subgraphs with no more than nm nodes
nm <- 3
neighbor <- (gram!=0)
cg.all <- FindAllCG(neighbor,nm)
pause()
### GS for fixed design
y.tilde<-t(X)%*%Y
### The essential tuning parameters of the screening step (v, r)
### are tied to the sparsity level and the minimal signal strength
### sp = p^(1-v) and tau = sqrt(2*r*log(p)).
### Here we use the (v, r) in the generative model from which
### we generate the data. This is the oracle case.
survivor <- ScreeningStep(y.tilde, gram, cg.all, nm, v, r)
### lambda and uu are the tuning parameters of the cleaning step,
### which are simple functions of the essential tuning parameters (v, r).
lambda <- sqrt(2*v*log(p))
uu <- tau
estimate.gs <- CleaningStep(survivor, y.tilde, gram, lambda, uu)
ham.gs <- sum(signbeta != sign(estimate.gs))
### the estimated sparse level
sum(estimate.gs!=0)
### hamming error of the graphlet screening
ham.gs
pause()
### when nm = 1 in the screening step, it is identical to that of UPS.
survivor <- ScreeningStep(y.tilde, gram, cg.all, 1, v, r)
estimate.ups <- CleaningStep(survivor, y.tilde, gram, lambda, uu)
ham.ups <- sum(signbeta != sign(estimate.ups))
### the estimated sparse level of UPS
sum(estimate.ups!=0)
### hamming error of UPS
ham.ups
pause()
### GS for fixed design, perform GS with perturbed parameters.
### Now, instead of using the true parameters (v, r) in the generative model
### from which we simulate the data, we use perturbated (v, r) to tune the
### graphlet screening. This example shows the robustness of graphlet screening
### against the moderate estimation error in tuning paramter estimation.
### In this example, the perturbed parameters (vp, rp) are obtained
### by adding/substracting 10% to/from the real parameters (v, r).
vp <- v*(1+0.1*sample(c(-1,1),1))
rp <- r*(1+0.1*sample(c(-1,1),1))
vp
rp
survivor.perturb <- ScreeningStep(y.tilde, gram, cg.all, nm, vp, rp)
### The tuning parameters of the cleaning step are tied to (vp, rp)
lambdap <- sqrt(2*vp*log(p))
uup <- sqrt(2*rp*log(p))
estimate.gs.perturb <- CleaningStep(survivor.perturb, y.tilde, gram, lambdap, uup)
ham.gs.perturb <- sum(signbeta != sign(estimate.gs.perturb))
### the estimated sparse level
sum(estimate.gs.perturb!=0)
### hamming error of the graphlet screening with perturbed parameters.
ham.gs.perturb
|
cd6827a225dcb8364664a0be456780dcdc2cc6a7 | 85c442f5a2c0eb6df21bfc526c26f68b294ddeb6 | /ui.R | 7afdb4207311a975c2f0077f20edb45c7e7f4f52 | [] | no_license | clancya3/DDPWeek4 | b8bed3d8b43c0668e7943327706379f231887a57 | 4cd3d17c21fd24232673acd0f3128cd036712226 | refs/heads/master | 2020-04-03T04:44:19.452431 | 2018-10-28T01:14:10 | 2018-10-28T01:14:10 | 155,022,454 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 821 | r | ui.R | #Slope/Int
library(shiny)
shinyUI(fluidPage(
#Title
titlePanel("Finding Least Squares Regression Line"),
#Side panel
sidebarLayout(
sidebarPanel(
sliderInput("b1",
"Slope Estimate",
min = -10,
max = 0,
value = -5,
step = .1),
sliderInput("b0",
"Intercept Estimate",
min = 0,
max = 50,
value = 25,
step = 1),
checkboxInput("show_bestfitline", "Show Line of Best Fit", value = FALSE)
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("regplot"),
h4("Root Mean Square Error"),
textOutput("RMSE")
)
)
))
|
47f453dc4ee6c9062932089a152ddd9fca057ee8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ExpDes/examples/lsd.Rd.R | 41afe43a43394cc2463e2e2cf3a7a48c6aaaf6f7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 212 | r | lsd.Rd.R | library(ExpDes)
### Name: lsd
### Title: Multiple comparison: Least Significant Difference test
### Aliases: lsd
### ** Examples
data(ex1)
attach(ex1)
crd(trat, ig, quali = TRUE, mcomp='lsd', sigT = 0.05)
|
01c24961370729f812f590bd0d4ab1419e90a3f3 | 8fae4c31677a2a2b7b0b2aebe75e3113b8dec8d3 | /man/crypto_prices.Rd | d39d53f275d472fd7006cfa1d3cb6a3cc7205143 | [
"MIT"
] | permissive | alejoelpaisa/tsviz | c2c2e8ce6d6f921cb0ca7448dc256e2ea7454d8f | 040deb3846da10a4a595c7e75ada8f9b0136b76a | refs/heads/master | 2023-05-03T05:04:19.284383 | 2020-10-19T21:43:13 | 2020-10-19T21:43:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 643 | rd | crypto_prices.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crypto_prices.R
\docType{data}
\name{crypto_prices}
\alias{crypto_prices}
\title{Prices of 3 crypto currencies}
\format{A data frame with 1174 rows and 4 variables:
\itemize{
\item \emph{Date}: date when the price was recorded
\item \emph{LTC}: closing price of Litecoin
\item \emph{BTC}: closing price of Bitconin
\item \emph{EHT}: closing price of Ethereum
}}
\usage{
crypto_prices
}
\description{
A dataset closing prices for Litecoin, Bitcoin and Ethereum
on 1174 days, between 2016-04-01 and 2019-07-01.
Prices are recorded in US dollars.
}
\keyword{datasets}
|
7a17c6d90fc90bc13eb1abc9708e16dd43874f17 | 8c8b764940890caed8a6edd1c4496b4e69387c3c | /server.R | e8d2f824d6742ba213f8542fa7ccd2b4b766779d | [] | no_license | k-alari/bland-altman | 5664382e49a0e18c9c800cf1eefbb49adac52be8 | 346467b23a41f501c4d4005657df5634c313c9d2 | refs/heads/main | 2023-06-03T10:13:34.734525 | 2021-06-21T17:28:30 | 2021-06-21T17:28:30 | 379,006,389 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 33,467 | r | server.R | #
# This R Shiny applet was designed as a companion to the following publication:
#
# https://doi.org/10.1080/1091367X.2020.1853130
#
#
# Paper Abstract: There are two schools of thought in statistical analysis, frequentist,
# and Bayesian. Though the two approaches produce similar estimations and predictions in
# large-sample studies, their interpretations are different. Bland Altman analysis is a
# statistical method that is widely used for comparing two methods of measurement. It
# was originally proposed under a frequentist framework, and it has not been used under
# a Bayesian framework despite the growing popularity of Bayesian analysis. It seems
# that the mathematical and computational complexity narrows access to Bayesian Bland
# Altman analysis. In this article, we provide a tutorial of Bayesian Bland Altman
# analysis. One approach we suggest is to address the objective of Bland Altman
# analysis via the posterior predictive distribution. We can estimate the probability
# of an acceptable degree of disagreement (fixed a priori) for the difference between
# two future measurements. To ease mathematical and computational complexity, an
# interface applet is provided with a guideline.
library(shiny)
library(DT)
library(metRology)
shinyServer(function(input, output) {
output$distPlot <- renderPlot({
# Input values - regardless of prior specification method
delta=input$delta
n.samp=input$n.samp
model.check=input$model.check
#Convert inputted data into numeric vector
data.receive=input$data.receive
data.receive = as.character(data.receive) #Make sure it is a character
temp = gsub( " ", "", data.receive ) #Remove spaces
d = as.numeric( unlist( strsplit( data.receive, split="," ) ) ) #Split by comma and list as numeric vector
n = length(d)
dbar = mean(d)
v = sum((d-dbar)^2) / n
# Function to obtain a0, b0, mu0, and lambda0 - For Normal
parameters = function(sigma.hat, u.sigma, l.mu, u.mu) {
# Distribution on sigma
f = function(a0, b0, sigma){
ifelse( exp(-b0*(1/sigma^2)) == 0, 0,
((b0^(a0+0.5))/(gamma(a0 + 0.5))) * ((1/(sigma^2)) ^ (a0 + 0.5-1)) * exp(-b0*(1/sigma^2)) * abs(-2/(sigma^3))
)
}
# Estimate integral using Riemann sums
n = 1000
delta.x = u.sigma / n
epsilon = 0.000001
#Starting points
delta = 0
gamma = 101
#To get right endpoints
right=rep(NA, n)
right[1] = delta.x
for(i in 2:n){
right[i] = right[i-1]+delta.x
}
# Storage to evaluate f at right endpoints
f.sigma = rep(NA, length(right))
# To obtain values for a0 and b0
for (K in 1:n) {
a0 = (delta[K] + gamma[K])/2
b0 = (a0 + 1) * (sigma.hat ^ 2)
f.sigma = f(a0=a0, b0=b0, sigma = right)
# If probability is too big (> 0.95)
if(sum(f.sigma*delta.x, na.rm = TRUE) > 0.95){
delta[K+1] = delta[K]
gamma[K+1] = a0
}
# If probability is too small (< 0.95)
if(sum(f.sigma*delta.x, na.rm = TRUE) < 0.95){
delta[K+1] = a0
gamma[K+1] = gamma[K]
}
if(gamma[K] - delta[K] < epsilon) break
}
# To get value for mu0
mu0 = (u.mu + l.mu)/2
# Now to get lambda0
n = 100000
delta=0
gamma=100
for(K in 1:n){
lambda0 = (delta[K] + gamma[K])/2
p = pt.scaled(u.mu, 2*a0, mu0, sqrt(b0/(a0*lambda0)), mu0) - pt.scaled(l.mu, 2*a0, mu0, sqrt(b0/(a0*lambda0)), mu0)
if(p > 0.95){
delta[K+1] = delta[K]
gamma[K+1] = lambda0 }
if(p < 0.95){
delta[K+1] = lambda0
gamma[K+1] = gamma[K]
}
if((gamma[K] - delta[K]) < epsilon) break
}
values = list( a0, b0, mu0, lambda0 )
names(values) = c( "a0", "b0", "mu0", "lambda0" )
values
}
# Function that performs analysis and produces graphs
# Norma-Gamma Prior
BA.Bayesian = function( d, delta, a0, b0, mu0, lambda0, n.samp=10000, model.check="prop.agree" ) {
n = length(d)
dbar = mean(d)
v = sum( (d-dbar)^2 ) / n
lambda1 = lambda0 + n
mu1 = ( lambda0*mu0+n*dbar ) / lambda1
a1 = a0 + n/2
b1 = b0 + n/2 * ( v+lambda0*(dbar-mu0)^2/lambda1 )
mu.samp = tau.samp = rep( NA, n.samp )
mu.samp[1] = dbar
tau.samp[1] = 1/v
set.seed(123) ### set seed for consistent result
for ( i in 2:n.samp ) {
mu.samp[i] = rnorm( 1, mu1, 1/sqrt(lambda1*tau.samp[i-1]) )
tau.samp[i] = rgamma( 1, a1+0.5, b1+0.5*lambda1*(mu.samp[i]-mu1)^2 ) }
sigma.samp = 1/sqrt(tau.samp)
theta1.samp = mu.samp-1.96*sigma.samp
theta2.samp = mu.samp+1.96*sigma.samp
d.samp = rnorm( n.samp, mu.samp, sigma.samp )
##### HIGHEST DENSITY INTERVAL
p.grid = round( seq( 0.01, 0.99, 0.01 ), 2 )
n.grid = 0.05 * length(p.grid)
mu.Q = quantile( mu.samp, p.grid )
sigma.Q = quantile( sigma.samp, p.grid )
theta1.Q = quantile( theta1.samp, p.grid )
theta2.Q = quantile( theta2.samp, p.grid )
diff.Q = quantile( d.samp, p.grid )
rslt.mu.Q = rslt.sigma.Q = rslt.theta1.Q = rslt.theta2.Q = rslt.diff.Q = matrix( NA, n.grid, 3 )
for ( i in 1:n.grid ) {
index1 = which( p.grid == p.grid[i] ); index2 = which( p.grid == p.grid[i] + 0.95 )
rslt.mu.Q[i,] = c( mu.Q[index1], mu.Q[index2], mu.Q[index2] - mu.Q[index1] )
rslt.sigma.Q[i,] = c( sigma.Q[index1], sigma.Q[index2], sigma.Q[index2] - sigma.Q[index1] )
rslt.theta1.Q[i,] = c( theta1.Q[index1], theta1.Q[index2], theta1.Q[index2] - theta1.Q[index1] )
rslt.theta2.Q[i,] = c( theta2.Q[index1], theta2.Q[index2], theta2.Q[index2] - theta2.Q[index1] )
rslt.diff.Q[i,] = c( diff.Q[index1], diff.Q[index2], diff.Q[index2] - diff.Q[index1] )
}
mu.HDI = rslt.mu.Q[ which.min( rslt.mu.Q[,3] ), 1:2 ]
sigma.HDI = rslt.sigma.Q[ which.min( rslt.sigma.Q[,3] ), 1:2 ]
theta1.HDI = rslt.theta1.Q[ which.min( rslt.theta1.Q[,3] ), 1:2 ]
theta2.HDI = rslt.theta2.Q[ which.min( rslt.theta2.Q[,3] ), 1:2 ]
diff.HDI = rslt.diff.Q[ which.min( rslt.diff.Q[,3] ), 1:2 ]
p = c( 0.025, 0.05, 0.25, 0.5, 0.75, 0.95, 0.975 )
mu.rslt = c( round(mean(mu.samp), 3), round(quantile( mu.samp, prob=p ),3), paste("(", round(mu.HDI[1],3), ", ", round(mu.HDI[2], 3), ")", sep="" ) )
sigma.rslt = c( round(mean(sigma.samp),3), round(quantile( sigma.samp, prob=p ),3), paste("(", round(sigma.HDI[1],3), ", ", round(sigma.HDI[2],3), ")", sep="" ) )
theta1.rslt = c( round(mean(theta1.samp),3), round(quantile( theta1.samp, prob=p ),3), paste("(", round(theta1.HDI[1],3), ", ", round(theta1.HDI[2],3), ")", sep="" ) )
theta2.rslt = c( round(mean(theta2.samp),3), round(quantile( theta2.samp, prob=p ),3), paste("(", round(theta2.HDI[1],3), ", ", round(theta2.HDI[2],3), ")", sep="" ) )
d.rslt = c( round(mean(d.samp),3), round(quantile( d.samp, prob=p ), 3), paste("(", round(diff.HDI[1], 3), ", ", round(diff.HDI[2], 3), ")", sep="") )
post = rbind( mu.rslt, sigma.rslt, theta1.rslt, theta2.rslt, d.rslt )
rownames(post) = c( "mu", "sigma", "theta1", "theta2", "difference" )
colnames(post) = c( "mean", "2.5%", "5%", "25%", "50%", "75%", "95%", "97.5%", "95% HDI")
post.h1 = mean( theta1.samp >= -delta & theta2.samp <= delta )
post.pred.agree = mean( abs(d.samp) <= delta )
### added: model checking
stat.new = stat.new2 = rep( NA, n.samp )
stat.obs = mean( ( d - mean(d) ) ^ 3 ) / var(d) ^ (3/2)
stat.obs2 = mean( abs(d) < delta )
for ( i in 1:n.samp ) {
d.new = sample( d.samp, size=n, replace=TRUE )
stat.new[i] = mean( ( d.new - mean(d.new) ) ^ 3 ) / var(d.new) ^ (3/2)
stat.new2[i] = mean( abs(d.new) < delta )
}
if ( input$model.check == "skewness" ) ppp = mean( stat.new > stat.obs )
if ( input$model.check == "prop.agree" ) ppp = mean( stat.new2 > stat.obs2 )
par( mfrow=c(2,3) )
hist( mu.samp, xlab=expression(mu), col="red", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ mu), cex=0.9 )
axis(1)
hist( sigma.samp, xlab=expression(sigma), col="orange", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ sigma), cex=0.9 )
axis(1)
plot( mu.samp, sigma.samp, xlab=expression(mu), ylab=expression(sigma), main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution"), cex=0.9 )
mtext( side=3, line=0, adj=0.1, text=bquote("of" ~ mu ~ "and" ~ sigma), cex=0.9 )
axis(1); axis(2)
hist( theta1.samp, xlab=expression(theta[1]), col="darkgreen", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ theta[1]), cex=0.9 )
mtext( side=3, line=0.5, adj=0.1, text=bquote("(lower bound)" ), cex=0.9 )
axis(1)
hist( theta2.samp, xlab=expression(theta[2]), col="blue", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ theta[2]), cex=0.9 )
mtext( side=3, line=0.5, adj=0.1, text=bquote("(upper bound)"), cex=0.9 )
axis(1)
hist( d.samp, xlab=expression(tilde(D)), col="purple", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Predictive"), cex=0.9 )
mtext( side=3, line=0, adj=0.1, text=bquote("Distribution of" ~ tilde(D)), cex=0.9 )
axis(1)
out = list( post, post.h1, post.pred.agree, ppp )
names(out) = c("post","post.h1","post.pred.agree","ppp" )
out
}
### Independent Normal-Gamma Prior: calculating (a0, b0, mu0, lambda0) given (sigma.hat, u.sigma, l.mu, u.mu)
# Elicit the hyperparameters of the *independent* normal-gamma prior from
# interpretable user inputs:
#   sigma.hat  prior point estimate (mode) for sigma
#   u.sigma    value believed to bound sigma from above with probability 0.95
#   l.mu, u.mu interval believed to contain mu with probability 0.95
# a0/b0 are found by bisection so that P(sigma <= u.sigma) = 0.95 under the
# implied prior density of sigma (integrated numerically by Riemann sums);
# mu0 is the midpoint of (l.mu, u.mu); lambda0 is found by bisection so that
# P(mu <= u.mu) = 0.95 with mu ~ N(mu0, 1/lambda0).
# Returns a named list with elements a0, b0, mu0, lambda0.
parameters2 = function( sigma.hat, u.sigma, l.mu, u.mu ) {
# Distribution on sigma: density of sigma implied by tau = 1/sigma^2 ~ Gamma(a0, b0),
# via change of variables (the abs(-2/sigma^3) factor is the Jacobian).
# The ifelse guards against underflow: when exp(-b0/sigma^2) underflows to 0
# the whole density is taken to be 0 rather than NaN.
f = function(a0, b0, sigma){
ifelse( exp(-b0*(1/sigma^2)) == 0, 0,
((b0^(a0+0.5))/(gamma(a0 + 0.5))) * ((1/(sigma^2)) ^ (a0 + 0.5-1)) * exp(-b0*(1/sigma^2)) * abs(-2/(sigma^3))
)
}
# Estimate integral using Riemann sums over (0, u.sigma] with n right endpoints
n = 1000
delta.x = u.sigma / n
epsilon = 0.000001
#Starting points (bisection brackets for a0)
# NOTE(review): the local vectors `delta` and `gamma` shadow base::gamma() as
# variables; gamma(a0 + 0.5) inside f() still resolves to the base function
# because R skips non-function bindings when looking up a name in call position.
delta = 0
gamma = 101
#To get right endpoints
right=rep(NA, n)
right[1] = delta.x
for(i in 2:n){
right[i] = right[i-1]+delta.x
}
# Storage to evaluate f at right endpoints
f.sigma = rep(NA, length(right))
# To obtain values for a0 and b0: bisection on a0, with b0 tied to a0 so that
# the prior mode of sigma stays at sigma.hat (b0 = (a0 + 1) * sigma.hat^2).
for (K in 1:n) {
a0 = (delta[K] + gamma[K])/2
b0 = (a0 + 1) * (sigma.hat ^ 2)
f.sigma = f(a0=a0, b0=b0, sigma = right)
# If probability is too big (> 0.95)
if(sum(f.sigma*delta.x, na.rm = TRUE) > 0.95){
delta[K+1] = delta[K]
gamma[K+1] = a0
}
# If probability is too small (< 0.95)
if(sum(f.sigma*delta.x, na.rm = TRUE) < 0.95){
delta[K+1] = a0
gamma[K+1] = gamma[K]
}
# NOTE(review): if the integral equals exactly 0.95 neither branch above runs
# and the next iteration would index an unset (NA) bracket -- unlikely edge case.
if(gamma[K] - delta[K] < epsilon) break
}
# To get value for mu0: midpoint of the elicited interval for mu
mu0 = (u.mu + l.mu)/2
# Now to get lambda0: bisection on the prior precision of mu so that
# P(mu <= u.mu) = 0.95 under N(mu0, 1/lambda0)
n = 100000
delta=0
gamma=1000
for(K in 1:n){
lambda0 = (delta[K] + gamma[K])/2
p = pnorm( u.mu, mu0, sqrt(1/lambda0) ) ### this is the modification for independent NG
if(p > 0.95){
delta[K+1] = delta[K]
gamma[K+1] = lambda0 }
if(p < 0.95){
delta[K+1] = lambda0
gamma[K+1] = gamma[K]
}
if((gamma[K] - delta[K]) < epsilon) break
}
values = list( a0, b0, mu0, lambda0 )
names(values) = c( "a0", "b0", "mu0", "lambda0" )
values
}
# Independent Normal Gamma Prior Posterior Analysis
#
# Gibbs-type sampler for the Bland-Altman difference model d_i ~ N(mu, 1/tau)
# under an *independent* normal-gamma prior: mu ~ N(mu0, 1/lambda0) and
# tau ~ Gamma(a0, b0). mu is drawn from a discretized conditional on a grid;
# tau is drawn from its gamma full conditional.
# Arguments:
#   d            vector of within-subject differences between the two methods
#   delta        clinically acceptable maximum difference (agreement margin)
#   a0, b0       shape/rate of the gamma prior on the precision tau
#   mu0, lambda0 mean/precision of the normal prior on mu
#   n.samp       number of posterior draws
#   model.check  posterior-predictive check statistic ("prop.agree" or "skewness")
# Returns a list: post (posterior summary table), post.h1 (posterior
# probability that both limits of agreement lie within +/- delta),
# post.pred.agree (posterior predictive probability of agreement), ppp
# (posterior predictive p-value).
# NOTE(review): side effect -- draws a 2x3 panel of plots via par(mfrow=c(2,3))
# without restoring the previous par settings.
BA.Bayesian2 = function( d, delta, a0, b0, mu0, lambda0, n.samp=10000, model.check="prop.agree" ) {
n = length(d)
dbar = mean(d)
# v is the (biased, divide-by-n) sample variance of the differences
v = sum( (d-dbar)^2 ) / n
a1 = a0 + n/2
se = sqrt(v/n)
# Grid of candidate mu values (+/- 5 standard errors around the sample mean)
mu.grid = seq( dbar-5*se, dbar+5*se, 10*se/1000 )
mu.samp = tau.samp = rep( NA, n.samp )
mu.samp[1] = dbar
b1 = b0 + 0.5 * ( lambda0 * ( mu.samp[1] - mu0 )^2 + n * v + n * ( dbar - mu.samp[1] )^2 )
tau.samp[1] = rgamma( 1, a1, b1 )
# NOTE(review): the seed is set *after* the first rgamma() draw above, so the
# initial tau draw is not reproducible across sessions -- presumably unintended.
set.seed(123) ### set seed for consistent result
for ( i in 2:n.samp ) {
# Conditional for mu evaluated on the grid (up to a constant), then sampled
b1.grid = b0 + 0.5 * ( lambda0 * ( mu.grid - mu0 )^2 + n * v + n * ( dbar - mu.grid )^2 )
f.grid = exp( -tau.samp[i-1] * b1.grid )
mu.samp[i] = sample( mu.grid, prob=f.grid, size=1 )
# Gamma full conditional for tau given the new mu
b1 = b0 + 0.5 * ( lambda0 * ( mu.samp[i] - mu0 )^2 + n * v + n * ( dbar - mu.samp[i] )^2 )
tau.samp[i] = rgamma( 1, a1, b1 ) }
sigma.samp = 1/sqrt(tau.samp)
# 95% limits of agreement implied by each posterior draw
theta1.samp = mu.samp-1.96*sigma.samp
theta2.samp = mu.samp+1.96*sigma.samp
# Posterior predictive draws of a future difference
d.samp = rnorm( n.samp, mu.samp, sigma.samp )
##### HIGHEST DENSITY INTERVAL
# Scan every 95%-wide quantile window [p, p + 0.95] and keep the narrowest
# one as the (approximate) 95% HDI for each quantity.
p.grid = round( seq( 0.01, 0.99, 0.01 ), 2 )
n.grid = 0.05 * length(p.grid)
mu.Q = quantile( mu.samp, p.grid )
sigma.Q = quantile( sigma.samp, p.grid )
theta1.Q = quantile( theta1.samp, p.grid )
theta2.Q = quantile( theta2.samp, p.grid )
diff.Q = quantile( d.samp, p.grid )
rslt.mu.Q = rslt.sigma.Q = rslt.theta1.Q = rslt.theta2.Q = rslt.diff.Q = matrix( NA, n.grid, 3 )
for ( i in 1:n.grid ) {
index1 = which( p.grid == p.grid[i] ); index2 = which( p.grid == p.grid[i] + 0.95 )
rslt.mu.Q[i,] = c( mu.Q[index1], mu.Q[index2], mu.Q[index2] - mu.Q[index1] )
rslt.sigma.Q[i,] = c( sigma.Q[index1], sigma.Q[index2], sigma.Q[index2] - sigma.Q[index1] )
rslt.theta1.Q[i,] = c( theta1.Q[index1], theta1.Q[index2], theta1.Q[index2] - theta1.Q[index1] )
rslt.theta2.Q[i,] = c( theta2.Q[index1], theta2.Q[index2], theta2.Q[index2] - theta2.Q[index1] )
rslt.diff.Q[i,] = c( diff.Q[index1], diff.Q[index2], diff.Q[index2] - diff.Q[index1] )
}
mu.HDI = rslt.mu.Q[ which.min( rslt.mu.Q[,3] ), 1:2 ]
sigma.HDI = rslt.sigma.Q[ which.min( rslt.sigma.Q[,3] ), 1:2 ]
theta1.HDI = rslt.theta1.Q[ which.min( rslt.theta1.Q[,3] ), 1:2 ]
theta2.HDI = rslt.theta2.Q[ which.min( rslt.theta2.Q[,3] ), 1:2 ]
diff.HDI = rslt.diff.Q[ which.min( rslt.diff.Q[,3] ), 1:2 ]
# Posterior summary table: mean, selected quantiles, and 95% HDI per quantity
p = c( 0.025, 0.05, 0.25, 0.5, 0.75, 0.95, 0.975 )
mu.rslt = c( round(mean(mu.samp), 3), round(quantile( mu.samp, prob=p ),3), paste("(", round(mu.HDI[1],3), ", ", round(mu.HDI[2], 3), ")", sep="" ) )
sigma.rslt = c( round(mean(sigma.samp),3), round(quantile( sigma.samp, prob=p ),3), paste("(", round(sigma.HDI[1],3), ", ", round(sigma.HDI[2],3), ")", sep="" ) )
theta1.rslt = c( round(mean(theta1.samp),3), round(quantile( theta1.samp, prob=p ),3), paste("(", round(theta1.HDI[1],3), ", ", round(theta1.HDI[2],3), ")", sep="" ) )
theta2.rslt = c( round(mean(theta2.samp),3), round(quantile( theta2.samp, prob=p ),3), paste("(", round(theta2.HDI[1],3), ", ", round(theta2.HDI[2],3), ")", sep="" ) )
d.rslt = c( round(mean(d.samp),3), round(quantile( d.samp, prob=p ), 3), paste("(", round(diff.HDI[1], 3), ", ", round(diff.HDI[2], 3), ")", sep="") )
post = rbind( mu.rslt, sigma.rslt, theta1.rslt, theta2.rslt, d.rslt )
rownames(post) = c( "mu", "sigma", "theta1", "theta2", "difference" )
colnames(post) = c( "mean", "2.5%", "5%", "25%", "50%", "75%", "95%", "97.5%", "95% HDI")
post.h1 = mean( theta1.samp >= -delta & theta2.samp <= delta )
post.pred.agree = mean( abs(d.samp) <= delta )
### added: model checking via posterior predictive replicates
stat.new = stat.new2 = rep( NA, n.samp )
stat.obs = mean( ( d - mean(d) ) ^ 3 ) / var(d) ^ (3/2)
stat.obs2 = mean( abs(d) < delta )
for ( i in 1:n.samp ) {
d.new = sample( d.samp, size=n, replace=TRUE )
stat.new[i] = mean( ( d.new - mean(d.new) ) ^ 3 ) / var(d.new) ^ (3/2)
stat.new2[i] = mean( abs(d.new) < delta )
}
# NOTE(review): these read input$model.check from the enclosing Shiny server
# environment, so the function's own `model.check` argument is effectively
# ignored -- confirm whether this is intended.
if ( input$model.check == "skewness" ) ppp = mean( stat.new > stat.obs )
if ( input$model.check == "prop.agree" ) ppp = mean( stat.new2 > stat.obs2 )
# Diagnostic plot panel: posterior histograms, joint scatter, and the
# posterior predictive distribution of a future difference.
par( mfrow=c(2,3) )
hist( mu.samp, xlab=expression(mu), col="red", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ mu), cex=0.9 )
axis(1)
hist( sigma.samp, xlab=expression(sigma), col="orange", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ sigma), cex=0.9 )
axis(1)
plot( mu.samp, sigma.samp, xlab=expression(mu), ylab=expression(sigma), main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution"), cex=0.9 )
mtext( side=3, line=0, adj=0.1, text=bquote("of" ~ mu ~ "and" ~ sigma), cex=0.9 )
axis(1); axis(2)
hist( theta1.samp, xlab=expression(theta[1]), col="darkgreen", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ theta[1]), cex=0.9 )
mtext( side=3, line=0.5, adj=0.1, text=bquote("(lower bound)" ), cex=0.9 )
axis(1)
hist( theta2.samp, xlab=expression(theta[2]), col="blue", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ theta[2]), cex=0.9 )
mtext( side=3, line=0.5, adj=0.1, text=bquote("(upper bound)"), cex=0.9 )
axis(1)
hist( d.samp, xlab=expression(tilde(D)), col="purple", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Predictive"), cex=0.9 )
mtext( side=3, line=0, adj=0.1, text=bquote("Distribution of" ~ tilde(D)), cex=0.9 )
axis(1)
out = list( post, post.h1, post.pred.agree, ppp )
names(out) = c("post","post.h1","post.pred.agree","ppp" )
out}
### Independent Flat Prior: posterior analysis
# Gibbs-type sampler for the Bland-Altman difference model d_i ~ N(mu, 1/tau)
# under independent uniform (flat) priors mu ~ Unif(l.mu, u.mu) and
# sigma ~ Unif(l.sigma, u.sigma), implemented by rejection-sampling the
# unconstrained full conditionals until a draw falls inside the prior bounds.
# Arguments:
#   d                 vector of within-subject differences between the methods
#   delta             clinically acceptable maximum difference (agreement margin)
#   l.sigma, u.sigma  prior bounds for sigma (l.sigma is truncated at 0 below)
#   l.mu, u.mu        prior bounds for mu
#   n.samp            number of posterior draws
#   model.check       posterior-predictive statistic ("prop.agree" or "skewness")
# Returns a list: post (posterior summary table), post.h1, post.pred.agree, ppp.
# NOTE(review): side effect -- draws a 2x3 panel of plots via par(mfrow=c(2,3))
# without restoring the previous par settings.
BA.Bayesian3 = function( d, delta, l.sigma, u.sigma, l.mu, u.mu, n.samp=10000, model.check="prop.agree" ) {
n = length(d)
dbar = mean(d)
# v is the (biased, divide-by-n) sample variance of the differences
v = sum( (d-dbar)^2 ) / n
# Guard against prior bounds that are inverted or incompatible with the data
if ( l.mu > u.mu ) stop( "invalid boundaries for mu" )
if ( l.sigma > u.sigma ) stop( "invalid boundaries for sigma" )
if ( l.mu > dbar | u.mu < dbar ) stop( "boundaries for mu do not cover sample mean; prior may deviate too much from data" )
if ( l.sigma > sqrt(v) | u.sigma < sqrt(v) ) stop( "boundaries for sigma do not cover sample standard deviation; prior may deviate too much from data" )
l.sigma = max( 0, l.sigma ) ### in case user inputs a negative value
# Work on the precision scale: bounds on sigma invert into bounds on tau
l.tau = 1 / u.sigma^2
u.tau = 1 / l.sigma^2
mu.samp = tau.samp = rep( NA, n.samp )
mu.samp[1] = mu.new = dbar
tau.samp[1] = tau.new = 1/v
set.seed(123) ### set seed for consistent result
for ( i in 2:n.samp ) {
# Draw mu from its normal full conditional, rejecting draws outside (l.mu, u.mu)
mu.new = l.mu - 1
while( mu.new < l.mu | mu.new > u.mu ) mu.samp[i] = mu.new = rnorm( 1, dbar, 1/sqrt(n*tau.new) )
# Draw tau from its gamma full conditional, rejecting draws outside (l.tau, u.tau)
tau.new = l.tau - 1
while( tau.new < l.tau | tau.new > u.tau ) tau.samp[i] = tau.new = rgamma( 1, 0.5*n + 1, 0.5*n*v + 0.5*n*(dbar-mu.new)^2 )
}
sigma.samp = 1/sqrt(tau.samp)
# 95% limits of agreement implied by each posterior draw
theta1.samp = mu.samp-1.96*sigma.samp
theta2.samp = mu.samp+1.96*sigma.samp
# Posterior predictive draws of a future difference
d.samp = rnorm( n.samp, mu.samp, sigma.samp )
##### HIGHEST DENSITY INTERVAL
# Scan every 95%-wide quantile window [p, p + 0.95] and keep the narrowest
# one as the (approximate) 95% HDI for each quantity.
p.grid = round( seq( 0.01, 0.99, 0.01 ), 2 )
n.grid = 0.05 * length(p.grid)
mu.Q = quantile( mu.samp, p.grid )
sigma.Q = quantile( sigma.samp, p.grid )
theta1.Q = quantile( theta1.samp, p.grid )
theta2.Q = quantile( theta2.samp, p.grid )
diff.Q = quantile( d.samp, p.grid )
rslt.mu.Q = rslt.sigma.Q = rslt.theta1.Q = rslt.theta2.Q = rslt.diff.Q = matrix( NA, n.grid, 3 )
for ( i in 1:n.grid ) {
index1 = which( p.grid == p.grid[i] ); index2 = which( p.grid == p.grid[i] + 0.95 )
rslt.mu.Q[i,] = c( mu.Q[index1], mu.Q[index2], mu.Q[index2] - mu.Q[index1] )
rslt.sigma.Q[i,] = c( sigma.Q[index1], sigma.Q[index2], sigma.Q[index2] - sigma.Q[index1] )
rslt.theta1.Q[i,] = c( theta1.Q[index1], theta1.Q[index2], theta1.Q[index2] - theta1.Q[index1] )
rslt.theta2.Q[i,] = c( theta2.Q[index1], theta2.Q[index2], theta2.Q[index2] - theta2.Q[index1] )
rslt.diff.Q[i,] = c( diff.Q[index1], diff.Q[index2], diff.Q[index2] - diff.Q[index1] )
}
mu.HDI = rslt.mu.Q[ which.min( rslt.mu.Q[,3] ), 1:2 ]
sigma.HDI = rslt.sigma.Q[ which.min( rslt.sigma.Q[,3] ), 1:2 ]
theta1.HDI = rslt.theta1.Q[ which.min( rslt.theta1.Q[,3] ), 1:2 ]
theta2.HDI = rslt.theta2.Q[ which.min( rslt.theta2.Q[,3] ), 1:2 ]
diff.HDI = rslt.diff.Q[ which.min( rslt.diff.Q[,3] ), 1:2 ]
# Posterior summary table: mean, selected quantiles, and 95% HDI per quantity
p = c( 0.025, 0.05, 0.25, 0.5, 0.75, 0.95, 0.975 )
mu.rslt = c( round(mean(mu.samp), 3), round(quantile( mu.samp, prob=p ),3), paste("(", round(mu.HDI[1],3), ", ", round(mu.HDI[2], 3), ")", sep="" ) )
sigma.rslt = c( round(mean(sigma.samp),3), round(quantile( sigma.samp, prob=p ),3), paste("(", round(sigma.HDI[1],3), ", ", round(sigma.HDI[2],3), ")", sep="" ) )
theta1.rslt = c( round(mean(theta1.samp),3), round(quantile( theta1.samp, prob=p ),3), paste("(", round(theta1.HDI[1],3), ", ", round(theta1.HDI[2],3), ")", sep="" ) )
theta2.rslt = c( round(mean(theta2.samp),3), round(quantile( theta2.samp, prob=p ),3), paste("(", round(theta2.HDI[1],3), ", ", round(theta2.HDI[2],3), ")", sep="" ) )
d.rslt = c( round(mean(d.samp),3), round(quantile( d.samp, prob=p ), 3), paste("(", round(diff.HDI[1], 3), ", ", round(diff.HDI[2], 3), ")", sep="") )
post = rbind( mu.rslt, sigma.rslt, theta1.rslt, theta2.rslt, d.rslt )
rownames(post) = c( "mu", "sigma", "theta1", "theta2", "difference" )
colnames(post) = c( "mean", "2.5%", "5%", "25%", "50%", "75%", "95%", "97.5%", "95% HDI")
post.h1 = mean( theta1.samp >= -delta & theta2.samp <= delta )
post.pred.agree = mean( abs(d.samp) <= delta )
### added: model checking via posterior predictive replicates
stat.new = stat.new2 = rep( NA, n.samp )
stat.obs = mean( ( d - mean(d) ) ^ 3 ) / var(d) ^ (3/2)
stat.obs2 = mean( abs(d) < delta )
for ( i in 1:n.samp ) {
d.new = sample( d.samp, size=n, replace=TRUE )
stat.new[i] = mean( ( d.new - mean(d.new) ) ^ 3 ) / var(d.new) ^ (3/2)
stat.new2[i] = mean( abs(d.new) < delta )
}
# NOTE(review): these read input$model.check from the enclosing Shiny server
# environment, so the function's own `model.check` argument is effectively
# ignored -- confirm whether this is intended.
if ( input$model.check == "skewness" ) ppp = mean( stat.new > stat.obs )
if ( input$model.check == "prop.agree" ) ppp = mean( stat.new2 > stat.obs2 )
# Diagnostic plot panel: posterior histograms, joint scatter, and the
# posterior predictive distribution of a future difference.
par( mfrow=c(2,3) )
hist( mu.samp, xlab=expression(mu), col="red", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ mu), cex=0.9 )
axis(1)
hist( sigma.samp, xlab=expression(sigma), col="orange", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ sigma), cex=0.9 )
axis(1)
plot( mu.samp, sigma.samp, xlab=expression(mu), ylab=expression(sigma), main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution"), cex=0.9 )
mtext( side=3, line=0, adj=0.1, text=bquote("of" ~ mu ~ "and" ~ sigma), cex=0.9 )
axis(1); axis(2)
hist( theta1.samp, xlab=expression(theta[1]), col="darkgreen", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ theta[1]), cex=0.9 )
mtext( side=3, line=0.5, adj=0.1, text=bquote("(lower bound)" ), cex=0.9 )
axis(1)
hist( theta2.samp, xlab=expression(theta[2]), col="blue", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Distribution of" ~ theta[2]), cex=0.9 )
mtext( side=3, line=0.5, adj=0.1, text=bquote("(upper bound)"), cex=0.9 )
axis(1)
hist( d.samp, xlab=expression(tilde(D)), col="purple", main="")
mtext( side=3, line=1.5, adj=0.1, text=bquote("Posterior Predictive"), cex=0.9 )
mtext( side=3, line=0, adj=0.1, text=bquote("Distribution of" ~ tilde(D)), cex=0.9 )
axis(1)
out = list( post, post.h1, post.pred.agree, ppp )
names(out) = c("post","post.h1","post.pred.agree","ppp" )
out}
# Set prior values based on inputs
# Default Vague Prior - Normal Gamma
if(input$prior=="Opt1"){
a0=0.5
b0=1e-6
mu0=0
lambda0=1e-6
# Output from BA.Bayesian function (using appropriate prior values based on user input with Normal-Gamma Prior)
out = BA.Bayesian(d=d, delta=input$delta, a0=a0, b0=b0, mu0=mu0, lambda0=lambda0, n.samp=n.samp )
}
# Manual input of prior values - Normal Gamma
if(input$prior=="Opt2"){
a0=input$a0
b0=input$b0
mu0=input$mu0
lambda0=input$lambda0
# Output from BA.Bayesian function (using appropriate prior values based on user input with Normal-Gamma Prior)
out = BA.Bayesian(d=d, delta=input$delta, a0=a0, b0=b0, mu0=mu0, lambda0=lambda0, n.samp=n.samp )
}
# Calculates prior values based on our prior specification - Normal Gamma
if(input$prior=="Opt3"){
# Takes in input values
sigma.hat=input$sigma.hat
u.sigma=input$u.sigma
l.mu=input$l.mu
u.mu=input$u.mu
# Calculates prior values based on input
temp = parameters(sigma.hat = sigma.hat, u.sigma = u.sigma, l.mu = l.mu, u.mu = u.mu)
a0 = temp[[1]]
b0 = temp[[2]]
mu0 = temp[[3]]
lambda0 = temp[[4]]
# Output from BA.Bayesian function (using appropriate prior values based on user input with Normal-Gamma Prior)
out = BA.Bayesian(d=d, delta=input$delta, a0=a0, b0=b0, mu0=mu0, lambda0=lambda0, n.samp=n.samp )
}
# Manual input of prior values - Independent Normal Gamma
if(input$prior=="Opt4"){
a0=input$a0
b0=input$b0
mu0=input$mu0
lambda0=input$lambda0
# Output from BA.Bayesian function (using appropriate prior values based on user input with Normal-Gamma Prior)
out = BA.Bayesian2(d=d, delta=input$delta, a0=a0, b0=b0, mu0=mu0, lambda0=lambda0, n.samp=n.samp )
}
# Calculates prior values based on our prior specification - Independent Normal Gamma
if(input$prior=="Opt5"){
# Takes in input values
sigma.hat=input$sigma.hat
u.sigma=input$u.sigma
l.mu=input$l.mu
u.mu=input$u.mu
# Calculates prior values based on input
temp = parameters2(sigma.hat = sigma.hat, u.sigma = u.sigma, l.mu = l.mu, u.mu = u.mu)
a0 = temp[[1]]
b0 = temp[[2]]
mu0 = temp[[3]]
lambda0 = temp[[4]]
# Output from BA.Bayesian function (using appropriate prior values based on user input with Normal-Gamma Prior)
out = BA.Bayesian2(d=d, delta=input$delta, a0=a0, b0=b0, mu0=mu0, lambda0=lambda0, n.samp=n.samp )
}
# Uses independent uniform prior - with inputs of sigma.hat, u.sigma, etc.
if(input$prior=="Opt6"){
# Takes in input values
l.sigma=input$l.sigma
u.sigma=input$u.sigma
l.mu=input$l.mu
u.mu=input$u.mu
# Output from BA.Bayesian function (using appropriate prior values based on user input with Normal-Gamma Prior)
out = BA.Bayesian3(d=d, delta=input$delta, l.sigma=l.sigma, u.sigma=u.sigma, u.mu=u.mu, l.mu=l.mu, n.samp=n.samp )
}
# Output from BA.Bayesian function (using appropriate prior values based on user input)
rslt = out$post
ci = out$post[1,]
# Pull out 95% CI for mu
l = ci[2]
u = ci[8]
# Pull out Posterior Probability of H1
posth1 = out$post.h1
# Pull out Posterior Probability Distribution
postpred = out$post.pred.agree
# Pull out PPP
ppp = out$ppp
# Outputs for text interpretations. Follows HTML formatting
output$headingtext <- renderText({
paste("<B>Based on the prior and observed data...</B>")
})
output$interpretation1 <- renderText({
paste("<B>95% Credible Interval for μ:</B> The true mean (μ) of differences between the two measurements is between", l, "and", u, "with a probability of 0.95.")
})
output$interpretation2 <- renderText({
paste("<B>Posterior Probability of H1:</B> The true limits of agreement (θ1 = μ - 1.96σ and θ2 = μ + 1.96σ) are within delta =", input$delta, "units with a probability of", round(posth1, 3),".")
})
output$interpretation3 <- renderText({
paste("<B>Posterior Probability Distribution:</B> Future differences (between the two measurement methods) will be within delta =", input$delta, "units with a probability of", round(postpred, 3), ".")
})
output$interpretation4 <- renderText({
paste("<B>Posterior Predictive P-value:</B> The posterior predictive p-value (PPP) is ", round(ppp, 3), ". A very small (i.e. close to 0) or large (i.e. close to 1) PPP indicates violation of the normality assumption.")
})
# Outputs rslt table using the DT package for R Shiny
output$table <- DT::renderDataTable({
DT::datatable(rslt)
})
})
})
|
a3d2e546525cf2f07a3ddc359033d4210ad3f223 | 93051b30dbf41dcda0f678a8d811c16ac4c9f65d | /inst/shiny-examples/ROC/app.R | 97bf0e252eeceaa337e8e5119438e029c1c471f2 | [] | no_license | cran/UncertainInterval | 483d0b3db282e31e7413eb10d35cecdb7d031435 | f2465d3b1e06f4ed2b3aa01ebad5329757fc3555 | refs/heads/master | 2021-07-18T06:29:45.578973 | 2021-03-02T15:00:02 | 2021-03-02T15:00:02 | 79,441,666 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,831 | r | app.R | library(shiny)
# Define UI: title, a sidebar with the two error-rate sliders (plus a checkbox
# that links them), and a main panel with the plots and explanatory text.
# Fixes in this revision: spelling/grammar corrections in the user-facing text
# ("dichotomous", "diagonal", "separately", "strengths", "confirming",
# "upper-left", ...), a factual correction (Specificity concerns scores *lower*
# than the cut-off), and removal of fragile trailing commas in sidebarPanel()
# and mainPanel() argument lists.
ui <- fluidPage(
  # Application title
  titlePanel("Medical Decision Methods: Receiver Operating Characteristics (ROC) for Binormal Distributions"),
  # Sidebar with slider inputs for the two error rates
  sidebarLayout(
    sidebarPanel(
      p("The point of intersection is equal to the optimal dichotomous threshold at which the sum of Sensitivity and Specificity (Se + Sp) is maximized,
      and the sum of errors (FNR + FPR) is minimized. You manipulate the percentages of errors with the sliders."),
      sliderInput("FNR",
                  "False Negative Rate: Percentage of true patients with test scores below the intersection (1 - Se):",
                  min = 1,
                  max = 50,
                  value = 15),
      sliderInput("FPR",
                  "False Positive Rate: Percentage of true non-patients with test scores above the intersection (1 - Sp):",
                  min = 1,
                  max = 50,
                  value = 15),
      checkboxInput("combineSliders", "Combine patients slider with non-patient slider", TRUE)
    ),
    # Show the generated plots, followed by the documentation text
    mainPanel(
      plotOutput("distPlot"),
      h1('Demonstration of the Receiver Operating Characteristic curve (ROC)'),
      h2("Hands-on Demonstration of ROC"),
      p("1. Check the checkbox to combine the two sliders. Move the upper
        slider to the left or right, simulating a better or
        worse test. ROC curves closer to the top-left corner indicate
        better performance (AUC). A curve close to the diagonal indicates
        a worthless test (AUC = .5). The ROC curve shows us the relative
        trade-off between true positives (benefits) and false positives (costs).
        The ROC curve does not show us the actual cut-off scores."),
      p("2. Uncheck the checkbox to release the lower slider. Move the
        upper slider to 30, and simulate different tests with the lower slider.
        The two tests now have different variances. Observe that the ROC
        curve sometimes crosses the diagonal. In the left plot you can see
        that this is caused by a secondary point of intersection. This is
        undesirable, because the test scores around the second intersection
        have a problematic and inconsistent interpretation: they no longer
        allow us to say that only the higher scores indicate the targeted disease."),
      p("3. Create different tests and observe the values of AUC. AUC is
        an abbreviation of 'Area under the Curve'. If you were wondering 'Which curve?',
        well, it is the Area under the ROC curve, and AUROCC would be a better name.
        As we do not want the curve
        below the diagonal, AUC varies in practice between .5
        (complete overlap) and 1.0 (no overlap)."),
      h2('Using the dashboard'),
      p('The grey panel offers a dashboard where the user can create many
        different tests. The tests differ in their overlap of the scores
        for the two groups. The overlap is chosen with two sliders: the
        upper slider sets the percentage of true patients with test scores
        below the point of intersection (that is the blue dotted line in the left graph). The
        lower slider sets the percentage of patients which are known to not
        have the disease that have test scores above the intersection.
        The true presence or absence of the
        disease is known and is determined with superior means, called a "gold standard".'),
      p('A checkbox allows the combination of the two sliders. When the two sliders are combined, the variance remains equal for
        the two distributions. Unchecking makes it possible to use the two
        sliders separately, allowing the two distributions to have different variance.'),
      p('The total overlap is here defined as the sum of these two percentages. In this way,
        a large number of tests of varying strengths can be simulated.
        The strength of the test is directly determined by the overlap of
        the distributions of test scores: a test is stronger when the
        overlap is smaller. For convenience, the',
        span("AUC statistic", style="color:blue"), 'is presented, which is
        also an estimate of the strength of the test. '),
      h2("Background Information"),
      p("When a test is intended for confirming the presence or absence of a
        disease, the test is evaluated using two groups: a group of true patients
        who have the disease and a group of patients who truly do not have
        the disease (shortly called non-patients). The true status of each
        patient is determined with a 'gold standard'. The left plot above shows
        the two densities of the two groups. The right plot shows the ROC,
        and shows the trade-off between Sensitivity and Specificity. Clearly,
        Sensitivity increases while Specificity decreases and vice versa."),
      h2('Two normal densities and ROC'),
      p("The bi-normal distributions shown in the left plot show the densities
        of the obtained simulated test scores. The test scores of the
        non-patients are always standard normal distributed, with mean of 0
        and standard deviation of 1: N(0, 1). The distribution of the true
        patients can vary widely. The difference of the two densities
        indicates for a given test score from which of the two groups of patients
        the test score is most likely drawn. When the sliders are combined,
        both distributions have a standard deviation of 1 and only the means
        differ. The application starts with a distribution of test scores
        with a mean of 2.07 and a standard deviation of 1 (N(2.07, 1))."),
      h2("Trichotomization versus dichotomization"),
      p("The classic techniques such as the ROC curve for evaluating tests for medical decision making
        make use of dichotomization of the test scores. All test scores are
        considered as equally useful for a positive or
        negative classification of each patient concerning the disease that
        is the target of the test. Trichotomization methods say that some test
        scores offer an insufficient distinction between the two groups of patients and
        try to identify test scores that are insufficiently valid and are
        better not used for classification."),
      h2('In conclusion'),
      p('The ROC method is useful for showing the trade-off between
        Sensitivity and Specificity. It is also useful to compare different
        tests: a test is better when the curve is more drawn to the upper-left
        corner. This is directly related to AUC: the Area Under the curve.
        Furthermore, it allows us to identify shortcomings of the test, as
        it shows a crossing of the diagonal when the test has an undesirable
        secondary point of intersection.'),
      p("The ROC curve shows us the trade-off between Sensitivity and
        Specificity. These statistics are however group statistics.
        Sensitivity gives us the percentage of patients with a correct positive
        classification and concerns the patients with test scores higher
        than the cut-off score. Specificity gives us the percentage of patients
        with a correct negative classification and concerns the patients
        with test scores lower than the cut-off score. Individual patients
        do not have a true test score that is equal or higher than the cut-off
        score, but their true score lies around the received test score.
        For such a small interval of true scores around the received test
        score, the left plot is easier
        to interpret: simply look at the difference between the two density
        plots. A large difference indicates good discrimination for these
        test scores, a small difference indicates test scores that perhaps
        are better not used for classification.")
    )
  )
)
# Define server logic: renders the two density curves (non-patients fixed at
# N(0, 1); patients at N(m1, sd1) derived from the chosen error rates) and the
# corresponding ROC curve.
# Fixes in this revision: removed the dead line `acc = ifelse(input$acc==1, ...)`
# (the UI defines no input named "acc" and `acc` was never used) and replaced a
# stale template comment that referred to a nonexistent `input$bins`.
server <- function(input, output, session) {
  output$distPlot <- renderPlot({
    # When the user asks for equal variances, force the FPR slider to track FNR.
    if (input$combineSliders) updateSliderInput(session, "FPR", value = input$FNR)
    # Slider percentages -> proportions
    FPR = input$FPR / 100
    FNR = input$FNR / 100
    # Non-patients are fixed at the standard normal N(0, 1)
    m0=0; sd0=1;
    is = qnorm(1-FPR, 0, 1) # intersection: x with proportion FPR of non-patients above it
    Z = qnorm(FNR) # standard normal quantile of FNR; Z = (x-mean) / sd
    dens = dnorm(is, 0, 1) # density at the point of intersection
    # Patient distribution N(m1, sd1): chosen so that its density equals `dens`
    # at the intersection and a proportion FNR of its mass lies below it.
    sd1 = (1/(dens*sqrt(2*pi)))*exp(-.5 * Z^2)
    m1 = is-Z*sd1
    x <- seq(-4, 4+m1, length=200)
    y0 <- dnorm(x, m0, sd0)
    y1 = dnorm(x, m1, sd1)
    # Left panel: the two densities; right panel: the ROC curve
    par(mfrow=c(1,2))
    plot(x, y0, type="l", col='black', xlab='Test score', ylab='Density',
         main='Probability Density Functions', ylim=c(0,.5))
    lines(x, y1, type="l", col='red')
    lines(x=c(is,is), y=c(0,.5),col='blue', lty=3) # mark the intersection
    # Binormal AUC: pnorm(a / sqrt(1 + b^2)) with a = (m1-m0)/sd1, b = sd0/sd1
    a = (m1-m0)/sd1
    b = sd0/sd1
    AUC = round(unname(pnorm(a/sqrt(1+b^2))), 4)
    legend('topleft', c(paste('Non-Patients = N(', m0,', ', sd0,')', sep=''),
                        paste('True Patients = N(', round(m1,2),', ', round(sd1,2),')', sep='')),
           text.col= c('black','red'))
    legend('topright', c('intersection',paste('AUC = ', AUC, sep='')), lty=c(3, 0),col=c('blue', 'black'))
    # ROC: sweep the cut-off over the x grid
    Se = 1-pnorm(x, m1, sd1)
    Sp = pnorm(x, 0, 1)
    plot(1-Sp, Se, type='l', col='red', xlab='1 - Specificity',
         ylab='Sensitivity', main="ROC curve of 1-Specificity and Sensitivity")
    lines(x=c(0,1), y=c(0,1),col='black') # diagonal
    par(mfrow=c(1,1))
  })
}
# Run the application: bind the UI definition above to the server logic
shinyApp(ui = ui, server = server)
|
8b79bc0895bc8deab3713183f8386044668725d9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/surveillance/examples/epidata_summary.Rd.R | f87faafec52fa4bf4c28796a1237b461cb4b68ea | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 399 | r | epidata_summary.Rd.R | library(surveillance)
### Name: epidata_summary
### Title: Summarizing an Epidemic
### Aliases: summary.epidata print.summary.epidata
### Keywords: methods
### ** Examples
# Auto-extracted example for surveillance::summary.epidata; requires the
# 'surveillance' package and its example data set 'fooepidata'.
data("fooepidata")
s <- summary(fooepidata)
s # uses the print method for summary.epidata
names(s) # components of the list 's'
# positions of the individuals
plot(s$coordinates)
# events by id
head(s$byID)
|
cd356dd595fd8cad3e555244a8d926c5eeeca095 | c90b298bd859c3b9542a65da2e8885917d28c9e6 | /man/mixord.Rd | 15a40821973bf66967089055cda420bf44577a06 | [] | no_license | cran/mixor | f16b5b2a60470fed9f3a80390700e35b037cde56 | 2a9a67672767c6c2c38173711a600286e9d3e5ee | refs/heads/master | 2020-04-14T19:06:48.363567 | 2018-06-14T20:17:56 | 2018-06-14T20:17:56 | 27,953,583 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 407 | rd | mixord.Rd | \name{mixord}
\alias{mixord}
\title{
Old Function Name Replaced with mixor
}
\description{
The \code{mixord} function has been deprecated. Please use the \code{mixor} function for fitting longitudinal/clustered ordinal response models.
}
\author{
Kellie J. Archer, Donald Hedeker, Rachel Nordgren, Robert D. Gibbons
}
\seealso{
See also \code{\link{mixor}} and \code{\link{summary.mixor}}.
}
|
f4df3a063fa126057e33255cbc43fd11a27e63bb | 4edc2d9d109559d620cbaf3d1423fd5eabc19142 | /R/table.R | 175d8e39f6c561b3755655dfb1e3d9d61529e5c6 | [] | no_license | bedatadriven/DBI | 39299edcf6fc22e96c67a5a1d006a2e3e68c18d3 | 3caf86d34a7c1bb85ad7b2f386332c19b54a5191 | refs/heads/master | 2021-01-11T02:55:22.843356 | 2016-10-18T09:47:00 | 2016-10-18T09:47:00 | 70,905,997 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 854 | r | table.R | #' @rdname Table
setClass("Table", slots = list(name = "character")) # S4 class: `name` holds the ordered hierarchy components (e.g. schema, table)
#' Refer to a table nested in a hierarchy (e.g. within a schema)
#'
#' @param ... Components of the hierarchy, e.g. `schema`, `table`,
#' or `cluster`, `catalog`, `schema`, `table`.
#' For more on these concepts, see
#' \url{http://stackoverflow.com/questions/7022755/}
#' @return A `Table` S4 object; the components are stored, in order, in its
#' `name` slot.
Table <- function(...) {
new("Table", name = c(...))
}
#' @rdname hidden_aliases
#' @param conn,x Connection and Table used when escaping.
#' @export
setMethod("dbQuoteIdentifier", c("DBIConnection", "Table"),
  function(conn, x, ...) {
    # Quote each component of the hierarchy, then join with "." to form the
    # fully qualified identifier (e.g. "schema"."table").
    quoted_parts <- dbQuoteIdentifier(conn, x@name)
    SQL(paste(quoted_parts, collapse = "."))
  }
)
#' @rdname hidden_aliases
#' @param object Table object to print
#' @export
setMethod("show", "Table", function(object) {
  # One-line display: "<Table> " followed by the dot-joined hierarchy
  qualified_name <- paste(object@name, collapse = ".")
  cat("<Table> ", qualified_name, "\n", sep = "")
})
|
28d8b5c3bd142ef2a759e61eb616004af424a1ed | 7a0cc1a29da34a761327a45f506c4b097cd33bd8 | /man/CommonFixScribe.Rd | b10fc44d06b79abcd829643bc4fd2dafcf20a0db | [
"CC0-1.0"
] | permissive | USFWS/AKaerial | 3c4d2ce91e5fac465a38077406716dd94c587fc8 | 407ccc5bf415d8d5ed0d533a5148693926306d27 | refs/heads/master | 2023-07-19T22:04:12.470935 | 2023-07-14T19:50:31 | 2023-07-14T19:50:31 | 254,190,750 | 0 | 1 | CC0-1.0 | 2023-07-14T19:50:33 | 2020-04-08T20:14:41 | R | UTF-8 | R | false | true | 1,220 | rd | CommonFixScribe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/greenlightscribe.R
\name{CommonFixScribe}
\alias{CommonFixScribe}
\title{Apply "yellow light" changes to a SCRIBE data set}
\usage{
CommonFixScribe(data, fix, area)
}
\arguments{
\item{data}{The data frame to be fixed.}
\item{fix}{The character string vector of the names of the fixes to be applied.}
\item{area}{The project area designation.}
}
\value{
data frame of fixed columns
}
\description{
CommonFixScribe is used in conjunction with GreenLightScribe to apply "no-loss" changes to a data set
}
\details{
CommonFixScribe will take "yellow light" issues and apply a known treatment to fix offending issues. The list of fixes includes \itemize{
\item Seat - changes lower to uppercase, flips the characters (FR, FL, RL to RF, LF, LR)
\item Observer - changes lower to uppercase
\item Swan - breaks up a nest-only observation into 2 observations
\item Grouping - changes open 2 or open 1 SWAN to pair or single, changes SWANN to open 1
\item Species - changes incorrect or outdated species codes to current ones}
}
\references{
\url{https://github.com/USFWS/AKaerial}
}
\author{
Charles Frost, \email{charles_frost@fws.gov}
}
|
cf7a7ac68c87c1c00da75b07be5972dd78b01d95 | d5115ed34f56cec739b2dedcbd525156e1b3021b | /cachematrix.R | a45ea3e1d38d365b3ee19be78de15a80aed1312b | [] | no_license | KateTimms/ProgrammingAssignment2 | bfe1663cd9eec35103f8bed68fcdf08dfc217b83 | 1c998b8677d69914b831657d5063dedc0d6a9733 | refs/heads/master | 2021-01-20T15:42:22.414519 | 2017-06-28T20:21:37 | 2017-06-28T20:21:37 | 95,686,833 | 0 | 0 | null | 2017-06-28T15:54:30 | 2017-06-28T15:54:30 | null | UTF-8 | R | false | false | 1,770 | r | cachematrix.R | ## Overall, the functions create a place in memory where funcitons and the
## computed inverse matrix are stored. This allows them to be called in future
## and does not take up memory re-calculating them if the same matrix inversion
## is required again. Otherwise, if it is a new matrix, the new matrix inversion
## is stored in the environment of makeCacheMatrix for use in the future.
## makeCacheMatrix creates the environment for cacheSolve to call
## functions/objects from, and to retrieve the inverse matrix from,
## if the matrix specified is the same as when the function was last run.
## If a new matrix is specified, then the cacheSolve function solves the
## matrix inverse and stores it in the environment of makeCacheMatrix.
## Wrap a matrix together with a cache slot for its inverse. The returned
## list exposes four closures that all share one environment:
##   set(y)       store a new matrix and invalidate the cached inverse
##   get()        retrieve the stored matrix
##   setmi(solve) store a freshly computed inverse in the cache
##   getmi()      retrieve the cached inverse (NULL when not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  replace_matrix <- function(y) {
    x <<- y
    cached <<- NULL
  }
  list(
    set = replace_matrix,
    get = function() x,
    setmi = function(solve) cached <<- solve,
    getmi = function() cached
  )
}
## cacheSolve first assigns m the value returned by getmi from the makeCacheMatrix
## environment. Then, if x is the same as the matrix specified previously, m
## will not have been reset to NULL in makeCacheMatrix, so the if branch
## first tells us what it's going to do, then retrieves the inverse matrix from memory.
## If a new matrix has been specified, its inverse matrix is solved and stored to m in
## the makeCacheMatrix environment.
## Return the inverse of the special "matrix" object `x` produced by
## makeCacheMatrix.  The inverse is computed at most once: a cached copy is
## reused on repeat calls (with a message); otherwise solve() is run and
## the result stored back into `x` for future callers.  `...` is forwarded
## to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getmi()
  if (!is.null(cached)) {
    message("retrieving data from cache")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setmi(inverse)
  inverse
}
|
cf1ef4e84a1b8456c4494f6917fe78657df9f47b | 7dd51c0c6137f8a32a6e2f265874acfcb0c0b5f8 | /demean/testtime/seperateEMDSC/subdm/code/02_test.R | 7c03e8af5661dac7db10abe2c2ec0d948e16b1bd | [] | no_license | Winnie09/GBM_myeloid | 7d3c657f9ec431da43b026570e5684b552dedcee | a931f18556b509073e5592e13b93b8cf7e32636d | refs/heads/master | 2023-02-04T04:29:50.274329 | 2020-12-20T21:16:16 | 2020-12-20T21:16:16 | 268,260,236 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,403 | r | 02_test.R | # ------------
# prepare data
# ------------
# Pipeline: read pseudotime / expression / cell-annotation objects for one
# trajectory, de-mean expression within each sample, then run the
# pseudotime association test (testpt) and save the result object.
trajectory <- as.character(commandArgs(trailingOnly = TRUE)[[1]][1])
# trajectory = 'EMDSC_MAC1'   # example value for interactive runs
print(trajectory)
library(parallel)
library(splines)
# testpt() and its helpers are defined in the sourced function file.
source('/home-4/whou10@jhu.edu/scratch/Wenpin/trajectory_variability/function/01_function.R')
rdir <- paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/GBM_myeloid/demean/testtime/subdm/result/', trajectory, '/res/')
datadir <- paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/GBM_myeloid/demean/testtime/subdm/result/', trajectory, '/data/')
dir.create(rdir, recursive = TRUE)
pseudotime <- readRDS(paste0(datadir, 'pseudotime.rds'))
expr <- readRDS(paste0(datadir, 'log2cpm.rds'))
cellanno <- readRDS(paste0(datadir, 'cellanno.rds'))
# Keep genes with a positive value in more than 1% of cells.
expr <- expr[rowMeans(expr > 0) > 0.01, ]
# Intercept-only design matrix, one row per sample.
# NOTE(review): cellanno column 2 appears to be the sample id and column 1
# the cell id -- confirm against the sourced helpers.
design <- matrix(1, nrow = length(unique(cellanno[,2])))
rownames(design) <- unique(cellanno[,2])
colnames(design) <- 'intercept'
## demean: subtract each gene's mean expression within every sample
expr.demean <- lapply(unique(cellanno[,2]), function(s){
  tmp <- expr[, cellanno[cellanno[,2] == s, 1]]
  tmp2 <- tmp- rowMeans(tmp)
})
expr.demean <- do.call(cbind, expr.demean)
# -----
# test
# -----
# Run the permutation-based test for an overall pseudotime effect; the
# call is wrapped in system.time() to record the runtime.
system.time({
Res <- testpt(expr = expr.demean, cellanno = cellanno, pseudotime = pseudotime, design=design, permuiter=100, EMmaxiter=100, EMitercutoff=1, verbose=F, ncores=12, type='Time', fit.resolution = 1000, test.pattern = 'overall')
saveRDS(Res, paste0(rdir, 'ptest_res.rds'))
})
3399633c192aa2c9804d4d529776d9c9830f9a78 | 6bb301790c4255ce00bab6532b22fdb020a4ece9 | /plot4.R | 174c0b213cdcac1e9fbf32055dbff1d97add0d08 | [] | no_license | CVava/ExData_Plotting1 | 14acbce513b164f70d661429d9c99778f789f519 | 5f0e3b1abc0c78a655b3fd190417cd5706cc873f | refs/heads/master | 2020-12-11T05:23:27.000547 | 2015-05-06T21:56:33 | 2015-05-06T21:56:33 | 35,120,343 | 0 | 0 | null | 2015-05-05T19:45:33 | 2015-05-05T19:45:33 | null | UTF-8 | R | false | false | 3,481 | r | plot4.R | # plot4.R
# plot4.R -- builds a 2x2 panel of household power-consumption plots for
# 2007-02-01/02 and writes it to plot4.png.  Each panel shares the same
# 3-tick x axis (start, midpoint, end) labelled with weekday abbreviations.
library(chron)
# read data
consumption <- read.table("household_power_consumption.txt", header=TRUE, sep=";")
# format Date column as dates
consumption$Date <- as.Date(consumption$Date, "%d/%m/%Y")
# select only entries from 2007-02-01 and 2007-02-02
consum <- consumption[consumption$Date == "2007-02-01" | consumption$Date == "2007-02-02", ]
# add first entry beyond the useful range to act as end label
consum <- rbind(consum, consumption[consumption$Date == "2007-02-03" & consumption$Time == "00:00:00", ])
# clean some memory
rm(consumption)
# format Time column as time
consum$Time <- times(consum$Time) # strptime(consum$Time, format = "%H:%M:%S")
# change data format to numeric (columns were read as factors/characters)
consum$Sub_metering_1 <- as.numeric(as.character(consum$Sub_metering_1))
consum$Sub_metering_2 <- as.numeric(as.character(consum$Sub_metering_2))
consum$Sub_metering_3 <- as.numeric(as.character(consum$Sub_metering_3))
consum$Global_active_power <- as.numeric(as.character(consum$Global_active_power))
consum$Global_reactive_power <- as.numeric(as.character(consum$Global_reactive_power))
consum$Voltage <- as.numeric(as.character(consum$Voltage))
# initiate graphing device
png("plot4.png", width=480, height=480, res=90)
par(mfrow=c(2, 2)) # 2x2 graphs
par(cex = 0.7) # smaller font fit better
### 1. plot histogram ###
# no x axis ticks and labels
plot(consum$Global_active_power, type= 'l',
     ylab="Global Active Power", xlab="", xaxt = "n")
# add x axis ticks and labels to match the model
axis(1, at = c(1, length(consum$Date)/2, length(consum$Date)),
     labels = c(format(consum$Date[1], "%a"),
                format(consum$Date[length(consum$Date)/2+1], "%a"),
                format(consum$Date[length(consum$Date)], "%a")))
### 2. plot Voltage ###
# no x axis ticks and labels
plot(consum$Voltage, type= 'l',
     ylab="Voltage", xlab="datetime", xaxt = "n")
# add x axis ticks and labels to match the model
axis(1, at = c(1, length(consum$Date)/2, length(consum$Date)),
     labels = c(format(consum$Date[1], "%a"),
                format(consum$Date[length(consum$Date)/2+1], "%a"),
                format(consum$Date[length(consum$Date)], "%a")))
### 3. Sub_metering_x ###
# no x axis ticks and labels
with(consum, plot(consum$Sub_metering_1, ylab="Energy sub metering",
                  xlab="", xaxt = "n", type = "l"), type = "n")
with(consum, points(consum$Sub_metering_2, type= 'l', col = "red",
                    xlab="", xaxt = "n"))
with(consum, points(consum$Sub_metering_3, type= 'l', col = "blue",
                    xlab="", xaxt = "n"))
# add legend
legend("topright", pch="-", col=c("black", "red", "blue"),
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# add x axis ticks and labels to match the model
axis(1, at = c(1, length(consum$Date)/2, length(consum$Date)),
     labels = c(format(consum$Date[1], "%a"),
                format(consum$Date[length(consum$Date)/2+1], "%a"),
                format(consum$Date[length(consum$Date)], "%a")))
### 4. Global_reactive_power ###
# no x axis ticks and labels
plot(consum$Global_reactive_power, type= 'l',
     ylab="Global_reactive_power", xlab="datetime", xaxt = "n")
# add x axis ticks and labels to match the model
axis(1, at = c(1, length(consum$Date)/2, length(consum$Date)),
     labels = c(format(consum$Date[1], "%a"),
                format(consum$Date[length(consum$Date)/2+1], "%a"),
                format(consum$Date[length(consum$Date)], "%a")))
# close the graphing device
dev.off()
|
af56c6ed1ae18519f4a34b0b4ead2ff2f8630c5e | ab7566d54987032fb49a6e65e6918624f3acac30 | /lib/generic-nbr.R | f191a6e043016c6b9539e35d2ae223d5699ecb2e | [] | no_license | aigujin/predict-bl | fa938c47449edd86d033ab26f458d6d7994560ba | 13671b254b0278e002433f460fa08b8efe0fbca9 | refs/heads/master | 2021-01-17T13:06:47.646486 | 2015-07-07T14:00:27 | 2015-07-07T14:00:27 | 30,874,464 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,899 | r | generic-nbr.R | ### Lable Ranking prediciton function----
## Predict a label ranking with the naive-Bayes model: score every
## training ranking by its negative log posterior (negative log prior plus
## the summed negative log Gaussian likelihood of the test attributes) and
## return the ranking with the smallest score.
##
## Args:
##   rank.data: matrix of candidate rankings, one per row.
##   model: list(priors, cond) as produced by lr.model(); cond$mean and
##     cond$sdev are (ranking x attribute) matrices.
##   test.data: numeric attribute vector of the instance to rank.
## Returns the row of rank.data minimising the score.
pred.cont <- function(rank.data, model, test.data) {
    attr.idx <- seq_along(test.data)
    # Per-ranking negative log likelihood; dnorm is vectorised over the
    # attribute axis, pairing test.data[i] with mean/sdev column i.
    neg.loglik <- vapply(seq_len(nrow(rank.data)), function(r) {
        dens <- dnorm(test.data,
            mean = model$cond$mean[r, attr.idx],
            sd = model$cond$sdev[r, attr.idx])
        sum(-log(dens), na.rm = TRUE)
    }, numeric(1))
    score <- -log(model$priors) + neg.loglik
    rank.data[which.min(score), ]
}
### Function to create NB model----
## Fit the naive-Bayes label-ranking model: class priors plus per-attribute
## Gaussian conditionals, weighted by the (rescaled) rank correlations.
##
## Args:
##   correlations: square similarity matrix between training rankings;
##     row/column r corresponds to training instance r.
##   data: attribute matrix/data frame, one row per training instance.
##   weights: per-instance time weights.
## Returns list(priors, cond) where cond holds matrices `mean` and `sdev`
## (rows = rankings, columns = attributes), as consumed by pred.cont().
lr.model <- function(correlations, data, weights) {
    # Prior of ranking r = weighted mean similarity of r to all instances.
    # NOTE(review): the loop runs over 1:ncol(correlations) but indexes
    # rows (correlations[r, ]); this is only equivalent because the
    # similarity matrix is square -- confirm that is always the case.
    priors <- sapply(1:ncol(correlations), function(r) {
        weighted.mean(correlations[r, ], weights, na.rm = T)
    })
    attributes <- 1:ncol(data)
    # Similarity-weighted attribute means per ranking.
    # NOTE(review): the leading `weights *` multiplies the finished matrix
    # element-wise, recycling `weights` down the columns -- verify that
    # this orientation matches the intended time weighting.
    mu <- weights * t(sapply(1:nrow(correlations), function(r) {
        sapply(attributes, function(x) {
            sum(data[, x] * correlations[r, ], na.rm = T)/sum(correlations[r,
            ], na.rm = T)
        })
    }))
    # Similarity-weighted attribute standard deviations per ranking
    # (same recycled `weights *` caveat as for mu above).
    sigma <- weights * t(sapply(1:nrow(correlations), function(r) {
        sapply(attributes, function(x) {
            sqrt(sum(correlations[r, ] * (data[, x] - mu[r, x])^2,
                na.rm = T)/sum(correlations[r, ], na.rm = T))
        })
    }))
    conditionals <- list(mean = mu, sdev = sigma)
    list(priors = priors, cond = conditionals)
}
## Score a predicted ranking against the true one with a rank correlation.
##
## Args:
##   tr, pr: true and predicted rankings (vectors or matrices).
##   method: correlation type forwarded to cor(); Spearman by default.
##   use: missing-value policy for cor() ("p" = pairwise complete).
## Returns the correlation coefficient(s) from stats::cor().
evaluation.simple <- function(tr, pr, method = "spearman", use = "p") {
    stats::cor(x = tr, y = pr, method = method, use = use)
}
### Function wrap model and predictions-----
nbr.generic <- function(rank.data, data, test.data, weights) {
correlations <- rescale(cor(t(rank.data), use = "p"),from=c(-1,1))
model <- lr.model(correlations, data, weights)
pred.cont(rank.data, model, as.numeric(test.data))
}
#### Discriminative Power functions: NB model only + discrim.
#### power-----
## Unweighted variant of the model fit, used for the discriminative-power
## analysis: returns the raw similarity matrix as priors plus the
## similarity-weighted Gaussian conditionals (no time weights applied).
##
## Args:
##   rank.data: training rankings, one per row.
##   data: attribute matrix, rows aligned with rank.data.
## Returns list(priors, mean, sdev).  NOTE(review): unlike lr.model(),
## `mean`/`sdev` sit at the top level here rather than under `cond`.
nbr.generic.model <- function(rank.data, data) {
    # Pairwise rank correlations mapped from [-1, 1] onto [0, 1].
    correlations <- (cor(t(rank.data), use = "pairwise.complete.obs") +
        1)/2
    attributes <- 1:ncol(data)
    # Similarity-weighted attribute means, one row per ranking.
    mu <- t(sapply(1:nrow(correlations), function(r) {
        sapply(attributes, function(x) {
            sum(data[, x] * correlations[r, ], na.rm = T)/sum(correlations[r,
                ], na.rm = T)
        })
    }))
    # Similarity-weighted attribute standard deviations.
    sigma <- t(sapply(1:nrow(correlations), function(r) {
        sapply(attributes, function(x) {
            sqrt(sum(correlations[r, ] * (data[, x] - mu[r, x])^2,
                na.rm = T)/sum(correlations[r, ], na.rm = T))
        })
    }))
    list(priors = correlations, mean = mu, sdev = sigma)
}
## For every growing-window position i, fit the weighted model on periods
## 2..(i-1) and compute the discriminative-power decomposition for period
## i-1.  NOTE(review): the default `data = data.sym.a` refers to a global
## object that is not defined in this file -- pass `data` explicitly.
##
## Args:
##   rank.data: rankings over time, one period per row.
##   data: attribute matrix aligned with rank.data.
##   n: base of the exponential time-weighting scheme.
## Returns a list (one element per window) of discr.model() results.
bayes.model <- function(rank.data, data = data.sym.a, n) {
    correlations <- (cor(t(rank.data), use = "pairwise.complete.obs") +
        1)/2
    lapply(4:nrow(rank.data), function(i) {
        ### Weights: these are not case when give more weight to the
        ### last period rank (0...1), but the case that last period
        ### rank does not change a lot, i.e. 1....0
        weights <- n^((1:(i - 2))/(i - 2) - 1)
        # Fit on the window's similarity sub-matrix and attribute rows.
        model <- lr.model(correlations[2:(i - 1), 2:(i - 1),
            drop = F], data[1:(i - 2), ], weights)
        discr.model(rank.data[2:(i - 1), ], model, data[i - 1,
            ])
    })
}
## Decompose the naive-Bayes score into nomogram terms: the baseline
## negative log priors plus, per candidate ranking, the negative log upper
## tail probabilities of each attribute.  Infinite or (rounded) zero tail
## probabilities are masked to NA before taking logs.
##
## Args:
##   sel.rank.data: candidate rankings (only its row count is used).
##   model: list with priors and cond$mean / cond$sdev matrices.
##   test.data: attribute vector of the test instance.
## Returns list(base, nom): base = -log(priors); nom has one column per
## ranking, one row per attribute.
discr.model <- function(sel.rank.data, model, test.data) {
    base.prob <- -log(model$priors)
    attr.idx <- seq_along(test.data)
    nomogr <- sapply(seq_len(nrow(sel.rank.data)), function(r) {
        # Upper-tail probability of each observed attribute under the
        # ranking's Gaussian conditionals (vectorised over attributes).
        tail.p <- pnorm(test.data,
            mean = model$cond$mean[r, attr.idx],
            sd = model$cond$sdev[r, attr.idx],
            lower.tail = FALSE)
        tail.p[is.infinite(tail.p)] <- NA
        tail.p <- round(tail.p, 5)
        tail.p[!is.na(tail.p) & tail.p == 0] <- NA
        -log(tail.p)
    })
    list(base = base.prob, nom = nomogr)
}
### Implementing generic funciton for growing-window-----
### Growing-window evaluation: at each period i, predict the ranking of
### period i-1 from the preceding window with exponential time weights.
### NOTE(review): this definition is shadowed by the re-definition of the
### same name that immediately follows (which starts the window one period
### earlier); as written, this version is dead code kept for reference.
rankings.time.corrected.gw.cont <- function(rank.data, data,
    n) {
    # correlations<-(
    # cor(t(rank.data),use='pairwise.complete.obs')+1)/2
    sapply(4:nrow(rank.data), function(i) {
        # Exponentially decaying weights over the i-2 training periods.
        weights <- n^((1:(i - 2))/(i - 2) - 1)
        predict.rank <- nbr.generic(rank.data[2:(i - 1), ], data[1:(i -
            2), ], as.numeric(data[i - 1, ]), weights)
        # Pad with NA when the model returns no prediction so that the
        # sapply result keeps a consistent shape.
        if (length(predict.rank) == 0) {
            rep(NA, length(rank.data[i - 1, ]))
        } else {
            predict.rank
        }
    })
    # na.m <- matrix(NA, ncol = 2, nrow = length(rank.data[1, ]))
    # cbind(na.m, pred.rank.m)
}
### Growing-window evaluation (active definition; supersedes the one
### above): at each period i >= 3, predict the ranking of period i from
### all earlier periods 1..(i-1) with exponential time weights.
rankings.time.corrected.gw.cont <- function(rank.data, data,n) {
    # correlations<-(
    # cor(t(rank.data),use='pairwise.complete.obs')+1)/2
    sapply(3:nrow(rank.data), function(i) {
        # Exponentially decaying weights over the i-1 training periods.
        weights <- n^((1:(i - 1))/(i - 1) - 1)
        predict.rank <- nbr.generic(rank.data[1:(i - 1), ], data[1:(i-1), ], as.numeric(data[i, ]), weights)
        # Pad with NA when the model returns no prediction so that the
        # sapply result keeps a consistent shape.
        if (length(predict.rank) == 0) {
            rep(NA, length(rank.data[i - 1, ]))
        } else {
            predict.rank
        }
    })
    # na.m <- matrix(NA, ncol = 2, nrow = length(rank.data[1, ]))
    # cbind(na.m, pred.rank.m)
}
#### n-fold cross validation----
## n-fold cross-validation of the naive-Bayes ranking predictor: shuffle
## the instances into `fold` folds and, in parallel, predict every held-out
## instance from the remaining folds with uniform weights.
##
## Args:
##   rank.data: rankings, one per instance (row).
##   data: attribute matrix aligned with rank.data.
##   fold: number of cross-validation folds.
##   my.seed: RNG seed so the fold assignment is reproducible.
## Returns a list (one element per fold) of prediction matrices whose row
## names are the held-out row indices.
rankings.n.fold.cv <- function(rank.data, data, fold, my.seed = 1) {
    # BUG FIX: the original line was `set.seed <- my.seed`, which merely
    # created a local variable named "set.seed" and never seeded the RNG,
    # so the fold split was not reproducible.  Call set.seed() instead.
    set.seed(my.seed)
    # Random permutation of a balanced fold labelling.
    folds <- rep(1:fold, length.out = nrow(rank.data))[order(runif(nrow(rank.data)))]
    mclapply(1:fold, function(i) {
        # Uniform weights: no time correction inside cross-validation.
        weights <- rep(1, nrow(rank.data[folds != i, ]))
        rows <- which(folds == i)
        predict.rank <- t(sapply(rows, function(f) {
            nbr.generic(rank.data[folds != i, ], data[folds !=
                i, ], as.numeric(data[f, ]), weights)
        }))
        rownames(predict.rank) <- rows
        predict.rank
    }, mc.cores = getOption("mc.cores", 2L))
}
# library(abind) cont.data <-
# read.csv('/Users/aiguzhinov/Documents/Dropbox/workspace/Naive.Bayes.separate.functions/cont.data.na.csv',
# header = T, sep = ',', row.names = 1) rank.data <-
# cont.data[, c(5:7)] data <- cont.data[, c(1:4)] #
# ###Prediciton exmaple---- i = 14 n = 100 weights <-
# n^((1:i)/i - 1)
# nbr.generic(rank.data=ranking[1:i,],data=features[1:i,],test.data=features[i+1,],weights=rep(1,i))
# rankings.n.fold.cv(ranking,features,10) example.gw <-
# rankings.time.corrected.gw.cont(ranking,features,5)
# example.array <-
# abind(true=ranking[4:14,],pred=t(example.gw),along=3)
# accuracy <-
# apply(example.array,1,function(s){apply(s,2,evaluation.simple,s[,1])})
# apply(accuracy,1,mean)
|
053ccf58357694dd0ca94d1959521e21e1fd6eda | a0e3039e0cb2ce3ccbdb1c8df6b3b5550556448d | /cachematrix.R | 448bb59535070aef09fdeaba7425284df8d11bb1 | [] | no_license | hanumansetty/ProgrammingAssignment2 | f1f5b4057cbccf3d77e83cee4251dcee5f38071d | aaa423debe89755441607a514bd147ee9d88ab26 | refs/heads/master | 2020-12-29T03:19:42.379192 | 2014-05-25T22:40:01 | 2014-05-25T22:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,266 | r | cachematrix.R | ## The following set of functions work for a square matrix
## The functions create and cache the inverse of a square input matrix
## When cache of input matrix exists it is returned from cache otherwise calculated
## This function creates a special "matrix" object that can cache
## its inverse
## Create the special "matrix" object: a list of closures over a shared
## environment holding the matrix `x` and a cached copy of its inverse.
## `set` replaces the matrix and invalidates the cache; `setinv`/`getinv`
## store and fetch the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached.inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached.inverse <<- NULL  # the old inverse no longer matches x
    },
    get = function() x,
    setinv = function(solve) cached.inverse <<- solve,
    getinv = function() cached.inverse
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix
## Compute (or fetch from cache) the inverse of the special "matrix"
## object produced by makeCacheMatrix.  On a cache hit the stored inverse
## is returned with a message; otherwise solve() runs once and the result
## is written back into the object.  `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  known <- x$getinv()
  if (!is.null(known)) {
    message("getting cached data")
    return(known)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  ## Return a matrix that is the inverse of the wrapped input matrix
  inverse
}
|
a19ed792f330d6c521a92072c1441d48a5fe7444 | 316d809769f33ad54983336ee713c2ebbae55d2c | /twitterRead.R | 1901b4018973e752b2819648732e392d8f7b10fa | [] | no_license | spjenk/RDataScience | 5b8c1c6077f18a8deb02265a72a68af99cb4bdc2 | 2259ebb7239b8a2e99d17587c0b70b4397f010ca | refs/heads/master | 2021-01-13T02:28:06.476606 | 2015-08-13T05:21:39 | 2015-08-13T05:21:39 | 40,639,561 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 416 | r | twitterRead.R | library(httr)
library(base64enc)
# OAuth 1.0a credentials -- intentionally blank; fill in before running.
apiKey <- ""
apiSecret <- ""
accessToken <- ""
accessTokenSecret <- ""
# Register the app and sign requests with the user access token (httr).
myapp <- oauth_app("SPJENKCourseraExample",key=apiKey,secret=apiSecret)
sig <- sign_oauth1.0(myapp,token=accessToken,token_secret = accessTokenSecret)
# Search the Twitter v1.1 API for tweets tagged #datascience.
homeTL <- GET("https://api.twitter.com/1.1/search/tweets.json?q=%23datascience", sig)
json1 <- content(homeTL)
# Round-trip through JSON text to obtain a flat data-frame representation.
# NOTE(review): toJSON() is called unqualified -- presumably
# jsonlite::toJSON; confirm jsonlite is attached (only httr/base64enc are
# loaded here).
json2 <- jsonlite::fromJSON(toJSON(json1))
json2
18c28d0fd76563abf237800f55aec858760e8a23 | 79aea49c2a42de69b275b1d9b10659c1ce4f017d | /man/clusterVis_region_ssom.Rd | afd0900100f38c50a1cb78161bcf6a53a123f638 | [] | no_license | iamciera/somtools | 95b0483d07aa856a4de7391362825007d171cc00 | dc7a53603c323c3a9cac7513cf57cc4cc8fbb5c2 | refs/heads/master | 2021-05-03T12:44:52.576703 | 2018-02-06T18:20:10 | 2018-02-06T18:20:10 | 120,500,613 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 321 | rd | clusterVis_region_ssom.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superSOM_vis.R
\name{clusterVis_region_ssom}
\alias{clusterVis_region_ssom}
\title{Cluster Visualization by region for superSOM}
\usage{
clusterVis_region_ssom(clustNum)
}
\description{
Separated by regions, tissue and colored by genotype.
}
|
b5d7877063842464eba347ac3a43a3483d345f44 | 097d80517e10b7389ad91de15c4dfe76bdf444e4 | /Qessay.R | 7544833eccf89a966fd111181fb10a78c7513e66 | [] | no_license | mandydog/myR | 50d9b7ae06a98338aed32c667e223853707fa498 | a4f7920c628c0724cce4eedbc484cc323833e0e1 | refs/heads/master | 2021-01-25T06:00:53.531273 | 2014-09-07T01:28:19 | 2014-09-07T01:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 813 | r | Qessay.R | dataframe <-read.csv("borisdata.csv")
############################
### Quetelet Indices ###
############################
########################################
# Quetelet index for cell (i, j) of a contingency table:
#   Q_ij = (n_ij / rowsum_i) / (colsum_j / grand total) - 1
# i.e. the relative deviation of each cell from independence.
# `dataframe` (read above from borisdata.csv) is assumed to be a 4x4 table.
# NOTE(review): colsums is built from the named columns Low..High; this
# only matches rowsums (columns 1:4) if the CSV columns appear in exactly
# that order -- confirm against the data file.
rowsums <- c(sum(dataframe[1, 1:4]), sum(dataframe[2, 1:4]),
             sum(dataframe[3, 1:4]), sum(dataframe[4, 1:4]))
colsums <- c( sum(dataframe$Low), sum(dataframe$Low.Medium),
              sum(dataframe$High.Medium), sum(dataframe$High) )
Grandtotal <- sum(dataframe)
# One vector of Quetelet indices per table row.
Row1Q <- (dataframe[1,] / rowsums[1]) / (colsums /Grandtotal) -1
Row2Q <- (dataframe[2,] / rowsums[2]) / (colsums /Grandtotal) -1
Row3Q <- (dataframe[3,] / rowsums[3]) / (colsums /Grandtotal) -1
Row4Q <- (dataframe[4,] / rowsums[4]) / (colsums /Grandtotal) -1
# Stack the rows and print the rounded index matrix.
Matrix <- rbind (Row1Q, Row2Q, Row3Q, Row4Q)
Qmatrix <- round(Matrix, digits=3)
print (Qmatrix)
|
86855b861d8d0583fe3b596c1fafef89ff215807 | 0c44a1dc61eaeb642b4a64715fa2440cde85816d | /man/le_filter.Rd | 398646f0714a9e62d8db55ac1c72019b373c18c0 | [
"MIT"
] | permissive | StratoDem/strato-query | 7caeb476d9e8b604b428357649d4e8d671921e62 | 392413cd821c05b8db0e385a7f5ad629b5b04759 | refs/heads/master | 2023-03-10T11:23:25.138573 | 2023-02-28T20:18:06 | 2023-02-28T20:18:06 | 163,192,887 | 1 | 1 | MIT | 2023-02-28T20:18:08 | 2018-12-26T15:22:05 | Python | UTF-8 | R | false | true | 532 | rd | le_filter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filters.R
\name{le_filter}
\alias{le_filter}
\title{Helper function to create a "less than or equal to" filter}
\usage{
le_filter(filter_variable, filter_value)
}
\arguments{
\item{filter_variable}{Variable filtered on}
\item{filter_value}{Value used for filter}
}
\value{
filter structure
}
\description{
Helper function to create a "less than or equal to" filter
}
\examples{
le_filter(filter_variable = 'year', filter_value = 2018)
}
\keyword{query}
|
421c1d3d1b683ddf79af021fe3b598e064108d9c | eb7cdcffad71acd0854a79ac8f414086b1d54204 | /practica 15 de marzo.R | da10b355d115f3496bae479bfd9ce9998dfa1e0b | [] | no_license | DiegoBarreda/Programaci-n_Actuarial_III | c8d18d46de2c192d0f5120eda4e89614d477b0cb | 57d8522071a963384b4e0f8d527e0412d3ee995c | refs/heads/master | 2021-01-21T04:35:20.154073 | 2016-06-27T03:48:32 | 2016-06-27T03:48:32 | 50,951,039 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 460 | r | practica 15 de marzo.R |
# Legacy globals kept for compatibility with any older call sites;
# caminata() below no longer reads or relies on them.
k <- 1
secuencia <- vector("numeric",0)
# Simulate and plot a simple symmetric random walk.
#
# Fix: the original implementation grew a vector one element per iteration
# (O(pasos^2)) and read the globals `k` and `secuencia`, so it only behaved
# correctly while those globals still held their freshly initialised
# values.  This version is self-contained and vectorised: all coin flips
# are drawn at once (the same rbinom stream as the original per-iteration
# draws) and the trajectory is built with cumsum().
#
# Args:
#   puntoInicial: starting position of the walk.
#   pasos: number of unit steps to simulate.
# Returns (invisibly) the vector of positions after each step -- the same
# values the original stored in `secuencia` -- and plots it as a line.
caminata <- function(puntoInicial = 100, pasos = 1000) {
  # One fair coin per step: 1 -> +1, 0 -> -1.
  pasos.unitarios <- ifelse(rbinom(pasos, 1, 0.5) == 1, 1, -1)
  trayectoria <- puntoInicial + cumsum(pasos.unitarios)
  plot(trayectoria, type = "l")
  invisible(trayectoria)
}
|
441c0b89e4ca5332bed99fc6d4297e43a3c2fca5 | c18b81fae25f537dec0ff3423ac93f4ba17d046b | /effects_of_fertilizer_rate/server.R | c0d40a36787c9969abf2c796dff2a2e921374cf9 | [
"MIT"
] | permissive | jarad/shiny-server | aaea2b4ea046bafa3389645ff6e7d50a1f155fe3 | 01364631df63b3086fa362d25b9e3d1698aadda6 | refs/heads/master | 2021-08-07T00:10:50.373318 | 2021-07-28T14:05:25 | 2021-07-28T14:05:25 | 242,822,356 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,697 | r | server.R | # Load packages
library(tidyverse)
library(gridExtra)
# theme_map(), emissions() and yield_proportion() come from these helper
# scripts in the app directory.
source("theme_map.R")
source("emissions.R")
source("yield.R")
# Field polygons with a wheat yield column.
# NOTE(review): wheatplot = 0.9 * yieldMgHaMean -- the meaning of the 0.9
# factor is not documented here; confirm with the data provider.
ymPlotDF <- read.csv(file.path("basswood_2020.csv")) %>%
  dplyr::mutate(wheatplot=yieldMgHaMean*0.9)%>%
  select(long, lat, group, wheatplot)%>%
  na.omit
# Static baseline map at the current-practice rate of 300 units N;
# built once at app start since it never changes.
g_yield<-ggplot(ymPlotDF) + # Omits 95 pixels without information
  geom_polygon(aes(
    x = long, # Longitudes in the horizontal axis
    y = lat, # Latitude in the vertical axis
    group = group, # More than one data frame row belong to the same poly
    fill = wheatplot*yield_proportion(300) # Fill the polygon with the yield mean
  )) +
  scale_fill_distiller( # Palette from https://colorbrewer2.org/#type=sequential&scheme=Greens&n=3
    palette = "Greens", # 'cause chlorophyll
    direction = 1, # Darker is higher
    limits = c(0, 15) # Set color bar minimum at zero, max TBD by ggplot
  ) +
  labs(
    title = "Current practice (300 Units of Nitrogen Fertilizer (kg ha^-1)",
    subtitle = "Wheat Yield",
    fill = expression("Yield in" ~ MgHa^-1 ~ "Darker is higher")
  ) +
  theme_map() +
  theme( # Play with background color to decide if gray helps with contrast
    panel.background = element_rect(fill = "gray80")
  )
# Define server function
# Shiny server: renders the side-by-side yield maps and the emissions text.
#
# BUG FIX: the original built `g_yield_modified` once at server start-up,
# reading `input$x` outside of any reactive context.  Shiny raises an
# error for reactive reads outside a reactive consumer, so the comparison
# plot could never be drawn, let alone react to the slider.  The plot is
# now constructed inside renderPlot(), where reading `input$x` is legal
# and any change to it triggers a re-render.
server <- function(input, output) {
  output$lineplot <- renderPlot({
    # Map of yields under the fertilizer rate chosen in the UI (input$x),
    # drawn on the same fill scale as the baseline so the maps compare.
    g_yield_modified <- ggplot(ymPlotDF) +
      geom_polygon(aes(
        x = long,
        y = lat,
        group = group,
        fill = wheatplot * yield_proportion(input$x)
      )) +
      scale_fill_distiller(
        palette = "Greens",
        direction = 1,
        limits = c(0, 15)
      ) +
      labs(
        title = "Farmer selected amount of N Fertilizer",
        subtitle = "Wheat Yield",
        fill = expression("Yield in" ~ MgHa^-1 ~ "Darker is higher")
      ) +
      theme_map() +
      theme(
        panel.background = element_rect(fill = "gray80")
      )
    # Baseline map (g_yield, built once at load time) next to the
    # reactive one.
    grid.arrange(g_yield, g_yield_modified, ncol = 2)
  })
  # Text summary of predicted emissions at the chosen fertilizer rate.
  output$desc <- renderText({
    e <- emissions(input$x)
    paste(round(e), "predicted emissions")
  })
}
|
f0eacaca726e3b05cad52b16a7078b72a20b6221 | 5a1dd4bdf66323ab628cb70ae7bf9782bed8f0bf | /test.R | 536f14acac247519bb481bdf239dae92af4c046b | [] | no_license | devcao/High-dimensional-inference | 1ad3644eb8a0b4c82f477bb37ed6d4e0cb11ae19 | dea9bcdf7e6fc82a3ccd1b0cbcb464d6a984bb94 | refs/heads/master | 2020-03-28T03:08:17.325617 | 2019-03-04T17:52:31 | 2019-03-04T17:52:31 | 147,622,754 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,357 | r | test.R | ##################################################################################
# Includes all Pathwise test statistics, including end point variant and variate norm
##################################################################################
#######
##### loading required packages, functions
#source("~/hello/hdi_path/pathwise_require.R")
#source("~/hello/hdi_path/pathwise_ts.R")
######
source("~/hdi_path/bin/pathwise_require.R")
source("~/hdi_path/bin/pathwise_ts.R")
## Bootstrap the null distribution of the path-based test statistic for
## each coefficient (or coefficient group) in which.covariate.
##
## Args:
##   X, Y: design matrix and response.
##   which.covariate: coefficients to test (list of index vectors when
##     multiTest = TRUE).
##   betaNull: null value(s) matching which.covariate.
##   multiTest: TRUE when each element of which.covariate is a group.
##   B: number of bootstrap replications.
##   parallel: forwarded to Path.Resample.Process (parallel bootstrap).
##   exact: use the exact statistic (vs. the approximation).
##   beta.init: initial estimator used to form residuals when p >= n:
##     "adaptive", "de-sparse", "MC+", "SCAD", or "Truth".
##   beta.true: true coefficient vector, used only when
##     beta.init == "Truth" (simulations).  NOTE(review): the default
##     `beta` refers to a global object; pass beta.true explicitly.
##
## Returns list(rej, pval, TS_null).  NOTE(review): the rej/pval updates
## are commented out below, so rej is returned as zeros, pval empty, and
## TS_null holds only the null draws of the *last* tested covariate --
## confirm whether that is still the intent.
Path.Resample = function(X, Y, which.covariate, betaNull, multiTest, B = 500, parallel = FALSE, exact = TRUE, beta.init = 'adaptive', beta.true = beta, ...){
  n = nrow(X)
  p = ncol(X)
  # len() comes from the sourced pathwise helpers (not base R).
  rej = matrix(0,len(which.covariate),4) # 1st : which cov, 2nd: na, 3rd: which alpha, 0.2,0.1,0.05,0.01
  pval = numeric()
  # Observed statistic(s) on the real data.
  TS = Path.TS(exact = exact, X = X, Y = Y, which.covariate = which.covariate, betaNull = betaNull, multiTest = multiTest,...)
  if(p >= n){ # high dimension: penalised initial estimate for residuals
    if(beta.init == "adaptive"){
      bhat = adalasso(X = X, y = Y, k = 10, use.Gram = FALSE,both = TRUE, intercept = FALSE)$coefficients.adalasso
    }else if (beta.init == "de-sparse"){
      bhat = as.vector(lasso.proj(X, Y, standardize = TRUE, parallel = TRUE, ncores = 40)$bhat)
    }else if (beta.init == "MC+"){
      bhat = coef(cv.ncvreg(X = X, y = Y, penalty = "MCP",family = "gaussian", nfold= 10))[-1]
    }else if (beta.init == "SCAD"){
      bhat = coef(cv.ncvreg(X = X, y = Y, penalty = "SCAD",family = "gaussian", nfold= 10))[-1]
    }else if (beta.init == "Truth"){
      # BUG FIX: this branch referenced the undefined name `beta_true`,
      # so beta.init = "Truth" always failed; use the beta.true argument.
      bhat = beta.true
    }
    residual = Y - X%*%bhat
  }else{ # low dimension: ordinary least-squares residuals
    bhat = ginv(t(X)%*%X)%*%t(X)%*%Y
    residual = Y - X%*%bhat
  }
  ################### bootstrap each tested covariate ###################
  count = 1
  for(wc_cov in which.covariate){
    # Impose the null value on the tested coefficient(s).
    b.Null = bhat
    if(multiTest) {
      to.which.covariate = list(wc_cov)
      to.betaNull = list(betaNull[[count]])
      b.Null[wc_cov] = betaNull[[count]]
    }else{
      to.which.covariate = wc_cov
      to.betaNull = betaNull[count]
      b.Null[wc_cov] = betaNull[count]
    }
    TS_null = Path.Resample.Process(X = X, Y = Y, multiTest = multiTest, residual = residual, b.Null = b.Null, betaNull = to.betaNull,
      beta.index = to.which.covariate, B = B, exact = exact, parallel = parallel, ...)
    #rej[count,1] = TS[count] > quantile(TS_null,0.8)
    #rej[count,2] = TS[count] > quantile(TS_null,0.9)
    #rej[count,3] = TS[count] > quantile(TS_null,0.95)
    #rej[count,4] = TS[count] > quantile(TS_null,0.99)
    #pval[count] = mean(TS_null > TS[count])
    count = count + 1
  }
  ##########################################################
  return(list(rej = rej, pval = pval, TS_null = TS_null))
}
#Path.TS.Para = function(mat, list){
# Calculate PATH statistic exactly, could run this in parallel
# n = nrow(mat)
# p = ncol(mat) - 1
# X = mat[,1:p]
# Y = mat[,p+1]
# return( do.call(ExactPath.TS, c(X = X,Y = Y, list)) )
#}
## Draw B bootstrap replicates of the test statistic under the null model
## Y* = X %*% b.Null + resampled residuals, either serially or on a FORK
## cluster spanning all detected cores.
##
## Args:
##   X, Y: design matrix and response (Y is overwritten locally by the
##     bootstrap response in each replication).
##   multiTest: forwarded to the statistic; TRUE for group tests.
##   residual: residuals from the initial fit, resampled with replacement.
##   b.Null: coefficient vector with the null imposed on tested entries.
##   beta.index: index (or list of indices) of the tested coefficient(s).
##   betaNull: null value(s), forwarded to the statistic.
##   B: number of bootstrap replications.
##   exact: exact vs. approximate statistic.
##   parallel: TRUE to bootstrap on a cluster (Unix FORK cluster).
## Returns a numeric vector of B null statistics.
Path.Resample.Process = function(X, Y, multiTest, residual, b.Null, beta.index, betaNull, B = 500, exact, parallel = FALSE, ...){
  n = nrow(X)
  p = ncol(X)
  TS_null = numeric()
  if(parallel){ # running in parallel
    # Pre-build all B bootstrap datasets as cbind(X, Y*) matrices so the
    # workers only need the data, not the RNG state.
    mat = list()
    for(bs in 1:B){
      ind = sample(1:n,replace = TRUE)
      boot_residual = residual[ind]
      #b_null = bhat
      #b_null[beta.index] = 0
      Y = X %*% b.Null + boot_residual
      mat[[bs]] = cbind(X,Y)
    }
    # NOTE(review): Args is assembled for the commented-out parLapply
    # variant below; the live call forwards arguments directly.
    args = list(...)
    Args = c(which.covariate = beta.index, betaNull = betaNull, exact = exact, multiTest = multiTest, args)
    # On a cluster, just use
    no_cores <- detectCores()
    cat("n_cores detected:", no_cores, "\n")
    # Initiate cluster
    #cl <- makeCluster(no_cores)
    cl <- makeCluster(no_cores, type = "FORK")
    # load special packages on every worker
    clusterEvalQ(cl, .libPaths("~/R"))
    clusterEvalQ(cl, library(glmnet))
    clusterEvalQ(cl, library(lars))
    clusterEvalQ(cl, library(MASS))
    clusterEvalQ(cl,library(pryr))
    clusterEvalQ(cl,library(plus))
    clusterEvalQ(cl,source("~/hdi_path/bin/pathwise_ts.R"))
    #clusterExport(cl, varlist = c("beta.index", "exact", "betaNull", "multiTest",...), envir = environment())
    #clusterExport(cl, varlist = 'Args', envir = environment())
    re_list = parLapply(cl, mat, Path.TS.Para, exact = exact, multiTest = multiTest, which.covariate = beta.index, betaNull = betaNull, ...)
    #re_list = parLapply(cl, mat, Path.TS.Para, list = Args)
    ######## in case run out of MEMORY (mem_used comes from pryr)
    print("Cluster MEM:")
    print(mem_used())
    ########
    stopCluster(cl) # END parallel bootstrap
    # Flatten the per-replication results into the numeric output vector.
    for(bss in 1:B){
      TS_null[bss] = re_list[[bss]]
    }
    return(TS_null)
  }else{ # not parallel, could be slow
    for(bs in 1:B){
      ind = sample(1:n,replace = TRUE)
      boot_residual = residual[ind]
      #b_null = bhat
      #b_null[beta.index] = 0
      Y = X %*% b.Null + boot_residual
      TS_null[bs] = Path.TS(exact = exact, X = X, Y = Y, multiTest = multiTest, which.covariate = beta.index, betaNull = betaNull,...)
    }
    return(TS_null)
  }
}
|
cbcfa6a9ebe680340b34f8de70f253c3bd9849e5 | 0662ba611c00aa49c3afca318a7f3ea16c83ba5d | /man/step_build_bookdown.Rd | 97a39bc4c3829ff2c20f4d3fadc5ad38093bad2b | [] | no_license | ropensci/tic | 38e06d7675e6820801edf74daa904d6ceea1a804 | 379cf98787e924a68e47792462fafca03f148d5f | refs/heads/main | 2023-09-01T19:28:37.862973 | 2023-05-22T14:07:38 | 2023-05-22T14:07:38 | 72,775,037 | 50 | 10 | null | 2023-01-18T18:03:44 | 2016-11-03T18:31:22 | R | UTF-8 | R | false | true | 507 | rd | step_build_bookdown.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/steps-bookdown.R
\name{step_build_bookdown}
\alias{step_build_bookdown}
\title{Step: Build a bookdown book}
\usage{
step_build_bookdown(...)
}
\arguments{
\item{...}{See \link[bookdown:render_book]{bookdown::render_book}.}
}
\description{
Build a bookdown book using \code{\link[bookdown:render_book]{bookdown::render_book()}}.
}
\examples{
dsl_init()
get_stage("script") \%>\%
add_step(step_build_bookdown("."))
dsl_get()
}
|
a63385c932e7c8003abb4d9f0bc6faf7d5981bd4 | d65b12e6c8e8b87a88b332f05715890dd5f786c9 | /R/uploadToOverleaf.R | ec29e8faece5aec89dca2d6ccb59e6be878c92eb | [] | no_license | Annliu9/singlecellworkflow | fcaf41dc1265081550c50022e1d09e04b9090499 | d3697b72fd6d37fbee567ff3250d75862e6d6199 | refs/heads/master | 2021-09-21T16:40:40.268809 | 2017-07-12T12:51:29 | 2017-07-12T12:51:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 152 | r | uploadToOverleaf.R | library(BiocWorkflowTools)
# Upload the local 'workflow' directory as a new Overleaf project and open
# it in the browser (uploadToOverleaf comes from BiocWorkflowTools,
# loaded above).
workflow_dir <- file.path(getwd(), 'workflow')
uploadToOverleaf(files = workflow_dir,
                 openInBrowser = TRUE)
|
4934deb1f6ac95c32647fd2094298e25aa951abc | 8f7d733baec4f5ab0f942bd482f0516ad8d47113 | /R/twitterConnect.R | 6341eb07291688a32a7a9b97feea4695908ce2cd | [
"MIT"
] | permissive | jeffchang5/MusicMap | 4ee910eacfc75747a65038e78f090ae10c82a5ea | 0cb3e036faf3cb9f673c9771d3f44f65b5fd2451 | refs/heads/master | 2020-02-26T14:40:47.959455 | 2016-06-13T20:54:26 | 2016-06-13T20:54:26 | 60,759,582 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 125 | r | twitterConnect.R | createTwitterStream <- function() {
  # Attach streamR for filterStream().  NOTE(review): require() only
  # returns FALSE when the package is missing, so the next call would fail
  # with a confusing error; consider requireNamespace() + an explicit stop.
  require('streamR')
  # Open a blocking Twitter stream and append matching tweets to
  # tweets.txt; both the file name and the track term are hard-coded.
  filterStream(file.name = "tweets.txt", track = "justinbieber")
}
|
f3a0ff3a6542dda9c1df70b8cab46fb0b0ec576f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/longCatEDA/examples/longCatEDA-package.Rd.R | c840eae03840df4d5e4a4b8f237fd0410bb98423 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 276 | r | longCatEDA-package.Rd.R | library(longCatEDA)
### Name: longCatEDA-package
### Title: Plot Categorical Longitudinal and Time-Series Data
### Aliases: longCatEDA-package longCatEDA
### Keywords: package
### ** Examples
# Draw the packaged example3 data as a longCat plot on a tinted
# background, then restore the default transparent background.
par(bg='cornsilk3')
longCatPlot( longCat( example3 ) )
par(bg='transparent')
|
25822f94b5f136de4005bcf2e56ac3bfdcedd34f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/EcoVirtual/examples/extGame.Rd.R | 9857a092575bb3dd77c05e9249598ccd72e9285c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 235 | r | extGame.Rd.R | library(EcoVirtual)
### Name: extGame
### Title: Zero-sum game
### Aliases: extGame
### Keywords: neutral simulation theory
### ** Examples
## Not run:
##D extGame(bet=1,total=20)
##D extGame(bet=1,total=100)
## End(Not run)
|
269d7d4d1080034987d6b2d32a0b739b4ae22da1 | 0c038b05bbba20bf013b0efb0f166dc4fb324a83 | /inst/tests/test.nearest.codes.som.R | c408e15b229c823bee030a86147b2b8f92d48d8d | [] | no_license | cran/BarcodingR | 710f9da30052a20ccaa5de56fe95148aceb21960 | b84d618fe82a2dea399f483cf2ac30a7e28f0ba2 | refs/heads/master | 2021-01-09T20:37:39.424992 | 2020-04-14T14:00:05 | 2020-04-14T14:00:05 | 63,674,379 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,212 | r | test.nearest.codes.som.R | #' seek the nearest codes/units from an som object
#'
#' @param out.som an object generated by som()
#' @return indices of pair nearest codes (a matrix of n*2 containing indices of pair nearest codes)
#' @keywords nearest.codes.som
#' @export
#' @import kohonen
#' @author Ai-bing ZHANG,PhD. CNU, Beijing, CHINA, contact at zhangab2008(at)mail.cnu.edu.cn
#' @references Zhang et al. 2015.Initinally created on 2014/8/13 15:59:35
#' @examples
#' require(kohonen)
#' data(wines)
#' set.seed(7)
#' training <- sample(nrow(wines), 120)
#' Xtraining <- scale(wines[training, ])
#' out.som <- som(Xtraining, grid = somgrid(5, 5, "hexagonal"))
#' nn.codes<-nearest.codes.som(out.som)
#' nn.codes
# Unit test for nearest.codes.som(): train a small SOM on the kohonen
# `wines` data and check that the returned pair-index object has the
# documented structure (length 2, per the function's examples).
library(testthat)
context("nearest.codes.som: seek the nearest codes/units from an som object")
test_that("nearest.codes.som: seek the nearest codes/units from an som object",{
  require(kohonen)
  data(wines)
  set.seed(7)  # fixed seed so the sampled training set is reproducible
  training <- sample(nrow(wines), 120)
  Xtraining <- scale(wines[training, ])
  out.som <- som(Xtraining, grid = somgrid(5, 5, "hexagonal"))
  nn.codes <- nearest.codes.som(out.som)
  # expect_length() replaces the deprecated
  # expect_that(length(.), equals(.)) idiom.
  expect_length(nn.codes, 2)
})
|
5ecfe1fb316928f083571655e9c4f5163dd5e8ef | b19ac6bc2ebfbde073263c783c8e917e2fd5d87f | /EDA.R | 8c82db78bb34fb7974cfddb58f6a98afbdc882b0 | [] | no_license | Gaboelc/predictive-analysis-of-customer-churn-in-banks | d7398b3a568e7391e7e9e5866a23261df423c67b | b5557d29357a9975acb249a3f2d3d09a81235305 | refs/heads/main | 2023-07-18T00:12:23.516016 | 2021-09-09T00:58:16 | 2021-09-09T00:58:16 | 398,403,764 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,671 | r | EDA.R | install.packages('prettydoc')
# Exploratory data analysis of the bank customer-churn dataset
# (Churn_Modelling.csv): distribution plots, dispersion statistics,
# skewness/kurtosis, light cleaning, and an automated dlookr report.
# NOTE(review): installing a package inside an analysis script re-runs the
# installation on every execution; consider doing it once interactively.
install.packages('psych')
library(readr)
library("dlookr")
library(dplyr)
library(psych)
#file.choose()
df <- read.csv("C:/Users/Gaboelc/Desktop/predictive-analysis-of-customer-churn-in-banks/data/Churn_Modelling.csv",header = TRUE) # load the dataset
# Bar plots of the categorical variables.
gender_dist <- table(df$Gender)
barplot(gender_dist)
exited_dist <- table(df$Exited)
exited_dist
barplot(exited_dist)
creditcard_dist <- table(df$HasCrCard)
barplot(creditcard_dist)
numofprod_dist <- table(df$NumOfProducts)
barplot(numofprod_dist)
geography_dist <- table(df$Geography)
barplot(geography_dist)
# Variance, standard deviation and coefficient of variation.
creditscore_var <- var(df$CreditScore)
creditscore_sd <- sd(df$CreditScore)
# BUG FIX: this line previously divided `creditcard_sd` -- a name that is
# never defined -- by the mean, so the script stopped here with an error;
# the credit-score SD is the intended numerator.
creditscore_coevar <- creditscore_sd/mean(df$CreditScore)*100
age_var <- var(df$Age)
age_sd <- sd(df$Age)
age_coevar <- age_sd/mean(df$Age)*100
# Skewness and kurtosis (psych package).
skew(df$CreditScore) # skewness coefficient
kurtosi(df$CreditScore)
skew(df$Age)
kurtosi(df$Age)
# Histograms.
hist(df$CreditScore)
hist(df$Age)
# Descriptive summaries.
summary(df$CreditScore)
summary(df$Age)
# Recode Gender as a 0/1 factor (Female = 0, Male = 1).
df$Gender <- factor(x = df$Gender, levels = c('Female', 'Male'),labels = c(0,1))
# Missing-value checks: per-cell flags, total count, and complete rows.
is.na(df)
sum(is.na(df))
sum(complete.cases(df)) # should equal the number of observations
# Keep only the modelling columns.
df <- subset(df, select=c("CreditScore", "Geography", "Gender", "Age", "Tenure", "Balance",
                         "NumOfProducts", "HasCrCard", "IsActiveMember", "EstimatedSalary",
                         "Exited"))
df
df_grouped <- group_by(df, Geography)
# Generate the dlookr EDA report as HTML.
eda_web_report(df,output_format = "html")
|
ea16ca8f22100129d4f5b19ba5d3fa5a3641dc01 | 67fc33c33c256a727d68c1266b35f3a14595aa6f | /man/hubnr_cols.Rd | 7f8f25d0b3fe370124537cc7248f4020d4c266ca | [] | no_license | maxhuebner/hubnR | c1b2aa53131d5f89bc055dfc649dffd374978f7b | afe8e4d25a1c9e58f3f2e865fa0b7b7e68a13c03 | refs/heads/master | 2021-12-31T16:29:41.166619 | 2021-12-28T19:38:35 | 2021-12-28T19:38:35 | 233,070,054 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 367 | rd | hubnr_cols.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color_palette.R
\name{hubnr_cols}
\alias{hubnr_cols}
\title{Function to extract hubnr colors as hex codes}
\usage{
hubnr_cols(...)
}
\arguments{
\item{...}{Character names of hubnr_colors}
}
\description{
Function to extract hubnr colors as hex codes
}
\author{
Simon Jackson - drsimonj
}
|
75784993901227e7ea2c4ad85dfda5b05ad75689 | a5ddb6a56c7db5d404f28441fcebd0fdf86e4690 | /run_analysis.R | db3638d686428781dedeadb547d8d78c3cf02da8 | [] | no_license | Kristijan-K/data_analysis | ad2d7749b46f375d8254ae791efbf5c8b789bf59 | 0e85e5204e316b05183cb54d46049e263799dfc2 | refs/heads/master | 2021-01-01T05:53:07.448593 | 2014-04-27T19:34:30 | 2014-04-27T19:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,714 | r | run_analysis.R | labels<-read.table("./UCI HAR Dataset/features.txt",header=FALSE)
# Build the tidy data set for the UCI HAR (smartphone activity) data.
# NOTE(review): `labels` is read from features.txt on the line above this
# section; it supplies the measurement column names used below.
dataset1<-read.table("./UCI HAR Dataset/train/X_train.txt",header=FALSE)
# Second column of the transposed labels table holds the feature names
x<-t(labels)
x<-x[2,]
names(dataset1)<-x
# Keep only mean() and std() measurements, excluding meanFreq() variants
columnfilter <- (grepl("mean()",names(dataset1)) | grepl("std()",names(dataset1))) & !grepl("meanFreq()",names(dataset1))
tidydataset1<-dataset1[,columnfilter]
# Attach subject and activity identifiers to the training data
subject1<-read.table("./UCI HAR Dataset/train/subject_train.txt",header=FALSE)
names(subject1)<-"subject"
activity1<-read.table("./UCI HAR Dataset/train/y_train.txt",header=FALSE)
names(activity1)<-"activity"
tidydataset1<-cbind(subject1,tidydataset1)
tidydataset1<-cbind(activity1,tidydataset1)
# Repeat the same steps for the test data
dataset2<-read.table("./UCI HAR Dataset/test/X_test.txt",header=FALSE)
names(dataset2)<-x
tidydataset2<-dataset2[,columnfilter]
subject2<-read.table("./UCI HAR Dataset/test/subject_test.txt",header=FALSE)
names(subject2)<-"subject"
activity2<-read.table("./UCI HAR Dataset/test/y_test.txt",header=FALSE)
names(activity2)<-"activity"
tidydataset2<-cbind(subject2,tidydataset2)
tidydataset2<-cbind(activity2,tidydataset2)
# Merge train and test, then replace activity codes with descriptive names
tidydataset<-merge(tidydataset1,tidydataset2,all=TRUE)
tidydataset <- transform(tidydataset,activity = ifelse(activity == 1,"WALKING",ifelse(activity==2,"WALKING_UPSTAIRS",ifelse(activity==3,"WALKING_DOWNSTAIRS",ifelse(activity==4,"SITTING",ifelse(activity==5,"STANDING","LAYING"))))))
library(reshape)
# Columns 3:68 are the measurement variables -- assumes exactly 66
# mean/std columns survived the filter; TODO confirm
y<-names(tidydataset)
y<-y[3:68]
# Melt to long form, then average each variable per subject/activity pair
dataMelt<-melt(tidydataset,id=c("subject","activity"),measure.vars=y)
finalDataset<-cast(dataMelt,subject+activity~variable,mean)
# Write the tidy summary, with and without column headers
write.table(finalDataset,file="finalDataset.txt",row.names=FALSE,sep=" ",quote=FALSE)
write.table(finalDataset,file="finalDatasetClean.txt",col.names=FALSE,row.names=FALSE,sep=" ",quote=FALSE)
568bd66d14c563310eb317aabf2e76591a7c89ac | 37d6346b56a80a1296ec9f7d1954318f05e10025 | /DAM_AT1_B_WIP.R | 085426af1a4a5dcb3a907c99ee77d90bf6cab26d | [] | no_license | CazMayhem/iLAB-1 | 389428756b24dcdb4352548b0e9fd40b92d12774 | 018ee858afd7555894fafbdad574588feab19411 | refs/heads/master | 2020-08-13T10:04:19.128759 | 2019-11-02T13:47:49 | 2019-11-02T13:47:49 | 214,951,551 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,459 | r | DAM_AT1_B_WIP.R | repRead %>% count(Target)
# WIP exploratory analysis of a car-repurchase data set.
# NOTE(review): `repRead` is not defined in this file -- presumably loaded
# upstream; confirm before running.
# Frequency tables for candidate predictor columns
repRead %>% count(age_band)
repRead %>% filter(Target == 1) %>% count(age_band)
repRead %>% count(age_of_vehicle_years)
repRead %>% count(annualised_mileage)
repRead %>% count(car_model)
repRead %>% count(total_services)
repRead %>% count(car_model)
repRead %>% count(num_dealers_visited)
repRead %>% count(num_serv_dealer_purchased)
# NOTE(review): par(mfrow = ...) only affects base graphics, not the
# ggplot2 charts below -- it has no effect here.
par(mfrow = c(2,2))
# Target distribution by age band
repRead %>% ggplot() +
  geom_bar(aes(x=age_band, fill=as.factor(Target)), position = "dodge") +
  labs(title="# Customer car purchases - age group", subtitle="Single=0, Multipe=1", fill='Target') +
  ylab("# customers")
# Target distribution by gender
repRead %>% ggplot() +
  geom_bar(aes(x=gender, fill=as.factor(Target)), position = "dodge") +
  labs(title="# Customer car purchases - gender", subtitle="Single=0, Multipe=1", fill='Target') +
  ylab("# customers")
# Target distribution by car model (x labels rotated for readability)
repRead %>% ggplot() +
  geom_bar(aes(x=car_model, fill=as.factor(Target)), position = "dodge") +
  labs(title="# Customer car purchases - model", subtitle="Single=0, Multipe=1", fill='Target') +
  ylab("# customers") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Target distribution by vehicle age (y axis capped at 1000)
repRead %>% ggplot() +
  geom_bar(aes(x=age_of_vehicle_years, fill=as.factor(Target)), position = "dodge") +
  labs(fill='Target') +
  ylim(NA,1000) +
  scale_x_continuous(breaks=pretty_breaks())
# Target distribution by annualised mileage (y axis capped at 1000)
repRead %>% ggplot() +
  geom_bar(aes(x=annualised_mileage, fill=as.factor(Target)), position = "dodge") +
  labs(fill='Target') +
  ylim(NA,1000) +
  scale_x_continuous(breaks=pretty_breaks())
# Scores vector
scores <- c(1, 4, 7, 10, 15, 21, 25, 27, 32, 35,
            49, 60, 75, 23, 45, 86, 26, 38, 34, 67)
# Create deciles based on the values of the vector
# NOTE(review): `decile()` is not a base-R function -- confirm which
# package provides it, otherwise these two lines will fail.
decileScores <- decile(vector = scores)
decileScoresDec <- decile(vector = scores, decreasing = TRUE)
# Alternative decile computations using base R and dplyr
ran <- rnorm(10)
deciles = quantile(ran, seq(0, 1, 0.1))
ran_dec = cut(ran, deciles, include.lowest = TRUE)
dplyr::ntile(rnorm(10), 10)
# Reusable ggplot theme for titles, legends and axes
mynamestheme <- theme(plot.title = element_text(family = "Helvetica", face = "bold", size = (15)),
                      legend.title = element_text(colour = "steelblue", face = "bold.italic", family = "Helvetica"),
                      legend.text = element_text(face = "italic", colour="steelblue4",family = "Helvetica"),
                      axis.title = element_text(family = "Helvetica", size = (10), colour = "steelblue4"),
                      axis.text = element_text(family = "Courier", colour = "cornflowerblue", size = (10)))
# NOTE(review): `IrisPlot` is never defined in this file -- this line
# errors as written (looks like leftover example code).
print(IrisPlot + mynamestheme + labs( title= "Petal and sepal \nlength of iris", y="Petal length (cm)", x = "Sepal length (cm)"))
# Age-band plot again, with the y axis capped at 5000
repRead %>% ggplot() +
  geom_bar(aes(x=age_band, fill=as.factor(Target)), position = "dodge") +
  labs(title="# Customer car purchases", subtitle="Single=0, Multipe=1", fill='Target') +
  ylab("# customers") +
  ylim(NA,5000)
#------------------------------------------------------------------------
# NOTE(review): `recast_data` is not defined in this section -- confirm
# it is created upstream before running this plot.
recast_data %>% ggplot() +
  geom_bar(aes(x=car_model, fill=as.factor(Target)), position = "dodge") +
  labs(title="# Customer car purchases - model", subtitle="Single=0, Multipe=1", fill='Target') +
  ylab("# customers") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Same model plot on the raw data for comparison
repRead %>% ggplot() +
  geom_bar(aes(x=car_model, fill=as.factor(Target)), position = "dodge") +
  labs(title="# Customer car purchases - model", subtitle="Single=0, Multipe=1", fill='Target') +
  ylab("# customers") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
|
200389ae84fb6468492a55f0441fe06e3797771d | 03d3da95850cee0911c578de2baad0ab26e9210c | /man/normalize-NanoStringGeoMxSet-method.Rd | 5c60fda92cd004f582998165e436356f695544ed | [
"MIT"
] | permissive | qiyubio/GeomxTools | 701977829c49199c7b2785ebcb958240cc52a740 | 97959d5f38eb07b3bab380e5b82048ae368d3f72 | refs/heads/master | 2023-08-28T11:17:12.848356 | 2021-10-21T18:38:20 | 2021-10-21T18:38:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,264 | rd | normalize-NanoStringGeoMxSet-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NanoStringGeoMxSet-normalize.R
\name{normalize,NanoStringGeoMxSet-method}
\alias{normalize,NanoStringGeoMxSet-method}
\title{normalize}
\usage{
\S4method{normalize}{NanoStringGeoMxSet}(
object,
norm_method = c("quant", "neg", "hk", "subtractBackground"),
data_type = c("RNA", "protein"),
fromElt = "exprs",
toElt = "exprs_norm",
housekeepers = HOUSEKEEPERS,
...
)
}
\arguments{
\item{object}{name of the object class to perform normalization on}
\item{norm_method}{the normalization method to be applied on the object}
\item{data_type}{the data type of the object. Values may be RNA or protein.}
\item{fromElt}{name of the assayDataElement to normalize}
\item{toElt}{name of the assayDataElement to store normalized values}
\item{housekeepers}{optional vector of housekeeper target names}
\item{...}{optional arguments}
}
\value{
a NanoStringGeoMxSet object with normalized counts and normalized factors
}
\description{
normalize GeoMxSet using different normalization methods
}
\examples{
datadir <- system.file("extdata", "DSP_NGS_Example_Data",
package = "GeomxTools"
)
demoData <- readRDS(file.path(datadir, "/demoData.rds"))
norm_object <- normalize(demoData)
}
|
4c2efca8ab1f742eaad8794064e3cd4ecd1d57e9 | 6fb35fd3f17a96a60a16860d1fcc5d58c4cdc2d3 | /R/drug_synonym_node_parser.R | 598138134ab6addd0be334574246da0a665dce41 | [] | no_license | Sparklingredstar/dbparser | 1b7f6717d03ba3e69a2584992ef34490ff19f457 | 6f758ec3ec4571c5ea11bfac45500ea1e4f51cb0 | refs/heads/master | 2020-06-24T22:12:47.281263 | 2019-07-21T13:12:34 | 2019-07-21T13:12:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 424 | r | drug_synonym_node_parser.R | get_synonym_rec <- function(rec, parent_key) {
return(
tibble(
parent_key = parent_key,
synonym = xmlValue(rec),
language = xmlGetAttr(rec, name = "language"),
coder = xmlGetAttr(rec,
name = "coder")
)
)
}
get_synonyms_df <- function(rec) {
  # Collect every <synonym> child of this drug record into one tibble.
  # The parent key (the drug's primary drugbank id) is resolved once and
  # reused for every row instead of being re-extracted per child node.
  parent_id <- xmlValue(rec["drugbank-id"][[1]])
  synonym_nodes <- xmlChildren(rec[["synonyms"]])
  map_df(synonym_nodes, ~ get_synonym_rec(.x, parent_id))
}
|
04b47a7b25819f7cf0229972a20baa97da828ab8 | 7cc7064da39f7f57867825c7db32687dff967473 | /R/utils.R | cb65ead437c4edc628e43bb3097c4d167936ba15 | [] | no_license | cran/xesreadR | 3219f526f3c583e12ea6dffd3acfcb129bb5c4c9 | abaff7349ece21c9569b3f7c2c2ac578fc88b8d4 | refs/heads/master | 2021-01-21T20:53:44.729867 | 2019-03-19T11:50:03 | 2019-03-19T11:50:03 | 94,758,517 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | utils.R |
stop_eventlog <- function(eventlog) {
  # Guard helper: abort unless `eventlog` carries the "eventlog" S3 class.
  #
  # @param eventlog Object to validate.
  # @return Invisibly NULL when the input is an eventlog; otherwise an error
  #   with the original message is raised.
  # inherits() is the idiomatic S3 class test (vs. `%in% class(x)`), and the
  # multi-line `if` body is now braced so the function cannot be broken by
  # inserting a statement later.
  if (!inherits(eventlog, "eventlog")) {
    stop("Function only applicable for class eventlog", call. = FALSE)
  }
  invisible(NULL)
}
|
c10cf53710622e3d2f0f542d7edc08a35cf1b634 | 6f4796e1757c4e4f7bccd7207d18c87e6f1e4e6b | /man/genotype.generate.Rd | b57c190a6f2bc5078ed8820aba856e03188447cc | [] | no_license | bjcochrane/TeachingPopGen | 12dd96517bb0b005615235c5b567490eb0a41adc | 033510f77f81fb1b2d668f34f682b857958c5cda | refs/heads/master | 2021-11-30T21:05:55.295372 | 2021-11-08T18:02:53 | 2021-11-08T18:02:53 | 20,026,473 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,788 | rd | genotype.generate.Rd | \name{genotype.generate}
\alias{genotype.generate}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Generate Array of Two Locus Genotypes
%% ~~function to do ... ~~
}
\description{A function that will, based on given gamete frequencies, linkage disequilibrium, and population size, return an array of genotypes for further analysis
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
genotype.generate(p, r, D, N = 1000)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{p}{\code{numerical}:
frequency of AB gamete
%% ~~Describe \code{p} here~~
}
\item{r}{ \code{numerical}:
frequency of ab gamete
%% ~~Describe \code{r} here~~
}
\item{D}{ \code{numerical}:
Value of linkage disequilibrium (-.25 < D < .25)
%% ~~Describe \code{D} here~~
}
\item{N}{ \code{integer}:
Population size (default=1000)
%% ~~Describe \code{N} here~~
}
}
\details{ Inputs are the coupling (parental) gamete frequencies and D (f(AB)-f(A)f(B))
%% ~~ If necessary, more details than the description above ~~
}
\value{\code{matrix}:
3 X 3 matrix of genotype numbers generated based on input values. Suitable for input into \code{\link{HillD}}
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{Bruce J. Cochrane \email{cochrabj@miamioh.edu}
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{HillD}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## Select parental gamete frequencies and D
gam1 <-.3
gam2 <-.4
D <-.1
## Generate 2000 genotypes
genos <-genotype.generate(gam1,gam2,D,N=2000)
genos
## The function is currently defined as
function (p, r, D, N = 1000)
{
q <- 1 - p
s <- 1 - r
gams <- rep(0, 4)
gams[1] <- p * r + D
gams[2] <- p * s - D
gams[3] <- q * r - D
gams[4] <- q * s + D
genos <- matrix(nrow = 3, ncol = 3)
rownames(genos) <- c("AA", "Aa", "aa")
colnames(genos) <- c("BB", "Bb", "bb")
genos[1, 1] <- gams[1]^2
genos[1, 2] <- 2 * gams[1] * gams[2]
genos[1, 3] <- gams[2]^2
genos[2, 1] <- 2 * gams[1] * gams[3]
genos[2, 2] <- 2 * gams[1] * gams[4] + 2 * gams[2] * gams[3]
genos[2, 3] <- 2 * gams[2] * gams[4]
genos[3, 1] <- gams[3]^2
genos[3, 2] <- 2 * gams[4] * gams[3]
genos[3, 3] <- gams[4]^2
genos <- round(N * genos)
return(genos)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
c345f506e7a07403bfbd3e983c3e72080a455101 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ibd/examples/Cmatrix.Rd.R | 5a8dbfb784750e83b8eb820e36266acdd036745d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 391 | r | Cmatrix.Rd.R | library(ibd)
### Name: Cmatrix
### Title: Information matrix from given treatment by block incidence
###   matrix of a block design
### Aliases: Cmatrix
### Keywords: incidence matrix incomplete block design information matrix
### ** Examples
# 7-row treatment-by-block incidence matrix: 49 values filled row by row
# (byrow = TRUE); each entry marks whether a treatment occurs in a block.
N=matrix(c(1,0,0,0,1,0,1,0,0,0,1,0,1,1,0,0,1,0,1,1,0,1,0,1,1,0,0,0,0,1,1,0,0,0,1,1,1,0,0,0,1,0,0
,1,0,1,1,0,0),nrow=7,byrow=TRUE)
# Information (C) matrix of the design defined by N
Cmatrix(N)
|
8c08fce592672e0f81915e05973bf8a0dd855a1e | 70306df1ac9538c5c7d6182859febdc0a21b2258 | /man/vfbr-package.Rd | ae8936bbaa7faddada01bdd3751fc354f28065c9 | [] | no_license | jefferis/vfbr | 7c898bd340f7e9700383a1f2d8f5a96605347a01 | 497c621bf666df8897c6a8c7dcd5b6aa31702ab4 | refs/heads/master | 2021-06-27T07:30:59.266990 | 2021-02-15T23:55:23 | 2021-02-15T23:55:23 | 34,357,737 | 2 | 1 | null | 2021-01-25T20:53:41 | 2015-04-21T23:33:47 | R | UTF-8 | R | false | true | 2,067 | rd | vfbr-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vfbr-package.R
\docType{package}
\name{vfbr-package}
\alias{vfbr-package}
\alias{vfbr}
\title{Programmatic Access to the virtualflybrain.org website}
\section{Queries}{
\itemize{
\item{\code{\link{vfb_solr_query}}} Allows you to query VFB's SOLR end
point. This gives you programmatic access to the great majority of searches
that you can perform interactively on the VFB website.
\item{\code{\link{vfb_neo4j_query}}} Allows you to query VFB's Neo4J end
point. This gives you programmatic access to many sophisticated searches
based on the VFB ontology, including searches that may not be easily
accessible via the website.
\item{\code{\link{vfb_synonym_query}}} A higher level function that allows
you to search for canonical terms with synonyms matching a given query.
}
}
\section{Package Options}{
The following options can be set to specify default
behaviour. They will be set to sensible defaults on package startup, unless
you have previously set a value (e.g. in your \code{\link[base]{Rprofile}}).
\itemize{
\item{\code{vfbr.server}}{ URL of main VFB server}
\item{\code{vfbr.server.neo4j}}{ URL for Neo4J graph database queries}
\item{\code{vfbr.server.solr}}{ URL for SOLR queries}
\item{\code{vfbr.server.owl}}{ URL for OWL ontology queries}
\item{\code{vfbr.server.gepetto}}{ URL of VFB's gepetto server for 3D
visualisation (see \code{\link{vfb_3dbrowser_url}}.)}
\item{\code{vfbr.server.r}}{ URL for opencpu/R queries - not for users at the moment}
\item{\code{vfbr.stack.gmr_url}}{URL containing listing of registered GMR
Gal4 confocal stacks. See \code{\link{gmr_stack_urls}}}
\item{\code{vfbr.stack.downloads}}{Location of downloaded stacks. See
\code{\link{download_gmr_stacks}}}
}
}
\examples{
# Show state of vfbr package options
options()[grep('^vfbr', names(options()))]
\dontrun{
example(vfb_solr_query)
}
}
\references{
See \url{http://virtualflybrain.org}
}
\seealso{
\code{\link{vfb_solr_query}}
}
\keyword{package}
|
10c3dba08c789e075c1fad06d7b5fe580b001407 | 3171a3608f0327668737686c87cca873b7467bb9 | /plot2.R | da9f80dd33fba51426fab046d58ce0c545bbd8ec | [] | no_license | vineetjain93/ExData_Plotting1 | beaecb4520d30ba03fa1d3d60ddd3a1d4b83c08f | 2c865851e726651456ce9f20b077d92416c93fa3 | refs/heads/master | 2021-06-17T11:51:09.082815 | 2017-01-06T00:57:42 | 2017-01-06T00:57:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,477 | r | plot2.R | plot2 <- function(){
listOfIndices1<- grep("1/2/2007",readLines("household_power_consumption.txt"))
listOfIndices2<- grep("3/2/2007",readLines("household_power_consumption.txt"))
startElement <- listOfIndices1[1]
endElement <- listOfIndices2[1]
numberOfRows <- endElement-startElement
columnNames <- read.table("household_power_consumption.txt",sep=";",nrows=1,stringsAsFactors = FALSE)
powerConsumptionDataRaw <- read.table(file = "household_power_consumption.txt",
stringsAsFactors = FALSE,skip = startElement-1,
nrows= numberOfRows,
sep=";",na.strings = "?")
names(powerConsumptionDataRaw) <- as.character(columnNames)
#### DATE TIME PARSING ######################################
dateRaw <- powerConsumptionDataRaw$Date
timeRaw <- powerConsumptionDataRaw$Time
dateTimeRaw <- paste(dateRaw,timeRaw,sep=" ")
dateTimeVals <- strptime(dateTimeRaw,format = "%d/%m/%Y %H:%M:%S")
dateTimeValsCT <- as.POSIXct(dateTimeVals)
#### OPENING THE PNG DEVILCE ################################
png(filename = "plot2.png",width = 480,height = 480)
#### MAKING THE PLOT ########################################
plot(dateTimeValsCT,powerConsumptionDataRaw$Global_active_power,type="l",xlab = "",ylab = "Global Active Power(kilowatts)")
#### CLOSING THE FILE DEVICE ################################
dev.off()
} |
1c562d099359cb849c695e8c28c3d9fbeebb3228 | 865dc98a3a72e500941287f8994d5e5de06fa435 | /plot1.R | 579c2ae54e5dd5e28fa9ae592f85acfe531e2440 | [] | no_license | ineshf/ExData_Plotting1 | 12f6fce2c869094fa4786e5ce9089de6c3c899d1 | 87861a4b72cbb3ab55755079eb7744e37c360ade | refs/heads/master | 2021-01-18T05:18:29.245221 | 2014-06-07T23:15:01 | 2014-06-07T23:15:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 531 | r | plot1.R | #Read csv data
# Read the power-consumption file; fields are ";"-separated.
# NOTE(review): no na.strings is given, so "?" markers keep the
# Global_active_power column non-numeric -- hence the double coercion
# before plotting below; confirm against the raw data.
data<-read.csv("household_power_consumption.txt", sep=";")
###########PLOT 1################
# Parse the Date column (day/month/year)
data$Date<-as.Date(data$Date, "%d/%m/%Y")
#Select date
fec2<-as.Date("02/02/2007","%d/%m/%Y")
fec1<-as.Date("01/02/2007","%d/%m/%Y")
vec<-c(fec1,fec2)
# Keep only the two target days
midata<-data[data$Date %in% vec,]
#First plot
png("plot1.png",width = 480, height = 480)
# as.character() then as.numeric() converts the factor column to numbers;
# "?" entries become NA (with a warning) and are dropped by hist().
hist(as.numeric(as.character(midata$Global_active_power)),col="red", main="Histogram Global activity power",xlab="Global active Power (Kilowatts)",breaks=15, right=FALSE)
dev.off()
|
78aedce01bdaef307bf30135fc8a98eb948899d3 | 939c7572529f8e2ec7df8b189c57a75736b74d1d | /tests/wilcoxon_twosample_errors.r | c84c88b000681cdede8269ad1cb64c1239512532 | [] | no_license | cran/matrixTests | 13af4039cfedb914cc266eb20cea228e0deef55a | b875053677d8578631afaa4175436a0642aa9652 | refs/heads/master | 2023-05-11T01:26:13.893032 | 2023-05-01T13:40:02 | 2023-05-01T13:40:02 | 123,973,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,654 | r | wilcoxon_twosample_errors.r | library(matrixTests)
source("utils/capture.r")

# Error-message tests for row_wilcoxon_twosample().
# Rewritten from ~50 copy-pasted capture/stopifnot pairs into data-driven
# loops: each section iterates over a list of bad inputs that must all
# produce the same error message.  `capture()` comes from utils/capture.r.

#--- x argument errors ---------------------------------------------------------

# cannot be missing
err <- 'argument "x" is missing, with no default'
res <- capture(row_wilcoxon_twosample())
stopifnot(all.equal(res$error, err))

# NULL, character, logical, complex, mixed data.frame, list, and
# list-wrapped inputs are all rejected with the same message
err <- '"x" must be a numeric matrix or vector'
for (bad in list(NULL, c("1", "2"), c(TRUE, FALSE), complex(c(1,2), c(3,4)),
                 iris, as.list(c(1:5)), list(1:5))) {
  res <- capture(row_wilcoxon_twosample(bad, 1:2))
  stopifnot(all.equal(res$error, err))
}

#--- y argument errors ---------------------------------------------------------

# cannot be missing
err <- 'argument "y" is missing, with no default'
res <- capture(row_wilcoxon_twosample(1))
stopifnot(all.equal(res$error, err))

err <- '"y" must be a numeric matrix or vector'
# cannot be NULL (original paired it with a scalar x)
res <- capture(row_wilcoxon_twosample(1, NULL))
stopifnot(all.equal(res$error, err))
# the remaining bad types mirror the x checks above
for (bad in list(c("1","2"), c(TRUE, FALSE), complex(c(1,2), c(3,4)),
                 iris, as.list(c(1:5)), list(1:5))) {
  res <- capture(row_wilcoxon_twosample(1:2, bad))
  stopifnot(all.equal(res$error, err))
}

#--- alternative argument errors -----------------------------------------------

err <- '"alternative" must be a character vector with length 1 or nrow(x)'
for (bad in list(NA, 1, complex(1), list("less"), data.frame("less"))) {
  res <- capture(row_wilcoxon_twosample(x=1:3, y=2:4, alternative=bad))
  stopifnot(all.equal(res$error, err))
}

err <- 'all "alternative" values must be in: two.sided, less, greater'
# must be in correct set
res <- capture(row_wilcoxon_twosample(x=1:3, y=2:4, alternative="ga"))
stopifnot(all.equal(res$error, err))
# error produced even when some are correct
res <- capture(row_wilcoxon_twosample(x=matrix(1:10, nrow=2), y=matrix(1:10, nrow=2), alternative=c("g","c")))
stopifnot(all.equal(res$error, err))

#--- null argument errors ------------------------------------------------------

err <- '"null" must be a numeric vector with length 1 or nrow(x)'
for (bad in list("1", complex(1), list(1), data.frame(1))) {
  res <- capture(row_wilcoxon_twosample(x=1:3, y=1:3, null=bad))
  stopifnot(all.equal(res$error, err))
}

err <- 'all "null" values must be greater than -Inf and lower than Inf'
# TODO: check if can be made to work with Inf
# NA, NaN and the infinities are all rejected
for (bad in list(NA_integer_, NaN, Inf, -Inf)) {
  res <- capture(row_wilcoxon_twosample(x=1:3, y=1:3, null=bad))
  stopifnot(all.equal(res$error, err))
}

#--- exact argument errors -----------------------------------------------------

err <- '"exact" must be a logical vector with length 1 or nrow(x)'
for (bad in list(NA_integer_, 1, "TRUE", complex(1), list(TRUE), data.frame(TRUE))) {
  res <- capture(row_wilcoxon_twosample(x=1:3, y=1:3, exact=bad))
  stopifnot(all.equal(res$error, err))
}

#--- correct argument errors ---------------------------------------------------

err <- 'all "correct" values must be in: TRUE, FALSE'
# cannot be NA
res <- capture(row_wilcoxon_twosample(x=1:3, y=1:3, correct=NA))
stopifnot(all.equal(res$error, err))

err <- '"correct" must be a logical vector with length 1 or nrow(x)'
for (bad in list(0, "FALSE", complex(1), list(FALSE), data.frame(FALSE))) {
  res <- capture(row_wilcoxon_twosample(x=1:3, y=1:3, correct=bad))
  stopifnot(all.equal(res$error, err))
}

#--- dimension mismatch errors -------------------------------------------------

# y number of rows must match x number of rows
err <- '"x" and "y" must have the same number of rows'
x <- matrix(1:10, nrow=2)
y <- matrix(1:10, nrow=5)
res <- capture(row_wilcoxon_twosample(x, y))
stopifnot(all.equal(res$error, err))

# per-row arguments must have length 1 or nrow(x)
x <- matrix(1:12, nrow=4)
y <- matrix(1:12, nrow=4)

err <- '"null" must be a numeric vector with length 1 or nrow(x)'
res <- capture(row_wilcoxon_twosample(x, y, null=c(1,2)))
stopifnot(all.equal(res$error, err))

err <- '"alternative" must be a character vector with length 1 or nrow(x)'
res <- capture(row_wilcoxon_twosample(x, y, alternative=c("g","l")))
stopifnot(all.equal(res$error, err))

err <- '"exact" must be a logical vector with length 1 or nrow(x)'
res <- capture(row_wilcoxon_twosample(x, y, exact=c(TRUE, FALSE)))
stopifnot(all.equal(res$error, err))

err <- '"correct" must be a logical vector with length 1 or nrow(x)'
res <- capture(row_wilcoxon_twosample(x, y, correct=c(TRUE, FALSE)))
stopifnot(all.equal(res$error, err))
|
9d2f55c766c9ec4e0702801d3a83f56293e0e90e | 12514216d4c61dfa44228b9e0e662845830f34b8 | /longshot/code/betfair/util.R | e4b4706db956891ed84702eaff384953a9a2a290 | [] | no_license | tima04/prediction_market | 1cbba735a3d7b89bff875825ea97bbd8d4f1909f | 51439f278331a8516a5c81ad29cbf705badebfa6 | refs/heads/master | 2021-01-01T18:20:05.705567 | 2017-08-01T16:41:13 | 2017-08-01T16:41:13 | 98,307,498 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 785 | r | util.R | library(data.table)
library(lubridate)
## isthere("ab", c("Aab", "daf")) == c(TRUE, FALSE)
isthere <- function(pat, xs) {
    # Vectorised, case-insensitive pattern test.
    #
    # @param pat Regular expression to look for.
    # @param xs  Character vector to search.
    # @return Unnamed logical vector, TRUE where `pat` matches.
    #
    # The original mapped grep() over each element and checked the result
    # length; grepl() performs the identical test in one vectorised call.
    matches <- grepl(pat, xs, ignore.case = TRUE)
    # grep() (the original implementation) never matches NA elements, while
    # grepl() propagates NA -- keep the original FALSE-for-NA behaviour.
    !is.na(matches) & matches
}
## lowercase("Ab_CdE") == "ab_cde"
lowercase <- function(chars) {
    # Map ASCII upper-case letters A-Z to lower case, leaving every other
    # character untouched.
    #
    # @param chars Character vector (the original handled a single string;
    #   chartr() additionally vectorises over the input, a backward-
    #   compatible generalisation).
    # @return `chars` with A-Z translated to a-z.
    #
    # The original rebuilt the string one character at a time with paste()
    # (quadratic in string length).  chartr() applies the identical A-Z ->
    # a-z translation in one call.  tolower() is deliberately NOT used: it
    # is locale dependent and would also lower-case non-ASCII letters,
    # which the original did not.
    chartr("ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz", chars)
}
# Convert "day-month-year hour:min:sec" timestamp strings to numeric
# seconds since the Unix epoch.  dmy_hms() (lubridate, loaded at the top
# of this file) parses to POSIXct; as.numeric() then yields the seconds.
to.seconds <- function(times) {
    as.numeric(dmy_hms(times))
}
|
8cbe34eb4f85c0167272f05944b886776fa32a38 | f8244e2f71fc98894344ca730a8d696ebdecbda3 | /man/scrape_all_pages_silently.Rd | 892a9e9893a91058b34923fb64b6b10a87a93fd3 | [] | no_license | AndersT123/boliga | df9a23894227c4e10842409793713078237b2488 | f97c74be3378b76e583a1b113b48e1aa8ac99422 | refs/heads/master | 2021-05-19T08:18:56.443996 | 2020-03-31T12:44:04 | 2020-03-31T12:44:04 | 251,601,764 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 428 | rd | scrape_all_pages_silently.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boliga.R
\name{scrape_all_pages_silently}
\alias{scrape_all_pages_silently}
\title{Scrape pages silently}
\usage{
scrape_all_pages_silently()
}
\value{
A list with four elements. The results slot contains the output from iterations without failure. Check the warnings slot for possible problems with scraped pages.
}
\description{
Scrape pages silently
}
|
a968ec65697ffe4cfa66ed2c3eed89af8f53b11e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/future/examples/multicore.Rd.R | 5f53d8d9c4e8332fb92a66c3fb31172156ad65e5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 569 | r | multicore.Rd.R | library(future)
### Name: multicore
### Title: Create a multicore future whose value will be resolved
###   asynchronously in a forked parallel process
### Aliases: multicore
### ** Examples
## Example extracted from the future package's Rd documentation.
## Use multicore futures
plan(multicore)
## A global variable
a <- 0
## Create multicore future (explicitly)
f <- future({
  b <- 3
  c <- 2
  a * b * c
})
## A multicore future is evaluated in a separate forked
## process. Changing the value of a global variable
## will not affect the result of the future.
a <- 7
print(a)
v <- value(f)
print(v)
## `a` was 0 in the forked process, so the product is 0 * 3 * 2 = 0.
stopifnot(v == 0)
|
3da5fdb8dba1a5367cd266b7d40acae5e0940bc1 | fac7401159e19eb23d6f89c787c0e0357782d008 | /pset6_1.R | e2eddebf915a4c599b356254f93f3036572a906a | [] | no_license | jpalbino/edxAnalyticsEdge-1 | b573fb3fe9b7832bfce9091f188747e77ef299c8 | 116de0df0098e2798d04b28dd4b2df4be561d593 | refs/heads/master | 2021-01-18T06:41:37.717775 | 2015-05-01T12:33:08 | 2015-05-01T12:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,979 | r | pset6_1.R | dailykos = read.csv("dailykos.csv")
# Hierarchical clustering of the Daily Kos articles ----
kosDist <- dist(dailykos, method = "euclidean")
kosHierClust <- hclust(kosDist, method = "ward.D")
plot(kosHierClust)
# Cut the dendrogram into 7 clusters
hierGroups <- cutree(kosHierClust, k = 7)
HierCluster1 <- subset(dailykos, hierGroups == 1)
HierCluster2 <- subset(dailykos, hierGroups == 2)
HierCluster3 <- subset(dailykos, hierGroups == 3)
HierCluster4 <- subset(dailykos, hierGroups == 4)
HierCluster5 <- subset(dailykos, hierGroups == 5)
HierCluster6 <- subset(dailykos, hierGroups == 6)
HierCluster7 <- subset(dailykos, hierGroups == 7)
# Cluster sizes
nrow(HierCluster1)
nrow(HierCluster2)
nrow(HierCluster3)
nrow(HierCluster4)
nrow(HierCluster5)
nrow(HierCluster6)
nrow(HierCluster7)
table(hierGroups)
# OR: split() builds all 7 subsets at once
HierCluster <- split(dailykos, hierGroups)
HierCluster[[1]]
# Most frequent words (highest mean frequency) in each hierarchical cluster
tail(sort(colMeans(HierCluster1)))
tail(sort(colMeans(HierCluster2)))
tail(sort(colMeans(HierCluster3)))
tail(sort(colMeans(HierCluster4)))
tail(sort(colMeans(HierCluster5)))
tail(sort(colMeans(HierCluster6)))
tail(sort(colMeans(HierCluster7)))
# K-means clustering ----
set.seed(1000)
KmeansCluster <- kmeans(dailykos, centers = 7)
KmeansCluster1 <- subset(dailykos, KmeansCluster$cluster == 1)
KmeansCluster2 <- subset(dailykos, KmeansCluster$cluster == 2)
KmeansCluster3 <- subset(dailykos, KmeansCluster$cluster == 3)
KmeansCluster4 <- subset(dailykos, KmeansCluster$cluster == 4)
KmeansCluster5 <- subset(dailykos, KmeansCluster$cluster == 5)
KmeansCluster6 <- subset(dailykos, KmeansCluster$cluster == 6)
KmeansCluster7 <- subset(dailykos, KmeansCluster$cluster == 7)
table(KmeansCluster$cluster)
# BUG FIX: the original overwrote `KmeansCluster` (the kmeans fit) with the
# split() list, which broke the later `KmeansCluster$cluster` reference.
# Keep the split list under its own name instead.
KmeansSplit <- split(dailykos, KmeansCluster$cluster)
# Most frequent words in each k-means cluster
tail(sort(colMeans(KmeansCluster1)))
tail(sort(colMeans(KmeansCluster2)))
tail(sort(colMeans(KmeansCluster3)))
tail(sort(colMeans(KmeansCluster4)))
tail(sort(colMeans(KmeansCluster5)))
tail(sort(colMeans(KmeansCluster6)))
tail(sort(colMeans(KmeansCluster7)))
# BUG FIX: `dailyClusters` was never defined; the hierarchical assignments
# live in `hierGroups`.  Cross-tabulate the two clusterings:
table(hierGroups, KmeansCluster$cluster)
123/(123+111+1+24+39+10)
|
0b70e886fb02379344fc9c1bfda667b61d608f71 | a84b97e3f97fe5a92663a424f40d2be1ad0809ad | /man/simulate_offspring.Rd | a19edb0385128ec2f1415a47b7f756773adc48a9 | [] | no_license | cran/APIS | f8f9a7ac041be93d407737e5a016cc88e1885ef3 | 4c4043396033f2354860973fedbbeae1f9302306 | refs/heads/master | 2023-07-10T15:50:06.826290 | 2023-06-30T14:40:02 | 2023-06-30T14:40:02 | 236,548,490 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,609 | rd | simulate_offspring.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_simulate_offspring.R
\name{simulate_offspring}
\alias{simulate_offspring}
\title{Simulate offspring}
\usage{
simulate_offspring(
sire_genotype,
dam_genotype,
number_offspring,
ploidy_level = 2,
sire_contribution = 1,
dam_contribution = 1,
recombination_rate = 0.5,
genotyping_error = 0.01
)
}
\arguments{
\item{sire_genotype}{sire genotype}
\item{dam_genotype}{dam genotype}
\item{number_offspring}{number of offspring to simulate}
\item{ploidy_level}{ploidy level of offspring}
\item{sire_contribution}{sire contribution}
\item{dam_contribution}{dam contribution}
\item{recombination_rate}{recombination rate (only relevant for triploid/tetraploid offspring)}
\item{genotyping_error}{genotyping error}
}
\value{
A list containing the matrix of simulated offspring and the corresponding pedigree
}
\description{
Simulate offspring
}
\examples{
data("APIS_sire")
data("APIS_dam")
# For diploid offspring
simulate_offspring(sire_genotype=APIS_sire, dam_genotype=APIS_dam,
number_offspring=10,
ploidy_level = 2,
sire_contribution = 1, dam_contribution = 1,
recombination_rate = 0.5,
genotyping_error = 0.01)
# For triploid offspring
simulate_offspring(sire_genotype=APIS_sire, dam_genotype=APIS_dam,
number_offspring=10,
ploidy_level = 3,
sire_contribution = 1, dam_contribution = 2,
recombination_rate = 0.5,
genotyping_error = 0.01)
}
|
939cf3a23a759442813fd26d17fce1b4c14e931e | 86151a6ecec532ac065621a1ffdfd827504176a3 | /R/dataset_check.R | 3f7557d47b5cef007c059c4e8bf2b3b8541d6e16 | [] | no_license | imarkonis/pRecipe | 3454f5ce32e6915a6caef1dbc041d12c411c9ae5 | 07c6b1da653221a0baeeb2aa81b8744393ff587e | refs/heads/master | 2022-11-02T20:27:40.979144 | 2022-10-28T10:52:04 | 2022-10-28T10:52:04 | 237,580,540 | 0 | 0 | null | 2020-02-01T07:44:23 | 2020-02-01T07:44:23 | null | UTF-8 | R | false | false | 1,197 | r | dataset_check.R | #' Data set name checker
#'
#' Function to check if the data set is available
#'
#' @importFrom methods is
#' @param data_name a character string.
#' @return No return value, called to download the data set.
#' @keywords internal
dataset_check <- function(data_name){
if (!Reduce("&", is.element(data_name, c("all", "20cr", "chirps", "cmap",
"cmorph", "cpc", "cru-ts",
"em-earth", "era20c", "era5",
"ghcn", "gldas-clsm", "gldas-noah",
"gldas-vic", "gpcc", "gpcp",
"gpm-imerg", "mswep", "ncep-doe",
"ncep-ncar", "persiann", "precl",
"terraclimate", "trmm-3b43",
"udel")))){
stop("Error: Data set not available.
Select from 20cr, chirps, cmap, cmorph, cpc, cru-ts, em-earth, era20c,
era5, ghcn, gldas-clsm, gldas-noah, gldas-vic, gpcc, gpcp, gpm-imerg,
mswep, ncep-doe, ncep-ncar, persiann, precl, terraclimate, trmm-3b43, udel")
}
} |
0e4c549f46f31d65c962470cc8c93f305163eca5 | 229bf9f5443566993bfd9ba16153c1ad0aada67f | /ele_ela_analise/processBook.R | a762e9b97cc1d348ce25602650dfba4e12c5b404 | [] | no_license | GiulSposito/R-x | fc096199ca2efb483d164ba42b92a7a77281f39f | 902aad081c5b7961983234f183ed1df4bf621e8b | refs/heads/master | 2021-06-27T05:41:46.789209 | 2019-05-21T17:42:01 | 2019-05-21T17:42:01 | 115,006,654 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 518 | r | processBook.R | # script to download a pdf and extract the text
library(tidyverse)
library(pdftools)
# Book link table prepared elsewhere; row 24 is the book inspected here.
books <- readRDS("./ele_ela_analise/data/book_links.rds")
abook <- books[24,]
# Download the selected book's PDF to a temporary file.
tfile <- tempfile()
download.file(abook$link, tfile)
# NOTE(review): the two pdf_text() calls below read two *local* PDFs (not the
# downloaded one) and the second overwrites the first -- this looks like
# exploratory scratch code; confirm which file is actually wanted.
txt <- pdf_text("./ele_ela_analise/data/bv000060.pdf")
txt <- pdf_text("./ele_ela_analise/data/me003427.pdf")
str(txt)
# Alternative extraction path via the tm package, applied to the downloaded file.
library(tm)
# Reader that keeps the page layout when converting PDF to text.
read <- readPDF(control = list(text = "-layout"))
document <- Corpus(URISource(tfile), readerControl = list(reader = read))
doc <- content(document[[1]])
head(doc)
|
def4f8a97e535c985aea70c928ff6d3c9242d70d | 2a753e9a48b58d84cd3fd8088521590541eb974e | /flightanalysis.R | 70168a4b47dc84d8fe3df6acbbc306da56cb2f1b | [] | no_license | Desmondonam/flightanalysis | e485003d3856ba2025aac1cfd6a78eecb69b977d | c7b4f7bf1c4d902d3e601615ce45742932757764 | refs/heads/main | 2023-04-01T00:19:20.071321 | 2021-04-06T12:57:04 | 2021-04-06T12:57:04 | 355,188,262 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,939 | r | flightanalysis.R | library(tidyverse)
library(lubridate)
library(nycflights13)
head(flights)
# Flag flights of six hours or more and browse them interactively.
flights %>%
mutate(long_flight = (air_time >= 6 * 60)) %>%
View()
# Count long versus short flights.
flights %>%
mutate(long_flight = (air_time >= 6 * 60)) %>%
count(long_flight)
# Daily flight counts and mean air time.
flights %>%
group_by(date = make_date(year, month, day)) %>%
summarise(flights_n = n(), air_time_mean = mean(air_time, na.rm = TRUE)) %>%
ungroup()
# Random samples: a fixed number of rows, then a fixed proportion.
flights %>%
slice_sample(n = 15)
flights %>%
slice_sample(prop = 0.15)
# Build a proper Date column from the year/month/day parts.
flights %>%
select(year, month, day) %>%
mutate(date = make_date(year, month, day))
# parse_number() strips non-numeric characters from messy strings.
numbers_1 <- tibble(number = c("#1", "Number8", "How are you 3"))
numbers_1 %>% mutate(number = parse_number(number))
# Tidy-select helpers: prefix, suffix and substring matching.
flights %>%
select(starts_with("dep_"))
flights %>%
select(ends_with("hour"))
flights %>%
select(contains("hour"))
# Relabel Newark departures by delay status.
# NOTE(review): case_when() has no TRUE ~ fallback here, so every non-EWR
# origin becomes NA -- confirm that is intended before relying on the counts.
flights %>%
mutate(origin = case_when(
(origin == "EWR") & dep_delay > 20 ~ "Newark International Airport - DELAYED",
(origin == "EWR") & dep_delay <= 20 ~ "Newark International Airport - ON TIME DEPARTURE",
)) %>%
count(origin)
# Replace airport codes with full names via anchored regexes.
flights %>%
mutate(origin = str_replace_all(origin, c(
"^EWR$" = "Newark International", "^JFK$" = "John F. Kennedy International"
))) %>%
count(origin)
# filtering groups: keep only carriers with at least 10000 flights
flights_top_carriers <- flights %>%
group_by(carrier) %>%
filter(n() >= 10000) %>%
ungroup()
# extract airlines whose name begins with "Am"
beginning_with_am<- airlines %>%
filter(name %>% str_detect("^Am"))
beginning_with_am
# extract rows which are not matched with the second table
flights %>%
anti_join(beginning_with_am, by = "carrier")
# Attach full airline names to every flight.
airline_names <- flights %>%
left_join(airlines, by = "carrier")
# Bar chart of flights per airline (unordered).
airline_names %>%
count(name) %>%
ggplot(aes(name, n)) +
geom_col()
# Same chart with bars ordered by count.
airline_names %>%
count(name) %>%
mutate(name = fct_reorder(name, n)) %>%
ggplot(aes(name, n)) +
geom_col()
# display counts more accurately: flip the axes so the labels are readable
airline_names %>%
count(name) %>%
mutate(name = fct_reorder(name, n)) %>%
ggplot(aes(name, n)) +
geom_col() +
coord_flip()
# all combinations of the three variables using crossing()
crossing(
customer_channel = c("Bus", "Car"),
customer_status = c("New", "Repeat"),
spend_range = c("$0-$10", "$10-$20", "$20-$50", "$50+"))
# Group-aware numeric summary helper: min/max/median/mean for each selected
# column, with tidy-eval column selection via {{ }}.
# BUG FIX: this was originally named `summary`, which masks base::summary for
# the rest of the session; renamed (all call sites are below, inside this file).
summary_stats <- function(data, col_names, na.rm = TRUE) {
  data %>%
    summarise(across({{ col_names }},
      list(
        min = min,
        max = max,
        median = median,
        mean = mean
      ),
      na.rm = na.rm,
      .names = "{col}_{fn}"
    ))
}
flights_with_airline_names <- airline_names
# Overall summary of air time and arrival delay.
flights_with_airline_names %>%
  summary_stats(c(air_time, arr_delay))
# The same summary, computed per carrier.
flights_with_airline_names %>%
  group_by(carrier) %>%
  summary_stats(c(air_time, arr_delay))
|
421c47f7767860ce2feafcadc4713a95a97daf99 | d8ef8d07e435905e3b4be3792951093e0976c181 | /containers/genie3/test_genie3_exp.R | 5713aa7cc8829a5fcc403a12db729c4a2686ff10 | [] | no_license | AluruLab/ardmore | 376c6d5841d9b3105ca006b474dbc22abea65dda | d5f14f666a149f349d11f6f0329ef1cfe87aec67 | refs/heads/master | 2023-05-28T01:58:20.613700 | 2021-06-17T19:45:08 | 2021-06-17T19:45:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 926 | r | test_genie3_exp.R | library(GENIE3)
library(doParallel)
library(doRNG)
library(reshape2)
# Run GENIE3 network inference on an expression table and write the ranked
# regulator->target link list to a file.
#
# exp.fname: expression file; the first 3 lines are skipped, column 1 holds
#            the row (gene) identifiers and columns 1-2 are excluded from the
#            expression values themselves.
# out.fname: tab-separated output file for the link list.
# nc:        number of cores handed to GENIE3 (default 1).
run.genie3 <- function(exp.fname, out.fname, nc=1){
  raw <- read.table(exp.fname, skip = 3, as.is = TRUE)
  rownames(raw) <- raw[, 1]
  expr <- raw[, 3:ncol(raw)]
  # GENIE3 wants a numeric matrix with gene ids as row names.
  expr_mat <- data.matrix(expr, rownames.force = TRUE)
  cat("Running genie3 on #cores : ", nc, "\n")
  weights <- GENIE3(expr_mat, nCores = nc)
  ranked_links <- getLinkList(weights)
  write.table(ranked_links, file = out.fname, sep = '\t', row.names = FALSE)
}
# Command-line entry point:
#   Rscript test_genie3_exp.R <expression-file> <output-file> [threads]
args <- commandArgs(trailingOnly=TRUE)
options(stringsAsFactors=FALSE)
print(length(args))
print(args)
# Optional third argument selects the core count (default 1).
threads <- if (length(args) >= 3) {
  as.numeric(args[3])
} else {
  1
}
# The two- and three-argument cases used to be two identical if blocks; they
# are merged here. Any other argument count remains a no-op, as before.
if (length(args) %in% c(2, 3)) {
  run.genie3(args[1], args[2], threads)
}
#
|
833f799dabfc37a8b0715ecd821fd4683c989602 | 859695d308824e3cfc429658c4631ccab2a7c61b | /Rscripts/Clustering.R | 2123406db6e5b8e3a25c626f2cf35895b77e1e47 | [] | no_license | natan-alper/Machine-learning-algorithms-in-R | 843ca72391365597288896b8b12ff68d88df3520 | 3d7e28a809114eb7a779179769b829f812368313 | refs/heads/main | 2023-05-26T19:19:13.381936 | 2021-06-16T02:43:13 | 2021-06-16T02:43:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,163 | r | Clustering.R | #Download and Import Churn dataset
## One of the most frequently used functions for performing clustering is the hclust() function located in the default "stats" package.
## hclust determines clusters based on "single-linkage", "complete-linkage", "average-linkage", etc. (NOT k-means or k-medoids)
## VERY IMPORTANT: The primary input into hclust() is the distance between the observations from each other!!!
## That is, we can't just put Churn into hclust().
## Thus we need to first determine the distance between each observation using the dist() function.
## Let's consider columns INCOME, OVERAGE, LEFTOVER, and HOUSE of Churn and only the first 10000 observations (to not stress the computer too much).
data=Churn[1:10000,c(2,3,4,5)] ##note that we disregard the LEAVE column!!! This is clustering!
#It is CRUCIAL to bring all variables to the same scale!
#(this standardization loop is equivalent to scale(data))
for(k in 1:ncol(data)){
data[,k]=(data[,k]-mean(data[,k]))/sd(data[,k])
}
#obtain the distances for all observations from each other:
Eucl.Distances= dist(data) ## we can choose different distances here like "euclidean" (the default) or "manhattan", etc.
Manh.Distances= dist(data,method="manhattan")
length(Eucl.Distances) ## Note and understand the size of this "vector".
(10000*10000-10000)/2
##Note that Eucl.Distances is a vector of distances from each observation to the others (one-way only)
#### excluding distances between the observation and itself.
#Once we have the distances we can proceed with hclust:
Clusters=hclust(Eucl.Distances,method="complete") ### default method is "complete"
Clusters$merge[1:10,] ## $merge describes which observations/clusters were merged together at which step.
##To draw a dendrogram we can call:
plot(as.dendrogram(Clusters))
##Note that this isn't very informative since we have so many observations.
##Alternatively we can
Split=cut(as.dendrogram(Clusters),h=1) ## Divides the tree by branches below height=1 ("lower") and branches above height=1 ("upper")
length(Split$lower)
plot(Split$lower[[1]]) ## We see which observations get grouped together first, left-to-right
plot(Split$lower[[2]])
plot(Split$lower[[3]])
#etc.
##We can also plot the tree above the height we specified (1), but it's less informative
plot(Split$upper[[1]])
plot(Split$upper[[2]])
##### FOR PREDICTION and to SEE THE GROUPINGS:
## We can obtain the group each observation belongs to using cutree(), where we specify the number of groups we want.
## Note that in this case we know that we want to decide between two groups ("LEAVE" and "STAY")
Groups=cutree(Clusters,k=2)
################ The misclassification rate can be computed with:
sum(Churn$LEAVE[1:10000][Groups==1]=="LEAVE") #3574
sum(Churn$LEAVE[1:10000][Groups==1]=="STAY") #3999
## Therefore Group 1 should be predicted as "STAY"
Pred=rep(NA,10000)
Pred[Groups==1]="STAY"
sum(Churn$LEAVE[1:10000][Groups==2]=="LEAVE") #1340
sum(Churn$LEAVE[1:10000][Groups==2]=="STAY") #1087
## Therefore Group 2 should be predicted as "LEAVE"
Pred[Groups==2]="LEAVE"
missclass=sum(Churn$LEAVE[1:10000]!=Pred)/10000
missclass
## Note that it was just a guess that Groups==2 is equivalent to "LEAVE".
## All we know based on hclust() is which observations belong to different groups (NOT THE LITERAL MEANING OF THE GROUPS)
## We note however that, if missclass turned out to be >50% we would know that we made a mistake in assigning Groups==2 to "LEAVE" instead of "STAY".
### We can see how the groups split up in two dimensions using:
plot(data[,c(1,2)],col=Groups)
plot(data[,c(1,3)],col=Groups)
plot(data[,c(1,4)],col=Groups)
plot(data[,c(2,3)],col=Groups)
plot(data[,c(2,4)],col=Groups)
plot(data[,c(3,4)],col=Groups)
## We note that the dominant separation occurs along LEFTOVER
## Unfortunately this is not so true for the separation between "LEAVE" and "STAY"
color=rep(1,10000)
color[Churn$LEAVE[1:10000]=="STAY"]=2
plot(data[,c(1,4)],col=color)
######################### Choosing a number of groups via average within-cluster distance:
#### Note that there is no "simple" way to compute the average intra-cluster distance for objects returned by hclust(),
### so we take a naive approach: for each cut of the tree, pool all pairwise
### distances between observations that share a cluster and average them.
## When using one cluster:
Gr=cutree(Clusters,k=1)
d=dist(data[Gr==1,]) ### Compute distances between objects in Group 1 (the only group here)
mean(d)
ONE.clustAve=mean(d)
## When using two clusters:
Gr=cutree(Clusters,k=2)
d1=dist(data[Gr==1,]) ### Compute distances between objects in Group 1
d2=dist(data[Gr==2,]) ### Compute distances between objects in Group 2
TWO.clustAve=mean(c(d1,d2)) # average of the distances within the clusters
TWO.clustAve
## When using three clusters:
Gr=cutree(Clusters,k=3)
d1=dist(data[Gr==1,])
d2=dist(data[Gr==2,])
d3=dist(data[Gr==3,])
THREE.clustAve=mean(c(d1,d2,d3)) # average of the distances within the clusters
THREE.clustAve
## When using four clusters:
Gr=cutree(Clusters,k=4)
d1=dist(data[Gr==1,])
d2=dist(data[Gr==2,])
d3=dist(data[Gr==3,])
d4=dist(data[Gr==4,])
FOUR.clustAve=mean(c(d1,d2,d3,d4)) # average of the distances within the clusters
FOUR.clustAve
## When using five clusters:
Gr=cutree(Clusters,k=5)
d1=dist(data[Gr==1,])
d2=dist(data[Gr==2,])
d3=dist(data[Gr==3,])
d4=dist(data[Gr==4,])
d5=dist(data[Gr==5,]) # BUG FIX: this previously recomputed Gr==4, so group 5 was never measured
FIVE.clustAve=mean(c(d1,d2,d3,d4,d5)) # average of the distances within the clusters
FIVE.clustAve
################### THE AVERAGES TOGETHER:
ONE.clustAve
TWO.clustAve
THREE.clustAve
FOUR.clustAve
FIVE.clustAve
## To get a sense for the best grouping we can plot them:
plot(c(1,2,3,4,5),c(ONE.clustAve,TWO.clustAve,THREE.clustAve,FOUR.clustAve,FIVE.clustAve))
##### Note that the dropoff seems to be small from 3 to 4, so perhaps 3 groups are the best!
#########################################################################
######### To do k-means clustering we use the kmeans() function.
## Note that the kmeans() function requires the original dataset as the input (not d)
## Note that the distance between cluster means is computed as EUCLIDEAN DISTANCE (Cannot be changed)
Clusters=kmeans(data, 2) ## data defined above.
## The number 2 in this case specifies the number of groups we want in the end,
## which means that 2 random "centers" are chosen to begin the algorithm--- in this case 2 random observations.
## We can alternatively pass a matrix of starting centers (see below).
## We can obtain predicted groups with:
Groups=Clusters$cluster
## BUG FIX: `Groups` has length 10000 while Churn$LEAVE may be longer (only the
## first 10000 rows were clustered), so indexing Churn$LEAVE with the short
## logical mask would recycle it; restrict to the first 10000 rows, exactly as
## in the hclust section above.
sum(Churn$LEAVE[1:10000][Groups==1]=="LEAVE") #3453
sum(Churn$LEAVE[1:10000][Groups==1]=="STAY") #4633
## Therefore Group 1 should be predicted as "STAY"
## (rep() already fills every position with "STAY", so no separate assignment
## for Group 1 is needed -- the old Pred[Groups==1]="STAY" line was redundant.)
Pred=rep("STAY",10000)
sum(Churn$LEAVE[1:10000][Groups==2]=="LEAVE") #6399
sum(Churn$LEAVE[1:10000][Groups==2]=="STAY") #5515
## Therefore Group 2 should be predicted as "LEAVE" (the old comment said Group 1)
Pred[Groups==2]="LEAVE"
missclass=1-(sum(Churn$LEAVE[1:10000]==Pred))/10000
missclass
#### We can initialize cluster centers with a matrix.
## For example if we want one of our cluster centers to be at INCOME=1, OVERAGE=-1, LEFTOVER=-1, HOUSE=1
## And if we want the other cluster center to be at INCOME=-.5, OVERAGE=.5, LEFTOVER=.5, HOUSE=-.5
## Then we can create the matrix:
InitCenters=matrix(c(1,-1,-1,1,-.5,.5,.5,-.5),nrow=2, byrow=TRUE)
InitCenters
## And use it as:
Clusters=kmeans(data, InitCenters) ## data defined above.
Groups=Clusters$cluster
sum(Churn$LEAVE[1:10000][Groups==1]=="LEAVE") #3453
sum(Churn$LEAVE[1:10000][Groups==1]=="STAY") #4633
## Therefore Group 1 should be predicted as "STAY"
Pred=rep("STAY",10000)
sum(Churn$LEAVE[1:10000][Groups==2]=="LEAVE") #6399
sum(Churn$LEAVE[1:10000][Groups==2]=="STAY") #5515
## Therefore Group 2 should be predicted as "LEAVE"
Pred[Groups==2]="LEAVE"
missclass=1-(sum(Churn$LEAVE[1:10000]==Pred))/10000
missclass
|
035195631873dce8cadd38487120187d9e9d6649 | 841262a212e9272d4270e9e9f6be664e87b009ba | /plot2.r | 4cc0a2df90ea8b525290e1e9d14c1bbda17f39f1 | [] | no_license | abhisheksharma2287/ExData_Plotting1 | a405f2af3ec2979faa405e8ed37960d864734075 | a8897c8bbc1822b7b897fe9378a530ee996384fc | refs/heads/master | 2020-06-12T20:45:49.542520 | 2019-07-04T16:11:43 | 2019-07-04T16:11:43 | 194,420,464 | 0 | 0 | null | 2019-06-29T15:28:46 | 2019-06-29T15:28:46 | null | UTF-8 | R | false | false | 603 | r | plot2.r | library(dplyr)
# Exploratory Data Analysis assignment, plot 2: Global Active Power over time.
# NOTE(review): setwd() with an absolute user path makes this script
# machine-specific; kept to preserve behaviour, but relative paths are safer.
setwd("c:/Users/Abhis/ExData_Plotting1")
# header = TRUE spelled out (T is a reassignable alias for TRUE).
hpc<-read.table(file = "household_power_consumption.txt",header=TRUE,sep = ";",na.strings = "?",dec = ".")
# Keep only the two days of interest (dates are d/m/yyyy strings).
data1 <- subset(hpc, Date %in%c("1/2/2007","2/2/2007"))
# Combine the Date and Time columns into a single POSIXlt timestamp.
time2<-strptime(paste(data1$Date, data1$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
data2<-cbind(data1,time2)
# Draw once on the screen device for inspection...
with(data2,plot(data2$time2,data2$Global_active_power,type="l", xlab="", ylab="Global Active Power (kilowatts)"))
# ...then again into the 480x480 PNG required by the assignment.
png("plot2.png",width = 480,height = 480)
with(data2,plot(data2$time2,data2$Global_active_power,type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
|
57b9af5867acb206386a99a463cd9dc2d814f63e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/agridat/examples/nebraska.farmincome.Rd.R | fe3d60131e097742b5859e5f2e3c3fbbae0d96fc | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,884 | r | nebraska.farmincome.Rd.R | library(agridat)
### Name: nebraska.farmincome
### Title: Nebraska farm income in 2007 by county
### Aliases: nebraska.farmincome
### Keywords: datasets
### ** Examples
data(nebraska.farmincome)
dat <- nebraska.farmincome
# Scalar condition: use short-circuit && (the original used elementwise &,
# which needlessly evaluated all three require() calls even after a failure).
if(require("maps") && require("mapproj") && require("latticeExtra")) {
# latticeExtra for mapplot
dat$stco <- paste0('nebraska,', dat$county)
# Scale to million dollars per county
dat <- transform(dat, crop=crop/1000, animal=animal/1000)
# Raw, county-wide incomes. Note the outlier Cuming county
redblue <- colorRampPalette(c("firebrick", "lightgray", "#375997"))
mapplot(stco ~ crop + animal, data = dat, colramp=redblue,
main="nebraska.farmincome",
xlab="Farm income from animals and crops (million $ per county)",
scales = list(draw = FALSE),
map = map('county', 'nebraska', plot = FALSE, fill = TRUE,
projection = "mercator") )
# Now scale to income/mile^2
dat <- within(dat, {
crop.rate <- crop/area
animal.rate <- animal/area
})
# And use manual breakpoints.
mapplot(stco ~ crop.rate + animal.rate, data = dat, colramp=redblue,
main="nebraska.farmincome: income per square mile (percentile breaks)",
xlab="Farm income (million $ / mi^2) from animals and crops",
scales = list(draw = FALSE),
map = map('county', 'nebraska', plot = FALSE, fill = TRUE,
projection = "mercator"),
# Percentile break points
# breaks=quantile(c(dat$crop.rate, dat$animal.rate),
# c(0,.1,.2,.4,.6,.8,.9,1), na.rm=TRUE)
# Fisher-Jenks breakpoints via classInt package
# breaks=classIntervals(na.omit(c(dat$crop.rate, dat$animal.rate)),
# n=7, style='fisher')$brks
breaks=c(0,.049, .108, .178, .230, .519, .958, 1.31))
}
|
d470772b04ae39927653410b5ada5a2e12a24cb7 | c118908b1c8bad0914e38e43f1148b58364accc2 | /man/plot.BchronDensityRunFast.Rd | 9dcfbb5f96e5201ca7a53d11669103e785a40242 | [] | no_license | andrewcparnell/Bchron | baf98d6642a328ba3c83e8fcf2e04b6c0af86974 | faa14f54444e7ec417e0e389596014a1c7645349 | refs/heads/master | 2023-06-27T02:01:46.417288 | 2023-06-08T11:17:34 | 2023-06-08T11:17:34 | 40,361,984 | 30 | 12 | null | 2022-04-05T20:46:28 | 2015-08-07T13:33:16 | R | UTF-8 | R | false | true | 983 | rd | plot.BchronDensityRunFast.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.BchronDensityRunFast.R
\name{plot.BchronDensityRunFast}
\alias{plot.BchronDensityRunFast}
\title{Plot run from \code{BchronDensityFast}}
\usage{
\method{plot}{BchronDensityRunFast}(x, plotDates = TRUE, plotSum = FALSE, dateTransparency = 0.4, ...)
}
\arguments{
\item{x}{Output from \code{\link{BchronDensityFast}}}
\item{plotDates}{Whether to include individual age pdfs (default TRUE)}
\item{plotSum}{Whether to include sum of age pdfs (default FALSE)}
\item{dateTransparency}{The transparency value for the dates (default 0.4)}
\item{...}{Other graphical parameters, see \code{\link{par}}}
}
\description{
Plots output from \code{\link{BchronDensityFast}}
}
\details{
Creates a basic plot of output for a run of \code{\link{BchronDensityFast}}
}
\seealso{
Examples in \code{\link{BchronDensityFast}}, and see \code{\link{BchronDensity}}, for a slower, more accurate version of this function
}
|
b9818d640e4f14b0120443ef91ef48ad5c955a7c | 79f9c6c04654cbb29c27266a3c413cad7b033aab | /02 Shiny/server.R | 56fd3b2eafa3ddd55c99fcdcc19a72bfedb611a5 | [] | no_license | CannataUTDV/s17dvfinalproject-dvproject-5-chi-rambo-xu-zhang | d200b31c610b343f24aaeea9ee41d9f3df10c802 | 681d2e32839621d221f82d119d33b59a3cbe3016 | refs/heads/master | 2021-01-20T02:28:24.008781 | 2017-05-03T18:13:44 | 2017-05-03T18:13:44 | 89,411,203 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,686 | r | server.R | # server.R
# Load packages at the top of the app. library() fails fast with an error when
# a package is missing; require() (used previously) only warns and returns
# FALSE, letting the app limp on until a later, harder-to-diagnose failure.
library(ggplot2)
library(dplyr)
library(shiny)
library(shinydashboard)
library(data.world)
library(readr)
library(DT)
library(leaflet)
library(plotly)
library(lubridate)
# TRUE -> pull the reference data below from data.world via SQL;
# FALSE -> fall back to the local csv copies in www/.
online0 = TRUE
# Build the LocationType choices for the select lists in the Boxplots ->
# Simple Boxplot tab and the Barcharts -> Barchart with Table Calculation tab.
# Column D holds the option values, column L the option labels (identical here).
if(online0) {
locations = query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="select distinct LocationType as D, LocationType as L
from MassShooting
order by 1"
)
# View(locations)
} else {
print("Getting Locations from csv")
file_path = "www/MassShooting.csv"
df <- readr::read_csv(file_path)
tdf1 = df %>% dplyr::distinct(LocationType) %>% arrange(LocationType) %>% dplyr::rename(D = LocationType)
tdf2 = df %>% dplyr::distinct(LocationType) %>% arrange(LocationType) %>% dplyr::rename(L = LocationType)
locations = bind_cols(tdf1, tdf2)
}
# BUG FIX: as.list(x, y) silently ignores its second argument, so the labels in
# locations$L were never attached to the list; setNames() builds the named
# value list that selectInput() expects (names = labels, values = entries).
location_list <- setNames(as.list(locations$D), locations$L)
location_list <- append(list("All" = "All"), location_list)
location_list5 <- location_list
# Queries for the Barcharts -> High Fatality Cases tab: find the years with
# more than 35 total fatalities, then attach case, state-abbreviation and
# city-coordinate information so the cases can be mapped.
if(online0) {
# Step 1: years whose summed fatalities exceed 35 (plus summed weapon counts).
highFatalities <- query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="
SELECT distinct Year, sum(Fatalities) as sumFatalities, sum(NumWeapons) as sumNumWeapons
FROM MassShooting
group by Year
having sum(Fatalities) > 35"
)
# View(highFatalities )
# Step 2: the individual cases that occurred in those years.
# NOTE(review): the IN-list hard-codes 12 placeholders but is fed
# highFatalities$Year, whose length depends on the data -- confirm it is
# always exactly 12, otherwise this query breaks silently when the data changes.
highFatalitiesCase <- query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="
SELECT distinct Year, `Case`, City, State
FROM MassShooting
where year(Year) in (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
order by Year",
queryParameters = highFatalities$Year
)
# View(highFatalitiesCase)
# Step 3: map full state names to their two-letter abbreviations.
# NOTE(review): 20 hard-coded placeholders -- same length assumption as above.
stateAbreviations <- query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="SELECT distinct State, Abbreviation
FROM StateAbbrev
where State in (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
order by State",
queryParameters = highFatalitiesCase$State
)
# View(stateAbreviations )
# Step 4: attach the abbreviation to each case row.
highFatalitiesCase2 <- left_join(highFatalitiesCase,
stateAbreviations, by="State")
# View(highFatalitiesCase2)
# Step 5: latitude/longitude per city (LatLong stores the abbreviation in its
# State column, hence the rename to Abbreviation for the later join).
longLat <- query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="SELECT distinct City, State as Abbreviation, Latitude, Longitude
FROM LatLong
where City in (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
order by City",
queryParameters = highFatalitiesCase$City
)
# View(longLat)
# Step 6: attach the coordinates to each case.
highFatalitiesCase2LongLat <-
inner_join(highFatalitiesCase2, longLat, by = c("City", "Abbreviation"))
# View(highFatalitiesCase2LongLat)
# Step 7: attach the per-year totals; `fatalities` feeds the tab's chart/map.
fatalities <-
inner_join(highFatalitiesCase2LongLat, highFatalities, by="Year")
# View(fatalities)
}
# Data for the Barcharts -> High Total Victims Cases tab.
if(online0) {
# Step 1: years whose summed fatalities exceed 35.
highFatalities <- query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="
SELECT distinct Year, sum(Fatalities) as sumFatalities
FROM MassShooting
group by Year
having sum(Fatalities) > 35"
)
# View(highFatalities)
# Step 2: total victims per case for those years.
# NOTE(review): the query hard-codes 11 placeholders, assuming
# highFatalities$Year always has exactly 11 values -- confirm against the data.
totalVictims <- query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="
select `Case`, sum(TotalVictims) as sumTotalVictims
FROM MassShooting
where year(Year) in (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
group by `Case`",
queryParameters = highFatalities$Year
)
# View(totalVictims)
} else {
print("Getting Total Victims from csv")
file_path = "www/MassShooting.csv"
df <- readr::read_csv(file_path)
# Step 1
# NOTE(review): this offline branch uses a >= 30 cutoff while the SQL branch
# uses > 35, so the two branches can return different year sets -- confirm intent.
highFatalities <- df %>% dplyr::group_by(Year) %>% dplyr::summarize(sumFatalities = sum(Fatalities)) %>% dplyr::filter(sumFatalities >= 30)
# View(highFatalities)
# Step 2
# BUG FIX: this filter referenced `highDiscounts`, a variable that does not
# exist anywhere in this app (a leftover from a different project); the year
# list computed in Step 1 lives in `highFatalities`.
totalVictims <- df %>% dplyr::filter(Year %in% highFatalities$Year) %>% dplyr::select(Case, City, State, Year, TotalVictims) %>% dplyr::group_by(Case, City, State, Year) %>% dplyr::summarise(sumTotalVictims = sum(TotalVictims))
# View(totalVictims)
}
############################### Start shinyServer Function ####################
shinyServer(function(input, output) {
# Data-source radio button ("SQL" vs csv; compared against "SQL" below) for the Box Plots tab.
online5 = reactive({input$rb5})
# Location multi-select for the Box Plots tab, built from the precomputed choice list.
output$BoxplotLocations <- renderUI({selectInput("selectedBoxplotLocations", "Choose Locations:", location_list5, multiple = TRUE, selected='All') })
# Data-source radio button for the Histogram tab.
online4 = reactive({input$rb4})
# Data-source radio button for the Scatter Plots tab.
online3 = reactive({input$rb3})
# Data-source radio button for the Crosstabs tab.
online1 = reactive({input$rb1})
# User-tunable thresholds for the fatal-rate KPI banding used by the crosstab query.
FatalRate_Low = reactive({input$KPI1})
FatalRate_Medium = reactive({input$KPI2})
# Data-source radio button for the Barcharts tab.
online2 = reactive({input$rb2})
# Location multi-select for the Barcharts tab.
output$locations2 <- renderUI({selectInput("selectedLocations", "Choose Locations:", location_list, multiple = TRUE, selected='All') })
# Begin Box Plot Tab ------------------------------------------------------------------
# Box-plot source data; recomputed only when the tab's query button is pressed.
dfbp1 <- eventReactive(input$click5, {
# NOTE(review): `==` against a multi-select input is elementwise; with several
# locations chosen this condition has length > 1 (warning on older R, an error
# on R >= 4.2). `"All" %in% input$selectedBoxplotLocations` would be safer.
if(input$selectedBoxplotLocations == 'All') location_list5 <- input$selectedBoxplotLocations
else location_list5 <- append(list("Skip" = "Skip"), input$selectedBoxplotLocations)
# NOTE(review): the SQL below expects 13 parameters (1 flag + 12 IN-list
# slots) but `location_list5` varies with the selection size -- confirm that
# data.world's query() tolerates the mismatch, otherwise other selection
# sizes will fail.
if(online5() == "SQL") {
print("Getting from data.world")
df <- query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="select Race, TotalVictims, LocationType, Year
from MassShooting
where (? = 'All' or LocationType in (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?))",
queryParameters = location_list5)
}
else {
# Offline fallback: read the local csv and filter the same columns in dplyr.
print("Getting from csv")
file_path = "www/MassShooting.csv"
df <- readr::read_csv(file_path)
df %>% dplyr::select(Race, TotalVictims, LocationType, Year) %>% dplyr::filter(LocationType %in% input$selectedBoxplotLocations | input$selectedBoxplotLocations == "All")
}
})
# Interactive table view of the box-plot source data.
output$boxplotData1 <- renderDataTable({
  DT::datatable(
    dfbp1(),
    rownames = FALSE,
    extensions = list(Responsive = TRUE, FixedHeader = TRUE)
  )
})
# Window the queried data to the victims range chosen on the slider.
dfbp2 <- eventReactive(c(input$click5, input$boxTotalVictimsRange1), {
  rng <- input$boxTotalVictimsRange1
  dfbp1() %>% dplyr::filter(TotalVictims >= rng[1], TotalVictims <= rng[2])
})
# Box plots of total victims by race, coloured by location type.
output$boxplotPlot1 <- renderPlotly({
  gg <- ggplot(dfbp2()) +
    geom_boxplot(aes(x = Race, y = TotalVictims, colour = LocationType)) +
    ylim(0, input$boxTotalVictimsRange1[2]) +
    theme(axis.text.x = element_text(angle = 90, size = 10, vjust = 0.5))
  ggplotly(gg)
})
# End Box Plot Tab ___________________________________________________________
# Begin Histgram Tab ------------------------------------------------------------------
# Histogram source data; recomputed when the Histogram tab's button is pressed.
dfh1 <- eventReactive(input$click4, {
  if(online4() == "SQL") {
    print("Getting from data.world")
    query(
      data.world(propsfile = "www/.data.world"),
      dataset="andyzhang/final-project", type="sql",
      query="select NumWeapons, MentalIllness, LegalWeapon
from MassShooting"
    )
  }
  else {
    print("Getting from csv")
    # BUG FIX: this branch read "www/SuperStoreOrders.csv", a file from a
    # different project; the app's local copy of the data is MassShooting.csv
    # (the file every other offline branch in this server uses).
    file_path = "www/MassShooting.csv"
    df <- readr::read_csv(file_path)
    # BUG FIX: the old filter kept only MentalIllness == 'true' &
    # LegalWeapon == 'true' rows, which emptied three of the four facets in
    # the histogram and disagreed with the SQL branch; return all rows.
    df %>% dplyr::select(NumWeapons, MentalIllness, LegalWeapon)
  }
})
# Interactive table view of the histogram source data.
output$histogramData1 <- renderDataTable({
  DT::datatable(
    dfh1(),
    rownames = FALSE,
    extensions = list(Responsive = TRUE, FixedHeader = TRUE)
  )
})
# Histogram of the number of weapons per case, faceted by the
# mental-illness (rows) and legal-weapon (columns) flags.
output$histogramPlot1 <- renderPlotly({
  gg <- ggplot(dfh1(), aes(x=NumWeapons)) +
    geom_histogram(binwidth = 0.5) +
    facet_grid(MentalIllness ~ LegalWeapon) +
    scale_x_continuous(breaks = seq(0,7,1)) +
    theme(axis.text.x=element_text(angle=0, size=10, vjust=0.5)) +
    xlab("Number of weapons carried") +
    ylab("Count of cases") +
    labs(title = "The count of number of weapons carried by shooters by cases",
         subtitle = "The vertical variable is whether the shooters had prior sign of mental illness while the horizontal variable is whether the shooters are leagally carrying the weapons. These two variables together plot 4 graphs.")
  ggplotly(gg)
})
# End Histogram Tab ___________________________________________________________
# Begin Scatter Plots Tab ------------------------------------------------------------------
# Scatter-plot source data; recomputed when the Scatter tab's button is pressed.
dfsc1 <- eventReactive(input$click3, {
  if (online3() == "SQL") {
    print("Getting from data.world")
    query(
      data.world(propsfile = "www/.data.world"),
      dataset="andyzhang/final-project", type="sql",
      query="select Fatalities, NumWeapons, State, LocationType
from MassShooting"
    )
  } else {
    # Offline fallback: read the local csv and keep the same four columns.
    print("Getting from csv")
    readr::read_csv("www/MassShooting.csv") %>%
      dplyr::select(Fatalities, NumWeapons, State, LocationType)
  }
})
output$scatterData1 <- renderDataTable({DT::datatable(dfsc1(), rownames = FALSE,
extensions = list(Responsive = TRUE,
FixedHeader = TRUE)
)
})
output$scatterPlot1 <- renderPlotly({p <- ggplot(dfsc1()) +
theme(axis.text.x=element_text(angle=90, size=16, vjust=0.5)) +
theme(axis.text.y=element_text(size=16, hjust=0.5)) +
geom_point(aes(x=Fatalities, y=NumWeapons, colour=LocationType), size=2)
ggplotly(p)
})
# End Scatter Plots Tab ___________________________________________________________
# Begin Crosstab Tab ------------------------------------------------------------------
  # Crosstab tab data source: aggregates fatalities / total victims per
  # (LocationType, State) and bins the fatality rate into a 3-level KPI using
  # the user-tunable thresholds FatalRate_Low() and FatalRate_Medium().
  dfct1 <- eventReactive(input$click1, {
    if(online1() == "SQL") {
      print("Getting from data.world")
      query(
        data.world(propsfile = "www/.data.world"),
        dataset="andyzhang/final-project", type="sql",
        query="select LocationType, State,
        sum(Fatalities) as sumFatalities,
        sum(TotalVictims) as sumTotalVictims,
        sum(Fatalities) / sum(TotalVictims) as FatalRate,
        case
        when sum(Fatalities) / sum(TotalVictims) < ? then '03 Low Fatal Rate'
        when sum(Fatalities) / sum(TotalVictims) < ? then '02 Medium Fatal Rate'
        else '01 High Fatal Rate'
        end AS KPI
        from MassShooting
        where LocationType in ('Workplace', 'Military', 'Religious', 'School')
        group by LocationType, State
        order by LocationType, State",
        queryParameters = list(FatalRate_Low(), FatalRate_Medium())
      )
    }
    else {
      print("Getting from csv")
      file_path = "www/MassShooting.csv"
      df <- readr::read_csv(file_path)
      # NOTE(review): the SQL branch bins with strict `<` while this branch uses
      # `<=`, so rates exactly equal to a threshold land in different KPI bins
      # depending on the data source -- confirm which boundary is intended.
      df %>%
        dplyr::filter(LocationType %in% c('Workplace', 'Military', 'Religious', 'School')) %>%
        dplyr::group_by(LocationType, State) %>%
        dplyr::summarize(sumFatalities = sum(Fatalities), sumTotalVictims = sum(TotalVictims),
                         FatalRate = sum(Fatalities) / sum(TotalVictims),
                         KPI = if_else(FatalRate <= FatalRate_Low(), '03 Low Fatal Rate',
                                       if_else(FatalRate <= FatalRate_Medium(), '02 Medium Fatal Rate', '01 High Fatal Rate')))
    }
  })
  # Raw aggregated table for the crosstab.
  output$data1 <- renderDataTable({DT::datatable(dfct1(), rownames = FALSE,
                extensions = list(Responsive = TRUE, FixedHeader = TRUE)
  )
  })
  # Heat-map style crosstab: victim counts as text over KPI-colored tiles.
  output$plot1 <- renderPlot({ggplot(dfct1()) +
      theme(axis.text.x=element_text(angle=90, size=16, vjust=0.5)) +
      theme(axis.text.y=element_text(size=16, hjust=0.5)) +
      geom_text(aes(x=LocationType, y=State, label=sumTotalVictims), size=6) +
      geom_tile(aes(x=LocationType, y=State, fill=KPI), alpha=0.50)
  })
# End Crosstab Tab ___________________________________________________________
# Begin Barchart Tab ------------------------------------------------------------------
dfbc1 <- eventReactive(input$click2, {
if(input$selectedLocations == 'All') region_list <- input$selectedLocations
else location_list <- append(list("Skip" = "Skip"), input$selectedLocations)
if(online2() == "SQL") {
print("Getting from data.world")
tdf = query(
data.world(propsfile = "www/.data.world"),
dataset="andyzhang/final-project", type="sql",
query="select Race, LocationType, sum(TotalVictims) sumTotalVictims
from MassShooting
where ? = 'All' or LocationType in (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
group by Race, LocationType",
queryParameters = location_list
)
# View(tdf)
}
else {
print("Getting from csv")
file_path = "www/MassShooting.csv"
df <- readr::read_csv(file_path)
tdf = df %>% dplyr::filter(Location %in% input$selectedLocations | input$selectedLocations == "All") %>%
dplyr::group_by(Race, LocationType) %>%
dplyr::summarize(sumTotalVictims = sum(TotalVictims))
}
# The following two lines mimic what can be done with Analytic SQL. Analytic SQL does not currently work in data.world.
tdf2 = tdf %>% group_by(Race) %>% summarize(windowAvgTotalVictims = mean(sumTotalVictims))
# View(tdf2)
dplyr::inner_join(tdf, tdf2, by = "Race")
})
  # Joined (Race, LocationType) victim totals with per-Race averages.
  output$barchartData1 <- renderDataTable({DT::datatable(dfbc1(),
                rownames = FALSE,
                extensions = list(Responsive = TRUE, FixedHeader = TRUE) )
  })
  # NOTE(review): `fatalities` and `totalVictims` are not defined in this
  # section -- presumably reactives/data frames built elsewhere in the server;
  # confirm they exist before these outputs render.
  output$barchartData2 <- renderDataTable({DT::datatable(fatalities,
                rownames = FALSE,
                extensions = list(Responsive = TRUE, FixedHeader = TRUE) )
  })
  output$barchartData3 <- renderDataTable({DT::datatable(totalVictims,
                rownames = FALSE,
                extensions = list(Responsive = TRUE, FixedHeader = TRUE) )
  })
  # Horizontal bars of victims per location type, one facet per Race, with the
  # per-Race average drawn as a red reference line.  The blue labels show each
  # bar's difference from that average (hjust = -4 just offsets them rightward).
  output$barchartPlot1 <- renderPlot({ggplot(dfbc1(), aes(x=LocationType, y=sumTotalVictims)) +
      scale_y_continuous(labels = scales::comma) + # no scientific notation
      theme(axis.text.x=element_text(angle=0, size=12, vjust=0.5)) +
      theme(axis.text.y=element_text(size=12, hjust=0.5)) +
      geom_bar(stat = "identity") +
      facet_wrap(~Race, ncol=1) +
      coord_flip() +
      geom_text(mapping=aes(x=LocationType, y=sumTotalVictims, label=round(sumTotalVictims)),colour="black", hjust=-.5) +
      geom_text(mapping=aes(x=LocationType, y=sumTotalVictims, label=round(sumTotalVictims - windowAvgTotalVictims)),colour="blue", hjust=-4) +
      # Add reference line with a label.
      geom_hline(aes(yintercept = round(windowAvgTotalVictims)), color="red") +
      geom_text(aes( -1, windowAvgTotalVictims, label = windowAvgTotalVictims, vjust = -.5, hjust = -.25), color="red")
  })
  # Leaflet map of the `fatalities` table, centered on the continental US, with
  # a popup per case summarizing fatalities and weapon counts.
  output$barchartMap1 <- renderLeaflet({leaflet(data = fatalities) %>%
      setView(lng = -98.35, lat = 39.5, zoom = 4) %>%
      addTiles() %>%
      addMarkers(lng = ~Longitude, lat = ~Latitude,
                 options = markerOptions(draggable = TRUE, riseOnHover = TRUE),
                 popup = ~as.character(paste(Case, ", ",
                                             City, ", ",
                                             State, ", ",
                                             " Fatalities: ", sumFatalities, ", ",
                                             " Number of Weapons: ", sumNumWeapons))
      )
  })
  # Per-case victim totals as a plotly bar chart.
  output$barchartPlot2 <- renderPlotly({
    # The following ggplotly code doesn't work when sumProfit is negative.
    p <- ggplot(totalVictims, aes(x=Case, y=sumTotalVictims)) +
      theme(axis.text.x=element_text(angle=90, size=8, vjust=0.5)) +
      theme(axis.text.y=element_text(size=8, hjust=0.5)) +
      geom_bar(stat = "identity")
    ggplotly(p)
    # End Barchart Tab ___________________________________________________________
  })
})
|
96eb3ba5a5b5314bdd6c46c3153f50045c3b69a0 | 3e74b2d423d7b4d472ffce4ead1605621fb2d401 | /external/external_OLD/R_RJcluster/script_SICLEN.R | f9c0324d1a0451cf7f6bf1f4df873c97daff2f39 | [] | no_license | jamesjcai/My_Code_Collection | 954988ee24c7bd34139d35c880a2093b01cef8d1 | 99905cc5d063918cbe6c4126b5d7708a4ddffc90 | refs/heads/master | 2023-07-06T07:43:00.956813 | 2023-07-03T22:17:32 | 2023-07-03T22:17:32 | 79,670,576 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 179 | r | script_SICLEN.R | library(SICLEN)
# Load the expression matrix written by the upstream step.  siclen() below is
# fed t(X), so X is presumably features-in-rows -- TODO confirm against the
# pipeline that writes input.csv.
X <- as.matrix(read.csv("input.csv", header = FALSE))
#X <- as.matrix(read.table("input.csv", sep=","))
# Run SICLEN clustering on the transposed matrix (samples in rows).
res <- siclen(t(X))
# Persist only the cluster assignments for the downstream step.
write.csv(res$clusterid,'output.csv')
|
edde5308b7cb5652e3c878072dbbc38b8bc9d10a | 2693a682078fe71bed78997f82b71b82c0abd0ad | /base/utils/R/get.analysis.filenames.r | b67afa272c640eb3b16ad9dea984e4423c74c9a7 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ashiklom/pecan | 642a122873c9bca4f7ac60f6f260f490f15692e4 | 52bb31866810e2c93ddf540f2065f41ec008627b | refs/heads/develop | 2023-04-01T11:39:16.235662 | 2021-05-24T22:36:04 | 2021-05-24T22:36:04 | 28,980,311 | 3 | 0 | NOASSERTION | 2023-04-01T20:08:15 | 2015-01-08T18:44:03 | R | UTF-8 | R | false | false | 5,096 | r | get.analysis.filenames.r | ##' Generate ensemble filenames
##'
##' @name ensemble.filename
##' @title Generate ensemble filenames
##'
##' @return a filename
##' @export
##'
##' @details Generally uses values in settings, but can be overwritten for manual uses
##' @author Ryan Kelly
ensemble.filename <- function(settings, prefix = "ensemble.samples", suffix = "Rdata",
                              all.var.yr = TRUE, ensemble.id = settings$ensemble$ensemble.id,
                              variable = settings$ensemble$variable,
                              start.year = settings$ensemble$start.year,
                              end.year = settings$ensemble$end.year) {
  # An ensemble id is absent when run.write.configs(..., write = FALSE) was
  # used, since no id gets created in the database.  Substitute a placeholder
  # so a simple workflow still produces a usable filename (provenance is lost
  # if multiple ensembles are run in that mode).
  if (is.null(ensemble.id) || is.na(ensemble.id)) {
    ensemble.id <- "NOENSEMBLEID"
  }
  # Ensemble output lives directly under the run's output directory; make sure
  # it exists before handing the path back.
  out.dir <- settings$outdir
  dir.create(out.dir, showWarnings = FALSE, recursive = TRUE)
  if (all.var.yr) {
    # Every variable and year is included, so neither appears in the name.
    file.path(out.dir, paste(prefix, ensemble.id, suffix, sep = "."))
  } else {
    file.path(out.dir, paste(prefix, ensemble.id, variable,
                             start.year, end.year, suffix, sep = "."))
  }
}
##' Generate sensitivity analysis filenames
##'
##' @name sensitivity.filename
##' @title Generate sensitivity analysis filenames
##'
##' @return a filename
##' @export
##'
##' @details Generally uses values in settings, but can be overwritten for manual uses
##' @author Ryan Kelly
sensitivity.filename <- function(settings,
                                 prefix = "sensitivity.samples", suffix = "Rdata",
                                 all.var.yr = TRUE,
                                 pft = NULL,
                                 ensemble.id = settings$sensitivity.analysis$ensemble.id,
                                 variable = settings$sensitivity.analysis$variable,
                                 start.year = settings$sensitivity.analysis$start.year,
                                 end.year = settings$sensitivity.analysis$end.year) {
  # Build the path of a sensitivity-analysis output file; creates the target
  # directory as a side effect and returns a single file path.
  #
  # No ensemble id exists when run.write.configs(..., write = FALSE) was used
  # (no id is created in the database); fall back to a placeholder so a simple
  # workflow still works, at the cost of provenance if multiple ensembles run.
  if (is.null(ensemble.id) || is.na(ensemble.id)) {
    ensemble.id <- "NOENSEMBLEID"
  }
  ## variable / start.year / end.year are only embedded in the filename, so
  ## just make sure missing values cannot break the paste() calls below.
  if (is.null(variable)) {
    variable <- "NA"
  }
  if (is.null(start.year)) {
    start.year <- "NA"
  }
  if (is.null(end.year)) {
    end.year <- "NA"
  }
  if (is.null(pft)) {
    # No PFT given: the file goes in the main output directory.
    sensitivity.dir <- settings$outdir
  } else {
    ind <- which(sapply(settings$pfts, function(x) x$name) == pft)
    if (length(ind) == 0) {
      ## no match: warn and fall back to the conventional pft subdirectory
      PEcAn.logger::logger.warn("sensitivity.filename: unmatched PFT = ", pft, " not among ",
                                sapply(settings$pfts, function(x) x$name))
      sensitivity.dir <- file.path(settings$outdir, "pfts", pft)
    } else {
      if (length(ind) > 1) {
        ## multiple matches: warn and use the first
        PEcAn.logger::logger.warn("sensitivity.filename: multiple matchs of PFT = ", pft,
                                  " among ", sapply(settings$pfts, function(x) x$name), " USING")
        ind <- ind[1]
      }
      # BUG FIX: this condition previously used the elementwise `|`; when the
      # pft had no outdir at all, `is.na(NULL)` returned logical(0), the `|`
      # produced logical(0), and the `if` aborted with "argument is of length
      # zero".  `||` short-circuits on is.null() and avoids that.
      if (is.null(settings$pfts[[ind]]$outdir) || is.na(settings$pfts[[ind]]$outdir)) {
        ## no outdir recorded for this pft: derive the conventional one
        settings$pfts[[ind]]$outdir <- file.path(settings$outdir, "pfts", pft)
      }
      sensitivity.dir <- settings$pfts[[ind]]$outdir
    }
  }
  dir.create(sensitivity.dir, showWarnings = FALSE, recursive = TRUE)
  if (!dir.exists(sensitivity.dir)) {
    # Directory could not be created (e.g. permissions); fall back to outdir.
    PEcAn.logger::logger.error("sensitivity.filename: could not create directory, please check permissions ",
                               sensitivity.dir, " will try ", settings$outdir)
    if (dir.exists(settings$outdir)) {
      sensitivity.dir <- settings$outdir
    } else {
      PEcAn.logger::logger.error("sensitivity.filename: no OUTDIR ", settings$outdir)
    }
  }
  if (all.var.yr) {
    # All variables and years are included; omit them from the filename.
    sensitivity.file <- file.path(sensitivity.dir,
                                  paste(prefix, ensemble.id, suffix, sep = "."))
  } else {
    sensitivity.file <- file.path(sensitivity.dir,
                                  paste(prefix, ensemble.id, variable, start.year, end.year, suffix, sep = "."))
  }
  return(sensitivity.file)
}
|
90f8dc93b3e87509ebdc09a173dbee4e978aa4db | ef0c246945c2301c860cfc7ab30443d1d922307e | /R/corr_coeff_andRF_tree.R | 050074652fcff643bf391b14e55466223ee6a1f0 | [] | no_license | tacormier/LCCVP | 3cac058053a54b2c71f952b971cfe1913bad5acb | 1345f06193b438a3ee477ced9a8543fad83274f7 | refs/heads/master | 2020-05-26T17:54:44.447655 | 2015-05-20T18:43:09 | 2015-05-20T18:43:09 | 35,967,013 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,308 | r | corr_coeff_andRF_tree.R | library(raster)
# Inputs: cover/suitability raster, biovar predictor stack, fitted RF model,
# and the text file listing predictor names (one per line).
suit.img <- "C:/Share/LCC-VP/Parks/GRSM/analysis/spruce_fir/inputs/clim_srad/COVER_spruce_fir_inPRISM_800m.tif"
biovars <- "C:/Share/LCC-VP/Parks/GRSM/analysis/spruce_fir/inputs/clim_srad/biovars_1981-2010_srad_masked.tif"
# load() restores the saved objects (the fitted forest referenced below as
# `randfor` -- TODO confirm the object name inside the RData) and returns
# their names.
rf <- load("C:/Share/LCC-VP/Parks/GRSM/analysis/spruce_fir/outputs/clim_srad/rf_model_Spruce_fir.RData")
p.names <- "C:/Share/LCC-VP/Parks/GRSM/analysis/common_inputs/GRSM_var_names_clim_srad.txt"
# BUG FIX: pred.names was previously read *after* its first use below
# (names(p.img) <- pred.names), which aborted the script; read it up front.
pred.names <- read.table(p.names, sep="\n")[,1]
# Pixel values of the cover image and the predictor stack.
s.img <- getValues(raster(suit.img))
p.img <- as.data.frame(getValues(brick(biovars)))
names(p.img) <- pred.names
# Correlation of each predictor band with the cover image.
cor.preds <- as.data.frame(sapply(p.img, cor, s.img, use="complete.obs"))
names(cor.preds) <- "R - Spruce Fir"
write.csv(cor.preds, file="C:/Share/LCC-VP/Parks/GRSM/analysis/spruce_fir/outputs/clim_srad/Spruce-fir_corr_coeffs.csv")
# Dump tree 500 of the forest with split variables translated from column
# indices to predictor names.  getTree() comes from randomForest, which is
# assumed to be attached -- TODO confirm.
tree <- as.data.frame(getTree(randfor,k=500))
#str(tree)
names(tree)[3] <- "split_var"
# Capture the numeric split indices before overwriting the column with labels
# (the first character assignment converts the whole column to character).
split.idx <- tree$split_var
for (row in seq_along(split.idx)) {
  if (split.idx[row] != 0) {
    tree$split_var[row] <- as.character(pred.names[split.idx[row]])
  } else {
    # index 0 marks a terminal (leaf) node
    tree$split_var[row] <- "NA - Terminal Node"
  }
}
write.csv(tree, file="C:/Share/LCC-VP/Parks/GRSM/analysis/spruce_fir/outputs/clim_srad/spruce-fir_rf_tree.csv")
cbff56d427b41d13b0326bda25f4d28274c90e41 | b5d9e7a1bd35b19119e4b0d86957b657302067e3 | /smv.R | 3c4f136da3672a19d2ed1e4baf541f7e3ed0edc0 | [] | no_license | benilak/CS450 | 88ccd40c146399ec2f1d7522cdb39f59b3f1848c | 77256c58885bb04ba324677b0cd2cdd34cee8752 | refs/heads/master | 2021-05-12T12:04:33.962836 | 2018-04-12T06:59:22 | 2018-04-12T06:59:22 | 117,402,820 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,686 | r | smv.R | # CS450 - SVM
library(e1071)
# =============================================================================
vowel <- read.csv('C:\\Users\\brega\\OneDrive\\Desktop\\CS450\\vowel.csv')
# encode categorical features as numeric
# NOTE(review): as.numeric() on a factor yields its level codes, which imposes
# an arbitrary ordering on Speaker/Sex -- acceptable for an SVM exercise but
# worth confirming.
vowel$Speaker <- as.numeric(vowel$Speaker)
vowel$Sex <- as.numeric(vowel$Sex)
# scale the data to the range 0 to 1 (excluding the class column)
vowel[1:12] <- apply(vowel[1:12], 2, function(x) (x - min(x))/(max(x)-min(x)))
# information on the vowel data set tells us it was meant to be split such that
# speakers 1-8 comprise the training set and 9-15 comprise the testing set,
# so that's what we'll do
voweltrain <- vowel[1:(990*8/15),]
voweltest <- vowel[(990*8/15+1):990,]
# coarse grid search over C and gamma (10-fold CV inside tune.svm)
voweltuned <- tune.svm(Class~., data = voweltrain,
                       gamma = 10^seq(-10, 10, 5), cost = 10^(-2:2))
voweltuned$best.performance
summary(voweltuned) # (gamma = 1, cost = 100)
# refine parameters based on the best performance
voweltuned2 <- tune.svm(Class~., data = voweltrain,
                        gamma = 10^(-2:2), cost = 10^seq(1, 3, 0.5))
voweltuned2$best.performance
summary(voweltuned2) # (gamma = 0.1, cost = 10^1.5)
# interestingly, we see the same performance for gamma = 0.1 no matter what
# the cost parameter is.
# upon further inspection, it seems we only want gamma = 0.1 and cost >= 1
# refine one more time (cost pinned, sweep gamma only)
voweltuned3 <- tune.svm(Class~., data = voweltrain,
                        gamma = 10^seq(-2, 0, 0.1), cost = 10)
voweltuned3$best.performance
summary(voweltuned3) # same performance for gamma = 10^c(-1.0, -0.9, -0.8)...
# let's choose gamma = 0.1, cost = 10 and build the model using the RBF kernel
vowelmodel <- svm(Class~., data = voweltrain,
                  kernel = "radial", gamma = 0.1, cost = 10)
summary(vowelmodel)
# let the model predict classes (column 13 is the Class label)
vowelpredict <- predict(vowelmodel, voweltest[,-13])
voweltab <- table(pred = vowelpredict, true = voweltest$Class)
voweltab
vowelagreement <- vowelpredict == voweltest$Class
vowelaccuracy <- prop.table(table(vowelagreement))
vowelaccuracy # 62%
# for such low errors in the grid search process, I'm rather disappointed
# =============================================================================
# letters: SVM with RBF kernel -------------------------------------------------
letters.df <- read.csv('C:\\Users\\brega\\OneDrive\\Desktop\\CS450\\letters.csv')
# no encoding necessary - all numeric data except for the class attribute
# the page info for the letters data set indicates it is already scaled
# "We typically train on the first 16000 items and then use the resulting
# model to predict the letter category for the remaining 4000"
letterstrain <- letters.df[1:16000,]
letterstest <- letters.df[16001:20000,]
# grid search over C and gamma (slow: over an hour on this data set)
letterstuned <- tune.svm(letter~., data = letterstrain,
                         gamma = 10^seq(-10, 10, 5), cost = 10^(-2:2))
letterstuned$best.performance
summary(letterstuned) # gamma = 1, cost = 1
# BUG FIX: the original called summary(lettersmodel) here, before lettersmodel
# was created, which aborts the script with an "object not found" error; the
# model summary is printed after the fit below instead.
# choose gamma = 1 and cost = 1 for the RBF model
lettersmodel <- svm(letter~., data = letterstrain,
                    kernel = "radial", gamma = 1, cost = 1)
summary(lettersmodel)
# let the model predict classes (column 1 is the letter label)
letterspredict <- predict(lettersmodel, letterstest[,-1])
letterstab <- table(pred = letterspredict, true = letterstest$letter)
letterstab
lettersagreement <- letterspredict == letterstest$letter
lettersaccuracy <- prop.table(table(lettersagreement))
lettersaccuracy
# look at that it's 100% accuracy
|
ba9c51174547017dd56176ca5f5b6601186f2892 | 658650485ab0ef0cefb6ab0e2812cb76addb477f | /R/spocc-package.R | e07cdc571949b1141d9c0cfb54e913563d423af3 | [
"MIT"
] | permissive | jhollist/spocc | bb6fcaae82d6bc1ac8f28477f1e1c88e2fc98f95 | 9d6dc976129ddb63608a493a6fb9a8039bddfab6 | refs/heads/master | 2021-01-17T18:05:22.342694 | 2015-03-23T18:43:04 | 2015-03-23T18:43:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 848 | r | spocc-package.R | #' You don't need API keys for any of the data providers thus far. However, some
#' may be added in the future that require authentication, just FYI.
#'
#' Currently supported species occurrence databases:
#'
#' \tabular{ll}{
#' Provider \tab Web \cr
#' GBIF \tab \url{http://www.gbif.org/} \cr
#' BISON \tab \url{http://bison.usgs.ornl.gov/} \cr
#' iNaturalist \tab \url{http://www.inaturalist.org/} \cr
#' Berkeley ecoengine \tab \url{https://ecoengine.berkeley.edu/} \cr
#' AntWeb \tab \url{http://www.antweb.org/} \cr
#' }
#'
#' @name spocc-package
#' @aliases spocc
#' @docType package
#' @title R interface to many species occurrence data sources
#' @author Scott Chamberlain \email{myrmecocystus@@gmail.com}
#' @author Karthik Ram \email{karthik.ram@@gmail.com}
#' @author Ted Hart \email{edmund.m.hart@@gmail.com}
#' @keywords package
NULL
|
f58ef73d818637f28b346cb15a7aa72591126276 | 97a01ddeb2d82e7ee9473c0f8f652ce2f2c487a5 | /R/get_google_sheet_named_version.R | 2a6ed1c0c020f1e0d732b9525123898e6abc4225 | [] | no_license | langcog/metalabr | cb7086abc506544808b82fb10785502ff1937018 | ad2480e1ed83f48052a35171e7e2cc6e8eb978e7 | refs/heads/master | 2023-07-21T18:10:57.181620 | 2021-09-04T07:16:46 | 2021-09-04T07:16:46 | 281,990,517 | 2 | 1 | null | 2021-09-04T06:27:37 | 2020-07-23T15:35:26 | R | UTF-8 | R | false | false | 357 | r | get_google_sheet_named_version.R | get_google_sheet_named_version <- function(sheet_id) {
sheet_url <- paste0("https://docs.google.com/spreadsheets/d/",
sheet_id,
"/revisions/tiles?id=",
sheet_id,
"&start=1&showDetailedRevisions=false&filterNamed=true")
httr::GET(sheet_url) %>% content("text")
}
|
2a47d888c42586ef99f4761a2f752704ba504b89 | 2d42639303d9726b8aab4e6567b196d6c6098798 | /Plot1.R | b6c5e14c2f36cf9c1b3394685c0dcb16d285dc8c | [] | no_license | marcoiai/exploratory | 893c8b87baf9fa24c0f93a55750f39eda5dc394f | c2da6f6b916b5321c03ac30c02c3cc2bf3db7afc | refs/heads/master | 2021-01-21T07:39:00.948555 | 2015-06-07T22:24:50 | 2015-06-07T22:24:50 | 36,990,153 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 379 | r | Plot1.R |
f <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
filtered <- na.omit(f[as.Date(f$Date, "%d/%m/%Y") %in% as.Date(c('2007-02-01', '2007-02-02')),])
hist(as.double(as.character(filtered$Global_active_power)), col = "red", breaks = 25, main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png")
dev.off()
|
5c80adcba3a91f47646474cdc288b313bb5daa25 | 098841409c03478ddae35c4cdf6367cfd65fa3bf | /code/perf/10xcellline/saverx.R | ecad3bd35e5841e64f6e54c539c9c78006b1164a | [] | no_license | wangdi2016/imputationBenchmark | 0281746b482788c347faf9d96e8288639ba388a6 | 0881121444975cd0a3ee2ce69aaec46c3acd7791 | refs/heads/master | 2023-07-29T10:16:14.004610 | 2021-09-09T20:00:43 | 2021-09-09T20:00:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,092 | r | saverx.R | bexpr = readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/data/bulkrna/expr/jurkat_hex.rds')
# Average bulk replicates per cell line.
# NOTE(review): the first selector is '239T' while the renamed column below is
# '293T' -- presumably the bulk matrix really uses '239T' as a column name
# (or this is a typo); confirm against the jurkat_hex.rds column names.
v1 = rowMeans(bexpr[,which(colnames(bexpr)=='239T')])
v2 = rowMeans(bexpr[,which(colnames(bexpr)=='Jurkat')])
bexpr = cbind(v1,v2)
colnames(bexpr) = c('293T','jurkat')
allmtd = list.files('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/10xcellline/')
mtd = 'saverx'
# Only proceed when SAVER-X actually produced output for this data set.
if (length(list.files(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/10xcellline/',mtd))) != 0){
  sexpr = readRDS(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/10xcellline/',mtd,'/hg19.rds'))
  # Cell-line label is the prefix of each cell's column name; normalize the
  # 'X293T' spelling so it matches the bulk column name.
  scl = sub('_.*','',colnames(sexpr))
  scl = sub('X293T','293T',scl)
  # Per cell line: Spearman correlation of every imputed cell against the
  # bulk profile, restricted to the shared gene set.
  res <- lapply(colnames(bexpr), function(cl){
    be <- bexpr[,cl]
    intgene <- intersect(row.names(sexpr),names(be))
    apply(sexpr[intgene,which(scl == cl)],2,cor,be[intgene],method='spearman')
  })
  names(res) <- colnames(bexpr)
  saveRDS(res,file=paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/perf/10xcellline_cor/',mtd,'.rds'))
}
|
7e2184396f5477f252dc2e27dab879a8db9fcd1a | bb44cee2ab788c6315d03500208ce8905a19c46f | /gen3sdk/R/Gen3Indexd.R | 9c067f818cd9f85e180868d944845454381eaf2f | [
"Apache-2.0"
] | permissive | uc-cdis/gen3sdk-R | 4e4519867c1f65c8a6c0a1b77b47b3ee26b434d1 | 7ae8afc99a198ca317994bd3b545142b474db244 | refs/heads/master | 2021-07-04T08:03:47.355592 | 2020-10-26T20:26:22 | 2020-10-26T20:26:22 | 191,998,871 | 1 | 2 | Apache-2.0 | 2020-10-26T20:26:23 | 2019-06-14T19:57:51 | HTML | UTF-8 | R | false | false | 11,712 | r | Gen3Indexd.R | library(httr)
library(jsonlite)
library(readr)
Gen3Indexd <- setRefClass("Gen3Indexd",
  #' @field endpoint The base URL of the data commons.
  #' @field auth_provider Gen3AuthHelper instance that supplies bearer tokens
  #'   for the write operations below.
  #' @title
  #' CRUD operations from a Gen3 indexd system.
  #' @description
  #' A class for interacting with the Gen3 Indexd services.
  #' @param
  #' Args:
  #' endpoint (str): The URL of the data commons.
  #' auth_provider (Gen3Auth): A Gen3Auth class instance.
  #' @usage
  #' Examples:
  #' This generates the Gen3DIndexd class pointed at the sandbox commons while
  #' using the credentials.json downloaded from the commons profile page.
  #' >>> endpoint <- "https://nci-crdc-demo.datacommons.io"
  #' ... auth <- Gen3AuthHelper(endpoint, refresh_file="credentials.json")
  #' ... ind <- Gen3Indexd(endpoint, auth)
  fields = list(
    endpoint = "character",
    auth_provider = "Gen3AuthHelper"
  ),
  methods = list(
    # Store the commons URL and the auth helper on the instance.
    initialize = function(endpoint, auth_provider) {
      .self$endpoint <- endpoint
      .self$auth_provider <- auth_provider
    },
    get_system_status = function() {
      #' @description
      #' Returns if IndexD is healthy or not
      #' @usage
      #' Examples:
      #' >>> ind.get_system_status()
      api_url <- paste(endpoint, "/index/_status", sep = "")
      output <- GET(api_url)
      return (output)
    },
    get_system_version = function() {
      #' @description
      #' Returns the version of IndexD
      #' @usage
      #' Examples:
      #' >>> ind.get_system_version()
      api_url <- paste(endpoint, "/index/_version", sep = "")
      output <- GET(api_url)
      return (output)
    },
    get_system_stats = function() {
      #' @description
      #' Returns basic information about the records in IndexD
      #' @usage
      #' Examples:
      #' >>> ind.get_system_stats()
      api_url <- paste(endpoint, "/index/_stats", sep = "")
      output <- GET(api_url)
      return (output)
    },
    get_global_guid = function(guid) {
      #' @description
      #' Get the metadata associated with the given id, alias, or distributed identifier
      #' @param
      #' Args:
      #' guid (str): The guid of the record to retrieve
      #' @usage
      #' Examples:
      #' This retrieves the metadata for guid
      #' >>> ind.get_global_guid(guid)
      api_url <- paste(endpoint, "/index/", guid, sep = "")
      output <- GET(api_url)
      return (output)
    },
    get_global_urls = function(size = NULL, hash = NULL, ids = NULL) {
      #' @description
      #' Get a list of urls that match query params.  NULL parameters are
      #' dropped from the query string by httr.
      #' @param
      #' Args:
      #' size (int): The object size of the record to retrieve
      #' hash (str): The hashes specified as algorithm:value of the record to retrieve
      #' ids (str): The ids, comma delimited, of the record to retrieve
      #' @usage
      #' Examples:
      #' This retrieves the urls with the filters size, ids
      #' >>> ind.get_global_urls(size, ids)
      api_url <- paste(endpoint, "/index/urls", sep = "")
      output <- GET(api_url, query = list(size = size, hash = hash, ids = ids))
      return (output)
    },
    post_index = function(body) {
      #' @description
      #' Add a new entry to the index.  Requires auth; the authz field is
      #' wrapped in a list so it serializes as a JSON array.
      #' @param
      #' Args:
      #' body (object): The json-R-object of the record to create
      #' @usage
      #' Examples:
      #' This adds a new entry in the sandbox index
      #' >>> ind.post_index(body)
      auth_token <- auth_provider$get_auth_value()
      body$authz <- list(body$authz)
      json_body <- toJSON(body, auto_unbox = TRUE)
      api_url <- paste(endpoint, "/index/index", sep = "")
      output <- POST(api_url, add_headers(Authorization = auth_token),
                     content_type('application/json'), body = json_body, encode = 'json')
      return (output)
    },
    get_index = function(urls_meta = NULL, meta = NULL, size = NULL, hash = NULL, uploader = NULL,
                         ids = NULL, urls = NULL, acl = NULL, authz = NULL, negate_params = NULL) {
      #' @description
      #' Get a list of all records matching the given filters
      #' @param
      #' Args:
      #' urls_metadata (str): The urls_metadata, JSON string format, of the record to retrieve
      #' metadata (str): The metadata, in format key:value, of the record to retrieve
      #' size (int): The object size of the record to retrieve
      #' hash (str): The hashes specified as algorithm:value of the record to retrieve
      #' uploader (str): The uploader id of the record to retrieve
      #' ids (str): The ids, comma delimited, of the record to retrieve
      #' urls (str): The urls, comma delimited, of the record to retrieve
      #' acl (str): The acl, comma delimited, of the record to retrieve
      #' authz (str): The authz, comma delimited, of the record to retrieve
      #' negate_params (str): The negate params, JSON string format, of the record to retrieve
      #' start (str): The start did of the record to retrieve
      #' limit (str): The number of records to return for this page, default to 100
      #' NOTE(review): `start`/`limit` are documented but not accepted as
      #' arguments, and the R argument names (urls_meta/meta) differ from the
      #' documented names -- confirm against the indexd API.
      #' @usage
      #' Examples:
      #' This retrieves the records with the size filter
      #' >>> ind.get_index(size)
      api_url <- paste(endpoint, "/index/index", sep = "")
      output <- GET(api_url, query = list(urls_metadata = urls_meta, metadata = meta, size = size, hash = hash,
                                          uploader = uploader, ids = ids, urls = urls, acl = acl, authz = authz, negate_params = negate_params))
      return (output)
    },
    get_index_guid = function(guid) {
      #' @description
      #' Get the metadata associated with the given id
      #' @param
      #' Args:
      #' guid (str): The guid of the record to retrieve
      #' @usage
      #' Examples:
      #' This retrieves the metadata for guid
      #' >>> ind.get_index_guid(guid)
      api_url <- paste(endpoint, "/index/index/", guid, sep = "")
      output <- GET(api_url)
      return (output)
    },
    post_index_guid = function(guid, body) {
      #' @description
      #' Add a new version for the document associated to the provided uuid
      #' @param
      #' Args:
      #' guid (str): The uuid associated to the record needing a new version
      #' body (object): The json-R-object of the record to create
      #' @usage
      #' Examples:
      #' This adds a new version of the document anchored by baseid
      #' >>> ind.post_index_guid(guid, body)
      auth_token <- auth_provider$get_auth_value()
      body$authz <- list(body$authz)
      json_body <- toJSON(body, auto_unbox = TRUE)
      api_url <- paste(endpoint, "/index/index/", guid, sep = "")
      output <- POST(api_url, content_type("application/json"), add_headers(Authorization = auth_token), body = json_body)
      return (output)
    },
    put_index_guid = function(guid, rev, body) {
      #' @description
      #' Update an existing entry in the index.  Both authz and urls are
      #' wrapped in lists so they serialize as JSON arrays.
      #' @param
      #' Args:
      #' guid (str): The uuid associated to the record needed to update
      #' rev (str): The data revision associated with the record to update
      #' body (object): The json-R-object of the index record that needs to be updated
      #' @usage
      #' Examples:
      #' This updates the record
      #' >>> ind.put_index_guid(guid, rev, body)
      auth_token <- auth_provider$get_auth_value()
      body$authz <- list(body$authz)
      body$urls <- list(body$urls)
      json_body <- toJSON(body, auto_unbox = TRUE)
      api_url <- paste(endpoint, "/index/index/", guid, sep = "")
      output <- PUT(api_url, add_headers(Authorization = auth_token), content_type("application/json"), query = list(rev = rev), body = json_body)
      return (output)
    },
    delete_index_guid = function(guid, rev) {
      #' @description
      #' Deletes an entry from the index
      #' @param
      #' Args:
      #' guid (str): The uuid associated to the record needed to delete
      #' rev (str): The data revision associated with the record to delete
      #' @usage
      #' Examples:
      #' This deletes the record
      #' >>> ind.delete_index_guid(guid, rev)
      auth_token <- auth_provider$get_auth_value()
      api_url <- paste(endpoint, "/index/index/", guid, sep = "")
      output <- DELETE(api_url, add_headers(Authorization = auth_token), query = list(rev = rev))
      return (output)
    },
    post_bulk_documents = function(dids) {
      #' @description
      #' Get a list of documents given a list of dids
      #' @param
      #' Args:
      #' dids (str): List of dids to retrieve
      #' @usage
      #' Examples:
      #' Retrieves documents associated with dids
      #' >>> ind.post_bulk_documents(dids)
      api_url <- paste(endpoint, "/index/bulk/documents", sep = "")
      output <- POST(api_url, body = dids, encode = 'json')
      return (output)
    },
    get_index_guid_latest = function(guid, has_version) {
      #' @description
      #' Get the metadata of the latest index record version associated with
      #' the given id.  The has_version filter is only added to the query
      #' string when it was supplied by the caller.
      #' @param
      #' Args:
      #' guid (str): The guid of the record to retrieve
      #' has_version (bool): Filter by the latest doc that has version value populated
      #' @usage
      #' Examples:
      #' Retrieves latest metadata associated with guid
      #' >>> ind.get_index_guid_latest(guid, has_version)
      api_url <- paste(endpoint, "/index/", guid, "/latest", sep = "")
      if (missing(has_version)) {
        output <- GET(api_url)
        return (output)
      } else {
        output <- GET(api_url, query = list(has_version = has_version))
        return (output)
      }
    },
    get_index_guid_versions = function(guid) {
      #' @description
      #' Get the metadata of index record versions associated with the given id
      #' @param
      #' Args:
      #' guid (str): The guid of the record to retrieve
      #' @usage
      #' Examples:
      #' Retrieves metadata associated with guid
      #' >>> ind.get_index_guid_versions(guid)
      api_url <- paste(endpoint, "/index/", guid, "/versions", sep = "")
      output <- GET(api_url)
      return (output)
    },
    get_query_urls = function(exclude = NULL, include = NULL, versioned = FALSE, limit = 100, offset = 0) {
      #' @description
      #' Search index records by urls
      #' @param
      #' Args:
      #' exclude (str): search for documents without a single URL that match this pattern
      #' include (str): search for documents with at least one URL that match this pattern
      #' versioned (bool): if true search with a version set, else search documents without version
      #' limit (int): maximum rows to return
      #' offset (int): pointer position to start search
      #' @usage
      #' Examples:
      #' Retrieves index records by url with include filter
      #' >>> ind.get_query_urls(include)
      api_url <- paste(endpoint, "/_query_urls/q", sep = "")
      output <- GET(api_url, query = list(exclude = exclude, include = include, versioned = versioned,
                                          limit = limit, offset = offset))
      return (output)
    },
    get_query_urls_metadata = function(key, value, url = NULL, versioned = FALSE, limit = 100, offset = 0) {
      #' @description
      #' Search index records by urls metadata key and value
      #' NOTE(review): the request path below says "metdata" -- likely a typo
      #' for "metadata"; confirm against the deployed indexd routes before
      #' changing it, since it is a runtime string.
      #' @param
      #' Args:
      #' key (str): metadata key to search by
      #' value (str): metadata value for provided key
      #' url (str): URL pattern to filter by
      #' versioned (bool): if true search with a version set, else search documents without version
      #' limit (int): maximum rows to return
      #' offset (int): pointer position to start search
      #' @usage
      #' Examples:
      #' Retrieves index records by urls metadata
      #' >>> ind.get_query_urls_metadata(key, value)
      api_url <- paste(endpoint, "/_query_urls/metdata/q", sep = "")
      output <- GET(api_url, query = list(key = key, value = value, url = url, versioned = versioned,
                                          limit = limit, offset = offset))
      return (output)
    }
  )
)
|
ab594db538c07c303f42550f25f41ea88dbfe65b | 8c5886ee0d4a2e0a22d489c29c9a78a15797d8cb | /NN1.R | 7a23efd525dd3e765180a9d39adc75e20ac2fccb | [] | no_license | vincent101/KNN | a7eb2507f683bfcb3119d15c6ae37e1e57bc09c7 | 3243e0dab562174d8206e8d34932e710b86ceba9 | refs/heads/master | 2021-01-09T21:53:32.188182 | 2015-11-06T01:47:23 | 2015-11-06T01:47:23 | 44,888,109 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,599 | r | NN1.R | # iris 1nn eu
# iris 1-NN with Euclidean distance; dist_eu() comes from s_knn.r.
source("s_knn.r")
iris <- read.csv("iris.txt")
# First 70 rows train, rows 71-99 test; column 5 is the class label.
train <- iris[1:70,]
train.X <- iris[1:70,-5]
train.Y <- iris[1:70,5]
test <- iris[71:99,]
test.X <- iris[71:99,-5]
test.Y <- iris[71:99,5]
predicted.Y <- c()
#print(test.Y)
#print(predicted.Y)
# Brute force: for each test point, scan all training points and keep the
# index of the smallest distance (first one wins on ties).
for(i in 1:nrow(test.X)){
  min = 9999999
  jmin = 0
  for(j in 1:nrow(train.X)){
    d = dist_eu(train.X[j,], test.X[i,])
    if (d < min){
      min = d
      jmin = j
    }
  }
  predicted.Y[i] = train.Y[jmin]
}
#t = table(predicted.Y, test.Y)
#percentage = (t[1,1]+t[2,2])/length(test.Y)
percentage = mean(predicted.Y == test.Y)
print("Use Euclidean metrics in 1-NN, the test.Y is:")
print(test.Y)
print("the predicted.Y is:")
print(predicted.Y)
# NOTE(review): print()'s second positional argument is matched to `digits`,
# so `percentage` is NOT appended to the message here -- hence the separate
# print(percentage) on the next line.
print("the percentage of correct predictions is:", percentage)
print(percentage)
#apply(train.X, 1, dist_eu, y=test.X[1,])
#d = cbind(apply(train.X, 1, dist_eu, y=test.X[1,]),train[,5])
#o = order(d[,1])
#d1 <- d[o,]
#predict_lable =
#dist_eu(train.X[1,], test.X[1,])
# iris 1nn eu END
############################
# iris 3-NN with the Euclidean metric.
source("s_knn.r")
iris <- read.csv("iris.txt")
train <- iris[1:70,]
train.X <- iris[1:70,-5]
train.Y <- iris[1:70,5]
test <- iris[71:99,]
test.X <- iris[71:99,-5]
test.Y <- iris[71:99,5]
predicted.Y <- c()
# BUG FIX: the original only updated its three running minima when
# d < min1, so any distance falling between min1 and min2 (or between
# min2 and min3) was discarded -- min2/min3 were frequently NOT the
# 2nd/3rd nearest neighbours.  Computing all distances and taking
# order(d)[1:3] yields the true three nearest (ties broken by the
# lower training index, as before).
for (i in seq_len(nrow(test.X))) {
  d <- vapply(seq_len(nrow(train.X)),
              function(j) dist_eu(train.X[j, ], test.X[i, ]),
              numeric(1))
  nn3 <- order(d)[1:3]
  labs <- train.Y[nn3]
  # Majority vote: if the two closest neighbours agree use their label,
  # otherwise the third neighbour breaks the tie.  (This is a true
  # majority only for two-class data -- the same assumption the
  # original code made.)
  if (labs[1] == labs[2]) {
    predicted.Y[i] <- labs[1]
  } else {
    predicted.Y[i] <- labs[3]
  }
}
percentage <- mean(predicted.Y == test.Y)
print("Use Euclidean metrics in 3-NN, the test.Y is:")
print(test.Y)
print("the predicted.Y is:")
print(predicted.Y)
# BUG FIX: print("...", percentage) misused print()'s `digits` argument.
cat("the percentage of correct predictions is:", percentage, "\n")
print(percentage)
# iris 3nn eu END
############################
# ionosphere 1-NN with the Euclidean metric.
# Column 35 is the class label; rows 1-200 train, rows 201-350 test.
source("s_knn.r")
ionosphere <- read.csv("ionosphere.txt")
train <- ionosphere[1:200,]
train.X <- ionosphere[1:200,-35]
train.Y <- ionosphere[1:200,35]
test <- ionosphere[201:350,]
test.X <- ionosphere[201:350,-35]
test.Y <- ionosphere[201:350,35]
predicted.Y <- c()
# Nearest-neighbour prediction: which.min() returns the first index of
# the minimum distance, matching the original strict "<" update rule.
for (i in seq_len(nrow(test.X))) {
  d <- vapply(seq_len(nrow(train.X)),
              function(j) dist_eu(train.X[j, ], test.X[i, ]),
              numeric(1))
  predicted.Y[i] <- train.Y[which.min(d)]
}
percentage <- mean(predicted.Y == test.Y)
print("Use Euclidean metrics in 1-NN, the test.Y is:")
print(test.Y)
print("the predicted.Y is:")
print(predicted.Y)
# BUG FIX: print("...", percentage) misused print()'s `digits` argument
# and fails at run time; cat() prints the message and value together.
cat("the percentage of correct predictions is:", percentage, "\n")
print(percentage)
# ionosphere 1nn eu END
############################
# ionosphere 3-NN with the Euclidean metric.
source("s_knn.r")
ionosphere <- read.csv("ionosphere.txt")
train <- ionosphere[1:200,]
train.X <- ionosphere[1:200,-35]
train.Y <- ionosphere[1:200,35]
test <- ionosphere[201:350,]
test.X <- ionosphere[201:350,-35]
test.Y <- ionosphere[201:350,35]
predicted.Y <- c()
# BUG FIX: the original only updated its running minima when d < min1,
# discarding distances between min1 and min2 (or min2 and min3), so
# min2/min3 were often not the true 2nd/3rd nearest neighbours.
# order(d)[1:3] gives the real three nearest, ties broken by index.
for (i in seq_len(nrow(test.X))) {
  d <- vapply(seq_len(nrow(train.X)),
              function(j) dist_eu(train.X[j, ], test.X[i, ]),
              numeric(1))
  nn3 <- order(d)[1:3]
  labs <- train.Y[nn3]
  # Two-class majority vote: agreement of the two closest wins,
  # otherwise the third neighbour breaks the tie (same rule as before).
  if (labs[1] == labs[2]) {
    predicted.Y[i] <- labs[1]
  } else {
    predicted.Y[i] <- labs[3]
  }
}
percentage <- mean(predicted.Y == test.Y)
print("Use Euclidean metrics in 3-NN, the test.Y is:")
print(test.Y)
print("the predicted.Y is:")
print(predicted.Y)
# BUG FIX: print("...", percentage) misused print()'s `digits` argument.
cat("the percentage of correct predictions is:", percentage, "\n")
print(percentage)
# ionosphere 3nn eu END
|
0e43d75534d0c8e7504137090acb0e658042e48a | bceb55dba86be7d656a59d3ca3674f8266f18025 | /Curbelo_SWD_script1.R | cdd664f104461567089b10321bacad70b4e0f535 | [] | no_license | jbkoch/Curbeloetal_SWD | 80266fbc5aa002b8b94e5dcacf7ae918befd1e94 | ed59591e9fcbefe006bb4edbe77a9706fdd163d6 | refs/heads/master | 2020-12-03T01:02:03.347157 | 2020-01-01T02:49:47 | 2020-01-01T02:49:47 | 231,167,876 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,538 | r | Curbelo_SWD_script1.R | # Curbelo et al. 2020
# D. suzukii in Hawaii Forest
# 09 December 2019
# getwd
# NOTE(review): a hard-coded setwd() ties the script to one machine;
# an RStudio project or the here package would be more portable.
setwd("~/Google Drive/Manuscripts/Curbeloetal_HawaiiDsuzukii/suzukii_analysis/")
# Load libraries
library(car) # diagnostics
library(MuMIn) #model selection
library(date) #for julian date
# list files
list.files()
# set data: trap-level counts with month/day/year components
df <- read.csv("HavoFinal.csv")
names(df)
# create julian date (trap deployment): mdy.date() builds the date from
# the month/day/year columns; as.POSIXlt()$yday gives day-of-year (0-365)
df$Date <- mdy.date(df$Mon0, df$Day0, df$Year0)
tmp <- as.POSIXlt(df$Date, format = "%d%b%y")
df$Julianday <- tmp$yday
# create end julian date (trap collection), same approach
df$Date_end <- mdy.date(df$Mon1, df$Day1, df$Year1)
tmp <- as.POSIXlt(df$Date_end, format = "%d%b%y")
df$Julianday_end <- tmp$yday
# export data with the derived date columns appended
write.csv(df, "HavoFinal_v3.csv")
# GLM: Poisson model of total D. suzukii counts on elevation, site type,
# deployment day-of-year and days deployed.
# na.action = "na.fail" so MuMIn::dredge() refuses incomplete rows.
test.glm <-glm(df$Suzukii.Total~df$Elevation+df$Site.Type+df$Julianday+df$Day_out,
               family = poisson(), na.action = "na.fail")
# intercept-only null model for the deviance comparison later on
test.null <- glm(df$Suzukii.Total~1, family = poisson)
summary(test.glm)
# how many residuals
test.glm$residuals #140
# residual deviance / residual df; values >> 1 indicate overdispersion
test.glm$deviance/test.glm$df.residual # 16.68396; huge overdispersion
influence.measures(test.glm) # 23, 29, 43, 65, 107, 113, 115, 125, 127, 129, 131, 133; n = `1`
options(max.print = 2000)
plot(test.glm)
# Compare a change in the parameter estimates when observations 50,124 is removed from the model
# use negative sign to indicate you want to update the frog.glm model with observation 47 removed
# not a large change suggesting that inclusion is not a problem
# first round, 12 observations removed, therefore 128 records retained...91% of data used.
compareCoefs(test.glm, update(test.glm, subset=-c(23,29, 43, 65, 107, 113,
                                                  115, 125, 127, 129, 131,
                                                  133)))
# import new data (influential observations removed upstream) and run model
df <- read.csv("HavoFinal_2.csv")
nrow(df)
# Generate Julian Day
# create julian date
df$Date <- mdy.date(df$Mon0, df$Day0, df$Year0)
tmp <- as.POSIXlt(df$Date, format = "%d%b%y")
df$Julianday <- tmp$yday
# create end julian date
df$Date_end <- mdy.date(df$Mon1, df$Day1, df$Year1)
tmp <- as.POSIXlt(df$Date_end, format = "%d%b%y")
df$Julianday_end <- tmp$yday
# Examines the fit of the model compared to a null model
## See large difference with summary output but this provides a p-value
# NOTE(review): test.glm/test.null were fitted before df was replaced;
# dredge() refits by re-evaluating the df$... terms in the global
# environment, so it now uses the NEW data -- confirm this is intended.
anova(test.glm, test.null, test = "Chisq") # significantly different than null
dd <- dredge(test.glm)
dd
### Don't necessarily need to account for overdispersion, but for practice
# Notice that dispersion parameter is set at 2.39
# notice that AIC is set to NA bc log likelihood is not calculated
over.testGLM <- update(test.glm, family=quasipoisson)
summary(over.testGLM)
# Helper that extracts a Pearson-style overdispersion estimate from a
# fitted model; passed to dredge() via chat= below.
dfun <- function(object) {with(object,sum((weights * residuals^2)[weights > 0])/df.residual)}
# Model Selection using QAICc with function dredge
## see quasi model selection in R_Boelker.pdf for more details and how to do in other packages
# must change the family name to x.quasipoisson for MuMIn package
## from ?QAIC - A 'hacked' constructor for quasibinomial family object,
### that allows for ML estimation
x.quasipoisson <- function(...) {res <- quasipoisson(...)
res$aic <- poisson(...)$aic
res}
# Update model to include new family = x.quasipoisson
ms.over.testGLM <- update(over.testGLM, family = "x.quasipoisson",
                          na.action=na.fail)
# Examine all possible combinations of models based on parameters in global model
## Asking R to use the Quasi likelihood which incorporates the variance inflation factor
## also use rank = "QAIC"
## chat = overdispersion parameter -- extracting the value from the original model
dd <- dredge(ms.over.testGLM, rank = "QAICc", chat=dfun(test.glm))
dd
# shown only to illustrate how the function is calculated, specific to this example
# BUG FIX: the body referred to `teset.glm` (typo), which would throw
# "object 'teset.glm' not found" if this helper were ever called.
dfun.test <- function(test.glm) {with(test.glm,sum((weights * residuals^2)[weights > 0])/df.residual)}
# also just indicate the value for overdispersion based on the output for over.redstartGLM model
# same as dd -- shown as an example
dd2 <- dredge(ms.over.testGLM, rank = "QAICc", chat=11.33288)
dd2
write.csv(dd2, "dd2_quasipoison.csv")
# Model Average Parameter estimates for subset of models
# see MuMIn package for details
# use model-avg coefficients list and Std. Error = unconditional se / ignore p-values
top.models <- get.models(dd2, subset = delta <2)
x <-model.avg(top.models)
# NOTE(review): `coefs` is assigned twice below -- the model.avg() result
# is inspected via summary() and then overwritten with model `15`'s
# coefficients; confirm the overwrite is intentional.
coefs <- model.avg(dd, subset = delta < 4, revised.var= T)
summary(coefs) # examine full average only, notice the relative variable importance values here!
coefs <-top.models$`15`$coefficients # isolate just parameter estimates
# Refit the reduced model (Day_out dropped) on the cleaned data
test.glm <-glm(df$Suzukii.Total~df$Elevation+df$Site.Type+df$Julianday,
               family = poisson(), na.action = "na.fail")
# Create plots
# forested site and lava site
nrow(df)
df.for <-df[ which(df$habitat=="Forested"),]
df.lav <-df[ which(df$habitat=="Non-forested"),]
# Figure 1 was made in ArcGIS
# Figure 2 generation: abundance vs altitude by habitat (scatter) plus a
# boxplot by habitat type, written to a 300-dpi TIFF
tiff(filename = "/Users/jonathankoch/Google Drive/Manuscripts/Curbeloetal_HawaiiDsuzukii/suzukii_analysis/Fig2.tiff",width = 2000, height = 1000,
     units = "px", pointsize =12, res = 300)
par(mfrow=c(1,2))
plot(df.for$Suzukii.Total~df.for$Elevation, pch = 16, xlab = "Altitude (m)",
     ylab = "D. suzukii abundance")
points(df.lav$Suzukii.Total~df.lav$Elevation, pch = 15, col = "gray")
boxplot(df$Suzukii.Total~df$habitat, xlab = "Habitat type",
        ylab = "D. suzukii abundance")
dev.off() |
c6923ac010c93923fb09277620f7239a6547c09f | 932dba523258a20ba386695ed34a6f91da4688c7 | /R/term_before.R | eb17d57205d122d27e4205132c30975f867549cd | [] | no_license | trinker/termco | 7f4859a548deb59a6dcaee64f76401e5ff616af7 | aaa460e8a4739474f3f242c6b2a16ea99e1304f5 | refs/heads/master | 2022-01-18T23:43:46.230909 | 2022-01-05T19:06:43 | 2022-01-05T19:06:43 | 39,711,923 | 27 | 4 | null | 2018-02-02T05:57:38 | 2015-07-26T03:17:09 | R | UTF-8 | R | false | false | 4,620 | r | term_before.R | #' Extract Terms from Relative Locations
#'
#' \code{term_before} - View the frequency of terms before a regex/term.
#'
#' @param text.var The text string variable.
#' @param term A regex term to provide the search position.
#' @param ignore.case logical. If \code{FALSE}, the pattern matching is case
#' sensitive and if \code{TRUE}, case is ignored during matching.
#' @param \ldots ignored.
#' @return Returns a data.frame of terms and frequencies
#' @export
#' @rdname term_before
#' @examples
#' term_before(presidential_debates_2012$dialogue, 'president')
#' term_after(presidential_debates_2012$dialogue, 'president')
#' term_after(presidential_debates_2012$dialogue, 'oil')
#' term_first(presidential_debates_2012$dialogue)
#'
#' x <- term_before(presidential_debates_2012$dialogue, 'president')
#' plot(x)
#'
#' \dontrun{
#' library(dplyr); library(lexicon)
#'
#' pos_df_pronouns[['pronoun']][1:5] %>%
#' lapply(function(x){
#' term_after(presidential_debates_2012$dialogue, paste0("\\b", x, "\\b"))
#' }) %>%
#' setNames(pos_df_pronouns[['pronoun']][1:5])
#'
#' term_first(presidential_debates_2012$dialogue) %>%
#' filter(!term %in% tolower(sw_dolch) & !grepl("'", term))
#' }
term_before <- function(text.var, term, ignore.case = TRUE, ...){

    # Lookahead pattern: a word (letters, apostrophes, hyphens) that sits
    # immediately before `term`, allowing an optional comma before the space.
    prefix <- if (ignore.case) "(?i)" else ""
    regex <- paste0(prefix, "[A-Za-z'-]+(?=,?\\s", term, ")")

    trms <- stats::na.omit(unlist(stringi::stri_extract_all_regex(text.var, regex)))
    if (length(trms) == 0) {
        return(NULL)
    }
    if (ignore.case) {
        trms <- tolower(trms)
    }

    # Frequency table, most common first, as a term/frequency tibble.
    freqs <- sort(table(trms), decreasing = TRUE)
    out <- tibble::tibble(textshape::tidy_table(as.table(freqs), "term", "frequency"))
    class(out) <- c("term_loc", class(out))
    out
}
#' Extract Terms from Relative Locations
#'
#' \code{term_after} - View the frequency of terms after a regex/term.
#'
#' @export
#' @rdname term_before
term_after <- function(text.var, term, ignore.case = TRUE, ...){

    # Lookbehind pattern: a word that immediately follows `term`
    # (with an optional comma after the term).
    prefix <- if (ignore.case) "(?i)" else ""
    regex <- paste0(prefix, "(?<=", term, ",?\\s)[A-Za-z'-]+")

    trms <- stats::na.omit(unlist(stringi::stri_extract_all_regex(text.var, regex)))
    if (length(trms) == 0) {
        return(NULL)
    }
    if (ignore.case) {
        trms <- tolower(trms)
    }

    # Frequency table, most common first, as a term/frequency tibble.
    freqs <- sort(table(trms), decreasing = TRUE)
    out <- tibble::tibble(textshape::tidy_table(as.table(freqs), "term", "frequency"))
    class(out) <- c("term_loc", class(out))
    out
}
#' Extract Terms from Relative Locations
#'
#' \code{term_first} - View the frequency of terms starting each string.
#'
#' @export
#' @rdname term_before
term_first <- function(text.var, ignore.case = TRUE, ...){

    # Anchored pattern: the first word of each string.
    prefix <- if (ignore.case) "(?i)" else ""
    regex <- paste0(prefix, "^[A-Za-z'-]+")

    trms <- stats::na.omit(unlist(stringi::stri_extract_all_regex(text.var, regex)))
    if (length(trms) == 0) {
        return(NULL)
    }
    if (ignore.case) {
        trms <- tolower(trms)
    }

    # Frequency table, most common first, as a term/frequency tibble.
    freqs <- sort(table(trms), decreasing = TRUE)
    out <- tibble::tibble(textshape::tidy_table(as.table(freqs), "term", "frequency"))
    class(out) <- c("term_loc", class(out))
    out
}
#' Plots a term_loc Object
#'
#' Plots a term_loc object.
#'
#' @param x The \code{term_loc} object.
#' @param as.cloud logical. If \code{TRUE} a wordcloud will be plotted rather
#' than a bar plot.
#' @param random.order logical. Should the words be place randomly around the
#' cloud or if \code{FALSE} the more frequent words are in the center of the cloud.
#' @param rot.per The percentage of rotated words.
#' @param \ldots Other arguments passed to \code{\link[wordcloud]{wordcloud}}.
#' @method plot term_loc
#' @export
plot.term_loc <- function(x, as.cloud = FALSE, random.order = FALSE,
    rot.per = 0, ...){

    if (isTRUE(as.cloud)) {
        wordcloud::wordcloud(x[[1]], x[[2]], random.order = random.order,
            rot.per = rot.per, ...)
    } else {
        # Reverse the factor levels so the most frequent term ends up at
        # the top after coord_flip().
        x[["term"]] <- factor(x[["term"]], levels = rev(x[["term"]]))
        # aes_string() is deprecated in ggplot2 (>= 3.0.0); the tidy-eval
        # `.data` pronoun is the documented replacement.  NOTE(review):
        # for a clean R CMD check, add `@importFrom rlang .data` to the
        # package namespace.
        ggplot2::ggplot(x, ggplot2::aes(x = .data[["term"]],
                                        weight = .data[["frequency"]])) +
            ggplot2::geom_bar() +
            ggplot2::coord_flip() +
            ggplot2::ylab("Count") +
            ggplot2::xlab("Terms") +
            ggplot2::scale_y_continuous(expand = c(0, 0),
                # `v` (not `x`) so the label formatter does not shadow the
                # data argument `x` used by `limits` below.
                labels = function(v) format(v, big.mark = ",", scientific = FALSE, trim = TRUE),
                # 1% head room above the tallest (first) bar.
                limits = c(0, 1.01 * x[1, "frequency"][[1]])) +
            ggplot2::theme_bw() +
            ggplot2::theme(
                panel.grid.major.y = ggplot2::element_blank(),
                legend.title = ggplot2::element_blank(),
                panel.border = ggplot2::element_blank(),
                axis.line = ggplot2::element_line(color="grey70")
            )
    }
}
|
affc01a7b4be211b3d7879e263c7a75cc92da7ca | dcd3996153255a14ac169e8720296b200fa97f3f | /Priming/Prime_Model.R | 08154dc9a6ea851ee9065d8a37f7f1bce4fb8970 | [] | no_license | BramKreuger/PRIMING | 984344fce6a7480656500b97aff132df23229d96 | 43332ba78ddb18e167eb43229730795b210320c7 | refs/heads/master | 2021-09-10T05:47:56.353680 | 2018-03-21T08:26:19 | 2018-03-21T08:26:19 | 125,194,259 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 3,881 | r | Prime_Model.R | # Priming model voor Experimentele methode en statistiek
# Door: Nikki Evers, Daniël Bezema, Floris van Voorst tot Voorst, Bram Kreuger
# Universiteit Utrecht 2018
# Loading in data
setwd("D:/nikki/Documents/Experimentele Methoden & Statistiek/Eigen Onderzoek - Priming")
load("allData2018.Rdata")
# Word lists ("woordenboek" = dictionary): easy and hard word sets.
woordenboek1 <- read.csv(file = "D:/nikki/Documents/Experimentele Methoden & Statistiek/Eigen Onderzoek - Priming/easy1.csv")
woordenboek2 <- read.csv(file = "D:/nikki/Documents/Experimentele Methoden & Statistiek/Eigen Onderzoek - Priming/easy2.csv")
woordenboek3 <- read.csv(file = "D:/nikki/Documents/Experimentele Methoden & Statistiek/Eigen Onderzoek - Priming/hard1.csv")
woordenboek4 <- read.csv(file = "D:/nikki/Documents/Experimentele Methoden & Statistiek/Eigen Onderzoek - Priming/hard2.csv")
# Cleaning up data: keep trials with plausible local times only.
allDataCleaned <- allData[allData$LocalTime < 150 & allData$LocalTime > 0, ]
# Take all rows from the dual task where a correct new word was made.
AllWordsData <- allDataCleaned[allDataCleaned$partOfExperiment == "dualTask" &
                                 allDataCleaned$Eventmessage2 == "correctNewWord", ]
# Chop the data: every contiguous stretch of scrabble gets its own number.
# We suspect that switching within a dual task cancels the priming (i.e.
# switching to the letterTypingTask), so a switch between tasks is treated
# as a new beginning.
AllWordsData$scrabbleStukje <- NA
i <- 1
stukje <- 1
x <- AllWordsData[i, "letterWindowVisitCounter"]
# Walk the rows in order; whenever the letter-window visit counter
# changes, start a new segment id ("stukje" = segment).
while (i <= nrow(AllWordsData))
{
  if(x != AllWordsData[i, "letterWindowVisitCounter"])
  {
    x <- AllWordsData[i, "letterWindowVisitCounter"]
    stukje <- stukje + 1
  }
  AllWordsData[i, "scrabbleStukje"] <- stukje
  i <- i + 1
}
##
## OUR OWN MODEL
## (In words, roughly what needs to happen, according to Nikki)
##
## For each scrabble segment (scrabbleStukje):
##    For each word:
##        input_string = word
##        calculate min_edit_dist (top 5)
##        choose word based upon strategy (e.g. always pick number 1, or always pick at random)
##        check whether it's the same as the human chose
##        give score based upon the previous step
##
## Things to think about:
## We could also store all those words in tables with scores, but we have 121 scrabble segments, which means we would get 121 tables.
## What would that actually gain us?  Isn't it easier to simply keep a running score and return that score as the output?
## It is useful to build our models so that priming based on the input string (the random letters) can be counted or excluded,
## because it is possible that the chosen strategy changes the rest of the results (i.e. the scores) too much.
# Print every string obtained by deleting exactly one character from
# input_str (all edit-distance-1 deletions).
#
# BUG FIX: the original looped over i in 0:nchar(input_str).  On the
# final iteration the pattern "^(.{n})." can no longer match (there is
# no character after position n), so gsub() returned the input string
# unchanged and the UNMODIFIED word was printed as a "deletion".
# The last valid index is nchar(input_str) - 1.  The manual
# `i <- i + 1` was also redundant inside a for loop and is dropped.
deletion <- function(input_str)
{
  # Nothing to delete from an empty string (also avoids 0:(-1)).
  if (nchar(input_str) == 0) return(invisible(NULL))
  for (i in 0:(nchar(input_str) - 1))
  {
    # Keep the first i characters, drop the one that follows.
    nieuw_woord <- gsub(paste("^(.{", i, "}).", sep=""), "\\1", input_str)
    print(nieuw_woord)
    ## TODO (from the original authors): instead of print, check that
    ## nieuw_woord differs from input_str, is not already in the
    ## generated_words data frame, and occurs in dictionary 1, 2, 3 or 4;
    ## if so, store it in the column for its edit distance.
  }
}
# Stub: intended to generate all words obtained by inserting one
# character into the input word (edit-distance-1 insertions).
# Not yet implemented.
insertion <- function()
{
}
# Stub: intended to generate all words obtained by substituting one
# character of the input word (edit-distance-1 substitutions).
# Not yet implemented.
substitution <- function()
{
}
# Stub: planned driver that combines deletion/insertion/substitution.
generate_distance_words <- function()
{
  ## Apply deletion, insertion and substitution one by one to the input
  ## word; this first pass creates only words at edit distance 1.
  ## If we have not generated enough words yet, apply deletion, insertion
  ## and substitution one by one to every word stored in this round, in
  ## the same way.
  ## That produces words at distance 2 (which therefore go into a new
  ## column, the column for distance 2).
  ## Keep repeating this until we have enough words.
}
7bb4dc27beaee747af478f8585f1aa3f357932fd | 61e3e83ec8423164b63c1e6ddfefe042ff5f9c41 | /GTAmodel_paradist.R | 12686253faf28eba6c2dce8f193d05b4f46b2547 | [] | no_license | mascoma/GTA_project_script | e342c44e20202d8c0f3750b775dc1f1b513f5fae | 3601fa286a46bc96aebc95c41f2098e1b157cadb | refs/heads/master | 2020-12-29T02:20:12.514679 | 2016-12-27T21:49:34 | 2016-12-27T21:49:34 | 46,816,268 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,591 | r | GTAmodel_paradist.R | library(ggplot2)
library(plyr)
library(scales)
library(rgl)
library(scatterplot3d)
library(fields)
library(aqfig)
# Input: tab-separated table of GTA model runs; blank fields become NA.
dir <- "/Users/Xin/Desktop/projects/GTA_project/output/20160722/"
file <- "GTAmodels_para50.txt"
input1 <- paste(dir, file, sep = "")
outputdir <- "/Users/Xin/Desktop/projects/GTA_project/output/20160804/"
model.output <- read.delim(input1, sep = "\t", stringsAsFactors = FALSE,
                           header = T, na.strings = "")
names(model.output) <- c("Xpos","Xneg","p","curveshape",
                         "stable", "r", "K", "c", "N")
# Partition the runs by curve shape (o/l), stability (s/u) and the value
# of p; record counts from the original run are kept in the comments.
os <- subset(model.output, curveshape == "o" & stable == "s") # 1244248 records
ou <- subset(model.output, curveshape == "o" & stable == "u") # 1180
ls <- subset(model.output, curveshape == "l" & stable == "s") # 4284663
lu <- subset(model.output, curveshape == "l" & stable == "u") # 4875
nonsolve <- subset(model.output, p == "NA" & c != "NA") # 589834
os.xpos <- subset(os, p!="NA" & as.numeric(p) >=0.99) #26321
os.xneg <- subset(os, p!="NA" & as.numeric(p) <=0.01) #797228
os.both <- subset(os, p!="NA" & as.numeric(p) > 0.01 & as.numeric(p) < 0.99) # 420699
ls.xpos <- subset(ls, p!="NA" & as.numeric(p) >=0.99) ##401314
ls.xneg <- subset(ls, p!="NA" & as.numeric(p) <=0.01) ##3537862
ls.both <- subset(ls, p!="NA" & as.numeric(p) > 0.01 & as.numeric(p) < 0.99) ## 345487
# These three subsets are not plotted below; kept for downstream use.
Xpos.dom <- subset(model.output, p!="NA" & as.numeric(p) >=0.99 & stable == "s") # 427635
Xneg.dom <- subset(model.output, p!="NA" & as.numeric(p) <=0.01 & stable == "s") # 9
Xpos.Xneg <- subset(model.output, p!="NA" & as.numeric(p) > 0.01
                    & as.numeric(p) < 0.99 & stable == "s") # 766186
### plot the distribution of each parameter
# Draw one row of four density plots (c, r, log10 N, log10 K) for the
# subset `d`, labelling each panel with `label`.  This helper replaces
# ten near-identical copies of the same four plot() calls; panel titles,
# bandwidths and axis limits are unchanged from the original.
plot_param_dist <- function(d, label) {
  plot(density(as.numeric(d$c), na.rm = TRUE, bw = 1e-4),
       main = paste0("c distribution (", label, ")"), xlim = c(0, 1))
  plot(density(as.numeric(d$r), na.rm = TRUE, bw = 1e-4),
       main = paste0("r distribution (", label, ")"), xlim = c(0, 1))
  plot(density(log10(as.numeric(d$N)), na.rm = TRUE, bw = 0.01),
       main = paste0("logN distribution (", label, ")"), xlim = c(-10, -7))
  plot(density(log10(as.numeric(d$K)), na.rm = TRUE, bw = 0.01),
       main = paste0("logK distribution (", label, ")"), xlim = c(5, 9))
}
output <- paste(outputdir, "parameter_dist.png", sep = "")
png(output, width = 600, height = 1200)
# 10 subsets x 4 parameters, one subset per row, same order as before.
par(mfrow=c(10, 4), mar = c(2,2,2,2))
plot_param_dist(model.output, "all")
plot_param_dist(ou, "ou")
plot_param_dist(lu, "lu")
plot_param_dist(os.xpos, "os.xpos")
plot_param_dist(os.xneg, "os.xneg")
plot_param_dist(os.both, "os.both")
plot_param_dist(ls.xpos, "ls.xpos")
plot_param_dist(ls.xneg, "ls.xneg")
plot_param_dist(ls.both, "ls.both")
plot_param_dist(nonsolve, "nonsolve")
dev.off()
|
11cfb60aa36b8be262b67641d1f0445191512b3f | f5131867a76c8c8af6ec863120c1e72722ea98d9 | /R/pkg/analysis/man/svar.Rd | e329afb083494aa3cc3b0ddc9e73f667f26b208d | [
"MIT"
] | permissive | k198581/src | c5b53b1f6eeae36116fb2063fdf993df303f1a57 | 17d2c54489adac2f428b6f449661712b1f7bf536 | refs/heads/master | 2022-11-22T09:01:34.948438 | 2020-07-12T09:57:50 | 2020-07-12T09:57:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 271 | rd | svar.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svar.R
\name{svar}
\alias{svar}
\title{Calculate the sample variance of a vector.}
\usage{
svar(x)
}
\arguments{
\item{x}{a vector}
}
\description{
Calculate the sample variance of a vector.
}
|
cc22dd2578e095f8adb6a9e97a6ee5f3c18337b7 | 5643509ed57314f610c16a02e7fc3fce6e8b6267 | /Chapter 5/experiments/CurvX.R | bb3a0aae98b94cf3233c461d02175a7c15811a10 | [] | no_license | aida-ugent/PhD_Code_Robin_Vandaele | d942ea50defe5417b032dcf7d2f61c9eac19c143 | 229166059fd8fb366f23162a0b72e7547e80649b | refs/heads/main | 2023-02-21T16:26:44.183346 | 2021-01-27T15:07:35 | 2021-01-27T15:07:35 | 318,493,807 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,353 | r | CurvX.R | # Load libraries
devtools::load_all() # load BCB
library("ggplot2") # plotting
library("randomcoloR") # branch assignment
# Load and plot data
df <- read.table("Data/Toy/curvX.csv", header=FALSE, sep=",")
colnames(df) <- c("x", "y")
ggplot(df, aes(x=x, y=y)) +
geom_point(size=3) +
theme_bw() +
coord_fixed()
# Conduct backbone pipeline
BCB <- backbone(df, eps=8, type="Rips", assign=TRUE)
# View boundary coefficients
EG <- get_edges2D(df, BCB$G)
ggplot(df[names(V(BCB$G)),], aes(x=x, y=y)) +
geom_segment(data=EG, aes(x = x1, y = y1, xend = x2, yend = y2), color='black', alpha=0.15) +
geom_point(size=2, aes(col=BCB$f)) +
scale_colour_gradientn(colours=topo.colors(7)) +
labs(col="BC") +
theme_bw() +
coord_fixed()
# View cost according to the number of leaves
ggplot(BCB$cost, aes(x=leaves, y=cost)) +
geom_line(aes(group=component, col=component), size=1.5) +
geom_vline(xintercept=length(which(degree(BCB$B) == 1)), linetype="dashed", size=1.5) +
geom_point(aes(group=component, col=component, size=1.5)) +
xlab("number of leaves") +
ylab("relative cost") +
theme_bw() +
theme(text = element_text(size=20), legend.position="none")
# View backbone and branch assignments
Epine <- get_edges2D(df, BCB$pine)
EBCB <- get_edges2D(df, BCB$B)
ggplot(df[names(V(BCB$G)),], aes(x=x, y=y)) +
geom_segment(data=EG, aes(x=x1, y=y1, xend=x2, yend=y2), color="black", alpha=0.1) +
geom_segment(data=Epine, aes(x=x1, y=y1, xend=x2, yend=y2), color="black", alpha=0.5, size=1) +
geom_point(size=3, fill=BCB$col, alpha=0.5, pch=21) +
geom_segment(data=EBCB, aes(x= x1, y=y1, xend=x2, yend=y2), col="black", size=2.75) +
geom_segment(data=cbind(EBCB, as.factor(BCB$branch)),
aes(x=x1, y=y1, xend=x2, yend=y2, col=as.factor(BCB$branch)), size=2) +
geom_point(data=df[V(BCB$B)$name,], fill=BCB$col[V(BCB$B)$name], size=5, pch=21) +
scale_color_manual(values=BCB$palette) +
guides(col = guide_legend(title="Path")) +
coord_fixed() +
theme_bw() +
theme(legend.title=element_text(size=20), legend.text=element_text(size=15)) +
theme(text = element_text(size=20),
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank())
|
69cfbbd0a033e9f9ed6bfa7b8f3084d47df1b903 | 74548251f8fc42984f607110e96d7554c66d0eaa | /mylogistic_Function.R | 3d6ad2e712d77a663b712c421a97cac1b424b11c | [] | no_license | drkamarul/Programming | 46b6f4f6f4b7f42d7c5a8c89e0c82f4e89b7f762 | 014aee863fea67858e3444386932e592145c71c0 | refs/heads/master | 2021-01-19T12:31:19.707173 | 2019-07-08T11:24:22 | 2019-07-08T11:24:22 | 24,819,982 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,060 | r | mylogistic_Function.R | oc <- rbinom(n = 50, size = 1, prob = 0.5)
# Two simulated covariates plus the assembled toy data frame used by the
# logistic-regression helpers below (the binary outcome `oc` is simulated
# on the preceding line); n = 50 throughout.
iv <- rnorm(n = 50, mean = 0, sd = 1)
iv2 <- rnorm(n = 50, mean = 10, sd = 1)
mydata <- data.frame(oc, iv, iv2)
# Fit a logistic model from a formula (or formula string) on `mydata2`
# and print: the model summary, a tidy coefficient table, then log-odds
# next to odds ratios, then 95% CIs on both scales.
myfunction4 <- function(a, mydata2){
  fitted.model <- glm(as.formula(a), family = binomial, data = mydata2)
  print(summary(fitted.model))
  print(broom::tidy(fitted.model))
  print("---------------------------------")
  # point estimates: coefficients alongside exponentiated odds ratios
  est.tab <- cbind(coef(fitted.model), exp(coef(fitted.model)))
  colnames(est.tab) <- c('beta', 'OR')
  print(est.tab)
  print("----------------------------------")
  # profile-likelihood confidence intervals on both scales
  ci.tab <- cbind(confint(fitted.model), exp(confint(fitted.model)))
  colnames(ci.tab) <- c('lower.ci_beta', 'upper.ci_beta',
                        'lower.ci_OR', 'upper.ci_or')
  print(ci.tab)
}
myfunction4(oc ~ iv + iv2, mydata2 = mydata)
# Fit an outcome ~ predictor logistic regression from two column-name
# strings and print the model, odds ratios and goodness-of-fit checks.
#
# a: response variable name (string); b: predictor name (string).
# data: data frame holding those columns.  Defaults to the global
#   `mydata` so existing calls behave exactly as before, but the model
#   can now be fitted on any data set (generalizes the old hard-coded
#   global reference).
myfunction <- function(a, b, data = mydata){
  mymodel <- glm(as.formula(paste(a, b, sep="~")), family = binomial, data = data)
  print(mymodel)
  print(summary(mymodel))
  print("This is the odds ratio")
  print("######################")
  print(exp(coef(mymodel)))
  print("This is the GOF checking")
  print("########################")
  LogisticDx::gof(mymodel)
}
myfunction("oc", "iv")
######################################################
# Same report as myfunction() but takes a ready-made formula instead of
# two column-name strings.
#
# data: data frame used for the fit; defaults to the global `mydata`
#   for backward compatibility with existing one-argument calls.
myfunction2 <- function(a, data = mydata){
  mymodel <- glm(as.formula(a), family = binomial, data = data)
  print(mymodel)
  print(summary(mymodel))
  print("This is the odds ratio")
  print("######################")
  print(exp(coef(mymodel)))
  print("This is the GOF checking")
  print("########################")
  LogisticDx::gof(mymodel)
}
myfunction2(oc ~ iv + iv2)
## this does not work for GOF
# Same report as myfunction2() but with the data set passed explicitly
# (mydata2) rather than read from the global environment; `a` is a
# formula string.
myfunction3 <- function(a, mydata2){
  fit <- glm(as.formula(a), family = binomial, data = mydata2)
  print(fit)
  print(summary(fit))
  print("This is the odds ratio")
  print("######################")
  print(exp(coef(fit)))
  print("This is the GOF checking")
  print("########################")
  LogisticDx::gof(fit)
}
myfunction3("oc ~ iv + iv2", mydata2 = mydata)
|
22ac5d40508e53c898df66c206bad5d564c42efe | c07714bba04ba4588a2a73185b41ff2b25f659cc | /01_createDbEntry.R | 3df9c635e645a6b5a511092ea0e6acca01654e6c | [
"MIT"
] | permissive | Chenmengpin/BRC_SingleCell_LJames | 95a614d7ff31be5e0e332b4d838dc82f6c12a895 | 7fe3a5a2501a0c0c25bd00a0f309cc3fdc5d8ed3 | refs/heads/master | 2020-09-18T18:59:48.073974 | 2017-09-26T10:23:49 | 2017-09-26T10:23:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,734 | r | 01_createDbEntry.R | # File: 01_createDbEntry.R
# Auth: umar.niazi@kcl.ac.uk
# DESC: list the samples and create appropriate db entries
# Date: 10/03/2017
## set variables and source libraries (g_pid / g_did come from header.R)
source('header.R')
## connect to mysql database
library('RMySQL')
##### connect to mysql database to get samples
db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1')
dbListTables(db)
# sample and file table: column names minus the auto-increment id
dbGetQuery(db, paste('describe Sample;'))
cSampleCol = dbGetQuery(db, paste('describe Sample;'))$Field[-1]
dbGetQuery(db, paste('describe File;'))
cFileCol = dbGetQuery(db, paste('describe File;'))$Field[-1]
# setwd(gcRemoteDir)
setwd('Data_external/')
setwd('Fastq/')
# list the files
cvFiles = list.files(pattern = 'fastq.gz')
# each sample has 2 files; strip the read suffix to group files by sample
fSplit = gsub('^(Plate\\d-C\\d+)_.+', '\\1', cvFiles)
lFiles = split(cvFiles, fSplit)
## create the entry for samples
cSampleCol
dfSamples = data.frame(idProject=g_pid, idData=g_did, title=unique(fSplit), description='Single Cell Sequencing rna-seq dataset')
# write this data to the database
rownames(dfSamples) = NULL
### NOTE: Do not execute this anymore as entry created
# # write this table to database
# dbWriteTable(db, name='Sample', value=dfSamples, append=T, row.names=F)
# get this table again from database with ids added
g_did
# BUG FIX: the data-set id was hard-coded as 12; build the query from
# g_did (set in header.R) so the script cannot silently read another
# data set's samples if the id ever changes.
dfSamples = dbGetQuery(db, paste0('select * from Sample where Sample.idData = ', g_did, ';'))
# create entries for these files in the database
dbListTables(db)
cn = dbListFields(db, 'File')[-1]
cn
# get the names of the samples
temp = lapply(dfSamples$title, function(x){
  # get the file names belonging to this sample title
  df = data.frame(name=lFiles[[x]], type='fastq', idSample=dfSamples[dfSamples$title == x, 'id'])
  return(df)
})
dfFiles = do.call(rbind, temp)
rownames(dfFiles) = NULL
# write this table to database
## note: do not execute as it is already done
# dbWriteTable(db, name='File', value=dfFiles, append=T, row.names=F)
dbDisconnect(db)
##### adding some additional information for covariates and groupings
db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1')
g_did
# BUG FIX: use g_did from header.R instead of the hard-coded id 12 so the
# covariate update always targets the data set this script was set up for.
dfSamples = dbGetQuery(db, paste0('select * from Sample where Sample.idData = ', g_did, ';'))
# group 1: plate number from the title, e.g. "Plate2-C10" -> "P2"
group1 = gsub('^Plate(\\d)-C\\d+', '\\1', dfSamples$title)
group1 = paste('P', group1, sep='')
# group 2: capture site from the title, e.g. "Plate2-C10" -> "Capture Site 10"
group2 = gsub('^Plate\\d-C(\\d+)', '\\1', dfSamples$title)
group2 = paste('Capture Site', group2)
dfSamples$group1 = group1
dfSamples$group2 = group2
## create an update statement for each row
# NOTE(review): values are interpolated straight into the SQL text; fine
# for these internally generated strings, but bound parameters would be
# safer if sample titles could ever contain quotes.
queries = paste('Update Sample Set group1="', dfSamples$group1, '", group2="', dfSamples$group2, '" where Sample.id=', dfSamples$id, ';', sep='')
#sapply(queries, function(x) dbSendQuery(db, x))
dbDisconnect(db)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.