blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1100a73dc6bbcd1d249bd3edabcaa67299995e71
|
c78170677c97a0f8e258bf651f2bd55068274cb8
|
/analysis/3_msg_parameters/var_cost.R
|
d8720c114fb8ebfe7e937c61911c54ba1db390d7
|
[] |
no_license
|
junukitashepard/message_trade
|
a013b18f5a66021cc052b32b37b9ceb6ba57b5c1
|
be07af7b88a9f45425f5b933279bef617b7b4faa
|
refs/heads/master
| 2022-07-23T23:18:46.670202
| 2022-07-14T17:30:53
| 2022-07-14T17:30:53
| 190,220,147
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,331
|
r
|
var_cost.R
|
####################################
# Build parameters: var_cost #
####################################
# Builds the MESSAGEix `var_cost` (variable cost) parameter for every export
# technology x region x year, fills gaps, and writes one RDS/CSV per technology.
#
# Depends on globals defined by the calling pipeline (not visible here):
# wd, output, region.list, region.number, export_technologies, year_act,
# energy.types.trade.foil, energy.types.trade.LNG, mode, unit, time, and the
# uniqueness-check helper isid(). Also assumes dplyr/stringr are available.
# You must run 2_regress files before compiling parameter!
input_reg <- paste0(wd, "output/analysis/regress")
# Import regression file
paths <- readRDS(file.path(input_reg, 'var_cost_from_reg.rds'))
# Assert (node_loc, technology, year) uniquely identifies rows of `paths`.
isid('paths', c('node_loc', 'technology', 'year'))
# Set up in MESSAGEix format #
##############################
# Cross every export technology with every destination region; the technology
# string becomes "<tech>_<region>".
all_technologies <- expand.grid(export_technologies, region.list)
all_technologies$technology <- paste0(all_technologies$Var1, "_", all_technologies$Var2)
all_technologies <- all_technologies$technology
# Full grid of model years x source nodes x technologies.
paths_msg <- expand.grid(year_act, unique(paste0(region.number, '_', toupper(region.list))), unique(all_technologies))
names(paths_msg) <- c('year_act', 'node_loc', 'technology')
# expand.grid produces factors; convert the id columns to character for joins.
paths_msg[, 2:3] <- lapply(paths_msg[, 2:3], function(x) as.character(x))
# Drop self-trade rows: the exporting node (chars 5-7 of node_loc) must differ
# from the destination suffix (last 3 chars of the technology name).
paths_msg <- subset(paths_msg, tolower(substr(node_loc, 5, 7)) !=
substr(technology, nchar(technology) - 2, nchar(technology)))
# Attach the regression-estimated variable costs; rows without an estimate
# come through as NA and are filled below.
paths_msg <- left_join(paths_msg, paths[c('node_loc', 'technology', 'year', 'var_cost')],
by = c('node_loc', 'technology', 'year_act' = 'year'))
# Make energy types (e.g. loil) follow foil variable costs
foil_costs <- data.frame()
for (e in energy.types.trade.foil) {
# Clone the foil export rows and relabel them as energy type `e`.
fcdf <- subset(paths_msg, grepl('foil_exp', technology))
fcdf$technology <- stringr::str_replace(fcdf$technology, 'foil_', paste0(e, '_'))
fcdf$foil_cost <- fcdf$var_cost
fcdf$var_cost <- fcdf$mean_var_cost <- NULL
foil_costs <- rbind(as.data.frame(foil_costs), as.data.frame(fcdf))
}
if (nrow(foil_costs) > 0) {
paths_msg <- left_join(paths_msg, foil_costs, by = c('year_act', 'node_loc', 'technology'))
# Only fill where var_cost is missing and a foil-derived value exists.
paths_msg$var_cost[is.na(paths_msg$var_cost) & !is.na(paths_msg$foil_cost)] <- paths_msg$foil_cost[is.na(paths_msg$var_cost) & !is.na(paths_msg$foil_cost)]
paths_msg$foil_cost <- NULL
}
# Make energy types (e.g. hydrogen) follow LNG variable costs
LNG_costs <- data.frame()
for (e in energy.types.trade.LNG) {
lndf <- subset(paths_msg, grepl('LNG_exp', technology))
lndf$technology <- stringr::str_replace(lndf$technology, 'LNG_', paste0(e, '_'))
lndf$LNG_cost <- lndf$var_cost
lndf$var_cost <- lndf$mean_var_cost <- NULL
LNG_costs <- rbind(as.data.frame(LNG_costs), as.data.frame(lndf))
}
if (nrow(LNG_costs) > 0) {
paths_msg <- left_join(paths_msg, LNG_costs, by = c('year_act', 'node_loc', 'technology'))
paths_msg$var_cost[is.na(paths_msg$var_cost) & !is.na(paths_msg$LNG_cost)] <- paths_msg$LNG_cost[is.na(paths_msg$var_cost) & !is.na(paths_msg$LNG_cost)]
paths_msg$LNG_cost <- NULL
}
# For landlocked regions, only allow land access to be normal price, otherwise very high
# pipeline_connections.csv lists pairs of land-connected nodes; make the
# relation symmetric by stacking the reversed pairs.
landacc <- read.csv(file.path(wd, "raw/UserInputs/pipeline_connections.csv"), stringsAsFactors = F)
names(landacc) <- c('node1', 'node2')
landacc2 <- landacc[c('node2', 'node1')]
names(landacc2) <- c('node1', 'node2')
landacc <- unique(rbind(landacc, landacc2))
landacc$partners <- paste0(landacc$node1, " ", landacc$node2)
landacc$landacc <- 1
# Build the same "source destination" key on paths_msg; the destination node is
# reconstructed from the technology suffix. NOTE(review): assumes region.number
# is a scalar prefix shared by all nodes -- confirm upstream.
paths_msg$partners <- paste0(paths_msg$node_loc, " ", region.number, "_",
toupper(substr(paths_msg$technology, nchar(paths_msg$technology) - 2,
nchar(paths_msg$technology))))
paths_msg <- left_join(paths_msg, landacc[c('partners', 'landacc')], by = c('partners'))
# Land-connected pairs get the mean positive cost (NA index positions are
# skipped because the replacement value has length 1).
paths_msg$var_cost[paths_msg$landacc == 1] <- mean(paths_msg$var_cost[paths_msg$var_cost > 0], na.rm = T)
# Fill in with mean where missing (particularly future values)
mean_var_cost <- dplyr::group_by(paths_msg, node_loc, technology) %>%
dplyr::summarise(mean_var_cost = mean(var_cost, na.rm = T))
# mean() over all-NA groups returns NaN; normalize to NA before filling.
mean_var_cost$mean_var_cost[is.nan(mean_var_cost$mean_var_cost)] <- NA
paths_msg$var_cost[is.nan(paths_msg$var_cost)] <- NA
paths_msg <- left_join(paths_msg, mean_var_cost, by = c('node_loc', 'technology'))
paths_msg$var_cost[is.na(paths_msg$var_cost)] <- paths_msg$mean_var_cost[is.na(paths_msg$var_cost)]
paths_msg$var_cost[is.na(paths_msg$var_cost)] <- 2*max(paths_msg$var_cost, na.rm = T) # make it very high to ship to landlocked regions
paths_msg$partners <- NULL
# Truncate at zero
paths_msg$var_cost[paths_msg$var_cost < 0] <- 0
# Put in MESSAGE format #
#########################
# MESSAGEix expects year_vtg/year_act pairs plus mode/time/unit columns; the
# globals `mode`, `unit`, `time` supply scenario-wide constants.
parout <- paths_msg
parout$year_vtg <- parout$year_act
parout$value <- parout$var_cost
parout$mode <- mode
parout$unit <- unit
parout$time <- time
parout <- parout[c('node_loc', 'technology', 'year_vtg',
'year_act', 'mode', 'time', 'value', 'unit')]
# Save across technologies
saveRDS(parout, file.path(output, 'analysis/msg_parameters/var_cost/var_cost_base.rds'))
# Save by technology
for (t in export_technologies) {
print(paste0('Saving parameter [', t, ']'))
# grepl-based subset would also match 'foil_exp'/'loil_exp' for t == 'oil_exp';
# the special case below restricts to technologies starting exactly with 'oil'.
assign('df', subset(parout, grepl(t, technology)))
if (t == 'oil_exp') {assign('df', subset(parout, substr(technology, 1, 3) == 'oil'))} # so we don't include foil or loil
saveRDS(df, file.path(output, paste0('analysis/msg_parameters/var_cost/', t, '.rds')))
write.csv(df, file.path(output, paste0('analysis/msg_parameters/var_cost/', t, '.csv')))
write.csv(df, file.path(output, paste0('analysis/msg_parameters/SCENARIOS/baseline_no_tariff/var_cost/', t, '.csv')))
}
|
62c42c8bf5eafd303d055dd20b871909f3398ba4
|
6656318be29e1b39b5ab20d6872a27dd9af923b2
|
/man/toDD.Rd
|
7842d714fc0f430970c0545b81f4a1ce131087bf
|
[] |
no_license
|
mdfrias/downscaleR
|
4ceb27793d2a119860d18ed5bc48b90a02705250
|
a841f836eccae7ba749030b3a65b997745906a92
|
refs/heads/master
| 2021-01-16T22:53:43.767577
| 2015-08-18T09:19:39
| 2015-08-18T09:19:39
| 31,306,761
| 0
| 1
| null | 2015-02-25T09:42:57
| 2015-02-25T09:42:57
| null |
UTF-8
|
R
| false
| false
| 684
|
rd
|
toDD.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/toDD.R
\name{toDD}
\alias{toDD}
\title{6-hourly to daily data aggregator}
\usage{
toDD(NDarray, dimNamesRef, dailyAggr)
}
\arguments{
\item{NDarray}{A N-dimensional array, as returned by \sQuote{readDataSlice}}
\item{dimNamesRef}{A vector of dimension names}
}
\value{
A ND array aggregated by its time dimension
}
\description{
Performs the 6h to 24h aggregation of variables
}
\details{
Because of the re-ordering of dimensions after using \code{apply}, the
vector of dimension names is needed for re-arranging accordingly
}
\author{
J Bedia \email{joaquin.bedia@gmail}
}
\keyword{internal}
|
2af1c6c08407227471efec69d46d00d11238777f
|
6589b6692169bc5e2aef8b4b960dc85b644f0c20
|
/utility_theory_and_binary_output_of_choice.R
|
703818eaaff16e64fd049e82c84113d6c5d7f56c
|
[] |
no_license
|
dimkon97/hello-world
|
30d0a65f641599a389d6ce8be8e0218d9ddac466
|
37cc5e9d4c83acaca2d27f0000f36b6ced142138
|
refs/heads/master
| 2021-01-25T10:55:45.596169
| 2017-06-27T21:20:54
| 2017-06-27T21:20:54
| 93,891,298
| 0
| 1
| null | 2018-10-31T17:50:46
| 2017-06-09T19:38:53
|
Matlab
|
UTF-8
|
R
| false
| false
| 1,698
|
r
|
utility_theory_and_binary_output_of_choice.R
|
# Initialize the gamble set: fixed safe/risky payoffs crossed with a grid of
# win probabilities running from 0.1 to 1 in steps of 0.1 (10 gambles).
gambles <- data.frame(
  safe_1  = 100,
  safe_2  = 80,
  risky_1 = 190,
  risky_2 = 5,
  p_1     = seq(0.1, 1, 0.1)
)
# Probability of the second outcome is the complement of the first.
gambles$p_2 <- 1 - gambles$p_1
# exp_val <- function(alpha,gamma){
# Compute prospect-theory expected utilities and a binary safe/risky choice
# for each gamble.
#
# Args:
#   alpha   - utility curvature exponent, u(x) = x^alpha
#   gamma   - probability-weighting exponent (Prelec weighting function)
#   gambles - data.frame with columns safe_1, safe_2, risky_1, risky_2, p_1, p_2
#
# Returns the input data.frame augmented with weighted probabilities (wp_*),
# utilities (su_*/ru_*), weighted expected utilities (ut_*), the softmax
# probability of choosing the safe gamble, a 0/1 choice, and the overall
# proportion of safe choices.
exp_val_2 <- function(alpha, gamma, gambles){
  # Prelec probability weighting: w(p) = exp(-(-log p)^gamma)
  gambles$wp_1 <- exp(-(-log(gambles$p_1))^gamma)
  gambles$wp_2 <- 1 - gambles$wp_1
  # Exponentiate monetary values to get utilities
  gambles$su_1 <- gambles$safe_1^alpha
  gambles$su_2 <- gambles$safe_2^alpha
  gambles$ru_1 <- gambles$risky_1^alpha
  gambles$ru_2 <- gambles$risky_2^alpha
  # Weighted probabilities times utilities give the expected value per gamble
  gambles$ut_1 <- gambles$wp_1*gambles$su_1 + gambles$wp_2*gambles$su_2
  gambles$ut_2 <- gambles$wp_1*gambles$ru_1 + gambles$wp_2*gambles$ru_2
  # Softmax gives the probability of choosing the safe bet
  gambles$prob_safe <- exp(gambles$ut_1)/(exp(gambles$ut_1) + exp(gambles$ut_2))
  # Binary output of choices (1 = safe chosen)
  gambles$choice <- ifelse(gambles$prob_safe > 0.5, 1, 0)
  # Proportion of safe choices. Fix: was hard-coded `/10`, which silently
  # assumed exactly 10 gambles; nrow() generalizes to any grid size
  # (identical result for the 10-row data built above).
  gambles$percent <- sum(gambles$choice)/nrow(gambles)
  return(gambles)
}
#heatmap
#for (i in seq(0,5,0.1)){
#for(j in seq(0,5,0.1)){
#matrix <- data.frame(gambles$percent)
#heatmap(matrix, aes(alpha, gamma))
#+ geom_tile() +
#scale_fill_gradient(low="white", high="darkgreen", name="First")
#}
#}
#I have some issues with the arguments of the heatmapp function. Also, I think that if I use
#the variables i and j as counters then ex_val(i,j,gambles) should appear somewhere. Any help
#or insight is welcome. What other graph methods could I use to model this?
|
77e4f939d2c68fd57ebfc3379888441a5532ecac
|
ab7d15d06ed92cd51cc383dc9e98ae2a8fa41eaa
|
/man/get_leverage_centrality.Rd
|
6a1c6b94e0fcdbdaf60600a6a2641332fa0bf7d5
|
[
"MIT"
] |
permissive
|
rich-iannone/DiagrammeR
|
14c46eb994eb8de90c50166a5d2d7e0668d3f7c5
|
218705d52d445c5d158a04abf8107b425ea40ce1
|
refs/heads/main
| 2023-08-18T10:32:30.784039
| 2023-05-19T16:33:47
| 2023-05-19T16:33:47
| 28,556,914
| 1,750
| 293
|
NOASSERTION
| 2023-07-10T20:46:28
| 2014-12-28T08:01:15
|
R
|
UTF-8
|
R
| false
| true
| 1,391
|
rd
|
get_leverage_centrality.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_leverage_centrality.R
\name{get_leverage_centrality}
\alias{get_leverage_centrality}
\title{Get leverage centrality}
\usage{
get_leverage_centrality(graph)
}
\arguments{
\item{graph}{A graph object of class \code{dgr_graph}.}
}
\value{
A data frame with leverage centrality values for each of the nodes.
}
\description{
Get the leverage centrality values for all nodes in the graph. Leverage
centrality is a measure of the relationship between the degree of a given
node and the degree of each of its neighbors, averaged over all neighbors. A
node with negative leverage centrality is influenced by its neighbors, as the
neighbors connect and interact with far more nodes. A node with positive
leverage centrality influences its neighbors since the neighbors tend to have
far fewer connections.
}
\examples{
# Create a random graph using the
# `add_gnm_graph()` function
graph <-
create_graph(
directed = FALSE) \%>\%
add_gnm_graph(
n = 10,
m = 15,
set_seed = 23)
# Get leverage centrality values
# for all nodes in the graph
graph \%>\%
get_leverage_centrality()
# Add the leverage centrality
# values to the graph as a
# node attribute
graph <-
graph \%>\%
join_node_attrs(
df = get_leverage_centrality(.))
# Display the graph's node data frame
graph \%>\% get_node_df()
}
|
30eaa2f110ab05e88d228702eff18804a5637486
|
29ced85982f8f7739f6b4df28f042c2299456549
|
/BasicFilters/SpdFilt.R
|
b77427882b6b0fafcb06c58d07265236b9b16ce9
|
[] |
no_license
|
ATLAS-HUJI/R
|
f5a056f5b9e82b277a2c1f41ad3c9f746e585fe1
|
d74b5d21c7b8e70e42620633159bad6e887b391b
|
refs/heads/master
| 2021-01-24T02:15:51.298877
| 2018-10-16T13:11:48
| 2018-10-16T13:11:48
| 122,840,774
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 631
|
r
|
SpdFilt.R
|
# Speed-based filter by Ingo Schiffner 2017.
# Computes the speed between consecutive localizations and drops fixes whose
# incoming segment exceeds spd_lim. Assumes x and y are projected coordinates
# in meters and t is an ATLAS timestamp in milliseconds.
# Returns a data.frame with the filtered x, y and time (t).
SpdFilt <- function(x,y,t,spd_lim)
{
  # Per-segment displacement (m) and elapsed time (s; t arrives in ms).
  step_dx <- diff(x, 1)
  step_dy <- diff(y, 1)
  step_dt <- diff(t, 1) / 1000
  # Euclidean distance per segment, then speed in m/s.
  step_dist <- sqrt(step_dx^2 + step_dy^2)
  seg_speed <- c(0, step_dist / step_dt) # first fix has no preceding segment
  keep <- seg_speed <= spd_lim
  data.frame(x = as.numeric(x[keep]),
             y = as.numeric(y[keep]),
             t = as.numeric(t[keep]))
}
|
e14a09c84e88e76807c880546dc4d880e76b12a6
|
346ca394e5d9f64ee6cc9741f285f603c903c727
|
/Shiny_connect.R
|
69008bcb814273b798823d0efc7a222b58fe5668
|
[] |
no_license
|
DavidykZhao/Comparative_pol_measurement_project
|
845825cfe3911b31e256bb106a8972eda2279d98
|
f0595449d1e9e47672389e9b2f0fe512fed259ed
|
refs/heads/master
| 2020-04-23T20:09:13.510100
| 2020-04-03T21:13:47
| 2020-04-03T21:13:47
| 171,429,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 288
|
r
|
Shiny_connect.R
|
# One-time setup for deploying a Shiny app to shinyapps.io via rsconnect.
# NOTE(review): install.packages() runs on every execution; consider guarding
# with requireNamespace("rsconnect", quietly = TRUE).
install.packages('rsconnect')
library(rsconnect)
# Register the shinyapps.io account used for deployment.
# WARNING(review): this token is a credential committed to source control
# (the secret has been blanked, but the token should be rotated and supplied
# via environment variables rather than hard-coded here).
rsconnect::setAccountInfo(name='yikai-zhao',
token='12389A9B53A08CC115D3FD54D4F123F3',
secret='')
# Deploy the app directory. This is an absolute local path, so the script
# only works on the original author's machine.
rsconnect::deployApp('/Users/zhaoyikai/Comparative_pol_measurement_project/Shiny_app')
|
7355243e212a204594180e0497f608737b8d3a06
|
ce2d6ec9fd987c5b6325eaf7271534ace6010152
|
/QualityControl.R
|
3c265239a4d2fba88f34554caedcfbaaabb44d9f
|
[
"MIT"
] |
permissive
|
Chenmengpin/CoA-scRNAseq-pipeline
|
3005b6c58a690080ab55bcc8b787e36013f5f10f
|
89b211d33dae1f669fea4a08de13bce5cd98bd00
|
refs/heads/master
| 2020-09-18T18:55:56.472782
| 2018-12-12T05:50:21
| 2018-12-12T05:50:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,465
|
r
|
QualityControl.R
|
# Do quality control on a cell and gene level
# Cell-level quality control
#
# Filters cells in two passes: (1) geneset size (> 1500 expressed genes), then
# (2) library size (not more than 3 MADs below the median on the log scale,
# via isOutlier -- presumably scater's; confirm the package is attached by the
# caller).
#
# Arguments:
#   q_array          - counts, cells in rows, genes in columns
#   m_array          - per-cell metadata aligned with q_array rows
#   id               - suffix for the objects assigned into the global env
#   qc_m_array       - metadata copy that accumulates QC annotation columns
#   original_q_array - unfiltered counts, used to recompute metrics for plots
#
# Side effects: assigns CellQC_quant_<id>, CellQC_metadata_<id>,
# library_export_<id>, geneset_export_<id>, QC_metadata_<id> to .GlobalEnv.
CellQC <- function(q_array, m_array, id, qc_m_array, original_q_array) {
# Number of distinct genes detected (non-zero counts) per cell.
geneset_size <- rowSums(q_array != 0)
gene_qc <- geneset_size > 1500
gene_m_array <- m_array[gene_qc == TRUE,] # these need to be separated so there can be a separate library size column in the QC array
gene_m_array <- na.omit(gene_m_array)
m_array <- cbind.data.frame(m_array, geneset_size) # add all library and geneset info to metadata
m_array <- m_array[gene_qc == TRUE,]
q_array <- q_array[gene_qc == TRUE,] # this filters on geneset size
q_array <- na.omit(q_array)
# Total counts per cell, computed after the geneset filter.
library_size <- rowSums(q_array)
library_qc <- !isOutlier(library_size, nmads = 3, type = "lower", log = TRUE) # best to use negated versions for metadata import
q_array <- q_array[library_qc == TRUE, ] # dual filters on library and geneset size, needs to pass both simultaneously here
m_array <- m_array[library_qc == TRUE,]
m_array <- cbind.data.frame(m_array, library_size) # add all library and geneset info to metadata
assign(paste0("CellQC_quant_",id), q_array, env = .GlobalEnv) # these need to be made like this so that it returns both with custom names
assign(paste0("CellQC_metadata_",id), m_array, env = .GlobalEnv)
print("Beginning array export for graphing")
# Per-metric export arrays for diagnostic plotting.
# NOTE(review): geneset_size here covers all cells while library_size only
# covers cells that passed the geneset filter, so the two exports have
# different lengths -- verify this is intended by the plotting code.
geneset_array <- cbind.data.frame(geneset_size, gene_qc)
library_array <- cbind.data.frame(library_size, library_qc)
assign(paste0("library_export_",id), library_array, env = .GlobalEnv) # these need to be made like this so that it returns both with custom names
assign(paste0("geneset_export_",id), geneset_array, env = .GlobalEnv)
print("Beginning metadata QC annotation")
# Flag, for every original cell, whether it survived each filter stage.
pass_library_qc <- is.element(rownames(qc_m_array), rownames(gene_m_array)) # this is why there cannot be a simultaneous dual filter on library and geneset size
pass_gene_qc <- is.element(rownames(qc_m_array), rownames(m_array))
library_size <- rowSums(original_q_array) # recalculate for the QC graphs
geneset_size <- rowSums(original_q_array != 0)
qc_m_array <- cbind.data.frame(qc_m_array, library_size, pass_library_qc, geneset_size, pass_gene_qc)
assign(paste0("QC_metadata_",id), qc_m_array, env = .GlobalEnv)
}
# Gene-level quality control
#
# Deduplicates gene columns, computes per-gene expression metrics, and keeps
# genes detected in at least 3 cells.
#
# Arguments:
#   q_array - counts, cells in rows, genes in columns
#   id      - suffix for the objects assigned into the global environment
#
# Side effects: assigns GeneQC_quant_<id> (filtered counts) and
# GeneQC_metadata_<id> (per-gene metrics) to .GlobalEnv.
GeneQC <- function(q_array, id) {
# Drop columns whose name is NA. Fix: the original
#   q_array[, -which(is.na(colnames(q_array)))]
# selects ZERO columns when there are no NA names, because -integer(0) is
# integer(0); the logical mask below behaves correctly in both cases.
q_array <- q_array[, !is.na(colnames(q_array)), drop = FALSE]
# Collate duplicate gene columns by summing their counts (rowsum groups the
# transposed matrix by gene name).
q_array <- as.data.frame(t(rowsum(t(q_array), group = rownames(t(q_array))))) # collate duplicate genes
print("Duplicates removed")
# Metadata array with one row per gene and slots for all metrics.
gene_metadata <- data.frame(matrix(0, nrow = ncol(q_array), ncol = 4), row.names = colnames(q_array)) # makes metadata array with slots for all metrics
colnames(gene_metadata) <- c("mean_nontransformed_expression", "mean_transformed_expression", "cells_per_gene", "pass_cellnumber_qc")
mean_nontransformed_expression <- as.vector(colMeans(q_array))
# log2(x + 1) keeps zero-expression genes finite.
mean_transformed_expression <- log2(mean_nontransformed_expression + 1)
gene_metadata[,1] <- mean_nontransformed_expression
gene_metadata[,2] <- mean_transformed_expression
print("Mean expression counted")
cells_per_gene <- as.vector(colSums(q_array != 0)) # finds number of cells without zero counts for gene
gene_metadata[,3] <- cells_per_gene
print("Amount of cells expressing calculated")
pass_cellnumber_qc <- cells_per_gene >= 3
gene_metadata[,4] <- pass_cellnumber_qc
# drop = FALSE keeps a data frame even when exactly one gene passes
# (a bare data.frame[, logical] would collapse to a vector).
q_array <- q_array[, cells_per_gene >= 3, drop = FALSE]
print("QC finished")
assign(paste0("GeneQC_quant_",id), q_array, envir = .GlobalEnv)
assign(paste0("GeneQC_metadata_",id), gene_metadata, envir = .GlobalEnv)
}
# Mitochondrial quality control
#
# Removes cells whose mitochondrial read fraction is 12% or higher.
# Mitochondrial genes are identified by a 'mt-' substring in the gene name
# (NOTE(review): grepl matches 'mt-' anywhere in the name, not only as a
# prefix -- confirm gene naming makes this safe).
#
# Arguments mirror CellQC; side effects: assigns mt_quant_<id>,
# mt_metadata_<id>, mt_export_<id>, QC_metadata_<id> to .GlobalEnv.
MitoQC <- function(q_array, m_array, id, qc_m_array, original_q_array) {
# Fraction of each cell's counts coming from mitochondrial genes.
mt_fraction <- rowSums(q_array[, grepl('mt-', colnames(q_array))]) / rowSums(q_array)
mt_qc <- mt_fraction < .12
print("Mitochondrial genes identified")
m_array <- cbind.data.frame(m_array, mt_fraction)
q_array <- q_array[mt_qc == TRUE,] # this actually does the filtering
m_array <- m_array[mt_qc == TRUE,]
assign(paste0("mt_quant_",id), q_array, env = .GlobalEnv) # these need to be made like this so that it returns both with custom names
assign(paste0("mt_metadata_",id), m_array, env = .GlobalEnv)
# Export fraction + pass flag for diagnostic plotting.
mt_array <- cbind.data.frame(mt_fraction, mt_qc)
assign(paste0("mt_export_",id), mt_array, env = .GlobalEnv)
print("Beginning metadata QC annotation")
# Recompute the fraction on the unfiltered counts and flag, for every
# original cell, whether it survived the mitochondrial filter.
mt_fraction <- rowSums(original_q_array[, grepl('mt-', colnames(original_q_array))]) / rowSums(original_q_array)
mt_qc <- is.element(rownames(qc_m_array), rownames(m_array))
qc_m_array <- cbind.data.frame(qc_m_array, mt_fraction, mt_qc)
assign(paste0("QC_metadata_",id), qc_m_array, env = .GlobalEnv)
}
# Scaling by size factor
#
# Normalizes counts by scran deconvolution size factors, then log2-transforms.
# Requires scran's quickCluster()/computeSumFactors() to be attached by the
# caller.
#
# Arguments:
#   q_array    - QC'd counts, cells in rows, genes in columns
#   m_array    - per-cell metadata; column 4 of the first row is read as the
#                sequencing method id ("10X" switches on the UMI threshold)
#   id         - suffix for the objects assigned into the global environment
#   qc_m_array - metadata copy that accumulates QC annotation columns
#
# Side effects: assigns normalized_quant_<id>, normalized_metadata_<id>,
# QC_metadata_<id> to .GlobalEnv.
NormalizeCountData <- function(q_array, m_array, id, qc_m_array) {
method_id <- m_array[1,4]
cell_ids <- rownames(q_array) # these ensure cell and gene info survives matrix transformation
gene_ids <- colnames(q_array)
q_array <- t(q_array) # scran normalization requires a matrix with cells in columns and genes in rows
deconvolution_clusters <- quickCluster(q_array) # low-level clustering improves deconvolution performance by minimizing differential expression
print("Deconvolution clusters calculated")
if (method_id == "10X") {
size_factors <- computeSumFactors(q_array, sizes = seq(20, 120, 2), # computes size factors per cell, use more pools for higher precision
clusters = deconvolution_clusters, min.mean = .1) # UMI counts need lower threshold
} else {
size_factors <- computeSumFactors(q_array, sizes = seq(20, 120, 2), # computes size factors per cell, use more pools for higher precision
clusters = deconvolution_clusters) # clusters improve performance by reducing differential expression
}
size_factors_dataframe <- cbind.data.frame(colnames(q_array), size_factors)
colnames(size_factors_dataframe) <- c("row", "size_factor")
print("Size factors computed for QC")
q_array <- scale(q_array, center = FALSE, scale = size_factors) # performs scaling with these factors
print("Cells scaled by library size")
q_array <- data.frame(t(q_array), row.names = cell_ids) # returns to metadata-compatible format
colnames(q_array) <- gene_ids
# Cells with non-positive size factors cannot be meaningfully scaled; drop them.
q_array <- q_array[size_factors > 0,]
m_array <- m_array[size_factors > 0,]
q_array <- q_array + 1 # prevents undefined values for zeroes in log transformation
q_array <- log2(q_array) # log-transforms data to account for heteroscedasticity, log2 used because it is fine-grained and easy to represent fold changes
m_array <- cbind.data.frame(m_array, size_factors[size_factors > 0])
# NOTE(review): hard-codes the size-factor column at position 8 -- confirm
# m_array always has exactly 7 columns at this point.
colnames(m_array)[8] <- 'size_factor'
assign(paste0("normalized_quant_",id), q_array, env = .GlobalEnv) # returns original quant array identifier with modifier indicating normalization
assign(paste0("normalized_metadata_",id), m_array, env = .GlobalEnv)
print("Beginning metadata QC annotation")
pass_size_qc <- is.element(rownames(qc_m_array), rownames(m_array))
qc_m_array <- cbind.data.frame(qc_m_array, pass_size_qc)
# merge() on a temporary 'row' key keeps cells that were filtered out earlier;
# their missing metrics are zero-filled, then the key column is dropped.
qc_m_array$row <- rownames(qc_m_array)
qc_m_array <- merge(qc_m_array, size_factors_dataframe, by='row', all=TRUE)
qc_m_array[is.na(qc_m_array)] <- 0
qc_m_array <- qc_m_array[, -1]
assign(paste0("QC_metadata_",id), qc_m_array, env = .GlobalEnv)
}
# Doublet removal
#
# Scores putative doublets with scran's doubletCells() (must be attached by
# the caller) and removes the top 1% highest-scoring cells.
#
# Arguments mirror CellQC; side effects: assigns doublet_quant_<id>,
# doublet_metadata_<id>, doublet_export_<id>, QC_metadata_<id> to .GlobalEnv.
DoubletQC <- function(q_array, m_array, id, qc_m_array, original_q_array) {
q_array <- t(q_array)
doublet_score <- doubletCells(q_array, force.match = TRUE)
# Keep cells below the 99th percentile of doublet scores.
doublet_qc <- doublet_score < quantile(doublet_score, .99)
print("Doublets removed")
# NOTE(review): q_array is transposed here (cells in columns), so row-wise
# filtering operates on genes, while m_array is filtered by cell -- confirm
# this is the intended orientation.
q_array <- q_array[doublet_qc == TRUE,]
m_array <- m_array[doublet_qc == TRUE,]
assign(paste0("doublet_quant_",id), q_array, env = .GlobalEnv) # these need to be made like this so that it returns both with custom names
assign(paste0("doublet_metadata_",id), m_array, env = .GlobalEnv)
print("Beginning metadata QC annotation")
doublet_array <- cbind.data.frame(doublet_score, doublet_qc)
assign(paste0("doublet_export_",id), doublet_array, env = .GlobalEnv)
# Re-score the unfiltered counts so every original cell gets a score, and
# flag whether it survived the doublet filter.
original_q_array <- t(original_q_array)
doublet_score <- doubletCells(original_q_array, force.match = TRUE)
doublet_qc <- is.element(rownames(qc_m_array), rownames(m_array))
qc_m_array <- cbind.data.frame(qc_m_array, doublet_score, doublet_qc)
assign(paste0("QC_metadata_",id), qc_m_array, env = .GlobalEnv)
}
|
d0cd6ae3538847fcfe394032d3fc3a2efb252f14
|
f18a02daf3f78f763962f1b7165b7bfa4525cdb2
|
/Random number mean.R
|
52c7b21701c29d020b80c8ee9ffb2e06733544b1
|
[] |
no_license
|
rashmigangadharaiah/Statistical-Analysis-using-R
|
e1d8c393a113c946dde16b4d795599bf9348cf61
|
d3e635d8a697c40731498e0928ce174d43d80668
|
refs/heads/master
| 2021-10-08T07:59:13.618425
| 2018-03-28T04:50:10
| 2018-03-28T04:50:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
r
|
Random number mean.R
|
# Estimate P(-1 < Z < 1) for Z ~ N(0, 1) by Monte Carlo simulation.
# The true value is pnorm(1) - pnorm(-1) ~= 0.6827.
set.seed(1) # make the simulation reproducible
n <- 10000
draws <- rnorm(n)
# Vectorized count of draws inside (-1, 1); replaces the original scalar
# counting loop (same distribution of draws, idiomatic R).
answer <- mean(draws > -1 & draws < 1)
answer
# Demonstrate iterating over a small random vector.
x <- rnorm(5)
for (i in x) {
  print(i)
}
|
50e96d1e43ea4fe20cdd1b12161cc200c82af861
|
2e4ca04aaff834b3e0a8177d1ebb6f86c057e674
|
/man/sortData.Rd
|
cdd6c8e29d46c733c072f45c33f216cf68a9a9ea
|
[] |
no_license
|
crtahlin/medplot
|
60691530bc273a056143883c15f451d966b1e009
|
1c8365ca0c79459170b2108ac12fe2db982303ba
|
refs/heads/master
| 2021-01-19T07:34:35.405634
| 2016-01-26T06:04:10
| 2016-01-26T06:04:10
| 7,378,840
| 4
| 2
| null | 2015-01-22T09:48:42
| 2012-12-30T17:59:19
|
R
|
UTF-8
|
R
| false
| false
| 718
|
rd
|
sortData.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{sortData}
\alias{sortData}
\title{Sort results data}
\usage{
sortData(data, sortMethod = "BEA", nUNITS, DATES.COLUMN.FIRST,
DATES.COLUMN.LAST, TEST.RESULT.LEVELS)
}
\description{
Function that sorts the data according to a criterion. Not to
be called directly by the user.
}
\details{
Function is called by the plotTests function, to sort the data before
it starts working on it. Different methods of sorting can be used.
"DateIn" sorts by the date of admission.
"BEA" uses seriation package and Bond Energy Algorithm.
"BEA_TSP" as above with TSP to optimize the measure of effectiveness.
"PCA" uses seriation package First principal component algorithm.
}
|
2f32045b147b7e9483a917ea254c56b0fbe4f52d
|
0c0aad04a8a20651d11ace04fb47167798cbad7f
|
/VFGA_DESAFIO2.R
|
3ca768f02d8d6d8b796473dcf1157abce2a8bb20
|
[] |
no_license
|
Vero-arte/VFGA_DESAFIO2
|
de281cf9b645914e98f011b3b683e896a235151b
|
641a8ee59d283e3edfdf41eb8309a51151859e27
|
refs/heads/master
| 2022-12-25T12:08:16.877458
| 2020-09-24T03:27:10
| 2020-09-24T03:27:10
| 298,157,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,400
|
r
|
VFGA_DESAFIO2.R
|
#
# Made with pleasure by Veronica F. Garcia Arteaga (UAEH)
#
# LAB - Challenge 2
#
# Load data
# NOTE(review): the `gender` data frame used throughout is never loaded in
# this script -- presumably it is created by an earlier step; confirm before
# running.
#
# Load the ggplot2 library
library(ggplot2)
# Scatter plot coloured by state: LEYvd
ggplot(data = gender,
       mapping = aes(x = STATE,
                     y = LEYvd,
                     color = STATE)) +
  geom_point()
# Scatter plot coloured by state: LEYprop
ggplot(data = gender,
       mapping = aes(x = STATE,
                     y = LEYprop,
                     color = STATE)) +
  geom_point()
# Discrete scale: LEYer
# Scatter plot per Latin American country: LEYer
# (fixed: missing comma after the x aesthetic and the misspelled column
# name `StatE`, both of which made the original fail to parse/run)
g2 <- ggplot(data = gender,
             mapping = aes(x = STATE,
                           y = LEYer)) +
  geom_point()
# show g2
g2
# Discrete scale: Leynod
# Scatter plot per Latin American country: Leynod
# (same missing-comma / `StatE` fix as above)
g3 <- ggplot(data = gender,
             mapping = aes(x = STATE,
                           y = Leynod)) +
  geom_point()
# show g3
g3
# Boxplot across states
g4 <- ggplot(data = gender,
             mapping = aes(x = STATE,
                           y = LEYvd,
                           fill = STATE)) +
  geom_boxplot()
# show g4
g4
# FACETING
# base plot
p <- ggplot(data = gender,
            mapping = aes(x = STATE,
                          y = LEYsh,
                          color = STATE)) +
  geom_point()
# create object p
p
# facet_wrap (recommended with one variable)
p + facet_wrap(~STATE)
|
aedd42d4411cf23d09b29665076558b747dab96e
|
1c6c63233fbd72e06573c114dbfc881aecde130d
|
/code/analysis/compare.ROC.R
|
d4ce2f115294c1b4451971da50d75ba7319633fe
|
[] |
no_license
|
Minzhe/geneck.ena
|
2f361a4754865262ebf9539363860c13eb360369
|
498f886a36136c22b7b1ddb1f86d53af236a27dc
|
refs/heads/master
| 2021-01-01T18:53:48.235866
| 2017-12-20T18:09:09
| 2017-12-20T18:09:09
| 98,461,856
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,169
|
r
|
compare.ROC.R
|
############################################################
### Compare.ROC.R ###
############################################################
# Batch driver: compares gene-network construction methods by the AUC of
# their ROC curves, delegating all work to compare.ggm.net.roc() from the
# sourced helper file. Intended to be run non-interactively with argparse
# command-line options.
# NOTE(review): hard-coded setwd() ties this script to one cluster path;
# prefer running from the project root or passing the path as an argument.
setwd("/project/bioinformatics/Xiao_lab/s418336/projects/geneck.ena")
suppressMessages(library(argparse))
# Defines compare.ggm.net.roc() used below.
source("code/analysis/compare.net.roc.fun.R")
############# 1. Parse comandline argument #################
# NOTE(review): "pipline" typo lives inside the user-visible description
# string; left unchanged here since changing it alters program output.
parser <- ArgumentParser(description = "This pipline is to compare gene network construction methods based on their auc of ROC curve.")
parser$add_argument("-p", "--path", type = "character", help = "base data folder")
parser$add_argument("-s", "--size", type = "character", help = "size of network")
parser$add_argument("-v", "--sigma2", type = "double", help = "sigma2 when simulating network data")
parser$add_argument("-n", "--num", type = "integer", help = "number of samples when simulating network data")
args <- parser$parse_args()
folder <- args$path
size <- args$size
sigma2 <- args$sigma2
n <- args$num
# Run the comparison; plots/results are written to disk by the helper.
res <- compare.ggm.net.roc(base.folder = folder, size = size, n.sample = n, sigma = sigma2, verbose = TRUE, plot = TRUE, save = TRUE, store.temp = TRUE)
|
9d2fc1a2621c375e988554c61e530edcdd77ca83
|
b01c5ad3ea739749059c4ee94a4734349a5f71ed
|
/R/biom_survey_map_compespecie.R
|
001704c8068ca18922953dd9bbe2c654ffb1cab4
|
[] |
no_license
|
PabloMBooster/fenix
|
278e7f1d26241528a2c60001372c47b033c5b41e
|
3f19c4ce0108d33d7813eedc6b1c07c5f9557d2c
|
refs/heads/master
| 2023-08-03T14:40:33.426599
| 2023-07-31T21:45:30
| 2023-07-31T21:45:30
| 76,564,342
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,245
|
r
|
biom_survey_map_compespecie.R
|
# Draw a map of Peru with per-haul pie charts showing the species composition
# of survey catches. The five most-caught species are shown individually and
# everything else is lumped into "Otros" (Others).
#
# Key arguments (several, e.g. xLim/yLim/Pch/CexPoint/Add/portImport/LatTck,
# are currently accepted but not used in this body):
#   baseDat     - haul-level catch data with NOMBRE_COMERCIAL (species name),
#                 REE_NPESESP (catch), EMBARCACION (vessel), REO_NNUMLAN (haul)
#   outFolder / outFile - where to write the PNG when save = TRUE
#   Cols        - pie slice colours
#   spLabels    - optional legend labels overriding species names
#   addIsoAreas, n_perfil - forwarded to mapa_peru() (defined elsewhere)
#   Legend      - draw the species legend
#
# Side effects: draws on the active device (or a PNG when save = TRUE).
# Depends on mapplots plus project helpers mapa_peru() and .getSpData().
biom_survey_map_compespecie = function(baseDat, outFolder = ".", outFile = "MapPieComposicionSp.png", xLim = c(-83, -70), yLim = c(-20, -3), Pch = 16,
Cols = rainbow(6), CexPoint = 0.9, widthFig = 700, heightFig = 820,
spLabels = NULL, Add = F, save = F, portImport = 1,
addIsoAreas = F, Legend = TRUE, LatTck = 2, n_perfil = 1){
require(mapplots)
if(is.null(outFolder)){
outFolder <- getwd()
}
if(save){
png(filename = file.path(outFolder, outFile), width = widthFig, height = heightFig,
units = "px", res = 130)
}
par(mar = c(4,4,1,1), oma = c(0,0,0,0))
# Base map of Peru (project helper, defined elsewhere).
mapa_peru(area_iso = addIsoAreas, n_perfil = n_perfil )
selDat = baseDat
sort(unique(selDat[, "NOMBRE_COMERCIAL"]))
# Total catch per species (unique() then sum() per the original logic).
capBySp = by(data = selDat[, "REE_NPESESP"], INDICES = selDat[, "NOMBRE_COMERCIAL"], FUN = unique)
xb = as.list(capBySp)
xc = sapply(X = xb, FUN = sum, na.rm = T)
Sps = names(xc)
capSp = as.vector(xc)
outd = data.frame(sp = Sps, cap = capSp)
# Rank species by catch; everything beyond the top five becomes "Otros".
outd2 = outd[order(outd$cap, decreasing = T), ]
outd2$sp = as.character(outd2$sp)
otSp = outd2$sp[6:nrow(outd2)]
for(i in 1:nrow(baseDat)){
if(baseDat[i, "NOMBRE_COMERCIAL"] %in% otSp){
baseDat[i, "NOMBRE_COMERCIAL"] = "Otros"
} else {
baseDat[i, "NOMBRE_COMERCIAL"] = baseDat[i, "NOMBRE_COMERCIAL"]
}
}
# Unique haul key: vessel + haul number.
baseDat$indLance = paste0(baseDat[, "EMBARCACION"], baseDat[, "REO_NNUMLAN"])
outMatSp = NULL
idx2 = unique(baseDat$indLance)
# Per-haul species composition via project helper .getSpData().
for(i in seq_along(idx2)){
tmp = baseDat[which(baseDat[, "indLance"] == idx2[i]), ]
tmpMat = .getSpData(dat2 = tmp)
outMatSp = rbind(outMatSp, tmpMat)
}
# mapplots: build the lon/lat/value matrix and draw one pie per haul.
xyz = make.xyz(outMatSp$lon,outMatSp$lat,outMatSp$capt,outMatSp$sp)
xyz$z = xyz$z[,order(names(xyz$z[1,]))]
draw.pie(xyz$x, xyz$y, xyz$z, radius = 0.2, col=Cols, scale = FALSE)
if(Legend){
if(is.null(spLabels)){
legend("bottomleft", legend = colnames(xyz$z), pch = 15, bty = "n", col = Cols,
cex = 0.9)
} else {
legend("bottomleft", legend = spLabels, pch = 15, bty = "n", col = Cols,
cex = 0.9)
}
}
if(save){
dev.off()
}
return(invisible())
}
|
cba7f1184d1263695da2ea117392ee3eca3fb7fb
|
d5e64b2499f6a4ae18dff2c15894caf91bd41fc7
|
/R/spheroid_dist.R
|
4adf33c6a3f049293c9cbac1e986887efd9bfa06
|
[
"MIT"
] |
permissive
|
bczernecki/climate
|
cbe81b80335d3126ad943726c8d3185805900462
|
9c168a6a58854c374cd4c7b13b23cba28adeb7e2
|
refs/heads/master
| 2023-04-14T03:02:26.083700
| 2023-04-01T13:48:48
| 2023-04-01T13:48:48
| 197,452,909
| 66
| 23
|
NOASSERTION
| 2023-04-01T13:48:50
| 2019-07-17T19:49:40
|
R
|
UTF-8
|
R
| false
| false
| 1,099
|
r
|
spheroid_dist.R
|
#' Distance between two points on a spheroid
#'
#' Calculate the distance between two points on the surface of a spheroid
#' using Vincenty's formula (spherical special case). This function can be
#' used when GIS libraries for calculating distance are not available.
#'
#' @param p1 coordinates of the first point in decimal degrees (LON, LAT)
#' @param p2 coordinates of the second point in decimal degrees (LON, LAT)
#'
#' @return distance between two locations in kilometers
#' @export
#' @examples
#' p1 = c(18.633333, 54.366667) # longitude and latitude for Gdansk
#' p2 = c(17.016667, 54.466667) # longitude and latitude for Slupsk
#' spheroid_dist(p1, p2)
#'
spheroid_dist = function(p1, p2) {
  r = 6371009 # mean earth radius in meters
  vec = c(p1, p2) * pi / 180 # radians: (lon1, lat1, lon2, lat2)
  diff_long = vec[3] - vec[1]
  num = (cos(vec[4]) * sin(diff_long))^2 + (cos(vec[2]) * sin(vec[4]) - sin(vec[2]) * cos(vec[4]) * cos(diff_long))^2
  denom = sin(vec[2]) * sin(vec[4]) + cos(vec[2]) * cos(vec[4]) * cos(diff_long)
  # Fix: use atan2 instead of atan(sqrt(num) / denom). When the central angle
  # exceeds 90 degrees, denom is negative and plain atan returned a negative
  # angle (hence a negative distance); atan2 places the angle in [0, pi].
  d = atan2(sqrt(num), denom)
  d = d * r
  return(d / 1000) # output in km
}
|
0805acbd532702cf37599ac8fdb801b4302ed7ce
|
3396f4a2a342489c200ac10c09cdff4e6706d6d4
|
/Teleconnections/dataPrep/NLDAS_to_Met_Hourly.R
|
78f9473b265f4e651b5abf59f07151a5a30b52c3
|
[] |
no_license
|
CareyLabVT/MacrosystemsEDDIE
|
ab0b38fa49cffdb71d1638b639c601b1c17ec5dc
|
7f8cf883075eb332121bbae68a8c3a450d41d9c1
|
refs/heads/master
| 2023-06-07T04:43:13.636680
| 2023-05-26T13:52:03
| 2023-05-26T13:52:03
| 91,721,847
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,309
|
r
|
NLDAS_to_Met_Hourly.R
|
### Format NLDAS output into GLM-friendly format
pacman::p_load(tidyverse, lubridate)
options(scipen=999)
LakeName = 'Prairie Lake'
source <- paste('C:/Users/KJF/Desktop/R/MacrosystemsEDDIE/Teleconnections/dataPrep/NLDAS/',LakeName, '/',sep='')
glm_dir <- paste('C:/Users/KJF/Desktop/R/MacrosystemsEDDIE/Teleconnections/Lakes/in_Progress/',LakeName, sep='')
# Pull files and select/rename columns for met_hourly ####
# --- Read NLDAS-2 forcing variables for one lake and convert units for GLM ---
# `source` (directory prefix) and `LakeName` are defined earlier in the script;
# each CSV holds one forcing variable keyed by dateTime.
shortwave <- read_csv(paste(source,LakeName,"_DSWRFsfc_110_SFC.csv", sep="")) %>% #DSWRF = shortwave radiation flux downwards (surface) [W/m2]
select(dateTime, DSWRFsfc_110_SFC) %>% rename(ShortWave = DSWRFsfc_110_SFC)
longwave <- read_csv(paste(source,LakeName,"_DLWRFsfc_110_SFC.csv", sep="")) %>% #DLWRF = longwave radiation flux downwards (surface) [W/m2]
select(dateTime, DLWRFsfc_110_SFC) %>% rename(LongWave = DLWRFsfc_110_SFC)
AirTemp <- read_csv(paste(source,LakeName,"_TMP2m_110_HTGL.csv", sep="")) %>% #TMP = 2 m aboveground temperature [K]
mutate(temp.dC = TMP2m_110_HTGL-273.15) %>% # Calculate in degrees C (Kelvin offset)
select(dateTime, temp.dC) %>% rename(AirTemp = temp.dC)
# Relative humidity is derived from surface pressure, specific humidity and
# air temperature. The expression appears to be the standard specific-humidity
# -> RH conversion using a Magnus/Bolton-type saturation vapour pressure
# (6.112 * exp(17.67*T/(T+243.5))) -- TODO confirm against the original source.
RelHum <- (read_csv(paste(source,LakeName,"_PRESsfc_110_SFC.csv", sep="")) %>% #PRES = surface Pressure [Pa]
mutate(pressure.mb = PRESsfc_110_SFC/100) %>% # Pa -> millibar
select(dateTime, pressure.mb)) %>%
left_join(read_csv(paste(source,LakeName,"_SPFH2m_110_HTGL.csv", sep="")) %>% #SPFH = 2 m aboveground Specific humidity [kg/kg]
select(dateTime, SPFH2m_110_HTGL) %>% rename(specific.humidity = SPFH2m_110_HTGL)) %>%
left_join(AirTemp) %>%
mutate (RelHum = (specific.humidity * pressure.mb / (0.378 * specific.humidity + 0.622)) /
(6.112 * exp((17.67 * AirTemp)/(AirTemp + 243.5)))) %>%
# Clamp to the physically valid range [0, 1].
mutate(RelHum = ifelse(RelHum > 1, 1,
ifelse(RelHum < 0, 0, RelHum))) %>%
select(dateTime, RelHum)
# Wind speed magnitude from the zonal (U) and meridional (V) components.
windSpeed <- read_csv(paste(source,LakeName,"_UGRD10m_110_HTGL.csv", sep="")) %>% #UGRD = 10 m aboveground Zonal wind speed [m/s]
select(dateTime, UGRD10m_110_HTGL) %>%
left_join(read_csv(paste(source,LakeName,"_VGRD10m_110_HTGL.csv", sep="")) %>% #VGRD = 10 m aboveground Meridional wind speed [m/s]
select(dateTime, VGRD10m_110_HTGL)) %>%
mutate(WindSpeed = sqrt(UGRD10m_110_HTGL^2 + VGRD10m_110_HTGL^2)) %>% # wind speed (m/s)
select(dateTime, WindSpeed)
# Precipitation: hourly total [kg/m2] ~ mm/h; /1000 gives m/h, *24 gives m/day.
precip <- read_csv(paste(source,LakeName,"_APCPsfc_110_SFC_acc1h.csv", sep="")) %>% #APCP = precipitation hourly total [kg/m2]
mutate(precip.m.day=APCPsfc_110_SFC_acc1h/1000*24) %>%
select(dateTime, precip.m.day) %>% rename(Rain = precip.m.day)
## Other Files (available but unused here):
#LakeName_PEVAPsfc_110_SFC_acc1h #PEVAP = potential evaporation hourly total [kg/m2]
#LakeName_CAPE180_0mb_110_SPDY #CAPE = 180-0 mb above ground Convective Available Potential Energy [J/kg]
#LakeName_CONVfracsfc_110_SFC_acc1h #CONVfrac = fraction of total precipitation that is convective [unitless]
# Build GLM-style dataframe of met variables ####
# All joins are on the shared dateTime column (left_join's default matching
# on common column names).
met <- shortwave %>%
left_join(longwave) %>%
left_join(AirTemp) %>%
left_join(RelHum) %>%
left_join(windSpeed) %>%
left_join(precip) %>%
rename(time = dateTime) %>%
# Snow is not provided by this forcing set, so it is written as constant 0.
# BUG FIX: the original format string "%Y-%m-%d HH:MM:SS" is not a valid
# strptime specification (time fields must be %H:%M:%S); if `time` ever
# arrived as character it would parse to NA. When read_csv already returned
# POSIXct the format argument is ignored, so this fix is behavior-preserving
# in the common case and correct in the character case.
mutate(Snow = 0, time = as.POSIXct(time, format= "%Y-%m-%d %H:%M:%S"))
write.csv(met, paste(glm_dir, "/met_hourly.csv", sep=''), row.names = F,quote=F)
|
17f8d978aa28d2f2688956d5959ca3073a067cfb
|
b09958d658d683d351c30630bc6c9dacac825c42
|
/analysis/data_selection.R
|
8e4c6dfdcb336812c1bd5f5d795b890278869a6f
|
[
"MIT"
] |
permissive
|
opensafely/comparative-booster
|
e5e45a66c25315dc7c09492945d658fc7b9e330f
|
bca54292baa80e967187ca28988d4897ae88aedc
|
refs/heads/main
| 2023-08-23T10:44:15.655135
| 2023-03-01T15:29:21
| 2023-03-01T15:29:21
| 481,140,039
| 0
| 0
|
MIT
| 2022-11-28T16:21:42
| 2022-04-13T08:48:40
|
R
|
UTF-8
|
R
| false
| false
| 10,057
|
r
|
data_selection.R
|
# # # # # # # # # # # # # # # # # # # # #
# Purpose: import processed data and filter out people who are excluded from the main analysis
# outputs:
# - inclusion/exclusions flowchart data (up to matching step)
# # # # # # # # # # # # # # # # # # # # #
# Preliminaries ----
## Import libraries ----
library('tidyverse')
library('here')
library('glue')
library('gt')
library('gtsummary')
# Import custom user functions from lib
source(here("lib", "functions", "utility.R"))
# Import design elements
source(here("lib", "design", "design.R"))
# Import redaction functions
source(here("lib", "functions", "redaction.R"))
## create output directories for data ----
# fs::dir_create() is a no-op if the directory already exists.
fs::dir_create(here("output", "data"))
## create output directories for tables/summaries ----
output_dir <- here("output", "prematch")
fs::dir_create(output_dir)
## Import processed data ----
# data_processed.rds is produced by an upstream processing script.
data_processed <- read_rds(here("output", "data", "data_processed.rds"))
# Define selection criteria ----
# One row per patient; each column is a logical eligibility flag, combined
# into a single `include` indicator at the end.
data_criteria <- data_processed %>%
transmute(
patient_id,
vax3_type,
has_age = !is.na(age),
has_sex = !is.na(sex) & !(sex %in% c("I", "U")),
has_imd = imd_Q5 != "Unknown",
#has_ethnicity = !is.na(ethnicity_combined),
has_region = !is.na(region),
#has_msoa = !is.na(msoa),
isnot_hscworker = !hscworker,
isnot_carehomeresident = !care_home_combined,
isnot_endoflife = !endoflife,
isnot_housebound = !housebound,
# First dose must be on/after the national rollout date for its brand.
vax1_afterfirstvaxdate = case_when(
(vax1_type=="pfizer") & (vax1_date >= study_dates$firstpfizer_date) ~ TRUE,
(vax1_type=="az") & (vax1_date >= study_dates$firstaz_date) ~ TRUE,
(vax1_type=="moderna") & (vax1_date >= study_dates$firstmoderna_date) ~ TRUE,
TRUE ~ FALSE
),
consistentvax3date = vax3_date == anycovidvax_3_date,
vax3_afterstartdate = vax3_date >= study_dates$studystart_date,
vax3_beforeenddate = vax3_date <= study_dates$studyend_date,
vax12_homologous = vax1_type==vax2_type,
has_vaxgap12 = vax2_date >= (vax1_date+17), # at least 17 days between first two vaccinations
has_vaxgap23 = vax3_date >= (vax2_date+17) | is.na(vax3_date), # at least 17 days between second and third vaccinations
has_knownvax1 = vax1_type %in% c("pfizer", "az"),
has_knownvax2 = vax2_type %in% c("pfizer", "az"),
has_expectedvax3type = vax3_type %in% c("pfizer", "moderna"),
# No documented COVID event within the 28 days before the booster.
has_norecentcovid = ((vax3_date - anycovid_0_date) >= 28) | is.na(anycovid_0_date),
isnot_inhospital = !inhospital,
# NOTE(review): jcvi_group_6orhigher is computed here but not used in
# `include` below -- confirm whether it should be part of the criteria.
jcvi_group_6orhigher = jcvi_group %in% as.character(1:6),
include = (
consistentvax3date &
vax1_afterfirstvaxdate &
vax3_afterstartdate & vax3_beforeenddate & has_expectedvax3type &
has_age & has_sex & has_imd & has_region & #has_ethnicity &
has_vaxgap12 & has_vaxgap23 & has_knownvax1 & has_knownvax2 & vax12_homologous &
isnot_hscworker &
isnot_carehomeresident & isnot_endoflife & isnot_housebound &
has_norecentcovid &
isnot_inhospital
),
)
# Keep only included patients, re-attach the full processed data, and drop
# factor levels that no longer occur after filtering.
data_cohort <- data_criteria %>%
filter(include) %>%
select(patient_id) %>%
left_join(data_processed, by="patient_id") %>%
droplevels()
write_rds(data_cohort, here("output", "data", "data_cohort.rds"), compress="gz")
arrow::write_feather(data_cohort, here("output", "data", "data_cohort.feather"))
# Cumulative inclusion flags c0..c6 for the flowchart; only patients passing
# the baseline criterion c0 are retained.
data_inclusioncriteria <- data_criteria %>%
transmute(
patient_id,
vax3_type,
c0 = consistentvax3date & vax1_afterfirstvaxdate & vax3_afterstartdate & vax3_beforeenddate & has_expectedvax3type,
c1 = c0 & (has_age & has_sex & has_imd & has_region),
c2 = c1 & (has_vaxgap12 & has_vaxgap23 & has_knownvax1 & has_knownvax2 & vax12_homologous),
c3 = c2 & (isnot_hscworker),
c4 = c3 & (isnot_carehomeresident & isnot_endoflife & isnot_housebound),
c5 = c4 & (has_norecentcovid),
c6 = c5 & (isnot_inhospital),
) %>%
filter(c0)
write_rds(data_inclusioncriteria, here("output", "data", "data_inclusioncriteria.rds"), compress="gz")
## flowchart -- UNROUNDED counts (potentially disclosive; computed for
## internal checking only and not exported -- see the rounded version below) ----
# FIX: the original section comment wrongly said "rounded so disclosure-safe"
# (copy-paste from the rounded block); this version applies no rounding.
# Also fixed the label typo "recieved" -> "received" for consistency with the
# rounded flowchart's c0 label.
data_flowchart <-
data_inclusioncriteria %>%
select(-patient_id) %>%
group_by(vax3_type) %>%
summarise(
across(.fns=sum)
) %>%
pivot_longer(
cols=-vax3_type,
names_to="criteria",
values_to="n"
) %>%
group_by(vax3_type) %>%
mutate(
# Counts excluded at each step plus step-wise and overall proportions.
n_exclude = lag(n) - n,
pct_exclude = n_exclude/lag(n),
pct_all = n / first(n),
pct_step = n / lag(n),
crit = str_extract(criteria, "^c\\d+"),
criteria = fct_case_when(
crit == "c0" ~ "Aged 18+ and received booster dose of BNT162b2 or mRNA-1273 between 29 October 2021 and 31 January 2022", # paste0("Aged 18+\n with 2 doses on or before ", format(study_dates$lastvax2_date, "%d %b %Y")),
crit == "c1" ~ " with no missing demographic information",
crit == "c2" ~ " with homologous primary vaccination course of BNT162b2 or ChAdOx1",
crit == "c3" ~ " and not a health and social care worker",
crit == "c4" ~ " and not a care/nursing home resident, end-of-life or housebound",
crit == "c5" ~ " and no COVID-19-related events within 28 days",
crit == "c6" ~ " and not admitted in hospital at time of booster",
TRUE ~ NA_character_
)
) #
#write_csv(data_flowchart, here("output", "data", "flowchart.csv"))
## flowchart -- rounded so disclosure-safe ----
# Same structure as the unrounded flowchart above, but counts are rounded up
# to multiples of 7 via ceiling_any() (presumably defined in the sourced
# redaction/utility functions -- TODO confirm) before proportions are derived,
# so this version is safe to export.
data_flowchart_rounded <-
data_inclusioncriteria %>%
select(-patient_id) %>%
group_by(vax3_type) %>%
summarise(
across(.fns=~ceiling_any(sum(.), 7))
) %>%
pivot_longer(
cols=-vax3_type,
names_to="criteria",
values_to="n"
) %>%
group_by(vax3_type) %>%
mutate(
# Counts excluded at each step plus step-wise and overall proportions,
# all computed from the rounded counts.
n_exclude = lag(n) - n,
pct_exclude = n_exclude/lag(n),
pct_all = n / first(n),
pct_step = n / lag(n),
crit = str_extract(criteria, "^c\\d+"),
criteria = fct_case_when(
crit == "c0" ~ "Aged 18+ and received booster dose of BNT162b2 or mRNA-1273 between 29 October 2021 and 31 January 2022", # paste0("Aged 18+\n with 2 doses on or before ", format(study_dates$lastvax2_date, "%d %b %Y")),
crit == "c1" ~ " with no missing demographic information",
crit == "c2" ~ " with homologous primary vaccination course of BNT162b2 or ChAdOx1",
crit == "c3" ~ " and not a health and social care worker",
crit == "c4" ~ " and not a care/nursing home resident, end-of-life or housebound",
crit == "c5" ~ " and no COVID-19-related events within 28 days",
crit == "c6" ~ " and not admitted in hospital at time of booster",
TRUE ~ NA_character_
)
) #
write_csv(data_flowchart_rounded, fs::path(output_dir, "flowchart.csv"))
# table 1 style baseline characteristics amongst those eligible for matching ----
# var_labels maps each column (formula LHS) to its display label; set_names
# keys the list by the variable name extracted from each formula.
var_labels <- list(
N ~ "Total N",
treatment_descr ~ "Vaccine type",
vax12_type_descr ~ "Primary vaccine course",
vax23_interval ~ "Days between dose 2 and 3",
age ~ "Age",
jcvi_ageband ~ "Age band",
sex ~ "Sex",
ethnicity_combined ~ "Ethnicity",
imd_Q5 ~ "Deprivation",
region ~ "Region",
cev_cv ~ "JCVI clinical risk group",
sev_obesity ~ "Body Mass Index > 40 kg/m^2",
chronic_heart_disease ~ "Chronic heart disease",
chronic_kidney_disease ~ "Chronic kidney disease",
diabetes ~ "Diabetes",
chronic_liver_disease ~ "Chronic liver disease",
chronic_resp_disease ~ "Chronic respiratory disease",
asthma ~ "Asthma",
chronic_neuro_disease ~ "Chronic neurological disease",
immunosuppressed ~ "Immunosuppressed",
immuno_any ~ "Immunosuppressed (all)",
asplenia ~ "Asplenia or poor spleen function",
cancer_nonhaem ~ "Cancer (excluding haem), within previous 3 years",
cancer_haem ~ "Haematological cancer, within previous 3 years",
solid_organ_transplant ~ "Solid organ transplant",
immrx ~ "Immunosuppressive medications, within 6 months",
hiv_aids ~ "HIV/AIDS",
multimorb ~ "Morbidity count",
learndis ~ "Learning disabilities",
sev_mental ~ "Serious mental illness",
prior_tests_cat ~ "Number of SARS-CoV-2 tests",
prior_covid_infection ~ "Prior documented SARS-CoV-2 infection"
) %>%
set_names(., map_chr(., all.vars))
# gtsummary table stratified by booster brand (moderna vs comparator);
# fct_recoderelevel and recoder are presumably provided by the sourced
# design/utility files -- TODO confirm.
tab_summary_prematch <-
data_cohort %>%
mutate(
N=1L,
treatment_descr = fct_recoderelevel(as.character((vax3_type=="moderna")*1L), recoder$treatment),
) %>%
select(
treatment_descr,
all_of(names(var_labels)),
) %>%
tbl_summary(
by = treatment_descr,
label = unname(var_labels[names(.)]),
statistic = list(
N = "{N}",
age="{mean} ({sd})",
vax23_interval="{mean} ({sd})"
),
)
# NOTE(review): meta_data/df_stats are internal gtsummary structures and may
# change between gtsummary versions.
raw_stats <- tab_summary_prematch$meta_data %>%
select(var_label, df_stats) %>%
unnest(df_stats)
# Statistical disclosure control: round counts to the midpoint of
# `threshold`-sized bins (roundmid_any and `threshold` come from the sourced
# redaction/design files -- TODO confirm) and recompute proportions from the
# rounded counts.
raw_stats_redacted <- raw_stats %>%
mutate(
n = roundmid_any(n, threshold),
N = roundmid_any(N, threshold),
p = n / N,
N_miss = roundmid_any(N_miss, threshold),
N_obs = roundmid_any(N_obs, threshold),
p_miss = N_miss / N_obs,
N_nonmiss = roundmid_any(N_nonmiss, threshold),
p_nonmiss = N_nonmiss / N_obs,
var_label = factor(var_label, levels = map_chr(var_labels[-c(1, 2)], ~ last(as.character(.)))),
variable_levels = replace_na(as.character(variable_levels), "")
)
write_csv(raw_stats_redacted, fs::path(output_dir, "table1.csv"))
#
# # love / smd plot ----
#
# data_smd <- tab_summary_baseline$meta_data %>%
# select(var_label, df_stats) %>%
# unnest(df_stats) %>%
# filter(
# variable != "N"
# ) %>%
# group_by(var_label, variable_levels) %>%
# summarise(
# diff = diff(p),
# sd = sqrt(sum(p*(1-p))),
# smd = diff/sd
# ) %>%
# ungroup() %>%
# mutate(
# variable = factor(var_label, levels=map_chr(var_labels[-c(1,2)], ~last(as.character(.)))),
# variable_card = as.numeric(variable)%%2,
# variable_levels = replace_na(as.character(variable_levels), ""),
# ) %>%
# arrange(variable) %>%
# mutate(
# level = fct_rev(fct_inorder(str_replace(paste(variable, variable_levels, sep=": "), "\\:\\s$", ""))),
# cardn = row_number()
# )
#
# write_csv(data_smd, fs::path(output_dir, "smd.csv"))
#
#
|
5dda57c1a58f10781f4da962493a2ca6a2c1152c
|
2e1f19f01e19a1acf2465d24fc3954263e281b52
|
/man/get_quarter_office_expenses_house_member.Rd
|
910d998ef2e942e97c8f4acd6ad559713f452163
|
[] |
no_license
|
DavytJ/ProPublicaR
|
ebdc03ac0bc30efa6933aaa62316fa3fcbf98b00
|
e9fe623ffc063665581238c3196f78bb32b08b77
|
refs/heads/master
| 2020-03-19T08:33:55.239980
| 2018-10-30T16:10:25
| 2018-10-30T16:17:09
| 136,215,002
| 0
| 0
| null | 2018-06-05T17:56:19
| 2018-06-05T17:56:19
| null |
UTF-8
|
R
| false
| true
| 1,248
|
rd
|
get_quarter_office_expenses_house_member.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_quarter_office_expenses_house_member.R
\name{get_quarter_office_expenses_house_member}
\alias{get_quarter_office_expenses_house_member}
\title{Get Quarterly Office Expenses by a Specific House Member}
\usage{
get_quarter_office_expenses_house_member(member_id, year, quarter, myAPI_Key)
}
\arguments{
\item{year}{2009-2017}
\item{quarter}{1,2,3,4}
\item{myAPI_Key}{use the congress API, you must sign up for an API key. The API key must be included in all API requests to the server, set as a header.}
\item{member-id}{The ID of the member to retrieve; it is assigned by the Biographical Directory of the United States Congress or can be retrieved from a member list request.}
}
\value{
The amount a given lawmaker spent during a specified year and quarter, by category.
}
\description{
The House of Representatives publishes quarterly reports detailing official office expenses by lawmakers. The Congress API has data beginning in the third quarter of 2009. HTTP Request: GET https://api.propublica.org/congress/v1/members/{member-id}/office_expenses/{year}/{quarter}.json
}
\examples{
\dontrun{
get_quarter_office_expenses_house_member('A000374', 2017, 4)
}
}
|
ae24878780b7735fea0e0414f088e6838e4ae69d
|
bae57f27c447250ef182abe8c6d12e13aea24ba2
|
/R/data-directory.R
|
d9b4d544cdf7a57d87e40d63d5e9cdf782d5b662
|
[] |
no_license
|
denalitherapeutics/archs4
|
a254680554856fdcb18a81a3f0fc6f71d045fd46
|
be3aa0e5b7eb3321223d5f63ef193d77a6f9a2fc
|
refs/heads/master
| 2021-04-15T16:54:55.559093
| 2018-09-19T21:54:21
| 2018-09-19T21:54:21
| 126,898,823
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,516
|
r
|
data-directory.R
|
# These are lower-level functions that support "the health" of the local
# datadir that is used to store the data required to drive a
# LocalArchs4Repository
#' Initialize a local datadir to act as an ARCHS4 data datadir
#'
#' @details
#' A local datadir needs to be created and initialized (wth a `meta.yaml`
#' file), to house ARCHS4 data for use in an Archs4Repository. This function
#' creates that datadir and copies an initial `meta.yaml` file.
#'
#' Please refer to the vignette section "ARCHS4 Data Download" for more
#' details.
#'
#' @export
#'
#' @param datadir the path to the datadir to create (or initialize) as an
#' ARCHS4 data datadir.
#' @param stop_if_exists by default, this function will `stop` if `datadir`
#' already exists. Set this to `FALSE` to continue. Setting it to `FALSE` is
#' convenient to initialize the target `datadir` with a `meta.yaml` file.
#' If a `meta.yaml` file already exists in `datadir`, then this function
#' will stop unconditionally. Move the `datadir/meta.yaml` out of the way
#' if you simply want to refresh it with the default version.
#' @return invisibly returns the path to the `meta.yaml` in the target
#' `datadir`
archs4_local_data_dir_create <- function(datadir = getOption("archs4.datadir"),
                                         stop_if_exists = TRUE) {
  # See the roxygen block above for the user-facing contract: create (or
  # initialize) `datadir` and seed it with the package's default meta.yaml,
  # returning the path to that meta.yaml invisibly.
  assert_character(datadir)
  d.exists <- file.exists(datadir)
  # The packaged default meta.yaml that seeds a fresh data directory.
  meta.in <- system.file("extdata", "meta.yaml", package = "archs4",
                         mustWork = TRUE)
  meta.to <- file.path(datadir, "meta.yaml")
  if (d.exists && !dir.exists(datadir)) {
    # `datadir` exists but is a regular file, not a directory.
    stop("Desired output datadir is already file(!): ", datadir)
  }
  if (d.exists) {
    if (stop_if_exists) {
      stop("Output datadir already exists: ", datadir)  # typo "exisits" fixed
    } else if (file.exists(meta.to)) {
      # Refuse to clobber an existing meta.yaml; the user must move it aside.
      stop("meta.yaml file already exists in output datadir, ",
           "remove it if you want to replace it with the default meta.yaml")
    }
  } else {
    # Parent must be an existing, writable directory before creating datadir.
    # (The original assigned this to an unused `parent.dir` variable; the
    # call is kept purely for its assertion side effect.)
    assert_directory(dirname(datadir), "w")
    # FIX: dir.create() signals failure via its return value; the original
    # ignored it and could continue with no directory created.
    if (!dir.create(datadir)) {
      stop("Failed to create datadir: ", datadir)
    }
  }
  # FIX: file.copy() returns FALSE on failure rather than erroring; the
  # original ignored this and could return "success" with no meta.yaml copied.
  if (!file.copy(meta.in, meta.to)) {
    stop("Failed to copy default meta.yaml into: ", datadir)
  }
  invisible(meta.to)
}
#' Check "the health" of a local ARCHS4 data datadir
#'
#' This function will notify the suer which files are missing from the
#' ARCHS4 data datadir, and what course of action they can use to
#' fix it.
#'
#' @export
#' @param echo echo validation diagnostic message to stdout via [base::cat()]
#' @param datadir the path to the datadir that stores local ARCHS4 data.
#' Defaults to `getOption("archs4.datadir")`.
#' @return A string that indicates "what's wrong", or `TRUE` if validation
#' succeeds.
archs4_local_data_dir_validate <- function(echo = TRUE,
datadir = getOption("archs4.datadir")
) {
# Checks, in order: (1) datadir exists; (2) meta.yaml present; (3) required
# ARCHS4 downloads present; (4) required Ensembl downloads present;
# (5) derived "computed" files present. The first failing check returns its
# advice message (invisibly), optionally cat()-ing it; all-pass returns TRUE.
msg <- character()
if (!dir.exists(datadir)) {
msg <- paste0(
"datadir does not exists, run ",
'`archs4_local_data_dir_create("', datadir, '")`\n')
if (echo) cat(msg)
return(invisible(msg))
}
meta.fn <- file.path(datadir, "meta.yaml")
if (!file.exists(meta.fn)) {
msg <- paste(
"meta.yaml file is missing from the data datadir, run ",
"`archs4_local_data_dir_create(datadir, stop_if_exists = FALSE)`\n")
if (echo) cat(msg)
return(invisible(msg))
}
# archs4_file_info() (defined elsewhere in the package) returns one row per
# expected file with at least `source`, `name`, `url` and `file_exists`
# columns; `filter` is presumably dplyr's -- TODO confirm the import.
finfo <- archs4_file_info(datadir)
# NOTE(review): the three blocks below are near-identical and could be
# factored into a helper parameterised by `source` and the advice text.
missing <- filter(finfo, source == "archs4" & !file_exists)
if (nrow(missing)) {
msg <- paste0(
"The following ARCHS4 files are missing, please download them:\n",
paste0(
sprintf(" * %s: %s", missing[["name"]], missing[["url"]]),
collapse = "\n"))
if (echo) cat(msg)
return(invisible(msg))
}
missing <- filter(finfo, source == "ensembl" & !file_exists)
if (nrow(missing)) {
msg <- paste0(
"The following ensembl files are missing, please download them:\n",
paste0(
sprintf(" * %s: %s", missing[["name"]], missing[["url"]]),
collapse = "\n"))
if (echo) cat(msg)
return(invisible(msg))
}
missing <- filter(finfo, source == "computed" & !file_exists)
if (nrow(missing)) {
header <- "The following computed files are missing:"
filez <- paste(sprintf(" * %s\n", missing[["name"]]), collapse = "")
advice <- paste0(
"You can create them by running:\n",
" `create_augmented_feature_info(\"", datadir, "\")`")
msg <- sprintf("%s\n%s\n%s\n\n", header, filez, advice)
if (echo) cat(msg)
return(invisible(msg))
}
TRUE
}
|
ad0760f4b42fea66b46365d3cd27563506c35e05
|
81e3ecad25fd6fc01370f665d284ec1fe52f494e
|
/VARFIMA_lobato.R
|
71e92c63f2741ef9bf1b3e8c9377813201fd9b72
|
[] |
no_license
|
booleanboo/VEGARMA
|
2eddf961ba6fb9daeeae198245c5cfe7d7a5b5b5
|
0bf9d71b558c2f2ad133f225e4ad8ecea7a8197d
|
refs/heads/main
| 2023-04-23T13:47:58.626154
| 2021-04-08T01:08:41
| 2021-04-08T01:08:41
| 349,892,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,349
|
r
|
VARFIMA_lobato.R
|
library(LongMemoryTS)
library(MTS)
library(FKF)
library(orthopolynom)
library(MASS)
library(longmemo)
library(beyondWhittle)
library(forecast)
library(nlme)
library(msos)
# MA(infinity) coefficients of the fractional-differencing filter (1-B)^(-d)
# for d = 0.2: with psi_0 = 1, the standard recursion is
# psi_k = psi_{k-1} * (k-1+d) / k; here theta1[j] corresponds to psi_{j-1}.
theta1<-vector()
theta1[1]<-1
theta1[2]<-0.2
for(j in 3:1000)
{
theta1[j]=theta1[j-1]*(j-2+0.2)/(j-1)
}
# Same coefficients for the second series with d = 0.4.
theta2<-vector()
theta2[1]<-1
theta2[2]<-0.4
for(k in 3:1000)
{
theta2[k]=theta2[k-1]*(k-2+0.4)/(k-1)
}
# Innovations: bivariate Gaussian noise with cross-correlation 0.8.
Sigma <- matrix(c(1,0.8,0.8,1),ncol=2)
at<-mvrnorm(n = 3000, rep(0, 2), Sigma, empirical = TRUE)
# Build the two long-memory series by convolving the innovations with the
# MA coefficients. This is stats::filter (dplyr is not attached here).
xt<-filter(at[,1],theta1,method="convolution",sides=1)
yt<-filter(at[,2],theta2,method="convolution",sides=1)
# Local-Whittle setup: n observations, m = n^0.65 Fourier frequencies
# (m is non-integer; 1:m truncates it), and their log-sum average.
n <- 500
m<-n^0.65
lambdaj <- 2 * pi * (1:m)/n
loglambdaj<-sum(log(lambdaj))/m
# Accumulator for G(d); also (re)used at the top level after fitting.
G_d<-matrix(rep(0,4),2,2)
# Bivariate local-Whittle (Lobato-style) objective R(d1, d2), evaluated over
# the first m Fourier frequencies. Reads the script-level globals n, m,
# lambdaj and loglambdaj, and the series xt/yt; the analysis window
# xt[1000:1499] / yt[1000:1499] is hard-coded to length n = 500.
varfima.whittle<-function(d1,d2)
{
# FIX: accumulate G(d) in a LOCAL zero matrix. The original wrote
# "G_d <- ... + G_d", whose first right-hand read pulled in the *global*
# G_d, so correctness silently depended on the global being reset to a
# zero matrix before every call.
G_d<-matrix(0,2,2)
# DFT weights: column k holds exp(i * t * lambda_k) for t = 1..n.
weight.mat <- matrix(NA, n, m)
for (k in 1:m) {
weight.mat[, k] <- exp((0+1i) * (1:n) * lambdaj[k])
}
# Normalised discrete Fourier transforms of the two series.
wx <- 1/sqrt(2 * pi * n) * xt[1000:1499] %*% weight.mat
wy <- 1/sqrt(2 * pi * n) * yt[1000:1499] %*% weight.mat
for(j in 1:m)
{
# Frequency-dependent rescaling diag(lambda_j^d1, lambda_j^d2).
Lamdaj<-matrix(c(lambdaj[j]^d1,0,0,lambdaj[j]^d2),2,2)
###periodogram matrix
wz<-matrix(c(wx[,j],wy[,j]),2,1)
I.lambda <- wz%*%Conj(t(wz))
G_d<-Re(Lamdaj%*%I.lambda%*%Lamdaj) +G_d
}
# Whittle criterion: log-determinant of the averaged rescaled periodogram
# minus the correction term in the memory parameters.
R_d<-log(det(G_d/m))-2*(d1*loglambdaj+d2*loglambdaj)
R_d
}
# Vector-parameter wrapper around varfima.whittle() so that optim() can
# minimise over theta = c(d1, d2) directly.
objective <- function(theta) {
  varfima.whittle(theta[1], theta[2])
}
# Estimate (d1, d2) by minimising the Whittle objective over the box
# [0, 0.5]^2 (stationary long-memory region), starting from (0.1, 0.1).
theta<-c(0.1,0.1)
objective(theta)
fit2 <- optim(theta, objective, method="L-BFGS-B", lower=c(0, 0), upper=c(0.5, 0.5), hessian = TRUE)
fit2$par
######G_0################
# Recompute the averaged rescaled periodogram matrix at the fitted (d1, d2)
# and scale by 2*pi to obtain the long-run covariance estimate G_0.
G_d<-matrix(rep(0,4),2,2)
d1<-fit2$par[1]
d2<-fit2$par[2]
weight.mat <- matrix(NA, n, m) #500*m
for (k in 1:m) {
weight.mat[, k] <- exp((0+1i) * (1:n) * lambdaj[k])
}
wx <- 1/sqrt(2 * pi * n) * xt[1000:1499] %*% weight.mat #1*56
wy <- 1/sqrt(2 * pi * n) * yt[1000:1499] %*% weight.mat #1*56
for(j in 1:m)
{
Lamdaj<-matrix(c(lambdaj[j]^d1,0,0,lambdaj[j]^d2),2,2)
###periodogram matrix
wz<-matrix(c(wx[,j],wy[,j]),2,1)
I.lambda <- wz%*%Conj(t(wz))
G_d<-Re(Lamdaj%*%I.lambda%*%Lamdaj) +G_d
}
G_0<-G_d/m*2*pi
G_0
|
46a967d48d0604f58db7f253f3e91ab0f04046fc
|
4c51ece15418ab7523df7b5dffc55c0f5c2a2c6c
|
/verify_gRNA_matrix.R
|
5fef335b184ef2bc31636f7b2a7aebaa1f589fed
|
[] |
no_license
|
scarlettcanny0629/import-gasperini-2019
|
482e7b59feee8082c756376f81f31592ab43bfd0
|
98531a1466532b08f9687e69194e7e77db542e24
|
refs/heads/main
| 2023-06-28T04:53:11.901772
| 2021-08-06T16:13:25
| 2021-08-06T16:13:25
| 388,646,676
| 0
| 0
| null | 2021-07-23T01:42:02
| 2021-07-23T01:42:02
| null |
UTF-8
|
R
| false
| false
| 2,581
|
r
|
verify_gRNA_matrix.R
|
# test correctness of gRNA matrix
require(dplyr)
# set directories (.get_config_path is a project helper -- TODO confirm source)
gasp_offsite <- .get_config_path("LOCAL_GASPERINI_2019_DATA_DIR")
intermediate_data_dir <- paste0(gasp_offsite, "intermediate/")
raw_data_dir <- paste0(gasp_offsite, "raw/")
# load gRNA barcodes, count matrix, and indicator matrix
gRNA_barcodes_in_use <- readr::read_tsv(file = paste0(raw_data_dir, "GSE120861_grna_groups.at_scale.txt"),
col_names = c("group_name", "gRNA_barcode"), col_types = "cc")
gRNA_count_matrix <- readRDS(paste0(intermediate_data_dir, "gRNA_count_matrix.rds"))
gRNA_indicator_matrix <- readRDS(paste0(intermediate_data_dir, "gRNA_indicators.rds"))
# check equality between thresholded count matrix and indicator matrix
# (spot-check 50 randomly sampled gRNA groups; seed fixed for reproducibility)
set.seed(10)
grp_names <- sample(gRNA_barcodes_in_use$group_name, 50)
# verify that our nonzero entries are a superset of Gasperini's
for (grp_name in grp_names) {
barcodes <- gRNA_barcodes_in_use %>% dplyr::filter(group_name %in% grp_name) %>% dplyr::pull(gRNA_barcode)
# A cell counts as positive for the group if any of the group's barcodes
# has >= 5 reads in that cell.
our_threshold <- (Matrix::colSums(gRNA_count_matrix[barcodes,] >= 5) >= 1)
gasp_threshold <- gRNA_indicator_matrix[,grp_name]
our_threshold_nonzero <- which(our_threshold)
gasp_threshold_nonzero <- which(gasp_threshold)
print(all(gasp_threshold_nonzero %in% our_threshold_nonzero))
}
#### check inconsistency of specific gRNA and cell
# Manual debugging snippet, deliberately disabled via if (FALSE).
if (FALSE) {
gasp_offsite <- .get_config_path("LOCAL_GASPERINI_2019_DATA_DIR")
raw_data_dir <- paste0(gasp_offsite, "raw/")
intermediate_data_dir <- paste0(gasp_offsite, "intermediate/")
library(magrittr)
gRNA_barcodes <- readr::read_tsv(file = paste0(raw_data_dir, "GSE120861_grna_groups.at_scale.txt"),
col_names = c("group_name", "gRNA_barcode"), col_types = "cc")
gRNA_counts <- readr::read_tsv(paste0(raw_data_dir, "all_libraries.gRNAcaptured.aggregated.txt"),
col_names = TRUE,
col_types = "cccc") %>% dplyr::rename(cell_barcode = cell, gRNA_barcode = barcode)
gRNA_indicator_matrix <- readRDS(paste0(intermediate_data_dir, "gRNA_indicators.rds"))
gRNA_grp <- "chr10.2575_top_two"
gRNA_grp_barcodes <- dplyr::filter(gRNA_barcodes, group_name == gRNA_grp) %>% dplyr::pull(gRNA_barcode)
cell_barcode <- "GTCAAGTTCAGCGACC-1_1B_2_SI-GA-F3"
# Strip the 9-character library suffix before matching the raw cell barcode.
gRNA_counts_my_cell <- gRNA_counts[gRNA_counts$cell_barcode == gsub('.{9}$', '', cell_barcode),]
# NOTE(review): the mask below is built from the FULL gRNA_counts table but
# applied to the already-subsetted gRNA_counts_my_cell (length mismatch);
# presumably gRNA_counts_my_cell$gRNA_barcode was intended -- verify.
gRNA_counts_my_cell[gRNA_counts$gRNA_barcode %in% gRNA_grp_barcodes,]
gRNA_indicator_matrix[cell_barcode, gRNA_grp] # no gRNA in indicator matrix; inconsistency
}
|
8d178d88a412780d4699ceda225438b75d035873
|
26f722da50b82b98bf8c730a14e1e4bc886021af
|
/man/pca_time.Rd
|
f05492bd69cb622725a57890b93b2053b95688fa
|
[] |
no_license
|
panders225/mvstats
|
0633db302a7a40b64a35833ad972a02799e782fc
|
74c01ac74cb55e0c6bedc50a813e59806c6c0240
|
refs/heads/master
| 2021-05-15T07:13:11.407638
| 2017-11-20T22:06:47
| 2017-11-20T22:06:47
| 111,461,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 497
|
rd
|
pca_time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca_time.R
\name{pca_time}
\alias{pca_time}
\title{Producing PCA greatest hits}
\usage{
pca_time(x)
}
\arguments{
\item{x}{a matrix or dataframe object}
}
\description{
Produce a complete table of PCA loadings,
and a biplot of the first two principal components, if the table
has fewer than 101 observations and 16 columns.
}
\examples{
one <- cbind(rnorm(50, 1, 1), rnorm(50, 0, 1), rnorm(50, 3, 1.5))
pca_time(one)
}
|
567f3c67588c39272fc9926c51091650f45a6b9b
|
bc63aeafff31bb14fbc429a85de4d85078573d39
|
/ppp/ppp.R
|
83d77645f4c0276abc84dd8a382685e53dfe9688
|
[] |
no_license
|
nmmarquez/re_simulations
|
e5c0c286fc574809bda58f1bd082d267f9a74597
|
db25cbea4a512c71e3d90eabece7cd721ef1e424
|
refs/heads/master
| 2021-01-17T17:58:51.040721
| 2020-08-07T21:32:41
| 2020-08-07T21:32:41
| 70,098,697
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,333
|
r
|
ppp.R
|
# NOTE(review): rm(list = ls()) wipes the current workspace; discouraged in
# shared scripts but retained as-is here.
rm(list=ls())
# pacman::p_load() installs (if needed) and attaches all listed packages.
pacman::p_load(INLA, ggplot2, data.table, lattice, arm, dplyr, TMB, ar.matrix)
set.seed(124)  # fixed RNG seed so the simulated field and data are reproducible
# Project a latent field vector `x` (defined on the INLA mesh) onto the
# regular plotting grid held in the script-level global `proj`
# (an inla.mesh.projector with dims c(600, 600)), returning a long-format
# data.table of grid coordinates and projected values.
mesh2DF <- function(x){
M <- length(proj$x)
# rep(proj$x, M) / rep(proj$y, each = M) enumerate the full x-y grid;
# assumes proj$x and proj$y have equal length (both 600 here) -- TODO confirm.
DT <- data.table(x=rep(proj$x, M), y=rep(proj$y, each=M),
obs=c(inla.mesh.project(proj, field=x)))
DT
}
# --- Simulate a Matern GMRF via the SPDE representation, generate binomial
# --- observations at n random locations, and fit with INLA.
n <- 1000
loc <- matrix(runif(n*2), n, 2)  # random observation locations in the unit square
mesh <- inla.mesh.create(loc, refine=list(max.edge=0.05))
plot(mesh)
points(loc[,1], loc[,2], col="red", pch=20)
proj <- inla.mesh.projector(mesh, dims=c(600, 600))
# True SPDE hyperparameters (kappa/tau parameterisation from sigma and range).
sigma0 <- .2 ## Standard deviation
range0 <- .1 ## Spatial range
kappa0 <- sqrt(8) / range0
tau0 <- 1/(sqrt(4*pi)*kappa0*sigma0)
spde <- inla.spde2.matern(mesh)
##############
# Precision matrix of the latent field built from the SPDE FEM matrices.
Q2 <- tau0**2 * (kappa0**4 * spde$param.inla$M0 +
2 * kappa0**2 *spde$param.inla$M1 + spde$param.inla$M2)
x_ <- as.vector(sim.AR(1, Q2))  # one draw from N(0, Q2^-1)
x <- x_ - mean(x_)  # centre the simulated field
mesh2DF(x) %>%
ggplot(aes(x, y, z=obs)) +
geom_raster(aes(fill = obs)) +
theme_void() +
lims(y=c(0,1), x=c(0,1)) +
scale_fill_distiller(palette = "Spectral")
# Binomial observations: logit(p) = -1 + 0.2*cov + field at each location.
cov1 <- rnorm(n)
p <- invlogit(-1 + .2 * cov1 + x[mesh$idx$loc])
denom <- rpois(n, 100)  # per-location trial counts
hist(p)
hist(denom)
DT <- data.table(
y=rbinom(n, denom, p),
cov=cov1, id=1:n, denom=denom,
geo=mesh$idx$loc-1,  # 0-indexed mesh node index (for the TMB model)
lon=loc[,1], lat=loc[,2])
summary(DT)
# Fit the equivalent binomial SPDE model in INLA.
spatform <- y ~ cov1 + f(id, model=spde)
system.time(spatmodel <- inla(
spatform,
data=DT,
control.compute=list(config=TRUE),
family="binomial",
Ntrials = DT$denom))
summary(spatmodel)
spatialhat <- inla.spde2.result(spatmodel, "id", spde)
xhat <- spatialhat$summary.values$`0.5quant`  # posterior median of the field
# Side-by-side maps of the true field and the INLA estimate.
bind_rows(
mutate(mesh2DF(x), state="Observed"),
mutate(mesh2DF(xhat), state="Estimated INLA")) %>%
ggplot(aes(x, y, z=obs)) +
geom_raster(aes(fill = obs)) +
theme_void() +
lims(y=c(0,1), x=c(0,1)) +
scale_fill_distiller(palette = "Spectral") +
facet_wrap(~state)
# Posterior SD map with observation locations overlaid.
mesh2DF(spatialhat$summary.values$sd) %>%
ggplot(aes(x, y, z=obs)) +
geom_raster(aes(fill = obs)) +
theme_void() +
scale_fill_distiller(palette = "Spectral") +
geom_point(aes(x=lon, y=lat, z=NULL, fill=NULL), data=DT)
# Compare estimated (exp of log-scale posterior means) vs true tau/kappa.
exp(spatmodel$summary.hyperpar$mean[1:2])
c(tau0, kappa0)
# now run the TMB model (runModel() below compiles and loads ./ppp.cpp)
setwd("~/Documents/re_simulations/ppp/")
# Fit the binomial SPDE model with TMB (./ppp.cpp) and summarise the latent
# field posterior via draws from the joint precision.
# Reads the script-level globals `spde` (FEM matrices) and `Q2` (field size).
# Side effects: compiles ppp.cpp in the current working directory and
# dyn.load()s the resulting shared object.
# Args:
#   DT        data.table with columns y, cov, geo, denom (built above)
#   recompile delete any existing compiled artifacts before compiling
#   symbolic  run TMB's symbolic Cholesky analysis before optimising
#   draws     number of posterior draws used for the z summaries
# Returns: list(obj = ADFun object, opt = nlminb fit, sd = sdreport,
#               z = tibble of field posterior mean/sd/95% interval).
runModel <- function(DT, recompile=FALSE, symbolic=TRUE, draws=1000){
Data <- list(
y=DT$y, x=DT$cov, geo=DT$geo, denom=DT$denom,
M0=spde$param.inla$M0, M1=spde$param.inla$M1, M2=spde$param.inla$M2)
Params <- list(
beta0=0, beta1=0, log_tau=0, log_kappa=0, z=rep(0, nrow(Q2))
)
# compile the code if not there
model <- "ppp"
if(recompile){
if(file.exists(paste0(model, ".so"))) file.remove(paste0(model, ".so"))
if(file.exists(paste0(model, ".o"))) file.remove(paste0(model, ".o"))
if(file.exists(paste0(model, ".dll"))) file.remove(paste0(model, ".dll"))
}
compile(paste0(model, ".cpp"))
dyn.load(dynlib(model))
# z is integrated out via the Laplace approximation (random effects).
Obj <- MakeADFun(data=Data, parameters=Params, DLL=model, random="z")
if(symbolic){
runSymbolicAnalysis(Obj)
}
Opt <- nlminb(
start=Obj$par,
objective=Obj$fn,
gradient=Obj$gr,
control=list(eval.max=1e4, iter.max=1e4))
sdrep <- sdreport(Obj, getJointPrecision=TRUE)
# Extract the z-block of the joint precision and draw from it to get
# simulation-based posterior summaries of the latent field.
zindex <- "z" == row.names(sdrep$jointPrecision)
Qz <- sdrep$jointPrecision[zindex,zindex]
Zdraws <- sim.AR(draws, Qz)
Report <- Obj$report()
zDF <- tibble(
mu=Report$z,
sd=apply(Zdraws, 2, sd),
lwr=apply(Zdraws, 2, quantile, probs=.025),
upr=apply(Zdraws, 2, quantile, probs=.975)
)
list(obj=Obj, opt=Opt, sd=sdrep, z=zDF)
}
# Fit the TMB model and compare its field estimate against the truth and INLA.
system.time(tmbModel <- runModel(DT))
bind_rows(
mutate(mesh2DF(x), state="Observed"),
mutate(mesh2DF(tmbModel$z$mu), state="Estimated TMB"),
mutate(mesh2DF(xhat), state="Estimated INLA")) %>%
ggplot(aes(x, y, z=obs)) +
geom_raster(aes(fill = obs)) +
theme_void() +
lims(y=c(0,1), x=c(0,1)) +
scale_fill_distiller(palette = "Spectral") +
facet_wrap(~state, nrow=2)
# Posterior SD of the TMB field estimate, with observation locations overlaid.
mesh2DF(tmbModel$z$sd) %>%
ggplot(aes(x, y, z=obs)) +
geom_raster(aes(fill = obs)) +
theme_void() +
scale_fill_distiller(palette = "Spectral") +
geom_point(aes(x=lon, y=lat, z=NULL, fill=NULL), data=DT)
|
dbe1f99535fa1c5482e8255e0c24d563652cb380
|
b93f14b970fe61ed7ffa4592654a027adc19b3fc
|
/man/make_filename.Rd
|
32e9aa04e5a7a88a8ddb022bec2c75943078b714
|
[] |
no_license
|
yuriygdv/farsfunctions
|
1093001e2349400a18c2c800f158d88b090058fd
|
0560cc087832a01c45e4014d8ecb3e6a837d31f0
|
refs/heads/master
| 2021-04-30T03:58:30.600541
| 2018-02-14T15:30:02
| 2018-02-14T15:30:02
| 121,523,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 693
|
rd
|
make_filename.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{make_filename}
\alias{make_filename}
\title{Make a filename in the format used for FARS data files}
\usage{
make_filename(year)
}
\arguments{
\item{year}{A year in the numeric format}
}
\value{
This function returns a character string that corresponds to the name of
the FARS datafile for the given year
}
\description{
This is a function that takes a year in numeric format as an argument
and returns a filename of the corresponding FARS data file for the given year.
}
\note{
Spaces, commas, or other characters in the numeric argument will return an error.
}
\examples{
make_filename(2015)
}
|
1eba737a61e993a5c98f5830bf50346a15b530b4
|
c0594b6c8ad34662469cb3369cda7bbbf959ae69
|
/man/parse_d20200423_SANCHEZ-CANETE.Rd
|
fb53aefd0efa7c0f48c0dd793ed61e684a6863e8
|
[
"CC-BY-4.0"
] |
permissive
|
bgctw/cosore
|
289902beaf105f91faf2428c3869ac6bba64007f
|
444f7c5ae50750ec7f91564d6ab573a8dc2ed692
|
refs/heads/master
| 2022-10-22T13:48:37.449033
| 2020-06-17T05:46:19
| 2020-06-17T05:46:19
| 269,271,512
| 0
| 0
|
CC-BY-4.0
| 2020-06-17T05:46:20
| 2020-06-04T05:47:05
| null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
parse_d20200423_SANCHEZ-CANETE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse-others.R
\name{parse_d20200423_SANCHEZ-CANETE}
\alias{parse_d20200423_SANCHEZ-CANETE}
\title{Parse a custom file from d20200423_SANCHEZ-CANETE}
\usage{
`parse_d20200423_SANCHEZ-CANETE`(path)
}
\arguments{
\item{path}{Data directory path, character}
}
\value{
A \code{data.frame} containing extracted data.
}
\description{
Parse a custom file from d20200423_SANCHEZ-CANETE
}
\keyword{internal}
|
4166325fb2032ca97b1bc8cab11b86483e5e0681
|
e573bc7fd968068a52a5144a3854d184bbe4cda8
|
/Recommended/boot/man/glm.diag.Rd
|
3d5b172bc5b1231e84d62763e68a5ce8029402ec
|
[] |
no_license
|
lukaszdaniel/ivory
|
ef2a0f5fe2bc87952bf4471aa79f1bca193d56f9
|
0a50f94ce645c17cb1caa6aa1ecdd493e9195ca0
|
refs/heads/master
| 2021-11-18T17:15:11.773836
| 2021-10-13T21:07:24
| 2021-10-13T21:07:24
| 32,650,353
| 5
| 1
| null | 2018-03-26T14:59:37
| 2015-03-21T21:18:11
|
R
|
UTF-8
|
R
| false
| false
| 1,482
|
rd
|
glm.diag.Rd
|
\name{glm.diag}
\alias{glm.diag}
\title{
Generalized Linear Model Diagnostics
}
\description{
Calculates jackknife deviance residuals, standardized deviance residuals,
standardized Pearson residuals, approximate Cook statistic, leverage and
estimated dispersion.
}
\usage{
glm.diag(glmfit)
}
\arguments{
\item{glmfit}{
\code{glmfit} is a \code{glm.object} - the result of a call to \code{glm()}
}}
\value{
Returns a list with the following components
\item{res}{
The vector of jackknife deviance residuals.
}
\item{rd}{
The vector of standardized deviance residuals.
}
\item{rp}{
The vector of standardized Pearson residuals.
}
\item{cook}{
The vector of approximate Cook statistics.
}
\item{h}{
The vector of leverages of the observations.
}
\item{sd}{
The value used to standardize the residuals. This is the estimate of
residual standard deviation in the Gaussian family and is the square root of
the estimated shape parameter in the Gamma family. In all other cases it is 1.
}}
\references{
Davison, A.C. and Snell, E.J. (1991) Residuals and diagnostics.
In \emph{Statistical Theory and Modelling: In Honour of Sir David Cox}.
D.V. Hinkley, N. Reid and E.J. Snell (editors), 83--106. Chapman and Hall.
}
\seealso{
\code{\link{glm}}, \code{\link{glm.diag.plots}}, \code{\link{summary.glm}}
}
\note{
See the help for \code{\link{glm.diag.plots}} for an example of the
use of \code{glm.diag}.
}
\keyword{regression}
\keyword{dplot}
% Converted by Sd2Rd version 1.15.
|
f7af66f6a489bc5e9720506bf0b4cbdef5a99abc
|
d07beaab6703de4a9b901138ad3f6609e49eb1b4
|
/glove_app/server.R
|
4e5656945298e0bb698ae8b7d850a822437e4760
|
[] |
no_license
|
lsempe77/NLP-and-digital-humanities
|
1a6eb9965e66cda1a9cd651ac97db24150d48bf8
|
f18456e5b93485200bc9254f381b808c21ec01cd
|
refs/heads/master
| 2023-04-10T04:03:35.546030
| 2021-02-22T14:13:51
| 2021-02-22T14:13:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,482
|
r
|
server.R
|
options(shiny.maxRequestSize=30*1024^2)
# Import libraries that are needed for processing in this module.
library(shiny)
library(dplyr)
library(data.table)
library(R.utils)
set.seed(42)
normalize = function(m, norm = c("l1", "l2", "none")) {
stopifnot(inherits(m, "matrix") || inherits(m, "sparseMatrix"))
norm = match.arg(norm)
if (norm == "none")
return(m)
norm_vec = switch(norm,
l1 = 1 / rowSums(m),
l2 = 1 / sqrt(rowSums(m ^ 2))
)
# case when sum row elements == 0
norm_vec[is.infinite(norm_vec)] = 0
if(inherits(m, "sparseMatrix"))
Diagonal(x = norm_vec) %*% m
else
m * norm_vec
}
sim2 = function(x, y = NULL, method = c("cosine", "jaccard"),
norm = c("l2", "none")) {
norm = match.arg(norm)
method = match.arg(method)
# check first matrix
stopifnot(inherits(x, "matrix") || inherits(x, "Matrix"))
FLAG_TWO_MATRICES_INPUT = FALSE
if (!is.null(y)) {
FLAG_TWO_MATRICES_INPUT = TRUE
}
# check second matrix
if (FLAG_TWO_MATRICES_INPUT) {
stopifnot(inherits(y, "matrix") || inherits(y, "Matrix"))
stopifnot(ncol(x) == ncol(y))
stopifnot(colnames(x) == colnames(y))
}
RESULT = NULL
if (method == "cosine") {
x = normalize(x, norm)
if (FLAG_TWO_MATRICES_INPUT) {
y = normalize(y, norm)
RESULT = tcrossprod(x, y)
}
else
RESULT = tcrossprod(x)
}
if (method == "jaccard") {
if (!inherits(x, "sparseMatrix"))
stop("at the moment jaccard distance defined only for sparse matrices")
if (norm != "none") {
msg = paste(norm, "norm provided. Howewer matrix will be converted to binary (0,1) automatically.")
msg = paste(msg, "'jaccard' can be computed only on sets which should be encoded as sparse matrices of 0, 1.")
logger$warn(msg)
}
x@x = sign(x@x)
if (FLAG_TWO_MATRICES_INPUT) {
y@x = sign(y@x)
}
RESULT = jaccard_sim(x, y)
}
RESULT
}
# After that, we can simply load the glove vectors stored locally
vectors = data.table::fread('glove.6B.50d.txt', data.table = F, encoding = 'UTF-8')
rownames(vectors) <- vectors$V1
# Define server logic required to summarize and view the result of word embeddings
shinyServer(function(input, output, session) {
# display the original text inputted by the user
output$Original <- renderText({
combineFormula <- paste(input$word1, "-", input$word2, "+", input$word3, sep=" ")
return(combineFormula)
})
results <- eventReactive(input$goButton,{
print("Computing!")
withProgress(message = 'Computing...', value = 0, {
target <- as.numeric(vectors[vectors[, "V1"] == tolower(input$word1),])[-1] - as.numeric(vectors[vectors[, "V1"] == tolower(input$word2),])[-1] + as.numeric(vectors[vectors[, "V1"] == tolower(input$word3),])[-1]
cos_sim = sim2(x = as.matrix(subset(vectors, select = -c(V1))), y = t(as.matrix(target)), method = "cosine", norm = "l2")
output_data = as.data.frame(head(sort(cos_sim[,1], decreasing = TRUE), 10))
rm("cos_sim")
gc()
setDT(output_data, keep.rownames = TRUE)[]
colnames(output_data) <- c("words", "cosine similarity")
})
rm("target")
gc()
return (output_data)
})
# Display the top words with highest consine similarity
output$cos_sim_sort <- renderTable({
if (is.null(results()))
return (NULL)
# Displaying the result
return(results())
})
})
|
ea34ad02f6647d180e9c887d629e8641878732ab
|
e5bd337550aa219533eb9039952d35d72bd97497
|
/man/birthrate.Rd
|
5c09ac9654eb5cdf79b1b83910de2385f4843ecb
|
[] |
no_license
|
nxskok/d29data
|
246c31081f30c34c83b9d0a5ea66b1f17d0faf0f
|
3dece19d3d3101b9349c446af0870a4aa8bc4f92
|
refs/heads/master
| 2021-01-10T05:48:31.573769
| 2015-12-27T15:12:06
| 2015-12-27T15:12:06
| 48,649,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 720
|
rd
|
birthrate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docco.R
\docType{data}
\name{birthrate}
\alias{birthrate}
\title{Vital statistics by country}
\format{A data frame with 97 rows and 4 variables:
\describe{
\item{birth}{Birth rate (units unspecified)}
\item{death}{Death rate}
\item{infant}{Infant mortality rate}
\item{country}{Name of country, no spaces}
}}
\source{
\url{https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_aceclus_sect003.htm}, from 1990 United Nations Demographic Yearbook.
}
\usage{
data(birthrate)
}
\description{
1990-era birth rates, death rates and infant mortality rates for
97 countries world-wide
}
\keyword{datasets}
|
449e088f555cbe58e9e4b65691108e7c36566f39
|
603ef4d458ae15590178a3bb83e41597bcbc0447
|
/man/format_date.Rd
|
d98559702dbd6822ec9a1629a013e6698368159b
|
[] |
no_license
|
ntncmch/myRtoolbox
|
8dace3f0d29e19670624e6e3c948ba6d0fa38cec
|
8ec2a6bc2e7dd33fb23d7f4b2c6cf2d95ca5ef8d
|
refs/heads/master
| 2020-05-14T14:14:34.465272
| 2014-09-22T13:17:47
| 2014-09-22T13:17:47
| 21,052,420
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,531
|
rd
|
format_date.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{format_date}
\alias{format_date}
\title{Format date variables in a data frame}
\usage{
format_date(df_2format, pattern = "date", orders = "dmy", year_max = NULL,
as_date = FALSE, excel = FALSE)
}
\arguments{
\item{df_2format}{A data frame}
\item{pattern}{Vector of strings to match the name of date variables}
\item{orders}{a character vector of date-time formats.
Each order string is series of formatting characters as
listed \code{\link[base]{strptime}} but might not include
the "%" prefix, for example "ymd" will match all the
possible dates in year, month, day order. Formatting
orders might include arbitrary separators. These are
discarded. See details for implemented formats.}
\item{year_max}{Numeric. If a year exceeds this value, one century is subtracted to it.}
\item{as_date}{Logical. If \code{TRUE} dates are converted to class \code{"Date"}.}
\item{excel}{Logical. If the orifginal file is an excel file put to \code{TRUE} and dates will be converted from Excel numbering to class \code{"Date"}.}
}
\value{
A data frame with dates formatted.
}
\description{
This functions detects all date variables matching \code{pattern} and then use \code{\link[lubridate]{parse_date_time}} with specified \code{orders} to parse them as POSIXct date-time objects.
}
\note{
The \code{year_max} arguments is to handle the fact that date origin in \R is "1970-01-01" so dates like "08-05-45" will be parsed as "2045-05-08" instead of "1945-05-08"
}
|
927d244099838510baaeed3827a9da3240a15a04
|
d0653d0ab1e14a079f7e9c33f133d5d43a9d003e
|
/week4/islr_logistic_reg_lab.R
|
d1d747ae25ed47db2888df0b1615bd5e61a39eb5
|
[] |
no_license
|
Brendafried/coursework
|
18e404791d79f97b478071fb589965dd54c72888
|
ce13e9510b5e798e3c7a24a0a69aa230cd97f74a
|
refs/heads/master
| 2020-06-03T03:18:52.333079
| 2019-07-08T03:06:46
| 2019-07-08T03:06:46
| 191,413,241
| 0
| 0
| null | 2019-06-11T16:50:09
| 2019-06-11T16:50:09
| null |
UTF-8
|
R
| false
| false
| 1,356
|
r
|
islr_logistic_reg_lab.R
|
library(ISLR)
names(Smarket)
dim(Smarket)
summary(Smarket)
cor(Smarket)
cor(Smarket[, -9])
attach(Smarket)
plot(Volume)
glm.fits = glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume, family = binomial, data = Smarket)
summary(glm.fits)
coef(glm.fits)
summary(glm.fits)$coef[, -4]
glm.probs = predict(glm.fits, type = "response")
glm.probs[1:10]
contrasts(Direction)
glm.pred = rep("Down", 1250)
glm.pred[glm.probs > .5]="Up"
table(glm.pred, Direction)
(507+145)/ 1250
mean(glm.pred == Direction)
View(Smarket)
train = (Year < 2005)
Smarket.2005 = Smarket[!train,]
dim(Smarket.2005)
Direction.2005 = Direction[!train]
glm.fits = glm(Direction~Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume, data =Smarket, family = binomial, subset = train)
glm.probs = predict(glm.fits, Smarket.2005, type = "response")
glm.pred = rep("Down", 252)
glm.pred[glm.probs > .5] = "Up"
table(glm.pred, Direction.2005)
mean(glm.pred == Direction.2005)
mean(glm.pred != Direction.2005)
glm.fits = glm(Direction ~ Lag1 + Lag2, data = Smarket, family = binomial, subset = train)
glm.probs = predict(glm.fits, Smarket.2005, type= "response")
glm.pred = rep("Down", 252)
glm.pred[glm.probs > 0.5] = "Up"
table(glm.pred, Direction.2005)
mean(glm.pred == Direction.2005)
106/(106+76)
predict(glm.fits, newdata =data.frame(Lag1=c(1.2, 1.5), Lag2=c(1.1, -0.8)), type = "response")
|
14c89ae984640603f3ecea92b85b8b58f7f9ffbf
|
a99956a3b217e9c87daa50b3854e431c88beee0b
|
/plot3.R
|
d9b8c153b4ed67240172fcf5fac4e3e9cb487ece
|
[] |
no_license
|
cktc4b/ExData_Plotting1
|
fbf3b24d0701cff2aab421960261666ac893734e
|
ea5c1690522ee5f1182fdf010258e218e9f91dbf
|
refs/heads/master
| 2021-01-16T18:48:30.743068
| 2014-08-09T16:09:24
| 2014-08-09T16:09:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,368
|
r
|
plot3.R
|
##Identify file location and download
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, "HPC.zip")
##Unzip the file
unzip("HPC.zip")
##Load the data from the unzipped file
hpc<-read.table("household_power_consumption.txt", header=TRUE, sep=";")
##Convert the Date column to the date class, and reformat the Time column to include the complete date and time
hpc$Time<-strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")
hpc$Date<-as.Date(hpc$Date, "%d/%m/%Y")
##Subset to include only data from 2/1/2007, and 2/2/2007
hpcsub<-subset(hpc, Date=='2007-02-01' |
Date=='2007-02-02')
##Change sub metering columns to numeric
hpcsub$Sub_metering_1<-as.numeric(as.character(hpcsub$Sub_metering_1))
hpcsub$Sub_metering_2<-as.numeric(as.character(hpcsub$Sub_metering_2))
hpcsub$Sub_metering_3<-as.numeric(as.character(hpcsub$Sub_metering_3))
##Create plot 3 and copy to a png file
png('plot3.png', width=480, height=480)
plot(hpcsub$Time, hpcsub$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="", )
lines(hpcsub$Time, hpcsub$Sub_metering_2, type="l", col="red")
lines(hpcsub$Time, hpcsub$Sub_metering_3, type="l", col="blue")
legend("topright", lty=c(1,1), col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
433f846fe082b5247a03becc9f7321de58715f3f
|
384c3dbc571be91c6f743d1427dec00f13e0d8ae
|
/r/kernels/panda1023-svm-example-for-titanic/script/svm-example-for-titanic.R
|
729c948c2ef88c0ac7024745e1170d10163676c3
|
[] |
no_license
|
helenaK/trustworthy-titanic
|
b9acdd8ca94f2fa3f7eb965596eed4a62821b21e
|
ade0e487820cf38974561da2403ebe0da9de8bc6
|
refs/heads/master
| 2022-12-09T20:56:30.700809
| 2020-09-10T14:22:24
| 2020-09-10T14:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,457
|
r
|
svm-example-for-titanic.R
|
# This script trains a Random Forest model based on the data,
# saves a sample submission, and plots the relative importance
# of the variables in making predictions
# I made some changes to add more models including
library(ggplot2)
library("e1071")
set.seed(1)
train <- read.csv("../input/train.csv", stringsAsFactors=FALSE)
test <- read.csv("../input/test.csv", stringsAsFactors=FALSE)
extractFeatures <- function(data) {
features <- c("Pclass",
"Age",
"Sex",
"Parch",
"SibSp",
"Fare",
"Embarked")
fea <- data[,features]
fea$Age[is.na(fea$Age)] <- -1
fea$Fare[is.na(fea$Fare)] <- median(fea$Fare, na.rm=TRUE)
fea$Embarked[fea$Embarked==""] = "S"
fea$Sex <- as.factor(fea$Sex)
fea$Embarked <- as.factor(fea$Embarked)
return(fea)
}
#rf <- randomForest(extractFeatures(train), as.factor(train$Survived), ntree=300, importance=TRUE)
svm_model <- svm(as.factor(train$Survived) ~ ., data=extractFeatures(train))
#submission <- data.frame(PassengerId = test$PassengerId)
#submission$Survived <- predict(rf, extractFeatures(test))
#write.csv(submission, file = "1_random_forest_r_submission.csv", row.names=FALSE)
submission_svm <- data.frame(PassengerId = test$PassengerId)
submission_svm$Survived <- predict(svm_model, extractFeatures(test))
write.csv(submission_svm, file = "1_svm_r_submission.csv", row.names=FALSE)
|
7a72cd778a6ecdae8ec973c7020f102337495e69
|
f8eb5031901516071f5ff3738f0b4e198fe3f381
|
/SentimentAnalysis/main.R
|
f1c3558683f93a0101ce3fdfe1d2226911ed7fc2
|
[] |
no_license
|
jordanatlas/HLML
|
0d255ffd17c2888420c27cf35a0ce082ccb615d1
|
723b78ea2f1464176b17511190560a2a95ae00c2
|
refs/heads/master
| 2016-09-06T03:11:24.884141
| 2014-06-20T21:20:01
| 2014-06-20T21:20:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,976
|
r
|
main.R
|
# required packages
require("plyr") # package to compute counts in aggregates
require("nnet") # multinomial models package
# constants
inputTestData <- "test.tsv"
inputTrainingData <- "train.tsv"
outputTestData <- "test_output.csv"
outputTestDataKaggle <- "test_output_kaggle.csv"
outputTestDataWeka <- "test_output_weka.csv"
outputTrainingData <- "train_output.csv"
outputTrainDataWeka <- "train_output_weka.csv"
emptySentiment <- -1 # set in the final output stage for Sentiment = NA
##############################################################################
# Load data - combine test and training data, set Sentiment = NA for test set,
# set PredictedSentiment to NA for test and training set.
##############################################################################
loadData <- function(testDataPath, trainingDataPath)
{
# parse test set
test <- read.table(testDataPath,
header = TRUE,
sep = "\t",
comment.char = "",
quote = "",
colClasses = c("integer","integer","character"))
# parse training set
train <- read.table(trainingDataPath,
header = TRUE,
sep = "\t",
comment.char = "",
quote = "",
colClasses = c("integer","integer","character","integer"))
# combine test and train set into data
Sentiment = rep(as.integer(NA), nrow(test))
test <- cbind(test, Sentiment)
data <- rbind(train, test)
Predicted_Sentiment <- rep(as.integer(NA), nrow(data))
data <- cbind(data, Predicted_Sentiment)
}
dataset = loadData(inputTestData, inputTrainingData)
##############################################################################
# Load features - Load features from external sources for cases where feature
# calculation is expensive. Combine these features with our dataset.
# NOTE: Currently assuming that feature data has all rows from train and test in
# same order as train and test. THis might not be safe for all new features
# going forward
##############################################################################
polarityFeatureData <- "train_test_polarity.csv"
polarity <- read.csv(polarityFeatureData)
dataset <- cbind(dataset, polarity$polarity.avg)
#####################
# Calculate features
#####################
# Determine number of characters in the phrase
NumCharactersInPhrase = sapply(dataset$Phrase, nchar)
dataset = cbind(dataset, NumCharactersInPhrase)
# Determine number of words in the phrase
NumWordsInPhrase = sapply(strsplit(dataset$Phrase, " "), length)
dataset = cbind(dataset, NumWordsInPhrase)
# Determine max word length in the phrase
MaxWordLengthInPhrase = sapply(lapply(strsplit(dataset$Phrase, " "), nchar), max)
dataset = cbind(dataset, MaxWordLengthInPhrase)
# Determine which phrases have a "!"
ContainsExclamation = sapply(dataset$Phrase, grepl, "!")
dataset = cbind(dataset, ContainsExclamation)
# Determine which phrases have a "#"
ContainsPound = sapply(dataset$Phrase, grepl, "#")
dataset = cbind(dataset, ContainsPound)
#####################
# Data summary
#####################
summary(dataset)
#####################
# Train model
#####################
model = multinom(Sentiment ~ NumCharactersInPhrase + NumWordsInPhrase + MaxWordLengthInPhrase + polarity$polarity.avg
+ ContainsExclamation + ContainsPound,
data = dataset,
na.rm = TRUE) # only use training set to train model
#####################
# Predict
#####################
prediction = predict(model, dataset)
dataset$Predicted_Sentiment = as.integer(as.character(prediction))
#####################
# Validate
#####################
accuracy = round(mean(dataset$Sentiment == dataset$Predicted_Sentiment, na.rm = TRUE) * 100,2)
#####################
# Output
#####################
# Reorder dataset so that Sentiment is last
numcol <- ncol(dataset)
dataset <- dataset[,c(1,2,3,6:numcol,5,4)]
test = dataset[is.na(dataset$Sentiment),]
train = dataset[!is.na(dataset$Sentiment),]
# set Sentiment = emptySentiment to numeric value so easier to import to other tools
test$Sentiment = emptySentiment
# write generic csv
write.csv(test, file = outputTestData, row.names = FALSE)
write.csv(train, file = outputTrainingData, row.names = FALSE)
# write kaggle test output csv
test_kaggle <- test[, c('PhraseId', 'Predicted_Sentiment')]
colnames(test_kaggle) <- c("PhraseId", "Sentiment")
write.csv(test_kaggle, file = outputTestDataKaggle, row.names = FALSE)
# write weka friendly output
test_weka = subset(test, select=-c(PhraseId,SentenceId,Phrase,Predicted_Sentiment))
test_weka$Sentiment = '?'
train_weka = subset(train, select=-c(PhraseId,SentenceId,Phrase,Predicted_Sentiment))
write.csv(test_weka, file = outputTestDataWeka, row.names = FALSE)
write.csv(train_weka, file = outputTrainDataWeka, row.names = FALSE)
|
7d1ecb63ad9cc87aff5a8bb26cea96ae4406c4ec
|
f099279224e672b76e7696650b8faa72e112ac88
|
/OM/model_base.R
|
efbeb2e2eb0b9bcafd9c3d5d2e25ee3ba2354627
|
[] |
no_license
|
iotcwpm/SWO
|
4d6559992774a400e600079c1a6116e4e0ff346c
|
a01312eb57231788b3018703cfcba8091ba1f8db
|
refs/heads/main
| 2022-03-06T22:49:57.645612
| 2022-03-03T20:21:14
| 2022-03-03T20:21:14
| 75,840,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,178
|
r
|
model_base.R
|
# model_base.R - Runs and diagnostics for the base case SS3 model
# SWO/OM/model_base.R
# Copyright Iago MOSQUEIRA (WMR), 2020
# Author: Iago MOSQUEIRA (WMR) <iago.mosqueira@wur.nl>
# Modified: Daniela Rosa (IPMA)
# Distributed under the terms of the EUPL-1.2
library(ss3om)
library(ss3diags)
library(icesTAF)
# SET base case = io4_h80_GoMf_r2_CL005
mkdir("om/model/base")
cp("om/data/sa/*", "om/model/base/")
# --- DIAGNOSTICS
# 2. Convergence level
res_base$Convergence_Level > 1e-4
# RE-RUN with starter.ss$$init_values_src = 1
# starter <- r4ss::SS_readstarter('model/base/starter.ss', verbose=FALSE)
# starter$jitter_fraction <- 0.25
#
# SS_writestarter(starter, dir=file.path("model/base"), overwrite=TRUE)
# --- RETRO
# CREATE dir
mkdir("om/model/base/retro")
# CREATE retros
prepareRetro("om/model/base", years=10)
# RUN retro
# system("cd model/base/retro; ls | parallel -j10 --progress '(cd {}; ss3 -nox)'")
# LOAD output
base <- readOMSss3("model/base", range=c(minfbar=2, maxfbar=8))
# LOAD retro
dirs <- setNames(c("model/base", list.dirs("model/base/retro",
recursive=FALSE)), nm=seq(0, 10))
retro <- lapply(dirs, readOutputss3)
retrostk <- loadFLS("model/retro")
# SUMMARIZE 5 peels only
retrosumm <- SSsummarize(retro[1:6])
# FLStocks retro
retrofls <- FLStocks(lapply(retro, buildFLSss330, range=c(minfbar=2, maxfbar=8)))
# SAVE
save(base, retro, retrosumm, file="model/base.Rdata", compress="xz")
# --- DIAGNOSTICS
load("model/base.Rdata")
# - fit & convergence
convergencelevel("model/base")
# - catch likelihood > 1e-5
base$likelihoods_used["Catch", "values"] > 1e-5
# - Mohn's rho
SSmohnsrho(retrosumm)
SSplotRetro(retrosumm, xmin=2005)
# - runs test
# CPUEs
# tiff(file="CPUE_runs.tiff", bg = "white", compression="lzw",width = 32,
# height = 20, units = "cm", res = 300)
sspar(mfrow=c(3, 2), plot.cex = 0.7)
cpue_rtes <- SSplotRunstest(out, add=T, subplots="cpue", indexselect=c(1:4,9))
# dev.off()
# LN
sspar(mfrow=c(2,2), plot.cex = 0.7)
len_rtes <- SSplotRunstest(out, add=T, subplots="len")
# - MASE HCXVAL
SShcbias(retrosumm)
sspar(mfrow=c(2, 2))
SSplotHCxval(retrosumm, indexselect = c(1,2,4,9))
|
4ab3fa3bd1497aa173732267a82b3021f11ea636
|
d14bcd4679f0ffa43df5267a82544f098095f1d1
|
/R/groupm.mleprobplot.R
|
e3862df0e863a22717b7369bd59831de269ff86e
|
[] |
no_license
|
anhnguyendepocen/SMRD
|
9e52aa72a5abe5274f9a8546475639d11f058c0d
|
c54fa017afca7f20255291c6363194673bc2435a
|
refs/heads/master
| 2022-12-15T12:29:11.165234
| 2020-09-10T13:23:59
| 2020-09-10T13:23:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,703
|
r
|
groupm.mleprobplot.R
|
#' Title
#'
#' @param data.ld
#' @param distribution
#' @param formula
#' @param group.var
#' @param xlab
#' @param ylab
#' @param conf.level
#' @param xlim
#' @param ylim
#' @param relationship
#' @param power
#' @param dump
#' @param mle.intervals
#' @param cex
#' @param grids
#' @param slope.axis
#' @param linear.axes
#' @param lty
#' @param plot.censored.ticks
#' @param time.range
#' @param shape
#' @param ci.list
#' @param col.ci
#' @param printem
#' @param trunc.correct
#' @param include.interaction
#' @param new.data
#' @param plotem
#' @param do.legend
#' @param stresses.limit
#' @param number.points
#' @param plotted
#' @param from.six.plot
#' @param debug1
#' @param theta.start
#' @param parameter.fixed
#' @param compute.subsets
#' @param check.level
#' @param title.line.adj
#' @param ...
#'
#' @return NULL
#' @export
#'
#' @examples
#' \dontrun{
#'
#' ICDevice2.ld <- frame.to.ld(icdevice2,
#' response.column = c(1,2),
#' censor.column = 3,
#' case.weight.column = 4,
#' x.column = 5,
#' data.title = "New Technology Device ALT",
#' xlabel = "Degrees C",
#' time.units = "Hours")
#'
#' groupi.mleprobplot(ICDevice2.ld,
#' distribution = "Lognormal")
#'
#' ICDevice02.groupm.lognor <- groupm.mleprobplot(ICDevice2.ld,
#' distribution = "Lognormal",
#' relationship = "Arrhenius",
#' ci.list = 6)
#' }
groupm.mleprobplot <-
function (data.ld,
distribution,
formula = NULL,
group.var = 1:ncol(xmat(data.ld)),
xlab = get.time.units(data.ld),
ylab = GetSMRDDefault("SMRD.LabelOnYaxis"),
conf.level = GetSMRDDefault("SMRD.ConfLevel")/100,
xlim = c(NA,NA),
ylim = c(NA,NA),
relationship = NULL,
power = NULL,
dump = 0,
mle.intervals = F,
cex = 1,
grids = F,
slope.axis = F,
linear.axes = F,
lty = NULL,
plot.censored.ticks = F,
time.range = c(NA,NA),
shape = NULL,
ci.list = NULL,
col.ci = 4,
printem = F,
trunc.correct = T,
include.interaction = F,
new.data = NULL,
plotem = T,
do.legend = "On plot",
stresses.limit = 18,
number.points = 55,
plotted = rep(T, length(stresses.plus)),
from.six.plot = F,
debug1 = F,
theta.start = NULL,
parameter.fixed = NULL,
compute.subsets = T,
check.level = SMRDOptions("SMRD.DataCheck"),
title.line.adj,
lwd = 2,
mar = c(4.5, 5.25, 3.5, 12.1),
bty = `if`(grids, "o","L"),...)
{
not.stripped <- function (data.d)
{
return(attr(data.d, "not.stripped"))
}
if (missing(title.line.adj)) { title.line.adj = -3 }
the.orig.xmat <- xmat(data.ld)
the.orig.data.ld <- data.ld
the.xmat <- as.data.frame(the.orig.xmat[, group.var, drop = F])
reduced.xmat.names <- dimnames(the.xmat)[[2]]
if (is.null(relationship)) {
group.var.numeric <- unlist(lapply(the.xmat, is.numeric))
relationship <- rep("linear", length = length(group.var))
relationship[!group.var.numeric] <- "class"
}
relationship.sanity(the.xmat, relationship)
relationship <- set.relationship.power(relationship, power)
names(relationship) <- reduced.xmat.names
for (i in 1:ncol(the.xmat)) {
if (generic.relationship.name(relationship[i]) == "class") {
the.xmat[, i] <- as.factor(the.xmat[, i])
}
}
dimnames(the.xmat)[[2]] <- reduced.xmat.names
assign(envir = .frame0,
inherits = TRUE,
"relationship.vector",
relationship)
if (is.null(formula)) formula <- get.default.formula(the.xmat,
relationship)
Terms <- terms(formula)
the.data.frame <- data.frame(Response = rep(1, nrow(the.xmat)),
the.xmat)
names(the.data.frame)[1] <- as.character(attr(Terms,
"variables"))[2]
the.model.matrix <- model.matrix(Terms, the.data.frame)
attr(the.model.matrix, "contrast.method") <- as.character(.Options$contrasts)
stresses <- get.x.markers(data.ld,
group.var = group.var,
do.order = F)
ordered.stresses <- get.x.markers(data.ld,
group.var = group.var,
do.order = T)
`if`(length(relationship) == 1 && relationship == "class",
stress.names <- get.x.markers(data.ld,
group.var = group.var,
do.order = F),
stress.names <- get.x.markers(data.ld,
group.var = group.var,
long = T, do.order = F))
`if`(!is.null(not.stripped(data.ld)),
{ the.not.stripped <- not.stripped(data.ld)
stress.order <- stress.order(data.ld) },
{ the.not.stripped <- rep(T, length(stresses))
stress.order <- match(ordered.stresses, stresses) })
if (map.SMRDDebugLevel() >= 6) {
cat("\n unordered stresses \n")
print(stresses)
cat("\n unordered stress names \n")
print(stress.names)
}
stress.names <- stress.names[stress.order]
stresses <- stresses[stress.order]
if (map.SMRDDebugLevel() >= 6) {
cat("\n ordered stresses \n")
print(stresses)
cat("\n ordered stress names \n")
print(stress.names)
}
if (length(relationship) != length(group.var)) {
stop(paste("\nLength of relationship =",
length(relationship),
" must equal length of group.var =",
length(group.var)))
}
if (plotem && length(stresses) > stresses.limit) {
warning(paste("\nThere are",
length(stresses),
"different explanatory variable combinations.\n",
"A probability plot will not be made.\n"))
plotem <- F
compute.subsets <- F
if (from.six.plot) {
return.list <- "dummy"
attr(return.list, "plotem") <- plotem
return(return.list)
}
}
xmat(data.ld) <- model.matrix.to.matrix(the.model.matrix)
attr(data.ld, "the.relationships") <- relationship
mlest.out <- mlest(data.ld,
distribution,
explan.vars = seq(1:ncol(xmat(data.ld))),
theta.start = theta.start,
parameter.fixed = parameter.fixed,
kprint = dump,
send.col.ones = T,
intercept = F,
embedded = T,...)
xmat(data.ld) <- the.xmat
mlest.out$relationship <- relationship
mlest.out$power <- power
xnames.matrix1 <- as.matrix(reduced.xmat.names)
xnames.matrix2 <- apply(xnames.matrix1, 1, regexpr, attr(Terms, "term.labels"))
xnames.matrix3 <- apply(as.matrix(xnames.matrix2), 2, max) > 0
attr(Terms, "xnames") <- reduced.xmat.names[xnames.matrix3]
mlest.out$terms <- Terms
mlest.out$the.model.matrix <- the.model.matrix
mlest.out$group.var <- 1:length(group.var)
mlest.out$stress.names <- stress.names
mlest.out$stresses <- stresses
attr(data.ld, "xlabel") <- reduced.xmat.names
mlest.out$data.ld <- data.ld
mlest.out$title <- paste(get.data.title(data.ld),
"\n",
paste(reduced.xmat.names,
name.relationship(relationship,
allow = T),
sep = "", collapse = ", "),
paste(", Dist:", distribution, sep = ""))
mlest.out$the.orig.data.ld <- the.orig.data.ld
return.list <- list(groupm.out = mlest.out)
assign(envir = .frame0, inherits = !TRUE,"test.groupm.out", return.list)
bands.list <- list()
nonparametric.list <- list()
ylim.data <- NULL
xlim.quant <- NULL
stresses.plus <- c(stress.names)
if (!is.null(new.data)) {
new.data <- as.data.frame(new.data)
x.names <- colnames(the.orig.xmat)
names(new.data) <- x.names[group.var]
stresses.plus <- unique(c(stress.names,
apply(new.data,
1,
paste,
reduced.xmat.names,
collapse = ";")))
}
plotted <- c(the.not.stripped,
rep(T, length(stresses.plus) - length(stress.names)))
if (is.null(lty)) {
`if`(GetSMRDDefault("SMRD.solid.lines"),
lty <- rep(1, length(plotted)),
lty <- (1:(length(plotted) + 1))[-2])
}
pch <- (1:(length(plotted) + 1))[-2]
col.fhat.vec <- (1:(length(plotted) + length(col.ci) + 1))[-col.ci]
pch <- pch[plotted]
col.fhat.vec <- col.fhat.vec[plotted]
lty <- lty[plotted]
cdfest.out <- cdfest(data.ld)
cdpoints.out <- cdpoints(cdfest.out)
upper.quantile.max <- NULL
if (compute.subsets) {
if (plotem) {
on.exit(par(xpd = F, bty = "o", mar = c(5, 4, 4, 2) + 0.1,err = -1))
}
for (i in 1:length(stresses.plus)) {
data.name <- stresses.plus[i]
`if`(is.onlist(i, ci.list),
conf.level.send <- conf.level,
conf.level.send <- 0 )
if (map.SMRDDebugLevel() >= 6) cat("stress = ",
stresses.plus[i],
"ci=",
conf.level.send,
"\n")
if (i <= length(stresses)) {
if (map.SMRDDebugLevel() >= 6) cat("stress = ",
stresses[i],
"stress name = ",
stress.names[i],
"\n")
data.subset.ld <- multiple.get.data.subset(data.ld,
stresses[i],
columns = 1:ncol(the.xmat))
if (map.SMRDDebugLevel() >= 4) {
cat("*******Loop index:", stresses.plus[i], "\n")
print(data.subset.ld)
}
if (is.null(data.subset.ld)) break
single.xmat <- as.data.frame(xmat(data.subset.ld))
dimnames(single.xmat)[[2]] <- reduced.xmat.names
get.location.out <- get.single.dist(mlest.out,
single.xmat[1, , drop = F])
mlest.dummy <- list(distribution = distribution,
theta.hat = get.location.out$thetavec,
vcv.matrix = get.location.out$vcv,
y = Response(mlest.out$data.ld),
ierfit = 0,
iervcv = 0)
if (!good.data(data.subset.ld,
check.level = check.level,
number.needed = 1)) {
if (plotem) message(paste("Skipping",
paste(stress.names[i],collapse = " "),
"in probability plot because too few failures\n"))
} else {
cdfest.out <- cdfest(data.subset.ld)
if (length(cdfest.out$q) > 0) {
cdpoints.out <- cdpoints(cdfest.out)
trunc.correct <- (!is.null(cdfest.out$left.trun.cond) ||
!is.null(cdfest.out$right.trun.cond)) &&
trunc.correct
if (trunc.correct) {
mlest.subset.out <- mlest(data.subset.ld, distribution)
cdpoints.out <- truncadj(cdpoints.out,
mlest.dummy,
debug1 = debug1)
}
nonparametric.list[[data.name]] <- cdpoints.out
}
}
if (printem) {
cat("\n\nAt conditions ", stress.names[i],":\n \n", sep = "")
print(get.location.out$thetavec)
cat("\n")
print(get.location.out$vcv)
cat("\n\n")
}
sublist <- list(stresses = stresses[i],
theta.hat = get.location.out$thetavec,
vcv.matrix = get.location.out$vcv,
distribution = distribution,
data.ld = data.subset.ld,
kodet = c(1, 2))
oldClass(sublist) <- "mlest"
return.list[[stress.names[i]]] <- sublist
upper.quantile <- 0.99 * max(cdpoints.out$pplot + 0.01)
upper.quantile.max <- max(upper.quantile, upper.quantile.max)
the.quantiles <- quantiles.mlest(mlest.dummy,
printem = F,
to = upper.quantile)[, "Quanhat"]
xlim.quant.now <- range(the.quantiles)
xlim.quant <- range(xlim.quant, xlim.quant.now)
xtvna <- is.na(time.range)
`if`(any(!xtvna),
{ xlim.quant <- range(time.range[!xtvna], xlim.quant)
xlim.quant.use <- xlim.quant },
{ xlim.quant.use <- xlim.quant.now })
bands <- get.parametric.bands.zhat(mlest.dummy,
conf.level = conf.level.send,
xlim = xlim.quant.use)
} else {
inow <- i - length(stresses)
the.quantiles <- quantiles.groupm.out(mlest.out,
new.data = new.data[inow, , drop = F],
printem = F,
to = upper.quantile.max)
the.quantiles <- the.quantiles[, "Quanhat"]
the.quantiles <- the.quantiles[the.quantiles != Inf]
tv.extend <- NULL
xtvna <- is.na(time.range)
if (any(!xtvna)) {
xlim.quant <- range(time.range[!xtvna], xlim.quant)
tv.extend <- range(time.range[!xtvna])
}
`if`(length(the.quantiles) > 0,
{ xlim.quant <- range(xlim.quant, the.quantiles)
tv.range <- c(the.quantiles, tv.extend) },
{ tv.range <- xlim.quant })
fail.prob.out <-
failure.probabilities.groupm.out(mlest.out,
new.data = new.data[inow, , drop = F],
time.vec = vec.from.range(range(tv.range),distribution, number.points = number.points),
printem = F,
conf.level = conf.level.send)
if (is.null(fail.prob.out)) {
bands.list[[data.name]] <- "dummy"
next
}
`if`(ncol(fail.prob.out) > 3,
bands <- list(times = fail.prob.out[, 1],
fhat = fail.prob.out[, 2],
lower = fail.prob.out[, 4],
upper = fail.prob.out[, 5]),
bands <- list(times = fail.prob.out[, 1],
fhat = fail.prob.out[, 2]))
}
bands.list[[data.name]] <- bands
ylim.data <- range(ylim.data,
cdpoints.out$pplot,
bands$fhat,
bands$lower,
bands$upper)
if (dump) browser()
}
if (plotem && length(nonparametric.list) == 0) {
warning(paste("No estimable data sets in", get.data.title(data.ld)))
plotem <- F
} else {
yrna <- is.na(ylim)
if (any(yrna)) ylim[yrna] <- ylim.data[yrna]
xrna <- is.na(xlim)
if (any(xrna)) xlim[xrna] <- xlim.quant[xrna]
}
if (plotem) {
log.of.data <- probplot.setup(distribution,
xlim,
ylim,
xlab = xlab,
ylab = ylab,
grids = grids,
linear.axes = linear.axes,
slope.axis = slope.axis,
cex = cex,
title.line.adj = title.line.adj,
mar = mar,
bty = bty,...)
}
bands.list.data.names <- names(bands.list)
if (map.SMRDDebugLevel() >= 6) {
cat("\nbands.list\n")
print(bands.list.data.names)
cat("\nstresses.plus\n")
print(stresses.plus)
}
for (i in 1:length(bands.list)) {
data.name <- bands.list.data.names[i]
if (plotem) {
if (is.onlist(data.name, names(nonparametric.list))) {
cdpoints.out <- nonparametric.list[[data.name]]
points.default(pp.data(cdpoints.out$yplot, log.of.data),
quant(cdpoints.out$pplot, distribution),
col = col.fhat.vec[i],
pch = pch[i]%%19,
cex = (1.2 * GetSMRDDefault("SMRD.point.size"))/100)
}
bands <- bands.list[[data.name]]
if (is.null(bands$times)) next
times <- bands$times
lines(pp.data(times, log.of.data),
pp.quant(bands$fhat, distribution, shape),
col = col.fhat.vec[i],
lty = lty[i],
lwd = lwd)
if (!is.null(bands$lower)) {
lines(pp.data(times, log.of.data),
pp.quant(bands$lower, distribution, shape),
col = col.ci,
lty = 3,
lwd = lwd)
lines(pp.data(times, log.of.data),
pp.quant(bands$upper, distribution, shape),
col = col.ci,
lty = 3,
lwd = lwd)
}
}
}
f.plot.censored.ticks(data.ld,
log.of.data,
plot.censored.ticks)
pch.done <- -pch
pch.done[1:length(stresses)] <- -pch.done[1:length(stresses)]
if (do.legend == "On plot" && plotem) {
par(xpd = T)
legend(x.loc(1.05),
y.loc(0.99),
legend = parse(text = switch.units(stresses.plus, data.ld)),
cex = 1,
bty = "n",
col = col.fhat.vec,
lty = lty,
lwd = lwd,
seg.len = 1.5,
pch = pch.done%%19,
y.intersp = 1,
adj = c(-0.1))
}
if (do.legend == "New page" && plotem) {
plot(c(0, 0),
c(1, 1),
xlab = "",
ylab = "",
type = "n",
xaxt = "n",
yaxt = "n")
legend(x.loc(0.003),
y.loc(0.994),
legend = parse(text = switch.units(stresses.plus,data,ld)),
cex = 1,
bty = "n",
col = col.fhat.vec,
lty = lty,
pch = pch.done%%19,
y.intersp = 0.675)
}
}
`if`(length(group.var) == 1,
oldClass(return.list) <- c("alt.fit", "groupm.out"),
oldClass(return.list) <- c("groupm.out"))
attr(return.list, "plotem") <- plotem
invisible(return.list)
}
|
3bd986ce8be11796a8222abe59b4e7df1615e04b
|
57f883e1a1b8031b09f884f6e1b1d57f0a24681a
|
/data-raw/tx-rates.R
|
928e10b3e8ae817b8afcf20933855ebc10912f12
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rnabioco/practical-data-analysis
|
704edbf97df3b6834263d98287db54f1e663135c
|
676e05830a1a65bd5d978f124cc120b2954c527f
|
refs/heads/master
| 2022-07-20T19:23:08.576961
| 2022-07-07T00:17:50
| 2022-07-07T00:17:50
| 105,456,301
| 7
| 2
|
NOASSERTION
| 2019-12-10T22:37:30
| 2017-10-01T16:32:15
|
R
|
UTF-8
|
R
| false
| false
| 644
|
r
|
tx-rates.R
|
# Download transcription-rate estimates (GEO series GSE56977) and package
# them as a gene x time-point numeric matrix `tx_rates` for this package.
# Make the (previously implicit) dependencies explicit so the script is
# runnable in a fresh session.
library(readr)
library(dplyr)
library(tibble)

download.file("ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE56nnn/GSE56977/suppl/GSE56977_rates_gene.tx.txt.gz",
              "tx_rates.txt.gz")

# The first line of the file is a header we discard; assign our own names.
dat <- read_tsv("tx_rates.txt.gz", skip = 1, col_names = FALSE)
col_ids <- c(
  "gene",
  "quantile"
)
# Rate estimates at 15-minute intervals from 0 to 180 minutes
other_col_ids <- paste("rate at", seq(0, 180, by = 15))
new_col_ids <- c(col_ids, other_col_ids)
colnames(dat) <- new_col_ids

## tidy data into matrix: drop the quantile column, then use the gene ids
## as row names of a numeric matrix.
## (Two earlier no-op statements whose results were discarded have been
## removed; only the assignment below has any effect.)
dat <- dat %>% select(-quantile)
tx_rates <- dat %>% as.data.frame() %>%
  column_to_rownames("gene") %>%
  as.matrix()

usethis::use_data(tx_rates, overwrite = TRUE, compress = "xz")
|
dd4d4bec44f89b7b35b4bbdabc2dcf58d4a40670
|
5b520b6461fc479ab03932eda8443804355a574f
|
/dummy_data_maker.R
|
5a673e5f932ca7bf9b729a6d97266022ed4afc23
|
[] |
no_license
|
qgeissmann/r_workshops
|
12ecf4f601eb07a565165b1fa73b61c598918310
|
5a43d08fbcfc328e3f382fa06ea59489061a1d5e
|
refs/heads/master
| 2020-09-03T03:25:53.505275
| 2019-11-03T23:13:04
| 2019-11-03T23:13:04
| 219,373,966
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,386
|
r
|
dummy_data_maker.R
|
# Generates a dummy data set of weekly insect trap counts (pest, predator,
# parasitoid) across six sites and writes it to CSV for workshop exercises.
# NOTE(review): `rm(list=ls())` in a script is an anti-pattern (clears the
# caller's workspace); kept unchanged to preserve behaviour.
rm(list=ls())
set.seed(1)
library( data.table)
# Number of weekly sampling dates
N_PROTO <- 30
date <- as.Date("2019-03-01") + 1:N_PROTO * 7
site <- LETTERS[1:6]
trap_id <- 1:4
# All date x site x trap combinations, keyed for the joins below
dt <- data.table(expand.grid(date=date,site=site,trap_id=trap_id),key=c("date","site"))
# Latent temporal trends as Gaussian random walks
trend_pest <- cumsum(rnorm(N_PROTO))
trend_pred <- cumsum(rnorm(N_PROTO))
# NOTE(review): this adds trend_pred to itself (i.e. 2 * trend_pred);
# possibly intended as trend_pest + trend_pred -- confirm with the author.
trend_para <- trend_pred + trend_pred
# Min-max rescale to [0, 1]
range01 <- function(x){(x-min(x))/(max(x)-min(x))}
trends <- data.table(date=date,
trend_pest=range01(trend_pest),
trend_pred=range01(trend_pred),
trend_para=range01(trend_para),
t=1:N_PROTO)
# Per-site abundance parameters: a1 scales the trend, b1 is a baseline
effects <- data.table(a1 = c(10.2, 7.4, 9.2, 15.3, 19.2, 21.9),
b1 = c(0.5, 2.1, 3.9, 6.1,3.1,9.0),
site=LETTERS[1:6])
dt <- trends[effects[dt, on='site'], on='date']
# Poisson counts whose mean follows the site parameters and the trend
dt[, `:=`(
N_pest = rpois(.N, a1*trend_pest + b1),
N_pred = rpois(.N, a1*trend_pred + b1),
N_para =rpois(.N, a1*trend_para + b1)
)]
# Zero out all counts before April
dt[date < '2019-04-01', `:=`(
N_pest = 0,
N_pred = 0,
N_para = 0
)]
# Keep a random 80% of rows to mimic missing field samples
dt <- dt[sample(1:nrow(dt),size = round(nrow(dt) * .8), replace=F)]
# Map sites to farming practice: A-C conventional, D-F organic
m <- rep(c('conv', 'orga'), each=3)
names(m) <- site
dt[, practice:=m[site]]
dt
# Final column selection, sorted by date and site, written to CSV
dt_final <- dt[,.(date,site,practice, N_pest, N_para, N_pred)]
setkey(dt_final, date,site)
fwrite(dt_final, "2019-ag_practices_effect_on_pest.csv")
|
9c478ef9441007df9a797415eb727a3e4a4d5490
|
882445fe44bbd012c82d7c72633d67f2b7c0306c
|
/tests/testthat/helper.R
|
184ebbc523b8416057328749554ede25947cce48
|
[] |
no_license
|
Crunch-io/crunchgeo
|
941b9453b7ef471198a70e231d10b7ded173d15b
|
671afa96f7f2e6959b6916d2c308a8ffc5c80e81
|
refs/heads/master
| 2021-01-01T16:59:32.791673
| 2017-08-07T20:12:05
| 2017-08-07T20:12:05
| 97,972,639
| 0
| 0
| null | 2020-01-10T18:03:35
| 2017-07-21T17:39:15
|
R
|
UTF-8
|
R
| false
| false
| 265
|
r
|
helper.R
|
# testthat setup helper: fixes locale, seed and warning behaviour before the
# test suite runs, then pulls in the shared crunch test framework.
Sys.setlocale("LC_COLLATE", "C") ## What CRAN does; affects sort order
set.seed(999) ## To ensure that tests that involve randomness are reproducible
options(warn=1) # print warnings as they occur rather than collecting them
# grab the crunch package's test framework
source(system.file("crunch-test.R", package="crunch"))
|
ff1f6e5e896596e3b7e609c1686b96450dd9d62d
|
2327d0bc2cc45a5504c39109846e0f4cba266606
|
/QID-3203-SFEcomplogreturns/SFEcomplogreturns.R
|
a11524ba62c1b9cc1b4bdbfca00d6eb90aaf3b82
|
[] |
no_license
|
QuantLet/SFE
|
3d98a33cfcdc533210856c7618c32a78e111a6ce
|
d25a728a4371538eae982f44ea811b5b93328828
|
refs/heads/master
| 2022-06-15T13:35:17.387252
| 2022-06-08T01:22:00
| 2022-06-08T01:22:00
| 72,103,182
| 12
| 32
| null | 2022-01-30T18:58:21
| 2016-10-27T11:50:43
|
R
|
UTF-8
|
R
| false
| false
| 1,635
|
r
|
SFEcomplogreturns.R
|
# clear variables and close windows
rm(list = ls(all = TRUE))
graphics.off()

# Read a price-level series (single column) and convert it to log returns.
read_log_returns <- function(file) {
  levels <- read.table(file)
  diff(log(levels[, 1]))
}

# Plot one log-return series with the shared layout: y axis labelled in
# percent (-10 to 10), x axis ticked every 500 observations.
plot_log_returns <- function(x, main, xlab) {
  n <- length(x)
  plot(x, ylim = c(-0.1, 0.1), main = main, type = "l", col = "blue",
       xlab = xlab, ylab = "Log-Returns", axes = FALSE, frame = TRUE)
  axis(side = 1, at = seq(0, n, 500), labels = seq(0, n, 500))
  axis(side = 2, at = seq(-0.1, 0.1, 0.1), labels = seq(-10, 10, 10))
}

# The four data sets, their panel titles, and their x-axis labels
# (only the bottom row of the 2x2 layout carries the "Time" label).
files  <- c("FTSElevel(03.01.00-30.10.06).txt",
            "BAYERlevel(03.01.00-30.10.06).txt",
            "SIEMENSlevel(03.01.00-30.10.06).txt",
            "VWlevel(03.01.00-30.10.06).txt")
titles <- c("FTSE", "Bayer", "Siemens", "Volkswagen")
xlabs  <- c("", "", "Time", "Time")

# 2x2 panel of the four log-return series
par(mfrow = c(2, 2))
for (k in seq_along(files)) {
  plot_log_returns(read_log_returns(files[k]), titles[k], xlabs[k])
}
|
812f00b2600db0d802eba13092da4dc8498d66f4
|
9a27ad5e99fe494437b23043e6220c4846325d30
|
/r/man/print.RFCDE.Rd
|
47656c0e93506d18bfdff20f64c735b50818eb70
|
[] |
no_license
|
zhangc927/RFCDE
|
cbea299adf0de1c738e8b6c15fd2a7da2295a150
|
b388c4ff4eb60c59a8e5a4ff7518d4212e15c6f8
|
refs/heads/master
| 2023-05-26T14:30:48.945105
| 2021-06-10T20:28:45
| 2021-06-10T20:28:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 329
|
rd
|
print.RFCDE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RFCDE.R
\name{print.RFCDE}
\alias{print.RFCDE}
\title{Print method for RFCDE objects}
\usage{
\method{print}{RFCDE}(x, ...)
}
\arguments{
\item{x}{A RFCDE object.}
\item{...}{Other arguments to print}
}
\description{
Print method for RFCDE objects
}
|
5eaba32b268eae3f7552d523c40ae0359c966785
|
c29240b00e31dca6300b6c051d69d61b53e05c1a
|
/man/hill_rarefaction.Rd
|
16c0906846749c4ffb46d9f13124b31b1a574ac5
|
[] |
no_license
|
metabaRfactory/metabaR
|
b514c595c7bc8977afaf301312170ccd996c3733
|
23f3c8e3de9a08ca2e679949c29c0e4cd7c52282
|
refs/heads/master
| 2023-04-15T10:38:36.294253
| 2023-01-17T08:28:14
| 2023-01-17T08:28:14
| 165,215,140
| 14
| 2
| null | 2023-01-17T15:33:53
| 2019-01-11T09:18:24
|
HTML
|
UTF-8
|
R
| false
| true
| 4,029
|
rd
|
hill_rarefaction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hill_rarefaction.R
\name{hill_rarefaction}
\alias{hill_rarefaction}
\alias{gghill_rarefaction}
\title{Generating rarefaction curves using Hill numbers}
\usage{
hill_rarefaction(metabarlist, nboot = 10, nsteps = 10)
gghill_rarefaction(hill_rar, group = NULL)
}
\arguments{
\item{metabarlist}{a \code{metabarlist} object}
\item{nboot}{the number of resampling events to estimate \eqn{^{q}D} at a given sequencing depth.}
\item{nsteps}{the number of steps between sample sizes for the rarefaction curves.
Default is 10 steps.}
\item{hill_rar}{an object of class \code{"hill_rarefaction"}.}
\item{group}{a vector or factor giving the grouping of each pcr included in the
\code{"hill_rarefaction"} object. Missing values will be treated as
another group and a warning will be given. The elements should correspond to
the pcrs included in the `hill_rar$samples` object. Default is `NULL` for no grouping.}
}
\value{
The \code{hill_rarefaction} function returns an object of class \code{"hill_rarefaction"},
which corresponds to a table of diversity indices for each pcr rarefied at each `nsteps`
sequencing depth, as well as the arguments `nboot` and `nsteps` to conduct the analysis.
}
\description{
These functions generate and plot rarefaction curves from a \code{metabarlist} object using the hill numbers framework (i.e. \eqn{^{q}D}), as well as Good's coverage index.
}
\details{
\code{\link{hill_rarefaction}} builds a rarefaction analysis for each PCR of a \code{metabarlist} object using Hill numbers for q={0,1,2} (see Chao et al. 2014 for a review). These indices are equivalent to :
\itemize{
\item{Richness, for q=0}
\item{Exponential of the Shannon entropy, for q->1}
\item{Inverse of the Simpson index, for q=2}
}
The function also returns Good's coverage index (1-singletons/#reads). Note however that this index should be interpreted carefully in metabarcoding data:
\itemize{
\item{absolute singletons (across the whole metabarcoding dataset) are usually filtered out during bioinformatic process (which is the case for the \code{\link{soil_euk}} data). The Good's coverage estimate returned here is only based on the number of singletons per PCR after this filtering process, so the true number of singletons is underestimated here.}
\item{This coverage index gives an assessment of the coverage of the amplicon diversity within a pcr: it includes remaining errors, etc.. The coverage of the genuine DNA fragment diversity in the biological sample is likely to be misestimated with this index.}
}
}
\section{Functions}{
\itemize{
\item \code{hill_rarefaction}: Compute hill_rarefaction curves on a \code{metabarlist} object.
\item \code{gghill_rarefaction}: Plot a object of class \code{"hill_rarefaction"}
}}
\examples{
data(soil_euk)
library(ggplot2)
# Create a subset of pcrs: only a subset of samples from the H20 plot
soil_euk_h20 <- subset_metabarlist(soil_euk,
table = "pcrs",
indices = grepl("H20-[A-B]", rownames(soil_euk$pcrs)))
\donttest{
# run rarefaction (use boot = 20 to limit computation time)
soil_euk_h20.raref <- hill_rarefaction(soil_euk_h20, nboot = 20, nsteps = 10)
# plot the results
gghill_rarefaction(soil_euk_h20.raref)
# plot the results while differentiating litter vs. soil samples
p <- gghill_rarefaction(
soil_euk_h20.raref,
group = soil_euk_h20$samples$Material[match(soil_euk_h20$pcrs$sample_id,
rownames(soil_euk_h20$samples))])
p
p + scale_fill_manual(values = c("goldenrod4", "brown4", "grey")) +
scale_color_manual(values = c("goldenrod4", "brown4", "grey")) +
labs(color = "Material type")
}
}
\references{
Chao, A., Chiu, C. H., & Jost, L. (2014). Unifying species diversity, phylogenetic diversity, functional diversity, and related similarity and differentiation measures through Hill numbers. Annual review of ecology, evolution, and systematics, 45, 297-324.
}
\author{
Lucie Zinger
}
|
5a3346df9b9a4e34daabc6a58afca7702fa3d8fc
|
2d4523c043b19c3118071d3f9946b5a7a74d62f3
|
/tests/testthat/test-makeStandardTable.R
|
59267b2e027a39888ecc3fc555b0c8721f2179e8
|
[
"MIT",
"GPL-3.0-only"
] |
permissive
|
Rapporteket/NORIC
|
6d87df439a204354b73157684ca1a7abe18cbedb
|
515d8f014d489c9170203dfc40844497f7fb4f63
|
refs/heads/master
| 2023-09-01T02:12:43.940714
| 2023-06-29T13:45:33
| 2023-06-29T13:45:33
| 40,961,904
| 1
| 1
|
MIT
| 2023-09-06T10:20:00
| 2015-08-18T09:05:45
|
R
|
UTF-8
|
R
| false
| false
| 599
|
r
|
test-makeStandardTable.R
|
# Checks that the table builders return the expected object classes for each
# output format. Assumes `mst` and `prettyTab` are provided by the package
# under test (NORIC); `mtcars[1:10, ]` serves as a small fixture.
test_that("function returns kable objects", {
# HTML output is post-processed by kableExtra
expect_true("kableExtra" %in% class(mst(tab = mtcars[1:10, ],
type = "html")))
# Default and LaTeX outputs are plain knitr kables
expect_true("knitr_kable" %in% class(mst(tab = mtcars[1:10, ], lsd = TRUE)))
expect_true("knitr_kable" %in% class(mst(tab = mtcars[1:10, ],
type = "latex", lsd = FALSE)))
# prettyTab keeps matrix input as a matrix, with and without a totals row
expect_true("matrix" %in% class(prettyTab(as.matrix(mtcars[1:10, ]))))
expect_true("matrix" %in% class(prettyTab(as.matrix(mtcars[1:10, ]),
add_totals = TRUE)))
})
|
c048e89dbf4f47d1181a396855762ebc40e10c12
|
a8e8000b370d54c2f6a097ee59876827f4daafbe
|
/9.4/code.R
|
a2f507dcbafebf7076276257e96e92294db3da07
|
[] |
no_license
|
weidaoming/R
|
142be073ebdf097740ae5a02a7e75308a06e30d1
|
5048ca1d46025ba41d03b00049a17b309e8dfedc
|
refs/heads/master
| 2021-07-12T10:47:27.552074
| 2017-10-18T07:09:09
| 2017-10-18T07:09:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 106
|
r
|
code.R
|
# Descriptive statistics for a small numeric sample.
x <- c(1, 9, 2, 8, 3, 9, 4, 5, 7, 6)
mean(x)     # arithmetic mean
median(x)   # median
var(x)      # sample variance
sd(x)       # sample standard deviation
summary(x)  # five-number summary plus the mean
|
1fd693ce9b6fbf2a09ced23d74a237e9ee02f5b2
|
c9506e3bcfa0f3568eaac03772c02c2386840ede
|
/Aaron's General Workspace/Template1clusts.r
|
76ede0e80f5caddf2ac7d5debd8ae1f221a4f372
|
[] |
no_license
|
Adamyazori/EDA-Project
|
d374e0517a116298c8f670f34c003dcd98562bb9
|
088cf2e43c7cf98de12980fec340ecbd5d687944
|
refs/heads/master
| 2023-03-20T10:11:40.589122
| 2013-05-14T19:40:49
| 2013-05-14T19:40:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,495
|
r
|
Template1clusts.r
|
##########################################################
###################### CODE SECTION #######################
##########################################################
# Selects which question template this script instantiates.
template.type <- 1
##########################################################
#AF CODE
# Format a p-value as a string, using more decimal places the smaller it is;
# values below 1e-5 are reported as "<.00001".
roundpval <- function(pv) {
  if (pv < .00001) {
    return("<.00001")
  }
  digits <- if (pv < .0001) {
    6
  } else if (pv < .01) {
    4
  } else if (pv < .1) {
    3
  } else {
    2
  }
  formatC(pv, digits = digits, format = "f")
}
#################################
#Get data
# Load the simulated cluster samples; restore the working directory after.
# NOTE(review): hard-coded absolute path -- consider a relative path.
olddir<-getwd()
setwd("C:/Users/Aaron/Documents/JH/EDA project")
load('data_for_clusts.RData')
setwd(olddir)
#beforeRep has nreps*2 unique values, and 6 indexes at the end refering to the original index of the graphs that are replicated
#tick1plots<-0
# Advance the question counter; assumes `tick1clusts` already exists as a
# global in the calling environment.
tick1clusts<-tick1clusts+1
i<-tick1clusts
###################################################
############### ANSWER SECTION ####################
###################################################
###### Assign the true p-value for the data to "answer"
###### (change from the current "NA" value)
answer <- NA
# True number of clusters for sample i (from data_for_clusts.RData)
answer <- clusters[i]
########################################################
################ FIGURE SECTION ########################
#######################################################
# Render the question figure: a heatmap of the i-th simulated sample.
png("FigureQuestion.png", width = 500, height = 500)
heatmap(samples[[i]])
dev.off()
|
c61f91a68c6fe62d0aa1b987c07007fb3ae9021c
|
941bcfc6469da42eec98fd10ad1f3da4236ec697
|
/R/track_bearing_to.R
|
6fc46f0c4150018871d8313b0b36a5e5a554d16c
|
[] |
no_license
|
cran/traipse
|
29c3fd65e98f65049da98b1d878512bfdd93940f
|
01635fd40512f2144e1ce712e0f5912143214e49
|
refs/heads/master
| 2022-10-22T02:59:19.828085
| 2022-10-10T06:40:02
| 2022-10-10T06:40:02
| 236,953,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,717
|
r
|
track_bearing_to.R
|
#' Track bearing to location/s
#'
#' Computes the geodesic bearing, in degrees, from each input position
#' (`x`, `y`) to a target location or locations (`to_x`, `to_y`). The
#' target may be one point per input position, or a single point that is
#' compared against every input position.
#'
#' Missing values in the input are carried through as `NA` in the output;
#' no `NA` padding is added.
#'
#' Absolute bearing is relative to North (0), clockwise positive and
#' anti-clockwise negative: `N = 0, E = 90, S = +/-180, W = -90`.
#'
#' To use this on multiple track ids, use a grouped data frame with tidyverse code like
#' `data %>% group_by(id) %>% mutate(bearing_to = track_bearing_to(lon, lat, to_lon, to_lat))`.
#'
#' @param x longitude
#' @param y latitude
#' @param to_x longitude vector of *to* location/s
#' @param to_y latitude vector of *to* locations/s
#' @export
#' @return a numeric vector of absolute bearing-to in degrees, see Details
#' @examples
#' track_bearing_to(trips0$x, trips0$y, to_x = 147, to_y = -42)[1:10]
#' # N E S W
#' track_bearing_to(0,0, c(0, 10, 0, -10), c(5, 0, -5, 0))
#'
#' # maximum and minimum value are the same direction (due south)
#' track_bearing(c(0, -0.00001), c(0, -1))
#' track_bearing(c(0, 0.00001), c(0, -1))
#'
#' # the absolute minimum is north
#' track_bearing(c(0, 0), c(0, 1))
track_bearing_to <- function(x, y, to_x, to_y) {
  from_pts <- cbind(x, y)
  to_pts <- cbind(to_x, to_y)
  # A single target point is recycled to match the number of positions.
  if (nrow(to_pts) == 1L && nrow(from_pts) > 1L) {
    to_pts <- to_pts[rep(1L, nrow(from_pts)), ]
  }
  geosphere::bearing(from_pts, to_pts)
}
|
651c24e0d8d641ea750a76124d4ce84a41c1150a
|
f298a1e000324a52cc70d682e2c5ef7e210b795d
|
/R/method.R
|
5268277e847bc85eb407de90fb81c58c65e0f463
|
[] |
no_license
|
botam2/test_list
|
90d23f7de85fec497903102632cb4c8d41d270c4
|
7660d9fbb6810b71efd88dcbd3e3a11ff091a2b6
|
refs/heads/main
| 2023-07-11T17:13:08.658935
| 2021-08-25T19:16:51
| 2021-08-25T19:16:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,725
|
r
|
method.R
|
#' Zonal statistic: pixel count
#'
#' Extracts the per-region pixel count of `x` over the features of `y`,
#' reducing pixels with `ee$Reducer$count()`. Features are requested in
#' chunks of `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_count <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$count(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: kurtosis
#'
#' Extracts the per-region kurtosis of `x` over the features of `y`,
#' reducing pixels with `ee$Reducer$kurtosis()`. Features are requested in
#' chunks of `by` to respect Earth Engine payload limits.
#'
#' NOTE: the exported name keeps the historical "kurstosis" spelling for
#' backward compatibility; the reducer call itself was corrected (the first
#' branch previously called the non-existent `ee$Reducer$kurstosis()`).
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_kurstosis <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$kurtosis(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: maximum
#'
#' Extracts the per-region maximum of `x` over the features of `y`,
#' reducing pixels with `ee$Reducer$max()`. Features are requested in
#' chunks of `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_max <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$max(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: mean
#'
#' Extracts the per-region mean of `x` over the features of `y`, reducing
#' pixels with `ee$Reducer$mean()`. Features are requested in chunks of
#' `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_mean <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$mean(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: median
#'
#' Extracts the per-region median of `x` over the features of `y`, reducing
#' pixels with `ee$Reducer$median()`. Features are requested in chunks of
#' `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_median <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$median(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: minimum
#'
#' Extracts the per-region minimum of `x` over the features of `y`,
#' reducing pixels with `ee$Reducer$min()`. Features are requested in
#' chunks of `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_min <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$min(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: mode
#'
#' Extracts the per-region mode of `x` over the features of `y`, reducing
#' pixels with `ee$Reducer$mode()`. Features are requested in chunks of
#' `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_mode <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$mode(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: percentile
#'
#' Extracts per-region percentiles of `x` over the features of `y`,
#' reducing pixels with `ee$Reducer$percentile()`. Features are requested
#' in chunks of `by` to respect Earth Engine payload limits.
#'
#' NOTE(review): in the Earth Engine API, `ee.Reducer.percentile()`
#' normally requires a list of percentiles as its first argument; this
#' call passes none -- confirm it works as intended before relying on it.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_percentile <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$percentile(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: standard deviation
#'
#' Extracts the per-region standard deviation of `x` over the features of
#' `y`, reducing pixels with `ee$Reducer$stdDev()`. Features are requested
#' in chunks of `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_std <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$stdDev(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: sum
#'
#' Extracts the per-region sum of `x` over the features of `y`, reducing
#' pixels with `ee$Reducer$sum()`. Features are requested in chunks of
#' `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_sum <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$sum(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: variance
#'
#' Extracts the per-region variance of `x` over the features of `y`,
#' reducing pixels with `ee$Reducer$variance()`. Features are requested in
#' chunks of `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_variance <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$variance(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
#' Zonal statistic: first value
#'
#' Extracts the per-region first value of `x` over the features of `y`,
#' reducing pixels with `ee$Reducer$first()`. Features are requested in
#' chunks of `by` to respect Earth Engine payload limits.
#'
#' @param x an Earth Engine Image or ImageCollection
#' @param y an Earth Engine Feature or FeatureCollection of regions
#' @param by chunk size: number of features per extraction request
#' @return an `sf` object with one row per feature of `y`
#' @import rgee
ee_first <- function(x, y, by = 1000) {
  y_len <- y$size()$getInfo()
  for (i in seq(1, y_len, by)) {
    index <- i - 1  # Earth Engine list offsets are 0-based
    print(sprintf("Extracting information [%s/%s]...", index, y_len))
    ee_value_layer <- ee$FeatureCollection(y) %>%
      ee$FeatureCollection$toList(by, index) %>%
      ee$FeatureCollection()
    chunk <- ee_extract(
      x = x,
      y = ee_value_layer,
      fun = ee$Reducer$first(),
      sf = TRUE
    )
    if (i == 1) {
      dataset <- chunk
    } else {
      dataset <- rbind(dataset, chunk)
    }
  }
  return(dataset)
}
|
9ff85560b2791b3547a8f8749dec5350568ddd35
|
928683a31caed13e0ffea6eb32180cf29d77a74b
|
/man/read.digraph.Rd
|
715ef13d1968ee0fc7da3bbac6c2caba9e66d0a0
|
[] |
no_license
|
SWotherspoon/QPress
|
30fca8e4bba04bcdcf1559ee68187fbbafc42e15
|
699306e24d588c1b8254ba876b95d5608de87dc2
|
refs/heads/master
| 2022-10-04T00:37:17.928502
| 2022-09-20T05:31:10
| 2022-09-20T05:31:10
| 9,925,502
| 4
| 7
| null | 2019-06-12T21:27:17
| 2013-05-08T01:13:43
|
R
|
UTF-8
|
R
| false
| true
| 2,201
|
rd
|
read.digraph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text.R
\name{read.digraph}
\alias{read.digraph}
\alias{parse.digraph}
\alias{deparse.digraph}
\alias{write.digraph}
\title{Text Representations of Models}
\usage{
read.digraph(file, labels = NULL)
parse.digraph(lines, labels = NULL)
deparse.digraph(edges)
write.digraph(edges, file = "")
}
\arguments{
\item{file}{the name of the file to read or write}
\item{labels}{the sequence of labels to use for the nodes}
\item{lines}{a string representation of the model}
\item{edges}{an edge list.}
}
\value{
The \code{write.digraph} function invisibly returns the
text that was written to the file.
The functions \code{read.digraph} and \code{parse.digraph} return an
edge list - a data frame with columns
\item{\code{From}}{a factor indicating the origin of each edge (the node
that effects)}
\item{\code{To}}{a factor indicating the destination of each edge (the node
that is effected)}
\item{\code{Group}}{an integer vector that indicates the group each edge
belongs to}
\item{\code{Type}}{a factor indicating the edge type -
"N" (negative) ,"P" (positive),"U" (unknown) or "Z" (zero)}
\item{\code{Pair}}{an integer vector that indicates the pairing of
directed edges}
Each edge of the text specification is separated into two directed edges,
and every row of an edge list corresponds to a single directed edge.
}
\description{
Read and write text representations of models
}
\details{
The functions \code{read.digraph} and \code{parse.digraph} read a model
description from a text file and a string respectively, while
\code{write.digraph} writes a text representation of the model to and file.
These functions recognize the following text format. Each line corresponds
to an edge, and must consist of two node labels separated by an arrow. An
arrow consists of one of the character sequences "<","*","<>" or "" on the
left and ">","*","<>" or "" on the right, separated by a sequence of dashes
"-". The number of dashes used in the arrow defines the group number of the
edge.
}
\examples{
edges <- parse.digraph(c("A <-* B","C *-> A","C <- D",
"D -> B","B *--* C","A <--- D"))
edges
deparse.digraph(edges)
}
|
9c678293502c724851242827a4064070d2981bb4
|
36e4ecc719de97e498af4a1f7d2b3faeb220884a
|
/man/ques_invalidOptions.Rd
|
253fb2ccfb31c4a132617851d73cc2d383362332
|
[] |
no_license
|
takewiki/nscspkg
|
388fd23a3cfb0353e5669d535bd161aecade1640
|
259614ab9474c1f4a366db00eda80309bf7f82da
|
refs/heads/master
| 2021-07-17T20:10:37.632378
| 2020-10-02T07:32:44
| 2020-10-02T07:32:44
| 217,025,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 350
|
rd
|
ques_invalidOptions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/07_ques_multiA.R
\name{ques_invalidOptions}
\alias{ques_invalidOptions}
\title{增加辅助无效功能}
\usage{
ques_invalidOptions(data)
}
\arguments{
\item{data}{数据}
}
\value{
返回值
}
\description{
增加辅助无效功能
}
\examples{
ques_invalidOptions();
}
|
75a183cd539f7287e009328aa99bdb1bb73007bf
|
caf49f80f93709b63c5dd4f39b89dc65c5658639
|
/demo_14_creating_documents/LaTeX_from_R/Code/House_Price_Reg.R
|
d9b9fc84409ee5f3ff13d745c98398299fc8c0f2
|
[] |
no_license
|
LeeMorinUCF/QMB6358F20
|
20cbdf9bd5a263b1863391bb4feb41584a9f18be
|
7970330c26d25810eae277935a47eaddbebd8e73
|
refs/heads/master
| 2023-03-30T14:58:14.808107
| 2021-04-04T20:32:51
| 2021-04-04T20:32:51
| 288,279,006
| 7
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,888
|
r
|
House_Price_Reg.R
|
##################################################
#
# QMB 6358: Software Tools for Business Analytics
#
# OLS Regression Demo
# Regression with Data from Spreadsheet
#
# Lealand Morin, Ph.D.
# Assistant Professor
# Department of Economics
# College of Business Administration
# University of Central Florida
#
# October 17, 2020
#
##################################################
#
# House_Price_Reg gives an example of OLS regression
# using data imported from a spreadsheet.
# It automatically generates figures and tables for a
# pdf document built with LaTeX.
#
# Dependencies:
# The xtable library to create tex scripts for tables.
# The texreg library to create tex scripts for tables from
# regression models.
#
##################################################
##################################################
# Preparing the Workspace
##################################################
# Clear workspace.
rm(list=ls(all=TRUE))
# Set working directory.
# wd_path <- '/path/to/your/folder'
# wd_path <- 'C:/Users/le279259/Teaching/QMB6358_Fall_2020/GitRepos/QMB6358F20/demo_14_R_file_IO/LaTeX_from_R'
# setwd(wd_path)
# It's not necessary to set the directory, since we are working in the
# main directory already: that's where the shell script is running.
# Set data directory.
data_dir <- 'Data'
# Set directory for storing figures.
fig_dir <- 'Figures'
# Set directory for storing tables.
tab_dir <- 'Tables'
# Set directory for storing text.
text_dir <- 'Text'
# Load libraries.
# The xtable library creates tex scripts for tables.
# install.packages("xtable")
library(xtable)
# The texreg library creates tex scripts for tables from
# regression models.
# install.packages("texreg")
library(texreg)
##################################################
# Load Data
##################################################
# Read the newly saved dataset.
data_file_path <- sprintf('%s/housing_data.csv', data_dir)
housing_data <- read.csv(file = data_file_path)
# Inspect the data.
summary(housing_data)
##################################################
# Create Tables
##################################################
#--------------------------------------------------
# Summarize numeric variables.
#--------------------------------------------------
# Summarize numeric variables over the entire sample.
# Build a 4-row summary table (Min., Mean, S.D., Max.) of the numeric
# columns of housing_data, later exported to LaTeX via xtable.
# FIX: the original selected columns with lapply(housing_data, class) == 'numeric',
# a fragile list-vs-string comparison that also excludes integer columns.
# vapply(..., is.numeric, logical(1)) is type-stable and treats integer
# columns as numeric too (a backward-compatible generalization).
num_var_list <- colnames(housing_data)[vapply(housing_data, is.numeric, logical(1))]
summary(housing_data[, num_var_list])

# Pre-allocate the output table: one row per statistic, one column per
# variable, plus a leading label column.
out_tab <- data.frame(matrix(nrow = 4, ncol = length(num_var_list) + 1))
colnames(out_tab) <- c('Statistic', num_var_list)
out_tab[, 'Statistic'] <- c('Min.', 'Mean', 'S.D.', 'Max.')
for (col_name in num_var_list) {
  out_tab[1, col_name] <- min(housing_data[, col_name])
  out_tab[2, col_name] <- mean(housing_data[, col_name])
  out_tab[3, col_name] <- sd(housing_data[, col_name])
  out_tab[4, col_name] <- max(housing_data[, col_name])
}
# Convert the table to a LaTex table.
out_xtable <- xtable(out_tab[, ],
digits = 2, label = 'tab:summary',
caption = 'Summary of Numeric Variables')
# Output to TeX file.
tab_file_name <- sprintf('%s/summary.tex', tab_dir)
cat(print(out_xtable), file = tab_file_name, append = FALSE)
#--------------------------------------------------
# Summarize categorical variables
#--------------------------------------------------
# Check that earthquakes occurred only in California:
table(housing_data[, 'in_cali'])
table(housing_data[, 'in_cali'], housing_data[, 'earthquake'])
# Create a table of counts of variables by state and earthquake incidence.
out_tab <- table(housing_data[, 'in_cali'], housing_data[, 'earthquake'])
# Add some column names.
rownames(out_tab) <- c('Other', 'California')
colnames(out_tab) <- c('None', 'Earthquake')
# Convert the table to a LaTex table.
out_xtable <- xtable(out_tab[, ],
digits = 2, label = 'tab:earthquakes',
caption = 'Earthquake Incidence by State')
# Output to TeX file.
tab_file_name <- sprintf('%s/earthquakes.tex', tab_dir)
cat(print(out_xtable), file = tab_file_name, append = FALSE)
#--------------------------------------------------
# Create table with correlation matrix
#--------------------------------------------------
# Calculate a correlation matrix for selected variables.
corr_var_names <- c('house_price', 'income', 'in_cali', 'earthquake')
corr_matrix <- cor(housing_data[, corr_var_names])
print(round(corr_matrix, 3))
# Convert the table to a LaTex table.
out_xtable <- xtable(corr_matrix[, ],
digits = 3, label = 'tab:corr',
caption = 'Correlation Matrix')
# Output to TeX file.
tab_file_name <- sprintf('%s/correlation.tex', tab_dir)
cat(print(out_xtable), file = tab_file_name, append = FALSE)
##################################################
# Plot Figures
##################################################
# Plot a scattergraph of income and housing prices.
plot(housing_data[, 'income'],
housing_data[, 'house_price'],
main = c('House Prices vs. Income', '(all figures in millions)'),
xlab = 'Income',
ylab = 'House Prices',
col = 'blue')
# Not very exciting.
# Maybe another plot will look better.
##################################################
# Estimating the Regression Model
# Model 1: All Variables Included
##################################################
# Note the formula object:
# Y ~ X_1 + X_2 + X_3
# Estimate a regression model.
lm_full_model <- lm(data = housing_data,
formula = house_price ~ income + in_cali + earthquake)
# Output the results to screen.
summary(lm_full_model)
##################################################
# Output table with regression estimates.
##################################################
# The texreg package makes a LaTeX table from the regression results.
# Print the output to a LaTeX file.
tab_file_name <- 'lm_model_1.tex'
out_file_name <- sprintf('%s/%s', tab_dir, tab_file_name)
texreg(lm_full_model,
digits = 3,
file = out_file_name,
label = 'tab:lm_model_1',
caption = "Regression Model 1")
##################################################
# Output text describing regression model.
##################################################
# See what's inside the lm_full_model object:
class(lm_full_model)
attributes(lm_full_model)
lm_full_model$coefficients
lm_full_model$coefficients['income']
lm_full_model$coefficients[2]
coef(lm_full_model)
# Model predictions:
summary(predict(lm_full_model))
housing_data[, 'predictions'] <- predict(lm_full_model)
# Other statistics are stored in the model.
attributes(summary(lm_full_model))
# The summary also returns statistics, such as R-squared.
lm_full_model_summ <- summary(lm_full_model)
lm_full_model_summ$adj.r.squared
# Create a script for a write-up about the parameters and statistics
# in the model.
# (I admit that this level of automation is a bit much
# but it highlights the possibilities.)
text_file_name <- 'regression.tex'
out_file_name <- sprintf('%s/%s', text_dir, text_file_name)
# Start a new file with append = FALSE.
cat('\n%% Regression model description:\n\n',
file = out_file_name, append = FALSE)
# Append new lines of text with append = TRUE.
cat('\n\nThe regression model predicts housing prices as follows \n',
file = out_file_name, append = TRUE)
cat('(all figures in millions).\n',
file = out_file_name, append = TRUE)
cat('For every one dollar increase in average income, housing prices are expected \n',
file = out_file_name, append = TRUE)
cat(sprintf('to rise by %1.3f. \n', lm_full_model$coefficients['income']),
file = out_file_name, append = TRUE)
cat('If the home is located in California, housing prices are expected \n',
file = out_file_name, append = TRUE)
cat(sprintf('to be %1.3f higher. \n', lm_full_model$coefficients['in_cali']),
file = out_file_name, append = TRUE)
cat('If there was an earthquake in the zip code, housing prices are expected \n',
file = out_file_name, append = TRUE)
cat(sprintf('to be %1.3f lower. \n', lm_full_model$coefficients['earthquake']),
file = out_file_name, append = TRUE)
# Include a summary of the quality of fit of the model.
cat('Overall, this model provides a fairly good description ',
file = out_file_name, append = TRUE)
cat(sprintf('with an $R^2$ of %1.3f.\n\n', lm_full_model_summ$adj.r.squared),
file = out_file_name, append = TRUE)
##################################################
# Plot regression results for selected model.
##################################################
# Calculate the predictions from the fitted model.
housing_data[, 'predictions'] <- predict(lm_full_model,
newdata = housing_data)
summary(housing_data[, c('house_price', 'predictions')])
plot(housing_data[, c('house_price', 'predictions')],
main = 'Regression Model Predictions',
xlab = 'House Price',
ylab = 'Prediction')
# So far the plot has printed to screen.
# Now use the setEPS and postscript functions to save the figure to a file.
fig_file_name <- 'predictions.eps'
out_file_name <- sprintf('%s/%s', fig_dir, fig_file_name)
setEPS()
postscript(out_file_name)
# Plot the actual house prices against the regression model predictions.
plot(housing_data[, 'house_price'], housing_data[, 'predictions'],
main = 'Regression Model Predictions',
xlab = 'House Price',
ylab = 'Prediction', pch = 16)
points(housing_data[housing_data[, 'in_cali'] == 1, 'house_price'],
housing_data[housing_data[, 'in_cali'] == 1, 'predictions'],
col = 'green', pch = 16)
points(housing_data[housing_data[, 'earthquake'] == 1, 'house_price'],
housing_data[housing_data[, 'earthquake'] == 1, 'predictions'],
col = 'red', pch = 16)
dev.off()
##################################################
# Add some regression lines to compare
# the predictions to the actual observations.
##################################################
# Plot the actual house prices against the regression model predictions.
plot(housing_data[, 'income'], housing_data[, 'house_price'],
main = 'Regression Model Predictions',
xlab = 'Income',
ylab = 'House Price', pch = 16)
points(housing_data[housing_data[, 'in_cali'] == 1, 'income'],
housing_data[housing_data[, 'in_cali'] == 1, 'house_price'],
col = 'green', pch = 16)
points(housing_data[housing_data[, 'earthquake'] == 1, 'income'],
housing_data[housing_data[, 'earthquake'] == 1, 'house_price'],
col = 'red', pch = 16)
# Use the lines() command to append to the above figure.
# You will need to create a vector of values on the line
# using the regression coefficients from the estimated model.
summary(lm_full_model)
coef(lm_full_model)
beta_0_hat <- coef(lm_full_model)['(Intercept)']
beta_income_hat <- coef(lm_full_model)['income']
beta_cali_hat <- coef(lm_full_model)['in_cali']
beta_earthquake_hat <- coef(lm_full_model)['earthquake']
# Draw a line for zip codes outside California.
income_grid <- seq(0.07, 0.13, by = 0.01)
reg_line_not_cali <- beta_0_hat + beta_income_hat*income_grid
lines(income_grid, reg_line_not_cali,
lwd = 3, col = 'black')
# Repeat for California without earthquakes (green)
reg_line_in_cali <- beta_0_hat +
beta_income_hat*income_grid +
beta_cali_hat
lines(income_grid, reg_line_in_cali,
lwd = 3, col = 'green')
# Repeat for California with earthquakes (red).
reg_line_earthquake <- beta_0_hat +
beta_income_hat*income_grid +
beta_cali_hat + beta_earthquake_hat
lines(income_grid, reg_line_earthquake,
lwd = 3, col = 'red')
# Again, so far the plot has printed to screen.
# Now use the setEPS and postscript functions to save the figure to a file.
fig_file_name <- 'regression.eps'
out_file_name <- sprintf('%s/%s', fig_dir, fig_file_name)
setEPS()
postscript(out_file_name)
plot(housing_data[, 'income'], housing_data[, 'house_price'],
main = 'Regression Model Predictions',
xlab = 'Income',
ylab = 'House Price', pch = 16)
points(housing_data[housing_data[, 'in_cali'] == 1, 'income'],
housing_data[housing_data[, 'in_cali'] == 1, 'house_price'],
col = 'green', pch = 16)
points(housing_data[housing_data[, 'earthquake'] == 1, 'income'],
housing_data[housing_data[, 'earthquake'] == 1, 'house_price'],
col = 'red', pch = 16)
# Plot regression lines.
lines(income_grid, reg_line_not_cali,
lwd = 3, col = 'black')
lines(income_grid, reg_line_in_cali,
lwd = 3, col = 'green')
lines(income_grid, reg_line_earthquake,
lwd = 3, col = 'red')
# The dev.off() closes the file for the plot.
dev.off()
##################################################
# End
##################################################
|
2bcefd12a26aac47f178dcfdc81bc5a275edd716
|
82f971819e9730c97b63fe69bca43c6e3c5b30fe
|
/09-2. 성별에 따른 월급 차이.R
|
7395c85ea58858531a009706bec745051e9ce9ec
|
[] |
no_license
|
xoyeon/Doit_R
|
1b762a690d4bca24e5184357d0b54ea19cd615ea
|
bd87b9b5497ef423eca6c8650197a2f5b8747f7a
|
refs/heads/main
| 2023-08-21T10:24:41.601246
| 2021-10-22T01:43:39
| 2021-10-22T01:43:39
| 404,265,867
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,745
|
r
|
09-2. 성별에 따른 월급 차이.R
|
# Income differences by sex (Korea Welfare Panel Study, 2015 wave).
# NOTE(review): this script assumes the packages providing read.spss()
# (foreign), rename()/filter()/group_by()/summarise()/%>% (dplyr) and
# qplot()/ggplot() (ggplot2) are already attached by an earlier script.

# Load the raw SPSS data as a data frame.
raw_welfare <- read.spss(file = 'Koweps_hpc10_2015_beta1.sav', to.data.frame = T, reencode='utf-8')

# Work on a copy so the raw data stays untouched.
Welfare <- raw_welfare

# Inspect the data.
head(Welfare)
tail(Welfare)
View(Welfare)
dim(Welfare)
str(Welfare)
summary(Welfare)

# Rename the survey item codes to readable variable names.
Welfare <- rename(Welfare,
                  sex = h10_g3,
                  birth = h10_g4,
                  marriage = h10_g10,
                  religion = h10_g11,
                  income = p1002_8aq1,
                  code_job = h10_eco9,
                  code_region = h10_reg7)

# --- Sex variable: inspection and preprocessing ---
class(Welfare$sex)
table(Welfare$sex)

# Check for out-of-range values.
table(Welfare$sex)

# "Don't know / no answer" is coded as 9 in the survey; recode it as missing.
Welfare$sex <- ifelse(Welfare$sex == 9, NA, Welfare$sex)

# Count missing values.
table(is.na(Welfare$sex))

# Attach readable labels (survey coding: 1 = male, 2 = female).
Welfare$sex <- ifelse(Welfare$sex == 1, "male", "female")
table(Welfare$sex)
qplot(Welfare$sex)

# --- Income variable: inspection and preprocessing ---
class(Welfare$income)
summary(Welfare$income)
qplot(Welfare$income)
qplot(Welfare$income) + xlim(0, 1000)  # zoom in on the 0-1000 range

# Check for out-of-range values.
summary(Welfare$income)

# Recode 0 and the "no answer" code 9999 as missing.
Welfare$income <- ifelse(Welfare$income %in% c(0, 9999), NA, Welfare$income)

# Count missing values.
table(is.na(Welfare$income))

# --- Mean income by sex ---
# BUG FIX: the original pipeline computed a single overall mean because it
# never grouped by sex, so the 'sex' column mapped in the plot below did
# not exist in sex_income. Group by sex before summarising.
sex_income <- Welfare %>%
  filter(!is.na(income)) %>%
  group_by(sex) %>%
  summarise(mean_income = mean(income))
sex_income

# Bar chart of mean income by sex.
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
|
a4824aa79644815714888d30f8e0541caa190567
|
54f9314cf3a933b39ae1316e1d1e78a21f7b8b56
|
/tests/testthat/helper-AlpacaforR.R
|
24925f8b038b288599312b1365f46d287ff680a4
|
[] |
no_license
|
tanho63/AlpacaforR
|
1b36bcd44a188bd73c908223708bfbdd78820196
|
d23df32fd337185c413dbeed8383bb0221a84034
|
refs/heads/master
| 2023-06-05T11:36:21.782384
| 2020-12-31T18:12:34
| 2020-12-31T18:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,629
|
r
|
helper-AlpacaforR.R
|
# Test helper: configure the vcr package (HTTP-request record/replay for
# testthat) before any tests run.
library("vcr")
# The working directory differs depending on how the tests are launched:
# from the package root ("AlpacaforR") paths must include tests/testthat/,
# whereas devtools/testthat itself runs from inside tests/testthat/.
# Point the cassette directory and log file at the right location either way.
if (basename(getwd()) == "AlpacaforR") {
vcr::vcr_log_file("tests/testthat/vcr.log")
invisible(vcr::vcr_configure(dir = "tests/testthat/vcr", log = TRUE, log_opts = list(file = "tests/testthat/vcr.log"), write_disk_path = "tests/testthat/vcr"))
} else {
vcr::vcr_log_file("vcr/vcr.log")
invisible(vcr::vcr_configure(dir = "vcr", log = TRUE))
}
# get all files that start with "test" in the testthat directory
# .fn <- list.files("tests/testthat", pattern = "^test", full.names = T)
# tests <- purrr::map(.fn %>% setNames(nm = basename(.)), ~{
# # read the file
# .lines <- readLines(.x)
# # get the line numbers of the test_that expressions.
# # script assumes that all tests are written in the format:
# # test_that("my test", {
# # [tests]...
# # })
# .b <- stringr::str_which(.lines, "test_that")
# # map over the lines of the test_that expressions and find the ends of the expression by counting open/closed bracket pairs
# .e <- purrr::map_int(.b, ~{
# .l <- 0
# # count open bracket/parentheticals
# .pc <- 1
# while (.pc != 0) {
# .l <- .l + 1
# .pc <- .pc + stringr::str_count(.lines[.x + .l], "\\{")
# .pc <- .pc - stringr::str_count(.lines[.x + .l], "\\}")
# }
# .e <- as.integer(.x + .l)
# })
# # return that list of beginnings and ending line numbers
# list(begin = .b, end = .e)
# })
# # if the number of beginnings and endings match
# if (all(purrr::map_lgl(tests, ~length(.x[[1]]) == length(.x[[2]])))) {
# # map over the beginnigs and endings, and the file names
# purrr::map2(tests, .fn, ~{
# # read in the file
# .lines <- readLines(.y)
# # start a counter that will be used to determine how many lines have been added to the vector of the file lines (.count * 2), 1 line for the beginning of use_cassette expression and 1 line for the end.
# .count <- 0
# purrr::walk2(.x$begin, .x$end, ~{
# # add use cassette
# .test <- stringr::str_extract(.lines[.x + .count * 2], "\"[^\"]+\"")
# # collapse the name into only it's alphanumeric characters and underscores (since vcr doesn't allow spaces/special chars)
# .test <- paste0(stringr::str_extract_all(.test, "[:alnum:]+")[[1]], collapse = "_")
# # if an appropriate name was not extracted bring up the browser
# browser(expr = is.na(.test) || identical("NA", .test))
# # if the use_that expression isnt already wrapped with use_cassette
# if (stringr::str_detect(.lines[.x + .count * 2], "use_cassette", negate = T)) {
# # append the use cassette expression one line above test_that
# .lines <<- append(.lines, paste0('vcr::use_cassette(\"',.test,"\", {"), after = .x - 1 + .count * 2)
# # and close it one line below the }) that closes test_That
# .lines <<- append(.lines, "})", .y + .count * 2)
# }
# # increment the counter
# .count <<- .count + 1
# })
# # show the entire document in the console
# cat(.lines, sep = "\n")
# # and pause with the browser to fix any issues before overwriting the file
# browser()
# write(.lines, .y)
# })
# }
#
# purrr::map2(tests, .fn %>% setNames(nm = basename(.)), ~{
# .lines <- readLines(.y)
# purrr::map_chr(.x$begin, ~{
# .test <- stringr::str_extract(.lines[.x - 1], "(?<=vcr::use_cassette\\()[^,]+")
# .fix <- paste0(stringr::str_extract_all(.test, "[:alnum:]+")[[1]], collapse = "_")
# .replace <- paste0("vcr::use_cassette(\"",.fix,"\", {")
# .lines[.x - 1] <<- .replace
# })
# cat(.lines, sep = "\n")
# browser()
# write(.lines, .y)
# })
|
4d201c67fa526f19201af1fec3e531fa894f554b
|
b0af05775cdeadd5941664062b4bff7005fc0927
|
/population_data/ISTATdataScript.R
|
6213fc0e0039787790cf06b43b03b806525cb290
|
[] |
no_license
|
timmmerlan/fossgis_project
|
a967197a79ff4138c9fd3a1457dd3740765d5423
|
f919d9d8f7bc2bce7a2c7861bce6bb9c3c908b90
|
refs/heads/master
| 2020-12-06T08:43:40.870424
| 2020-04-19T19:43:20
| 2020-04-19T19:43:20
| 232,413,289
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,122
|
r
|
ISTATdataScript.R
|
# script for cleaning the "raw" table of the population data from ISTAT
# NOTE(review): rm(list = ls()) and an absolute setwd() make this script
# destructive to the current session and non-portable -- consider removing
# both and running the script from its own directory instead.
rm(list = ls())
setwd("C:/Users/jnlpu/Documents/Studium/Geographie/5. Semester/FOSSGIS/Abschlussprojekt")
# load table (ISTAT census export, semicolon-separated)
TabelleUnbereinigt <- read.csv(file = "Censimento_2011_Indicatori_famiglie_per_Comuni_nella_regione_SICILIA.csv",
header = T, sep = ";")
# load package for select function; would also be possible with rename function of the same package only (had some issues with it though, possibly because of the original column names or because of spaces)
library(dplyr)
# select only necessary columns
# NOTE(review): the "??.." in the column names below looks like a character-
# encoding artifact (likely a BOM / non-ASCII character such as "à" in
# "Densità" mangled on read) -- confirm against names(TabelleUnbereinigt)
# before relying on this script; reading the CSV with an explicit
# fileEncoding may avoid the problem.
TabelleBereinigt <- select(TabelleUnbereinigt, ??..CodiceIstat, Nome, Ampiezza.demografica, Densit??..di.popolazione)
# renaming the columns to English labels
colnames(TabelleBereinigt)[1] <- "Postal Code"
colnames(TabelleBereinigt)[2] <- "Name"
colnames(TabelleBereinigt)[3] <- "Population"
colnames(TabelleBereinigt)[4] <- "Population Density"
# sanity check: count missing values across the cleaned table (0 expected)
sum(is.na(TabelleBereinigt))
# export as .csv (semicolon-separated, no row names)
write.table(TabelleBereinigt, file = "ISTATdata.csv", sep = ";", row.names = F)
|
d0a8e0758e50392dd76a81a93a9dcada727d84dc
|
0c9257f066b92c904af7bf891fefde48a97da6cc
|
/Script 8.R
|
a75555e29c45f596bc02d59ab196ed8ee2cb57fa
|
[] |
no_license
|
homayoun1990/R-Basic
|
5270079e1799706d4ab0d1604998a4cfc65693aa
|
45011fa36dfdb4b615b677917856b67cca5a57e8
|
refs/heads/master
| 2020-04-05T22:25:13.777470
| 2018-11-12T18:50:08
| 2018-11-12T18:50:08
| 157,255,827
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,065
|
r
|
Script 8.R
|
## Teaching script: one/two-sample t tests, variance test, normality checks,
## and a nonparametric alternative, using the built-in `sleep` data.
## Loading the datasets library explicitly, although the package is
## attached automatically when R starts.
library(datasets)
data(package = "datasets") # lists the datasets in a package
str(sleep) # shows the structure of an object
head(sleep) ## return some of the rows inside the sleep dataset
help(sleep) ## access help about the variables in the dataset
## Create a box plot comparing the two groups
plot(extra ~ group, data = sleep)
## Retrieve the `extra` values for each group and save the results in two
## objects (plain logical subsetting).
group1 = sleep$extra[sleep$group == 1]
group2 = sleep$extra[sleep$group == 2]
mean(group1)
mean(group2)
## One-sample t test. H0: population mean equals 0.5
##                    H1: population mean does not equal 0.5
t.test(x = group1 , y = NULL , mu = 0.5)
## Two-sample t test (pooled variance). H0: the two population means are equal
##                                      H1: the two population means differ
t.test(x = group1 , y = group2 , var.equal = TRUE)
## Welch t test (unequal variances). Same hypotheses as above, but does not
## assume the two populations share a variance.
t.test(x = group1 , y = group2 , var.equal = FALSE)
## To check whether the variances of the two populations are equal we can
## use var.test(), which performs an F test for the ratio of two variances.
## (NOTE: this is an F test, not Levene's test -- the original comment here
## was wrong. Levene's test is a different, more outlier-robust procedure.)
help(var.test)
var.test(x= group1 , y = group2, ratio = 1)
## Eyeball the distributions; the histograms suggest group1 and group2 may
## not be normally distributed.
hist(group1)
hist(group2)
## How to formally test for normality? Shapiro-Wilk. First sanity-check it
## on known normal (z1) and uniform (z2) samples.
help(shapiro.test)
z1=rnorm(100, mean = 5, sd = 3)
z2=runif(100, min = 2, max = 4)
shapiro.test(z1)
shapiro.test(z2)
shapiro.test(x = group1)
shapiro.test(x = group2)
## Although the Shapiro test indicates that the data are normally
## distributed, it is still a good idea to compare the two groups with a
## nonparametric test as a robustness check.
help(wilcox.test)
wilcox.test(x = group1, y = group2)
|
9f1536a3111bcb66ade5f33bf8d31d6c1432f5ac
|
f3e914e8a3ccb1c4d73555321e3eaf52b59f52e0
|
/R/3.4-course.R
|
9eacc272622e26a71e7246f1540d2d05ddc4bffa
|
[] |
no_license
|
youjia36313/learn_R
|
08be35ebc032839e8c25466c63ae5a0292069855
|
674de3d09e0e7dfec2d3e164ffab98e0c40ca597
|
refs/heads/master
| 2020-09-15T19:39:00.136679
| 2019-11-23T06:37:41
| 2019-11-23T06:37:41
| 223,541,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
r
|
3.4-course.R
|
df <- read.csv("RentData.csv")
#df[1:5,]
#mean(df$Rent.money)
#mean(df$Area)
#median(df$Rent.money)
#median(df$Area)
#there is no mode in R
#table(df$Room.layout)
#table(df$Structure)
x <- df$Rent.money
var(x)
#there is no varp/sample variance in R.only var/unbiased sample variance
varp <-function(x){var(x)*(length(x)-1)/length(x)}
varp(x)
sd(x)
sqrt(varp(x))
IQR(x)
quantile(x)
y<-df$Area
var(x,y)
((length(x)-1)/length(x))*var(x,y)
cor(x,y)
|
ec16d4007c84a3cc86c6adbc8b79febaffaed3da
|
688185e8e8df9b6e3c4a31fc2d43064f460665f1
|
/man/convert_txtCollection.Rd
|
ef2467722746a66630813539d58d415ca7806e44
|
[] |
no_license
|
IPS-LMU/emuR
|
4b084971c56e4fed9032e40999eeeacfeb4896e8
|
eb703f23c8295c76952aa786d149c67a7b2df9b2
|
refs/heads/master
| 2023-06-09T03:51:37.328416
| 2023-05-26T11:17:13
| 2023-05-26T11:17:13
| 21,941,175
| 17
| 22
| null | 2023-05-29T12:35:55
| 2014-07-17T12:32:58
|
R
|
UTF-8
|
R
| false
| true
| 1,601
|
rd
|
convert_txtCollection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emuR-convert_txtCollection.R
\name{convert_txtCollection}
\alias{convert_txtCollection}
\title{Converts a collection of audio files and plain text transcriptions into an emuDB}
\usage{
convert_txtCollection(
dbName,
sourceDir,
targetDir,
txtExtension = "txt",
mediaFileExtension = "wav",
attributeDefinitionName = "transcription",
cleanWhitespaces = TRUE,
verbose = TRUE
)
}
\arguments{
\item{dbName}{name of the new emuDB}
\item{sourceDir}{directory containing the plain text transcription files and media files}
\item{targetDir}{directory where the new emuDB will be stored}
\item{txtExtension}{file extension of transcription files}
\item{mediaFileExtension}{file extension of media files}
\item{attributeDefinitionName}{label name of the transcription items}
\item{cleanWhitespaces}{if true, any sequence of whitespaces in the transcription (including newlines and tabs)
is transformed into a single blank}
\item{verbose}{display progress bar}
}
\description{
This function takes as input pairs of media files (i.e. wav files) and plain text
transcriptions files. It creates a new emuDB with one bundle per media file, and
turns the associated transcription into an item in that bundle. For this purpose,
media files and text files belonging to the same bundle must be named identically
(with the exception of their respective file extensions). The newly created
emuDB is stored in the target directory, and its handle is returned.
}
\seealso{
convert_BPFCollection, convert_TextGridCollection
}
|
b997d6ec16cf45d177eb0664c641add0fc8aba2f
|
c0befdac32dd86f06994c71eb80cab99cb3e5c6a
|
/man/agedotoliths.Rd
|
1f1c023b88e8e45c6257e2b06b8ac5573947c4f1
|
[] |
no_license
|
aaronmberger-nwfsc/hakedataUSA
|
8180602ae01a47f85ad0a6166341db687e5c2fcb
|
f6ee60568885f670a559502e1728b00f5d90ed5b
|
refs/heads/master
| 2023-02-11T17:09:25.867759
| 2021-01-07T05:37:34
| 2021-01-07T05:52:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 686
|
rd
|
agedotoliths.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/agedotoliths.R
\name{agedotoliths}
\alias{agedotoliths}
\title{Summary of NORPAC Otoliths by Year}
\usage{
agedotoliths(agedata)
}
\arguments{
\item{agedata}{A data frame of NORPAC ages, often called atsea.ages.}
}
\value{
A data frame of proportions by year and month. The
function also saves a summary file (csv format) to the disk
with sample sizes from which proportions can be calculated.
}
\description{
Create a proportion table of age samples that have been aged
for the NORPAC data. This information is normally given to the
stakeholders at the December JTC meeting.
}
\author{
Kelli Faye Johnson
}
|
cfdfb3e879c77e9efe4444b6d46b8e8f35833747
|
e2f16485aa15699c8f8f0784215c081b497c2647
|
/gisssurface/server.R
|
70d99c5d555b51584180393def4e673c6c24322e
|
[] |
no_license
|
Unsa15120/shinyproject
|
f0f1a90712f9d706526fbf48b51f8d4eb08c6227
|
cad5edf1382c370e26cd3a14ed0438401bf98bd6
|
refs/heads/master
| 2020-07-10T06:25:48.413964
| 2019-08-25T21:28:26
| 2019-08-25T21:28:26
| 204,192,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,925
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(plotly)
library(shinythemes)
# Downloading and Cleaning Data
#Data 1 = Global
globalData<- read.csv("global.csv",header = FALSE,sep = ",",skip = 3,na.strings = "***")
cnames <-readLines("global.csv",2)
cnames<-strsplit(cnames,",",fixed = TRUE)
names(globalData) <- cnames[[2]]
dataGlobal <- na.omit(globalData)
#Data 2 = North
northData<- read.csv("noth.csv",header = FALSE,na.strings = "***",skip = 3)
nnames <- readLines("noth.csv",2)
nnames <- strsplit(nnames,",",fixed = TRUE)
names(northData)<- nnames[[2]]
dataNorth <- na.omit(northData)
#Data 3 = South
soutData <- read.csv("south.csv",header = FALSE,na.strings = "***",skip = 3)
snames <- readLines("south.csv",2)
snames <- strsplit(snames,",",fixed = TRUE)
names(soutData) <- snames[[2]]
dataSouth <- na.omit(soutData)
#Data 4 = Zonal
zonalData <- read.csv("zonal.csv",header = FALSE,na.strings = "***",skip = 3)
znames <- readLines("zonal.csv",1)
znames<- strsplit(znames,",",fixed = TRUE)
names(zonalData) <- znames[[1]]
dataZonal <- na.omit(zonalData)
server <- function(input, output) {

  # Column indices of the month columns the user selected.
  cols <- reactive({
    as.numeric(c(input$var))
  })

  # Human-readable plot title for the selected dataset.
  # FIX: the original used four consecutive, non-exclusive if() blocks and
  # failed with "object 'lable' not found" for any unexpected input value;
  # switch() maps each choice directly (and yields NULL otherwise).
  mylabel <- reactive({
    switch(input$poleInput,
           dataGlobal = "Plot for Global Data",
           dataSouth  = "Plot for South Pole Data",
           dataNorth  = "Plot for North Pole Data",
           dataZonal  = "Plot for Zonal Data")
  })

  # Data filtered to the selected dataset, year range and month columns.
  myFinalData <- reactive({
    #------------------------------------------------------------------
    # Select the dataset matching the radio-button choice.
    mydata <- switch(input$poleInput,
                     dataGlobal = dataGlobal,
                     dataSouth  = dataSouth,
                     dataNorth  = dataNorth,
                     dataZonal  = dataZonal)
    #------------------------------------------------------------------
    # Keep only rows inside the selected year range.
    mydata1 <- mydata[mydata$Year >= input$YearRange[1], ]   # from year
    mydata1 <- mydata1[mydata1$Year <= input$YearRange[2], ] # to year
    #------------------------------------------------------------------
    # Keep the Year column (column 1) plus the selected month columns.
    mydata2 <- mydata1[, c(1, sort(cols()))]
    #------------------------------------------------------------------
    data.frame(mydata2)
  })

  # "Data" tab: print the filtered table.
  output$displayData <- renderPrint({
    myFinalData()
  })

  # "Structure" tab.
  renderstr <- reactive({ str(myFinalData()) })
  output$struct <- renderPrint({
    renderstr()
  })

  # "Summary" tab.
  rendersumry <- reactive({ summary(myFinalData()) })
  output$sumry <- renderPrint({
    rendersumry()
  })

  # Base scatterplot matrix of the selected columns.
  output$mygraph <- renderPlot({
    plotdata <- myFinalData()
    plot(plotdata, col = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),
         main = mylabel(), pch = 16, cex = 2)
  })

  # Interactive 3-D scatterplot (plotly).
  output$mygraph2 <- renderPlotly({
    plotdata2 <- myFinalData()
    plot_ly(plotdata2, x = input$poleInput, y = input$var, size = 20, sizes = 20,
            z = input$var, type = "scatter3d", mode = "markers",
            color = input$var,
            colors = c('#E74292', '#01CBC6', '#BB2CD9', '#1287A5', '#F5BCBA',
                       '#00CCCD', '#487EB0', '#218F76', '#0A3D62', '#E1DA00',
                       '#FAC42F', '#4C4B4B'),
            marker = list(symbol = 'circle', sizemode = 'diameter',
                          sizes = 20, size = 20))
  })
}
|
9ff02f377d3d767d7a87e9a993e984aef6ed8b4f
|
3693150470d1dce04403f8dad6b2ef0b092d6020
|
/R/FacetMuiPlotresultBar.R
|
29b5698d1bd0346064ff8c1929aa2c2348fc3ef4
|
[] |
no_license
|
Jayoel/EasyAovWlxPlot
|
f77b11098be7fe8ffcf61e628d96632f78789731
|
46cad6119526193a3cdd260c22c1f434cd4f530f
|
refs/heads/master
| 2021-05-17T23:09:54.761864
| 2020-02-26T03:13:24
| 2020-02-26T03:13:24
| 250,995,058
| 2
| 0
| null | 2020-03-29T09:24:17
| 2020-03-29T09:24:16
| null |
UTF-8
|
R
| false
| false
| 3,937
|
r
|
FacetMuiPlotresultBar.R
|
# \item{data}{输入数据框,第一列为样本编号,第二列为分组,注意分组标签必须设定为group,第三列以后就是测定或者收集的指标了}
#
# \item{num}{代表您想要进行统计的列,这里可以输入多个列,只需要指定列号即可:例如:num = c(4:6)}
#
# \item{sig_show}{代表差异展示方式;sig_show ="abc"是使用字母表示;sig_show ="line"是使用连线和星号表示;如果是NA,那么就不显示显著性结果}
#
# \item{result}{代表显著性差异分析结果,是一个数据框,每一列是显著性标记字母,MuiKwWlx}
# \item{ncol}{代表分面展示每一行放几张图}
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Install Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
data(data_wt)
# # #使用案例
# result = MuiKwWlx(data = data_wt,num = c(4:6))
# result
# result1 = FacetMuiPlotresultBar(data = data_wt,num = c(4:6),result = result,sig_show ="abc",ncol = 2 )
# result1[[1]]
##-----------现在我们需要做多组分面图形--第二种, 柱状图
# num = c(4:6)
# N =4
# Faceted bar chart of group means with SD error bars and significance
# letters, one facet per measured variable.
#
# Arguments (from the header comments above):
#   data     - data frame; column 1 = sample id, column 2 = group (must be
#              named 'group'), remaining columns = measured variables.
#   num      - integer column indices of the variables to plot, e.g. c(4:6).
#   result   - data frame of significance letters, one column per variable
#              (as produced by MuiKwWlx); row names are the group labels.
#   sig_show - "abc" for letter annotations, "line" for line+star, NA = none.
#   ncol     - number of facet columns per row.
# Returns list(plot, table = per-group mean/SD/letters used for plotting).
# NOTE(review): relies on the sibling helper aovMuiBarPlot(), whose second
# list element is assumed to be a data frame with columns group/mean/SD/groups
# -- confirm against aovMuiBarPlot's definition.
FacetMuiPlotresultBar = function(data = data_wt,num = c(4:6),result = result,sig_show ="abc",ncol = 3 ){
# Shared ggplot theme: no grid, bold axis text, no legend.
Mytheme <- theme_bw()+
# scale_fill_manual(values = mi, guide = guide_legend(title = NULL))+
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.title = element_text(vjust = -8.5,hjust = 0.1),
axis.title.y =element_text(size = 20,face = "bold",colour = "black"),
axis.title.x =element_text(size = 24,face = "bold",colour = "black"),
axis.text = element_text(size = 20,face = "bold"),
axis.text.x = element_text(colour = "black",size = 14),
axis.text.y = element_text(colour = "black",size = 14),
legend.text = element_text(size = 15,face = "bold"),
legend.position = "none"# whether to drop the legend
)
data_wt = data
# Handle the first requested variable outside the loop to seed the
# accumulated plotting table A.
N = num[1]
name = colnames(data_wt[N])
# Pull this variable's significance letters out of `result` and reshape
# them into a two-column frame (groups = letter, group = group label).
as = result[match( name,colnames(result))]
colnames(as) = "groups"
as$group = row.names(as)
PlotresultBar = aovMuiBarPlot(data = data_wt, i= N,sig_show =sig_show,result = as)
# Element [[2]] is the summary table (mean/SD per group) for this variable.
p = PlotresultBar[[2]]
p
name = colnames(data_wt[N])
# Tag the rows with the variable name so facet_wrap can split on it.
p$name = name
A = p
# Repeat for the remaining variables, stacking the summary tables.
for (N in num[-1]) {
name = colnames(data_wt[N])
as = result[match( name,colnames(result))]
colnames(as) = "groups"
as$group = row.names(as)
PlotresultBox = aovMuiBarPlot(data = data_wt, i= N,sig_show =sig_show,result = as)
p = PlotresultBox[[2]]
p
name = colnames(data_wt[N])
p$name = name
A = rbind(A,p)
}
head(A)
# a = max(A$SD)*1.2
# Build the faceted bar chart: bars = group means, error bars = +/- SD,
# text labels = significance letters placed at the top of each error bar.
p<-ggplot(A, aes(x=group , y=mean ))+
geom_bar(aes(colour= group,fill = group),stat = "identity", width = 0.4,position = "dodge") +
geom_errorbar(aes(ymin=mean - SD,
ymax=mean+SD),
colour="black",width=0.1,size = 1)+
scale_y_continuous(expand = c(0,0))+#,limits = c(0,a)
labs(
# x=paste(name_i,"of all group", sep = "_"),
# y=name_i
# title = paste("Normality test",p1,"Homogeneity of variance",p2,sep = ":")
) +
theme_classic()+
geom_text(data=A, aes(x=group , y=mean+SD ,label=groups))+
guides(color=guide_legend(title = NULL),shape=guide_legend(title = NULL))+facet_wrap(.~name,scales="free_y",ncol = ncol)
p
p=p+Mytheme
p
# With more than 3 groups, slant the x-axis labels so they do not overlap.
if (length(unique(data_wt$group))>3){ p=p+theme(axis.text.x=element_text(angle=45,vjust=1, hjust=1))}
p
# path = "./Muibar_Facet/"
# dir.create(path)
# FileName <- paste(path,name,"Facet_bar", ".pdf", sep = "_")
# ggsave(FileName, p, width = 8, height = 8)
return(list(p,table = A))
}
|
0acbef9c362fc3bcd587204d651b94755f74eb45
|
73eec22a33e4f2f08a61cc3e5c8c5a2883009d73
|
/man/num.samples.Rd
|
6a8b96913b84b85fdef9f2b5c9121023a4ad260a
|
[] |
no_license
|
itsrainingdata/sparsebnUtils
|
958ec179724d75728dfd03a40bbde718f68cc0cc
|
a762b74dda916956d16e2654463736e55b57be0b
|
refs/heads/master
| 2020-04-06T06:36:08.237032
| 2019-05-29T11:04:07
| 2019-05-29T11:04:07
| 50,886,867
| 3
| 2
| null | 2017-04-10T22:45:30
| 2016-02-02T02:05:24
|
R
|
UTF-8
|
R
| false
| true
| 991
|
rd
|
num.samples.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3-generics.R, R/s3-sparsebnData.R,
% R/s3-sparsebnFit.R, R/s3-sparsebnPath.R
\name{num.samples}
\alias{num.samples}
\alias{num.samples.sparsebnData}
\alias{num.samples.sparsebnFit}
\alias{num.samples.sparsebnPath}
\title{num.samples}
\usage{
num.samples(x)
\method{num.samples}{sparsebnData}(x)
\method{num.samples}{sparsebnFit}(x)
\method{num.samples}{sparsebnPath}(x)
}
\arguments{
\item{x}{a \code{\link{sparsebnFit}} or \code{\link{sparsebnPath}} object.}
}
\value{
Number of samples as \code{integer}.
}
\description{
Extracts the number of samples used to estimate the associated object.
}
\section{Methods (by class)}{
\itemize{
\item \code{sparsebnData}: Extracts the number of samples of \link{sparsebnData} object.
\item \code{sparsebnFit}: Extracts the number of samples of \link{sparsebnFit} object.
\item \code{sparsebnPath}: Extracts the number of samples of \link{sparsebnPath} object.
}}
|
05534ab248d93eb54787b7f53dbf876da362122c
|
0dbd60b634c090f2153f21f945fb306495a67df6
|
/R/ROMS_COBALT/call_make_ROMS_files.R
|
9b7e6589717fd6ae7aea02b0b6e8af48af5916eb
|
[] |
no_license
|
wechuli/large-pr
|
afa8ec8535dd3917f4f05476aa54ddac7a5e9741
|
5dea2a26fb71e9f996fd0b3ab6b069a31a44a43f
|
refs/heads/master
| 2022-12-11T14:00:18.003421
| 2020-09-14T14:17:54
| 2020-09-14T14:17:54
| 295,407,336
| 0
| 0
| null | 2020-09-14T14:17:55
| 2020-09-14T12:20:52
|
TypeScript
|
UTF-8
|
R
| false
| false
| 3,125
|
r
|
call_make_ROMS_files.R
|
## Batch driver: convert yearly ROMS-COBALT NetCDF averages into Atlantis
## forcing files.  Each year is staged from the external drive to a local
## disk, processed with make_ROMS_files(), and the products are copied back
## to the external drive before the staged inputs are deleted to free
## local space.
source(here::here('R', 'make_ROMS_files_2.R'))

dir.names <- 1981:1983

for (yr in seq_along(dir.names)) {
  # Make sure the per-year folder exists on the external output drive
  if (!dir.names[yr] %in% dir('D:/Output')) {
    dir.create(paste0('D:/Output/', dir.names[yr]))
  }

  # Source (external), local staging (in/out) and final output directories
  orig.dir         <- paste0('D:/NWA_Revised/', dir.names[yr], '/')
  local.dir        <- 'C:/Users/joseph.caracappa/Documents/Atlantis/ROMS_COBALT/ROMS_IN/'
  local.output.dir <- 'C:/Users/joseph.caracappa/Documents/Atlantis/ROMS_COBALT/ROMS_OUT/'
  final.output.dir <- paste0('D:/Output/', dir.names[yr], '/')

  # Stage this year's input files on the local disk; skip the (timed) copy
  # if every file is already present locally
  files2copy.in       <- list.files(orig.dir, paste0('neusNWA_Cob10_avg_', dir.names[yr], '_*'), full.names = TRUE)
  files2copy.in.short <- list.files(orig.dir, paste0('neusNWA_Cob10_avg_', dir.names[yr], '_*'), full.names = FALSE)
  files.in.local      <- list.files(local.dir, paste0('neusNWA_Cob10_avg_', dir.names[yr], '_*'), full.names = FALSE)
  if (!all(files2copy.in.short %in% files.in.local)) {
    tictoc::tic()
    file.copy(files2copy.in, local.dir)
    tictoc::toc()
  }

  # Per-year local output folder
  if (!dir.names[yr] %in% dir(local.output.dir)) {
    dir.create(paste0(local.output.dir, dir.names[yr]))
  }

  # Build the Atlantis forcing files: physics, lower-trophic-level and
  # nutrient variables are produced; horizontal fluxes are disabled.
  make_ROMS_files(
    roms.dir = local.dir,
    roms.prefix = paste0('neusNWA_Cob10_avg_', dir.names[yr], '*'),
    roms.files = files2copy.in.short,
    out.dir = paste0(local.output.dir, dir.names[yr], '/'),
    dz.file = 'C:/Users/joseph.caracappa/Documents/GitHub/neus-atlantis/Geometry/dz.csv',
    bgm.file = 'C:/Users/joseph.caracappa/Documents/GitHub/neus-atlantis/Geometry/neus_tmerc_RM2.bgm',
    shp.file = 'C:/Users/joseph.caracappa/Documents/GitHub/neus-atlantis/Geometry/Neus_ll_0p01.shp',
    name.out = 'roms_cobalt_v10_',
    make.hflux = FALSE,
    make.physvars = TRUE,
    make.ltlvars = TRUE,
    make.nutvars = TRUE
  )

  # Move the products to the external drive, then reclaim local staging
  # space (gc() releases NetCDF handles held by the R session first)
  files2copy.out <- list.files(paste0(local.output.dir, dir.names[yr], '/'), full.names = TRUE)
  file.copy(files2copy.out, final.output.dir, overwrite = TRUE)
  gc()
  file.remove(dir(local.dir, full.names = TRUE))

  print(paste0('################## ', yr, ' #################'))
}
|
794570a81f650f1218b7f42215480a877eddfc47
|
0055c9911455887f73d902cb934bd32969208410
|
/dfest_2 step.R
|
40df1c279e9a02ce4fb125c0f2fa52b692be8d3a
|
[] |
no_license
|
christinaschang/2018-Datafest-Munich
|
e5e05efe39cd8c60b0cb5ab27c5ea1e9e4de256e
|
614fdaf15fb53615436d4190fe7a3ade3a0153c4
|
refs/heads/master
| 2021-04-15T14:47:12.132172
| 2018-04-20T14:35:01
| 2018-04-20T14:35:01
| 126,637,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,021
|
r
|
dfest_2 step.R
|
## Datafest 2018: exploratory analysis of network connection logs --------
library(readr)
library(tidyverse)
library(ggplot2)
library(plotly)
library(RColorBrewer)
library(lubridate)
library(rworldmap)
library(scales)

setwd("/Users/Berlin/Desktop/datafest2018_data_and_documentation/data")
conn1 <- read_csv("conn_posix.csv")
View(conn1)

## Heatmap: events by weekday and hour -----------------------------------
conn1$weekday <- wday(conn1$ts_POS)
conn1$cweekday <- wday(conn1$ts_POS, label = TRUE)
# Minutes since midnight, then binned into hours 1..24
conn1$tod <- as.numeric(conn1$ts_POS - as.POSIXct(strftime(conn1$ts_POS, format = "%Y-%m-%d"))) / 60
conn1$bins <- cut(conn1$tod, breaks = 1:24, labels = FALSE)
counts <- aggregate(X1 ~ bins + weekday, conn1, length)
colnames(counts)[ncol(counts)] <- "Events"
View(conn1)

# Event counts per weekday.
# FIX: the original piped a grouped data frame into table() and grouped by
# `day`, a column that is only created further below; a plain table() call
# produces the counts shown in the comment.
table(conn1$cweekday)
# Sun Mon Tue Wed Thu Fri Sat
# 103268 117269 113852 436644 170431 208713 96291

ggplot(counts, aes(x = bins, y = 8 - weekday)) +
  geom_tile(aes(fill = Events)) +
  scale_fill_gradientn(colours = brewer.pal(9, "YlOrRd"),
                       breaks = seq(0, max(counts$Events), by = 100)) +
  scale_y_continuous(breaks = 7:1, labels = c("Sat", "Sun", "Mon", "Tues", "Wed", "Thurs", "Fri")) +
  labs(x = "Time of Day (hours)", y = "Day of Week") +
  coord_fixed()

## Tiles by weekday & month ----------------------------------------------
conn1$day1 <- day(conn1$ts_POS)
conn1$week <- week(conn1$ts_POS)
conn1$month <- month(conn1$ts_POS)
conn1$year <- year(conn1$ts_POS)
counts1 <- aggregate(X1 ~ week + month, conn1, length)
colnames(counts1)[ncol(counts1)] <- "Events1"
# NOTE(review): `day1` is not a column of counts1 (only week, month,
# Events1), so this aesthetic mapping cannot resolve — verify intent.
ggplot(counts1, aes(day1, week)) +
  geom_tile(aes(fill = Events1)) +
  scale_fill_gradientn(colours = brewer.pal(9, "YlOrRd"),
                       breaks = seq(0, max(counts$Events), by = 100))

# FIX: the original put `+` at the start of a line, which ends the ggplot
# expression early and leaves the remaining layers as a dead unary-plus
# expression; the chain is joined with trailing `+` here.
ggplot(conn1, aes(week, month, z = day1)) +
  geom_tile(aes(fill = day1)) +
  scale_y_continuous(breaks = 7:1, labels = c("Sun", "Mon", "Tues", "Wed", "Thurs", "Fri", "Sat")) +
  labs(x = "Time of Day (hours)", y = "Day of Week") +
  coord_fixed()

## Connection durations (Wednesday & Friday spikes) ----------------------
class(conn1$duration)
conn1$duration <- as.numeric(conn1$duration)
conn1$duration_minutes <- conn1$duration / 60
summary(conn1$duration_minutes)
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# 0.0 0.0 0.0 0.2 0.2 42.6 837801
# NOT helpful

p <- ggplot(conn1, aes(x = cweekday, y = duration)) +
  geom_boxplot()
ggplotly(p)

# duration over 1 minute
# NOTE(review): installing packages inside an analysis script is
# discouraged; move this to a setup script.
install.packages("rworldmap")
n <- joinCountryData2Map(conn1, joinCode = "N", nameJoinColumn = "country")
h <- hist(conn1$ts_POS, breaks = 32)
conn1$day <- as.numeric(conn1$ts_POS - as.POSIXct(strftime(conn1$ts_POS, format = "%Y-%m-%d")))
hist(conn1$ts_POS, breaks = "days", # or weeks
     col = "red", main = "Histogram of Connections",
     xlab = "Timestamp", ylab = "Frequency", freq = TRUE)
# Overwrites the numeric `day` above with a "YYYY/MM/DD" string
conn1$day <- strftime(conn1$ts_POS, "%Y/%m/%d")
View(conn1)
sort(table(conn1$day))
# FIX: `identity` is not a geom_bar() parameter (it raised an "unknown
# parameters" warning); counting rows per day is geom_bar's default stat.
ggplot(conn1, aes(day)) +
  geom_bar(stat = "count")

# Daily connection counts as a bar chart
freqs <- aggregate(conn1$ts_POS, by = list(conn1$ts_POS), FUN = length)
View(freqs)
freqs$names <- as.Date(freqs$Group.1, format = "%Y-%m-%d")
freqs %>%
  group_by(names) %>%
  ggplot(., aes(x = names, y = x, fill = x)) + geom_bar(stat = "identity") +
  ylab("Frequency") + xlab("Year and Month") +
  scale_x_date(labels = date_format("%Y-%b")) +
  scale_y_continuous(labels = comma) +
  scale_fill_gradient()

## Connections over time, split by connection state -----------------------
class(conn1$conn_state)
conn1$day <- strftime(conn1$ts_POS, "%Y/%m/%d")
conn1$day <- as.Date(conn1$day)
conn1$conn_state <- as.factor(conn1$conn_state)
conn1 %>%
  group_by(day, conn_state) %>%
  mutate(total = n()) %>%
  ggplot(., aes(x = day, y = total, group = conn_state)) +
  geom_line(aes(color = conn_state)) +
  ylab("Freq") + xlab("Year and Month") +
  ggtitle("Frequency of Connection by Status") +
  scale_x_date(labels = date_format("%Y-%b")) +
  scale_y_continuous(labels = comma)

# aggregated
ggplot(conn1, aes(x = day)) +
  geom_histogram(aes(color = "red", fill = "red")) +
  ylab("Freq") + xlab("Year and Month") +
  ggtitle("Frequency of Connection by Date") +
  scale_x_date(labels = date_format("%Y-%b")) +
  scale_y_continuous(labels = comma)
View(conn1)
# FIX: `spike_plot` is never defined anywhere in this script, so this call
# always errored; disabled pending a definition.
# ggplotly(spike_plot)
write.csv(conn1, file = "conn2.csv")
# NOTE(review): `temp` is not created in this script — verify the source
# of this column before relying on the plot.
ggplot(conn1, aes(x = day, y = temp)) +
  geom_line(aes(group = day, fill = day, color = day)) +
  stat_summary(fun.y = mean, na.rm = TRUE, group = 3, color = 'black', geom = 'line')
write.csv(freqs, file = "freqs.csv")

## Drill into the two spike days -----------------------------------------
conn1 %>%
  filter(day == "2014/10/24") -> oct24 # friday
# FIX: the original called View(oct23), an undefined object; oct24 was
# clearly intended (created on the line above).
View(oct24)
sort(table(oct24$id.orig_h))
conn1 %>%
  filter(day == "2015/04/29") -> apr29 # wednesday
sort(table(apr29$id.orig_h))

# Summary of the two spikes and their dominant source IPs
spikes <- cbind(c("2015/04/29","2014/10/24"),
                c("331266", " 90849"),
                c("5.61.38.11", "84.151.62.120"),
                c("327711","85040"),
                c("Germany","Germany"))
# NOTE(review): "ip_adress" is misspelled, but the name is kept because
# downstream code may reference it.
colnames(spikes) <- c("date","connections_total","ip_adress","ip_counts","geo_location")
spikes <- as.data.frame(spikes, stringsAsFactors = FALSE)
spikes$connections_total <- as.numeric(spikes$connections_total)
spikes$ip_counts <- as.numeric(spikes$ip_counts)
spikes <- transform(spikes, ip_percent = ip_counts / connections_total)

# Detailed Info Retrieval
conn1$id.orig_h[conn1$id.orig_h == "192.168.0.12"]
|
4b47ac4730384ebfd467f92101c7706d08403435
|
11923a0d573d8a87f5b7a16d443722367594b223
|
/AMI_code/explain/scfa_mol/scfa_explain_af.R
|
afca8f4968323a398f0631299a1fbc78d2df06e3
|
[] |
no_license
|
BioLcl/AMI
|
ed6a932d72ac836984e8dacf72ea3048aa0fbd6b
|
295e3c1b4b4ce6b90ec4f56313e6606279c0e72b
|
refs/heads/main
| 2023-02-24T03:14:16.987204
| 2021-01-24T09:55:36
| 2021-01-24T09:55:36
| 332,409,641
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 5,421
|
r
|
scfa_explain_af.R
|
library(psych)
# Rank-based inverse normal transformation.
# Values of x are mapped onto standard-normal quantiles via their
# mid-ranks; NAs stay in place and are excluded from the sample size.
int <- function(x) {
  n_obs <- sum(!is.na(x))
  mid_ranks <- rank(x, na.last = "keep") - 0.5
  qnorm(mid_ranks / n_obs)
}
setwd("D:/FangCloudV2/Zheng lab/Zheng lab共享资料/刘成林/ACS/ACS_code/explain/scfa_mol")
## Explained variance of SCFA molar ratios by microbiome features.
## LASSO (glmnet) models are fit 100 times for each of the 6 SCFA molar
## ratios using species + pathway abundances as predictors, recording
## dev.ratio (fraction of deviance explained) each time.  The identical
## pipeline is run for all samples, cases (IDs containing "A") and
## controls (IDs containing "B"), so it is factored into one helper
## instead of three copy-pasted sections.
library(glmnet)
library(broom)

# Run the explained-variance pipeline for one sample subset.
#   group_pattern: regexp matched against sample IDs to keep a subgroup;
#                  NULL keeps every sample.
#   raw_out:  CSV receiving the 100 x 6 table of individual dev.ratio values.
#   mean_out: CSV receiving the per-ratio means over the 100 repeats.
run_scfa_explain <- function(group_pattern, raw_out, mean_out) {
  # SCFA molar ratios (responses), one row per sample
  meta <- read.delim("scfa_mol_ratio.txt", row.names = 1, sep = '\t',
                     stringsAsFactors = FALSE, check.names = FALSE)
  meta <- as.data.frame(meta)
  if (!is.null(group_pattern)) {
    meta <- meta[grep(group_pattern, rownames(meta)), ]
  }
  meta$ID <- rownames(meta)

  ### top75 pathway abundances (predictors)
  pathway <- read.delim("pathway_rand.txt", row.names = 1, sep = '\t',
                        stringsAsFactors = FALSE, check.names = FALSE)
  pathway <- as.data.frame(pathway)
  pathway$ID <- rownames(pathway)

  ### prevalence >10 species abundances (predictors)
  genus <- read.delim("species_rand.txt", row.names = 1, sep = '\t',
                      stringsAsFactors = FALSE, check.names = FALSE)
  genus <- as.data.frame(genus)
  genus$ID <- rownames(genus)

  # Combine the two predictor tables, then keep only samples present in
  # both predictors and responses
  genus <- merge(genus, pathway, by = "ID", all = FALSE)
  data_meta <- merge(meta, genus, by = "ID", all = FALSE)
  daixie <- data_meta[, 2:7]              # the 6 SCFA molar ratios
  bac <- data_meta[, 8:ncol(data_meta)]   # all microbiome predictors

  # Rank-based inverse normal transform of every column (int() above)
  for (i in seq_len(ncol(bac))) {
    bac[, i] <- int(bac[, i])
  }
  for (i in seq_len(ncol(daixie))) {
    daixie[, i] <- int(daixie[, i])
  }

  bac_mat <- as.matrix(bac)
  explain_table <- matrix(NA, nrow = 100, ncol = ncol(daixie))
  colnames(explain_table) <- colnames(daixie)

  # 100 repeats: lambda chosen by cross-validation, model refit at
  # lambda.min; dev.ratio is the fraction of (Gaussian) deviance explained
  for (j in 1:100) {
    for (i in seq_len(ncol(daixie))) {
      y <- as.matrix(daixie[, i])
      cv_fit <- cv.glmnet(x = bac_mat, y = y, nlambda = 1000, alpha = 1,
                          family = "gaussian")
      fit_min <- glmnet(x = bac_mat, y = y, alpha = 1,
                        lambda = cv_fit$lambda.min, family = "gaussian")
      explain_table[j, i] <- fit_min$dev.ratio
    }
  }

  ### 100 individual results
  write.csv(explain_table, raw_out, row.names = FALSE)
  ### mean over the 100 repeats
  write.csv(t(as.data.frame(colMeans(explain_table))), mean_out, row.names = FALSE)
}

### all samples
run_scfa_explain(NULL,
                 "all_explin_table_scfa_mol_100_af.csv",
                 "all_explin_table_scfa_100_mol_mean_af.csv")
### cases (sample IDs containing "A")
run_scfa_explain("A",
                 "Case_explin_table_scfa_mol_100_af.csv",
                 "Case_explin_table_scfa_100_mol_mean_af.csv")
### controls (sample IDs containing "B")
run_scfa_explain("B",
                 "Control_explin_table_scfa_mol_100_af.csv",
                 "Control_explin_table_scfa_100_mol_mean_af.csv")
|
1c69bce8d4be30cc6c2f15275fddc8a500b7ff5d
|
0d40400d131f04630afba060bdab5c80f6b4df87
|
/Anova_video_problems.R
|
f3357db8f3c495bd6574a0b8cb632f386e3481b3
|
[] |
no_license
|
tpaarth/R_code
|
5d6239fc485587e9000884fd690c96bc824375ff
|
a8937bf8c601330ab293651977480fc9c019bade
|
refs/heads/master
| 2021-06-22T16:59:10.392499
| 2021-03-28T11:03:46
| 2021-03-28T11:03:46
| 203,936,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,814
|
r
|
Anova_video_problems.R
|
## One-factor ANOVA ------------------------------------------------------
setwd("C:/Users/paart/Documents/PGP_BABI/R_Programming/datasets")
library(plyr)
library(ggplot2)
library(lattice)
library(MASS)

# Golf ball carry distance by ball design.
# (attach(golf_data) removed: every model call below passes data=
# explicitly, so attaching only polluted the search path.)
golf_data <- read.csv('Golfball.csv', header = TRUE)
golf_data

# One-way ANOVA: does ball Design affect Distance?
model <- aov(Distance ~ Design, data = golf_data)
summary(model)
print(summary(model), digits = 6)

# Post Hoc Analysis: which design pairs differ (Tukey's HSD)
TukeyHSD(model)

## Two-factor ANOVA ------------------------------------------------------
setwd('C:/Users/paart/Documents/PGP_BABI/Handouts/Advanced Statistics/Reference/ANOVA Datasets & Presentations')

# Read the data
paul_newfood_csv <- read.csv('paul-newfood.csv')
paul_newfood_csv

# Prepare the data: treatment levels must be factors, not integers
str(paul_newfood_csv)
paul_newfood_csv$PriceLevel <- as.factor(paul_newfood_csv$PriceLevel)
paul_newfood_csv$AdLevel <- as.factor(paul_newfood_csv$AdLevel)
str(paul_newfood_csv)

# 1. Is Sales normally distributed?
shapiro.test(paul_newfood_csv$Sales)
# Inference: p-value = 0.01969 < 0.05, so H0 (normality) is rejected --
# Sales is not normally distributed.

# 2. Two groups, non-normal data -> Mann-Whitney (Wilcoxon rank-sum) test
wilcox.test(paul_newfood_csv$Sales ~ paul_newfood_csv$AdLevel)
# Inference: p-value = 0.4095 > 0.05, so H0 is NOT rejected -- no
# significant difference in Sales between the AdLevel groups.
# (FIX: the original comment claimed "AdLevel impacts Sales", which
# inverts the meaning of failing to reject H0.)

# 3. Test the homogeneity of variance
fligner.test(paul_newfood_csv$Sales ~ paul_newfood_csv$AdLevel)
# Inference: p-value = 0.3481 > 0.05, so H0 is not rejected --
# variances can be treated as equal (var.equal = TRUE).

# Two-way ANOVA, main effects only
model1 <- aov(Sales ~ PriceLevel + AdLevel, data = paul_newfood_csv)
summary(model1)
# Inference -
# PriceLevel: p-value = 0.000182 < 0.05 -> H0 rejected -> significant effect
# AdLevel:    p-value = 0.969624 > 0.05 -> H0 not rejected -> no significant effect

# Interaction: label the factor levels and plot the cell means
paul_newfood_csv$Price <- factor(paul_newfood_csv$PriceLevel, levels = c(1, 2, 3),
                                 labels = c("Low", "Medium", "High"))
paul_newfood_csv$Advertisement <- factor(paul_newfood_csv$AdLevel, levels = c(1, 2),
                                         labels = c("Low", "High"))
interaction.plot(paul_newfood_csv$Price
                 , paul_newfood_csv$Advertisement
                 , paul_newfood_csv$Sales
                 , col = c("Red", "Blue")
                 , main = "Interaction between Price and Advertisement")

# Two-way ANOVA with interaction term
model2 <- aov(Sales ~ Price + Advertisement + Price * Advertisement, data = paul_newfood_csv)
summary(model2)
# Inference -
# Price:               p = 0.000159 < 0.05 -> significant effect
# Advertisement:       p = 0.968450 > 0.05 -> no significant effect
# Price:Advertisement: p = 0.190898 > 0.05 -> interaction not significant
# It appears that Ads were not effective although Price was highly
# effective and statistically significant.

# Is StoreSize making a significant difference?
# Correlation between Sales and StoreSize
cor(paul_newfood_csv$Sales, paul_newfood_csv$StoreSize)
cor.test(paul_newfood_csv$Sales, paul_newfood_csv$StoreSize)
# p-value = 0.03636 < 0.05 -> H0 rejected -> StoreSize is correlated with
# Sales.  Its linear effect should therefore be removed (concomitant
# variable) before judging the treatment factors -> run ANCOVA.

# ANCOVA - Analysis of Co-Variance:
# interaction model adjusted for the concomitant variable StoreSize
model3 <- aov(Sales ~ StoreSize + Price + Advertisement + Price * Advertisement,
              data = paul_newfood_csv)
summary(model3)
# Inference -
# StoreSize:     p = 0.000448 < 0.05 -> significant
# Price:         p = 9.51e-06 < 0.05 -> significant
# Advertisement: p = 0.002458 < 0.05 -> significant
# ANOVA alone suggested Ads did not significantly impact Sales, but the
# concomitant variable StoreSize was masking the Ad effect.  After
# removing the linear effect of StoreSize via ANCOVA, Advertisement
# becomes a significant driver.
|
67da2bb7a8314dc9e58d8ba6c2a0382f0fea8672
|
f4a081e3696cc3737cef833bbe36e6cbba0b4022
|
/man/cov.wml.Rd
|
6cbcd6fbeee8e3a84e6ee10b96c9d6334d32f05e
|
[] |
no_license
|
cran/fpc
|
5ba4ad1e5d8bd50009060ce009161df3a765421f
|
f16ec459c931722a5354788e23325931ad835e8f
|
refs/heads/master
| 2023-01-09T20:22:03.277537
| 2023-01-06T23:20:13
| 2023-01-06T23:20:13
| 17,696,125
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,257
|
rd
|
cov.wml.Rd
|
\name{cov.wml}
\alias{cov.wml}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{Weighted Covariance Matrices (Maximum Likelihood)}
\description{
Returns a list containing estimates of the weighted covariance
matrix and the mean of the data, and optionally of the (weighted)
correlation matrix. The
covariance matrix is divided by the sum of the weights,
corresponding to \code{n} and the ML-estimator in the case of equal
weights, as opposed to \code{n-1} for \code{\link{cov.wt}}.
}
\usage{
cov.wml(x, wt = rep(1/nrow(x), nrow(x)), cor = FALSE, center = TRUE)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{x}{a matrix or data frame. As usual, rows are observations and
columns are variables.}
\item{wt}{a non-negative and non-zero vector of weights for each
observation. Its length must equal the number of rows of
\code{x}.}
  \item{cor}{A logical indicating whether the estimated weighted
    correlation matrix will be returned as well.}
\item{center}{Either a logical or a numeric vector specifying the centers
to be used when computing covariances. If \code{TRUE}, the
    (weighted) mean of each variable is used; if \code{FALSE}, zero is
used. If \code{center} is numeric, its length must equal the
number of columns of \code{x}.}
}
\value{
A list containing the following named components:
\item{cov}{the estimated (weighted) covariance matrix.}
\item{center}{an estimate for the center (mean) of the data.}
\item{n.obs}{the number of observations (rows) in \code{x}.}
\item{wt}{the weights used in the estimation. Only returned if given
as an argument.}
\item{cor}{the estimated correlation matrix. Only returned if `cor' is
`TRUE'.}
}
\author{Christian Hennig
\email{christian.hennig@unibo.it}
\url{https://www.unibo.it/sitoweb/christian.hennig/en/}}
\seealso{\code{\link{cov.wt}}, \code{\link{cov}}, \code{\link{var}}}
\examples{
x <- c(1,2,3,4,5,6,7,8,9,10)
y <- c(1,2,3,8,7,6,5,8,9,10)
cov.wml(cbind(x,y),wt=c(0,0,0,1,1,1,1,1,0,0))
cov.wt(cbind(x,y),wt=c(0,0,0,1,1,1,1,1,0,0))
}
\keyword{multivariate}% at least one, from doc/KEYWORDS
|
1c52b5543a1d0a28c8ccd3a770e92286816d5f92
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkEntrySetIconFromPixbuf.Rd
|
964ae03c08e71b91dab1918492eaea0bf706b912
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 618
|
rd
|
gtkEntrySetIconFromPixbuf.Rd
|
\alias{gtkEntrySetIconFromPixbuf}
\name{gtkEntrySetIconFromPixbuf}
\title{gtkEntrySetIconFromPixbuf}
\description{Sets the icon shown in the specified position using a pixbuf.}
\usage{gtkEntrySetIconFromPixbuf(object, icon.pos, pixbuf = NULL)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkEntry}}}
\item{\verb{icon.pos}}{Icon position}
\item{\verb{pixbuf}}{A \code{\link{GdkPixbuf}}, or \code{NULL}. \emph{[ \acronym{allow-none} ]}}
}
\details{If \code{pixbuf} is \code{NULL}, no icon will be shown in the specified position.
Since 2.16}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
8aee5cd2c100bc651e55c6c45ee89af3f55c2450
|
af325890e4442dc45c2c9316400c3eb7fad9e107
|
/R/geary.R
|
cfb5e408fdf388da9594be6ade92615b8317282f
|
[] |
no_license
|
cran/moments
|
57475456b7c3a7130490b2e3f985658a013a4df0
|
724bcfd8a214f24872cda0d8a8a46bcb0e846b64
|
refs/heads/master
| 2022-05-21T10:55:18.637220
| 2022-05-02T12:01:55
| 2022-05-02T12:01:55
| 17,697,626
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 398
|
r
|
geary.R
|
# Geary's measure of kurtosis: the ratio of the mean absolute deviation
# to the (ML, divide-by-n) standard deviation.  Matrices are handled
# column-wise, data frames element-wise; anything else is coerced to a
# vector first.
geary <- function(x, na.rm = FALSE) {
  if (is.matrix(x)) {
    return(apply(x, 2, geary, na.rm = na.rm))
  }
  if (is.vector(x)) {
    vals <- if (na.rm) x[!is.na(x)] else x
    centered <- vals - mean(vals)
    n_obs <- length(vals)
    root_mean_sq <- sqrt(sum(centered^2) / n_obs)
    mean_abs_dev <- sum(abs(centered)) / n_obs
    return(mean_abs_dev / root_mean_sq)
  }
  if (is.data.frame(x)) {
    return(sapply(x, geary, na.rm = na.rm))
  }
  geary(as.vector(x), na.rm = na.rm)
}
|
0f7dbcc326c99780511513769d2ac9ae33082dbb
|
92719b80937aa4aaa47865285e390c015012b5a4
|
/man/get.basis.Rd
|
346e4538157439f7c6413fe80bba8976fcba5c3d
|
[] |
no_license
|
cran/lpSolveAPI
|
a3702d143aa3c9027e09ab4db49ba6917f9cf96c
|
0ebfc5cabed2946fe5b2b3dfc31c7d5b793335b1
|
refs/heads/master
| 2022-11-06T04:22:42.063480
| 2022-10-20T16:12:46
| 2022-10-20T16:12:46
| 17,697,189
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 826
|
rd
|
get.basis.Rd
|
\name{get.basis}
\alias{get.basis}
\title{Get Basis}
\description{
Retrieve the basis from a solved lpSolve linear program model object.
}
\usage{
get.basis(lprec, nonbasic = FALSE)
}
\arguments{
\item{lprec}{an lpSolve linear program model object.}
\item{nonbasic}{a logical value. If \code{TRUE}, the nonbasic variables are returned as well.}
}
\value{
an integer vector containing the indices of the basic (and nonbasic if requested) variables. If an error occurs (for instance when calling \code{get.basis} on a model that has not yet been solved) a \code{NULL} value is returned.
}
\references{\url{https://lpsolve.sourceforge.net/5.5/index.htm}}
\author{Kjell Konis \email{kjell.konis@me.com}}
\examples{
lps.model <- make.lp(3, 3)
## build and solve model ##
get.basis(lps.model)
}
\keyword{programming}
|
c621be6b9f55a6be83bc640c0c700622fc9c4e82
|
3593fdf70b57effc2abff5004209220aac2c7f41
|
/R/Stats.R
|
0e9cd5d11991628ab76fa13b6407caf1ff48eb36
|
[] |
no_license
|
ShunHasegawa/WTC_IEM
|
dcc00054709c59acf226044c5aa3ddcc09b6da16
|
3ffb6c0f306ac366e61d6a2e5de02c26da30501d
|
refs/heads/master
| 2016-09-06T10:46:05.650853
| 2015-08-31T21:06:30
| 2015-08-31T21:06:30
| 20,788,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,489
|
r
|
Stats.R
|
#####################################
# merge soil moisture and temp data #
#####################################
# Daily chamber-level soil moisture/temperature summaries; loads
# `soilChmSmry` into the workspace (contents not visible here --
# verify against the .RData file).
load("Data/WTC_soilMoistTemp_Chamber_DailySummary.RData")
# restructure: cast to a wide table with one column per probe/statistic
# combination (e.g. SoilVW_5_25_Mean used further below)
names(soilChmSmry)[4] <- "probe"
SoilChMlt <- melt(soilChmSmry, id = c("Date", "Chamber", "temp", "probe"))
SoilCh <- cast(SoilChMlt, Date + Chamber + temp ~ probe + variable)
# chamber mean for IEM
# NOTE(review): `iem` and `iemExOl` are created elsewhere (not visible in
# this file); quick visual check of ammonium values here
plot(iem$nh)
# use iemExOl where outlier of nh is removed
# Add NP ratio: (nitrate + ammonium) / phosphate
iemExOl$NP <- with(iemExOl, (no + nh)/po)
# remove Inf (zero phosphate) so downstream means treat them as missing
iemExOl$NP[is.infinite(iemExOl$NP)] <- NA
# Chamber means per incubation period: arithmetic means of no/nh/po/NP
# plus a geometric mean of NP (gm_mean is defined elsewhere -- TODO confirm)
IEM_ChMean <- ddply(iemExOl, .(Time, insertion, sampling, date, Chamber, temp),
                    function(x) {
                      d1 <- colMeans(x[,c("no", "nh", "po", "NP")], na.rm = TRUE)
                      d2 <- with(x, gm_mean(NP, na.rm = TRUE))
                      return(c(d1, gmNP = d2))
                      })
# sanity check (printed only): non-missing nh observations per group
ddply(iemExOl, .(Time, insertion, sampling, date, Chamber, temp), summarise, N=sum(!is.na(nh)))
# 4th column holds the sampling date; rename for merging with SoilCh
names(IEM_ChMean)[4] <- "Date"
# mean of soil vars during incubation period
head(SoilCh)
# Column-wise means of every soil variable over one incubation window
# (insertion..sampling, inclusive) for a single chamber; the window
# bounds are prepended to the resulting row.
SoilIncSampMean <- function(insertion, sampling, Chm, data = SoilCh){
  in_window <- data$Date >= insertion & data$Date <= sampling & data$Chamber == Chm
  window_df <- data[which(in_window), ]
  value_cols <- setdiff(names(window_df), c("Date", "Chamber", "temp"))
  chamber_means <- ddply(window_df, .(Chamber),
                         function(d) colMeans(d[, value_cols], na.rm = TRUE))
  cbind(insertion, sampling, chamber_means)
}
# Attach incubation-period soil means to each chamber-level IEM record
IEM_DF <- ddply(IEM_ChMean, .(Time, Date, insertion, sampling, Chamber, temp, no, nh, po, NP, gmNP),
                function(x) SoilIncSampMean(insertion= x$insertion, sampling= x$sampling, Chm = x$Chamber))
# shorthand for soil volumetric water at 5-25 cm, used as "moist" below
IEM_DF$moist <- IEM_DF$SoilVW_5_25_Mean
# Visual check: sampling dates (red) against the soil moisture series,
# with incubation windows marked by dashed lines
p <- ggplot(SoilCh, aes(x = Date, y = SoilVW_5_25_Mean))
p2 <- p +
  geom_line() +
  geom_point(data = IEM_DF, aes(x = Date, y = SoilVW_5_25_Mean),
             col = "red", size = 2)+
  facet_wrap( ~ Chamber)+
  geom_vline(xintercept = as.numeric(unique(IEM_DF$insertion)), linetype = "dashed") +
  geom_vline(xintercept = as.numeric(unique(IEM_DF$sampling)), linetype = "dashed")
p2
# good
###########
# Nitrate #
###########
# NOTE(review): the sourced scripts below create the AnvF_* / Iml_ancv_*
# objects and the WTC_IEM_*_CntrstDf data frames used afterwards.
source("R/Stats_NO.R")
############
# Ammonium #
############
source("R/Stats_NH.R")
#############
# Phosphate #
#############
source("R/Stats_P.R")
############
# NP Ratio #
############
source("R/Stats_NPRatio.R")
########################
## Result of contrast ##
########################
# Combine the contrast tables produced by the sourced scripts
ContrastDF <- rbind(WTC_IEM_Nitrate_CntrstDf, WTC_IEM_Ammonium_CntrstDf)
save(ContrastDF, file = "output//data/WTC_IEM_ContrastDF.RData")
#################
## Temp x Time ##
#################
# create stat summary table for LMM with Temp and time
TempTimeStatList <- list(no = AnvF_no, nh = AnvF_nh, po = AnvF_po)
Stat_TempTime <- ldply(names(TempTimeStatList),
                       function(x) StatTable(TempTimeStatList[[x]], variable = x))
save(Stat_TempTime, file = "output//data/TempTime_Stat.RData")
############
## ANCOVA ##
############
# change row names so the NP table lines up with the other responses
AnvF_ancv_NP2 <- AnvF_ancv_NP
row.names(AnvF_ancv_NP2) <- c("temp", "moist", "Temp5_Mean", "temp:moist")
AncvLst <- list('no' = AnvF_ancv_no,
                'nh' = AnvF_ancv_nh,
                'po' = AnvF_ancv_po,
                'np' = AnvF_ancv_NP2)
# AncvSmmryTbl and the Iml_ancv_* models come from the sourced scripts
AncvRes <- AncvSmmryTbl(AncvRes = AncvLst, predictor = row.names(Anova(Iml_ancv_no)))
write.csv(AncvRes, file = "Output/Table/SummaryANCOVA.csv", row.names = FALSE)
|
3577de30015ae35184bae57c8629e968eabecf9f
|
a5170d90be2827eb50b62fa9ee31a4515f914616
|
/Assignment 4/src/Question2.R
|
f0110c9a11b205325c5278533711fade2cbda22e
|
[] |
no_license
|
ayeshabhimdi/Machine-Learning
|
c2aa4de5b58fcd53e79c3ca6fb4f8f93d18f6b67
|
bfa7c6ea693d9d0470bb1ea5e6ca22081464aa93
|
refs/heads/master
| 2021-10-11T06:29:12.971455
| 2016-07-01T15:10:34
| 2016-07-01T15:10:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,182
|
r
|
Question2.R
|
## Kernel ridge classifier on a SUSY subset ------------------------------
## A linear kernel against randomly sampled training rows is the feature
## map; weights come from closed-form regularised least squares and the
## class label is recovered by thresholding the score at zero.
data <- read.csv('C:/ML/Assignment 3/a3barebones/susysubset.csv')
mydata <- data[sample(1:nrow(data), 2700, replace = FALSE), ]

n_centers <- 40   # number of kernel centers (was hard-coded as 40 throughout)
regwt <- 0.01     # ridge regularisation weight

# Split the data set: 75% training, 25% test
sub <- sample(nrow(mydata), floor(nrow(mydata) * 0.75))
training <- mydata[sub, ]
testing <- mydata[-sub, ]

# Predictors and response (class label in column 9)
xtrain <- training[, -9]
ytrain <- training[, 9]
xtest <- testing[, -9]
predictions <- testing[, 9]

# Append a column of ones (bias term) to both design matrices
xtrain$newcol <- rep(1, nrow(xtrain))
xtest$newcol <- rep(1, nrow(xtest))

# Recode labels {0, 1} -> {-1, +1} for the least-squares fit
yt <- ytrain
yt[yt == 0] <- -1

# Linear kernel against n_centers randomly chosen training rows.
# (Renamed from `c`, which shadowed base::c; dead `weight=c()`/`k=c()`
# initialisations removed.)
centers <- xtrain[sample(nrow(xtrain), n_centers, replace = FALSE), ]
ct <- t(centers)
K <- as.matrix(xtrain) %*% as.matrix(ct)

# Closed-form solution: w = (K'K / m + regwt * I)^-1 K'y
# NOTE(review): m = ncol(K) is the number of centers (40), not the number
# of training samples; kept as in the original -- verify against the spec.
numsamples <- ncol(K)
Kt <- t(K)
weight <- solve(Kt %*% K / numsamples + regwt * diag(n_centers)) %*% (Kt %*% yt)

# Score the test set and threshold at zero to recover {0, 1} labels
Ktest <- as.matrix(xtest) %*% as.matrix(ct)
ytest <- as.matrix(Ktest) %*% as.matrix(weight)
ytest[ytest > 0] <- 1
ytest[ytest < 0] <- 0

# Classification accuracy (%), vectorised instead of an element loop
correct <- sum(as.vector(ytest) == predictions)
acc <- correct / length(ytest) * 100
|
380d3389aa7cb3084bff946d85199034a4ada14d
|
c8a3e165e3c142337578e818947ca1da1262fb3f
|
/Churn.r
|
f68aab0f7f49bb46a69c19173df52d861aa3b060
|
[] |
no_license
|
AdityaKanungo/Customer-Churn-Reduction
|
d1333723e1d0dcddb0ea10301b05a488d7480612
|
f3fa23e9c0584da8ec93e12fc661f64e9038dde1
|
refs/heads/master
| 2020-03-18T20:54:52.539019
| 2018-05-29T07:27:15
| 2018-05-29T07:27:15
| 135,246,869
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,412
|
r
|
Churn.r
|
rm(list=ls())
setwd("C:/Users/BATMAN/Desktop/1st project working copy")
getwd()
#------------------------------
library(corrplot)
library(DMwR)
library(e1071)
library(caret)
library(class)
library(C50)
#------------------------------
# Load train data
train_df = read.csv("C:/Users/BATMAN/Desktop/1st project working copy/Train_data.csv")
test_df = read.csv("C:/Users/BATMAN/Desktop/1st project working copy/Test_data.csv")
final_submission = test_df
#------------------------------
#remove un-wanted variables
train_df <- subset(train_df, select = -c(state,area.code,phone.number))
test_df <- subset(test_df, select = -c(state,area.code,phone.number))
#------------------------------
#Replacing yes and no with 1 and 0/ true false with 1 and 0
#train
train_df$international.plan = as.numeric(as.factor(train_df$international.plan)==" yes",0,1)
train_df$voice.mail.plan = as.numeric(as.factor(train_df$voice.mail.plan)==" yes",0,1)
train_df$Churn = as.numeric(as.factor(train_df$Churn)==" True.",0,1)
#test
test_df$international.plan = as.numeric(as.factor(test_df$international.plan)==" yes",0,1)
test_df$voice.mail.plan = as.numeric(as.factor(test_df$voice.mail.plan)==" yes",0,1)
test_df$Churn = as.numeric(as.factor(test_df$Churn)==" True.",0,1)
#Converting into factor
train_df$Churn = as.factor(train_df$Churn)
train_df$international.plan = as.factor(train_df$international.plan)
train_df$voice.mail.plan = as.factor(train_df$voice.mail.plan)
test_df$Churn = as.factor(test_df$Churn)
test_df$international.plan = as.factor(test_df$international.plan)
test_df$voice.mail.plan = as.factor(test_df$voice.mail.plan)
#------------------------------
#Check missing vales
sum(is.na(train_df))
sum(is.na(test_df))
#------------------------------
## feature selection
#------------------------------
#correlation plot
w = train_df[,5:16]
x = cor(w)
corrplot(x, type = "upper", order = "hclust", tl.col = "black", tl.srt = 45)
#From the above heat-map we can infer the following:
#- total day minutes & total day charge are highly +vely correlated.
#- total eve minutes & total eve charge are highly +vely correlated.
#- total night minutes & total night charge are highly +vely correlated.
#- total intl minutes & total intl charge are highly +vely correlated.
#Therefore we will drop the total day charge, total eve charge, total night charge i.e variables carrying redundant information
train_df <- subset(train_df, select = -c(total.day.minutes,total.eve.minutes,total.night.minutes,total.intl.minutes))
test_df <- subset(test_df, select = -c(total.day.minutes,total.eve.minutes,total.night.minutes,total.intl.minutes))
#------------------------------
## Outlier analysis using boxplot: values outside the whiskers are set to NA
## and then re-estimated with k-nearest-neighbour imputation (DMwR-style
## knnImputation -- presumably loaded earlier; confirm).
#------------------------------
numeric_index = sapply(train_df,is.numeric) # logical mask of numeric columns
numeric_data = train_df[,numeric_index]
cnames = colnames(numeric_data)
# Visual inspection: one box-plot per numeric variable.
for(i in cnames)
{
  boxplot(train_df[i])
}
# Replace outliers with NA, then impute.
# train
for(i in cnames)
{
  # boxplot.stats(...)$out = points beyond 1.5*IQR whiskers
  val = train_df[,i][train_df[,i] %in% boxplot.stats(train_df[,i])$out]
  train_df[,i][train_df[,i] %in% val] = NA
}
train_df = knnImputation(train_df, k=3)
numeric_index = sapply(test_df,is.numeric) # recompute mask for the test set
numeric_data = test_df[,numeric_index]
# NOTE(review): number.vmail.messages is excluded from outlier treatment for
# the TEST set only, while the train set keeps it -- confirm this asymmetry is
# intentional (likely because its many zeros make every non-zero an "outlier").
numeric_data <- subset(numeric_data, select = -c(number.vmail.messages))
cnames = colnames(numeric_data)
# test
for(i in cnames)
{
  val = test_df[,i][test_df[,i] %in% boxplot.stats(test_df[,i])$out]
  test_df[,i][test_df[,i] %in% val] = NA
}
test_df = knnImputation(test_df, k=3)
#------------------------------
## Feature scaling: z-score standardization, column by column.
## NOTE(review): the test set is standardized with its OWN mean/sd rather than
## the train set's statistics -- a mild form of information leakage; confirm
## whether this is acceptable for the intended evaluation.
#------------------------------
# train
numeric_index = sapply(train_df,is.numeric) # numeric-column mask
numeric_data = train_df[,numeric_index]
cnames = colnames(numeric_data)
for (i in cnames){
  # (x - mean) / sd per column
  train_df[,i] = (train_df[,i] - mean(train_df[,i]))/sd(train_df[,i])
}
# test
numeric_index = sapply(test_df,is.numeric) # recompute mask for the test set
numeric_data = test_df[,numeric_index]
cnames = colnames(numeric_data)
for (i in cnames){
  test_df[,i] = (test_df[,i] - mean(test_df[,i]))/sd(test_df[,i])
}
#---------------------------
## Naive Bayes
## Column 14 is assumed to be the Churn target throughout this section --
## TODO confirm against the column layout after the drops above.
#---------------------------
NB_model = naiveBayes(Churn ~.,data=train_df)
NB_predictions = predict(NB_model, test_df[,-14],type = 'class')
conf_matrix_NB = table(test_df[,14],NB_predictions)
confusionMatrix(conf_matrix_NB)
# Overall accuracy in percent (sum of diagonal / total observations).
sum(diag(conf_matrix_NB)/nrow(test_df))*100
#---------------------------
## Logistic regression
#---------------------------
logit = glm(Churn ~ ., train_df, family = 'binomial')
# type = 'response' yields P(churn); probabilities are kept for the final
# churn-score export further below.
logit_predictions_prob = predict(logit, test_df[,-14], type = 'response')
# Hard classification at the conventional 0.5 threshold.
logit_predictions = ifelse(logit_predictions_prob> 0.5, 1, 0)
ConfMatrix_logit = table(test_df[,14], logit_predictions)
confusionMatrix(ConfMatrix_logit)
sum(diag(ConfMatrix_logit)/nrow(test_df))*100
#---------------------------
## KNN
#---------------------------
# NOTE(review): columns 1:14 are passed as predictors; if Churn is column 14
# (as the indexing above suggests) the target itself is included among the
# KNN features -- target leakage. Verify and use train_df[,-14] if so.
knn_predictions = knn(train_df[,1:14],test_df[,1:14], train_df$Churn, k=3)
confMatrix_knn = table(knn_predictions, test_df$Churn)
confusionMatrix(confMatrix_knn)
sum(diag(confMatrix_knn)/nrow(test_df))*100
#------------------------------
#***********************************************************************************************
# As we have to calculate the churn score, we need a model that outputs a
# probability. Gaussian Naive Bayes and Logistic Regression both provide
# probabilities; Logistic Regression is chosen because its accuracy above is
# higher than Naive Bayes'.
#************************************************************************************************
# Drop every raw feature column, keeping only the identifier column(s) of
# final_submission (final_submission is presumably built earlier in the
# script -- confirm), then attach the predicted churn probabilities.
final_submission = subset(final_submission, select = -c(state,account.length,area.code,international.plan,
                                                        voice.mail.plan,number.vmail.messages,total.day.minutes,
                                                        total.day.calls,total.day.charge,total.eve.minutes,
                                                        total.eve.calls,total.eve.charge,total.night.minutes,total.night.calls,
                                                        total.night.charge,total.intl.minutes,total.intl.calls,total.intl.charge,
                                                        number.customer.service.calls,Churn))
final_submission$Churn.Score = logit_predictions_prob
# Persist scores to the working directory.
write.csv(final_submission,'Churn_Score_r.csv',row.names=FALSE)
|
d2d8dd533ecf7e6b377241925768c2b0e2f03dbb
|
348e226ce8f69f44eb678234ce10bec52d0a4a66
|
/man/wine.Rd
|
316dede379cbffa207227bd896536741223e78c4
|
[] |
no_license
|
cran/datasetsICR
|
846765e801298cea340c64626f0c5ac5d7799344
|
3c7779f10d7ebc8e12cc6d7501bf26464381a0cb
|
refs/heads/master
| 2022-09-11T08:35:05.163141
| 2020-06-04T10:40:10
| 2020-06-04T10:40:10
| 269,666,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,319
|
rd
|
wine.Rd
|
\name{wine}
\alias{wine}
\docType{data}
\title{
wine dataset}
\description{
Chemical analysis of wines grown in the same region in Italy but derived from 3 different cultivars.
}
\usage{data(wine)}
\format{
A data.frame with 178 rows on 14 variables (including 1 classification variable).
}
\details{
The dataset includes 178 Italian wines characterized by 13 constituents (quantitative variables).
The dataset contains an additional variable, \code{Class}, distinguishing the wines in 3 groups according to the cultivar. The quantitative variables are \code{Alcohol}, \code{Malic acid}, \code{Ash}, \code{Alcalinity of ash}, \code{Magnesium}, \code{Total phenols}, \code{Flavanoids}, \code{Nonflavanoid phenols}, \code{Proanthocyanins},
\code{Color intensity}, \code{Hue}, \code{OD280/OD315 of diluted wines} and \code{Proline}.
}
\source{
http://archive.ics.uci.edu/ml
}
\references{
Dua, D., Graff, C.: UCI Machine Learning Repository. University of California, School of Information and Computer Science, Irvine, CA (2019) \cr
Giordani, P., Ferraro, M.B., Martella, F.: An Introduction to Clustering with R. Springer, Singapore (2020)
}
\author{
Paolo Giordani, Maria Brigida Ferraro, Francesca Martella
}
\examples{
data(wine)
X <- wine[,-1]
class <- wine[,1]
}
\keyword{data}
\keyword{multivariate}
|
ece3c104ce2c22895b53b1b0a945c5c1f5efe2fa
|
018556e178f4aa3af1b4dacbcf9cb6be3142c162
|
/first_round.R
|
cb0fbbe6f5954ca7801de2fcbd4ac25b67592974
|
[] |
no_license
|
eugenern/spurs-prospects-stats
|
54a75376a6b7e712a5871a67b698e079ce13211a
|
1c280a58fcd5679da23a0954986385651e4ad0a6
|
refs/heads/master
| 2020-04-29T22:25:25.468880
| 2019-03-24T15:26:38
| 2019-03-24T15:26:38
| 176,447,518
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,330
|
r
|
first_round.R
|
# Bar chart comparing Spurs first-round picks' G League per-36 stats,
# rendered through a magick graphics device so player head shots can be
# composited onto the final image, then written to first_round.png.
library(magick)
# Open an in-memory magick graphics device; everything plotted until
# dev.off() is captured into `fig`.
fig <- image_graph(width = 1920, height = 1080, res = 96)
# Incomplete stats; will need updating as the season progresses.
# Per-36 counting stats, in the order given by stats.names below.
lw.stats <-
  c(21.8, 8.8, 20, 1.5, 4.3, 2.7, 3.4, 0.9, 3, 3.8, 2.3, 1.5, 0.6, 1.9, 2.5)
# Shooting percentages, in the order given by shooting.names below.
lw.shooting <- c(43.9, 35.8, 81.1, 50.8)
dw.stats <-
  c(25.7,
    9.2,
    20.3,
    2.6,
    7.7,
    4.7,
    5.4,
    1.6,
    4.7,
    6.3,
    4.5,
    1.7,
    1.4,
    3.1,
    2.1)
dw.shooting <- c(45.3, 33.1, 87.3, 56.6)
dm.stats <-
  c(17.6, 7, 15.9, 0.5, 3.1, 3.1, 4.4, 1.3, 6.8, 8.1, 6.4, 2, 0.1, 5.1, 2.9)
dm.shooting <- c(44.2, 15.6, 70.3, 49.4)
# Legend labels; also used as row names of the two matrices below.
player.names <-
  c(
    'Lonnie Walker IV - G\nAge: 20 GP: 29 MPG: 27.3',
    'Derrick White - G\nAge: 23 GP: 24 MPG: 28.2',
    'Dejounte Murray - G\nAge: 20 GP: 15 MPG: 35.1'
  )
stats.names <-
  c(
    'PTS',
    'FGM',
    'FGA',
    '3PM',
    '3PA',
    'FTM',
    'FTA',
    'OREB',
    'DREB',
    'REB',
    'AST',
    'STL',
    'BLK',
    'TOV',
    'PF'
  )
shooting.names <- c('FG%', '3P%', 'FT%', 'TS%')
# One row per player, one column per stat (rbind + matrix keeps the
# row/column naming straightforward for barplot's grouped-bar input).
stats.matrix <-
  matrix(
    rbind(lw.stats, dw.stats, dm.stats),
    ncol = 15,
    dimnames = list(player.names, stats.names)
  )
shooting.matrix <-
  matrix(
    rbind(lw.shooting, dw.shooting, dm.shooting),
    ncol = 4,
    dimnames = list(player.names, shooting.names)
  )
# Empty (unnamed) spacer column between counting stats and shooting splits.
# Values are the max of all values across files to keep axes consistent;
# its bars are drawn white below, so only the gap is visible.
dummy.matrix <- matrix(c(26.2, 26.2, 26.2))
# Percentages are downscaled by 4 so both stat families share one y-axis;
# the right-hand axis added below undoes the scaling for its labels.
m <- cbind(stats.matrix, dummy.matrix, shooting.matrix / 4)
# Widen the right margin to leave room for the second y-axis.
par(mar = c(4.1, 4.1, 4.1, 4.1))
bp <-
  barplot(
    m,
    beside = TRUE,
    # The spacer column has an empty name; paint its bars white (invisible),
    # otherwise cycle team colors per player.
    col = ifelse(colnames(m)[col(m)] == '', 'white',
                 c('#00B2A9', '#EF426F', '#FF8200')),
    legend = player.names,
    args.legend = list(
      x = 'top',
      bty = 'n',
      x.intersp = 7,
      y.intersp = 3,
      border = NA
    ),
    main = paste0('Spurs First Round Draft Picks\n',
                  'G League Stats Per 36 Minutes ',
                  'In First Season With Austin'),
    border = NA,
    space = c(0, 0.5)
  )
# Right-hand percentage axis: tick positions are the downscaled values
# (0..25 by 5), labels show the true percentages (0..100 by 20).
axis(side = 4,
     at = (0:5 * 5),
     labels = (0:5 * 20))
# Label each bar with its true (un-scaled) value.
actual.m <- cbind(stats.matrix, dummy.matrix, shooting.matrix)
text(
  x = bp,
  y = m,
  labels = actual.m,
  pos = 3,
  cex = 0.7#, xpd = NA
)
# Close the magick device; `fig` now holds the rendered plot.
dev.off()
# Fetch and downscale the NBA headshot for each player, then composite them
# next to the legend entries at hand-tuned pixel offsets.
lw <- image_read('https://ak-static.cms.nba.com/wp-content/uploads/headshots/nba/latest/260x190/1629022.png')
lw <- image_scale(lw, 'x65')
dw <- image_read('https://ak-static.cms.nba.com/wp-content/uploads/headshots/nba/latest/260x190/1628401.png')
dw <- image_scale(dw, 'x65')
dm <- image_read('https://ak-static.cms.nba.com/wp-content/uploads/headshots/nba/latest/260x190/1627749.png')
dm <- image_scale(dm, 'x65')
out <- image_composite(fig, lw, offset = '+833+84')
out <- image_composite(out, dw, offset = '+831+157')
out <- image_composite(out, dm, offset = '+834+229')
image_write(out, path = 'first_round.png', format = 'png')
|
922ebce18958dacb6ecaf13d8a3d498b6a9eb3ff
|
0851f0cb3cd0d4ab242fb228aa1f6a1d7b0b0ac1
|
/R/lit_docker.R
|
ffdd7eb9c362d79019afa47ceb4ac15224633767
|
[] |
no_license
|
nbarsch/pineium
|
21e5eec45082dfae12a14a29b65814ef71121241
|
7946911dde2b838f080190c42cee9a449ee184b2
|
refs/heads/master
| 2022-08-12T19:24:51.738308
| 2022-07-08T09:27:22
| 2022-07-08T09:27:22
| 211,971,778
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,575
|
r
|
lit_docker.R
|
#' lit_docker()
#'
#' Launch a Selenium standalone browser inside Docker and return a connected
#' RSelenium remote driver. On non-Windows hosts this stops/removes ALL
#' existing Docker containers, kills anything listening on `port` (and the
#' adjacent ports), pulls the selenium/standalone image and starts it, then
#' opens a session and navigates to duckduckgo.com as a smoke test.
#'
#' @param browser chrome or firefox
#' @param port host port number the Selenium server is published on
#' @param headless MUST BE =TRUE FOR USING DOCKER (any FALSE value is
#'   overridden; the parameter only triggers the warning below and is
#'   otherwise unused in this function)
#' @return an RSelenium remoteDriver with an open session
#' @export
lit_docker <-function(port=4445,browser="chrome",headless=TRUE){
  if(headless==FALSE){
    print("NOTE: WHEN USING DOCKER YOU ARE REQUIRED TO RUN HEADLESS, OVERRIDING AND USING headless=TRUE")
    Sys.sleep(2)
  }
  os <- tolower(Sys.info()[["sysname"]])
  # Docker setup is skipped on Windows; there the function assumes a Selenium
  # server is already listening on `port`.
  if(os!="windows"){
    Sys.sleep(1)
    port <- as.integer(port)
    Sys.sleep(1)
    # Tear down any running containers and free the target port range.
    # NOTE(review): this stops/removes EVERY container on the host, not just
    # Selenium ones -- destructive on shared machines.
    system("sudo docker stop $(sudo docker ps -a -q)")
    Sys.sleep(1)
    system("sudo docker rm $(sudo docker ps -a -q)")
    system(paste0("kill -9 $(lsof -t -i:",port," -sTCP:LISTEN)"))
    system(paste0("kill -9 $(lsof -t -i:",port+1," -sTCP:LISTEN)"))
    system(paste0("kill -9 $(lsof -t -i:",port-1," -sTCP:LISTEN)"))
    Sys.sleep(1)
    # Repeated stop/rm: presumably a belt-and-braces retry for containers that
    # were still shutting down -- confirm whether the duplicate pass is needed.
    system("sudo docker stop $(sudo docker ps -a -q)")
    Sys.sleep(1)
    system("sudo docker rm $(sudo docker ps -a -q)")
    Sys.sleep(1)
    system(paste0('sudo docker pull selenium/standalone-',browser))
    Sys.sleep(1)
    # Publishes host `port` -> container `port-1`. NOTE(review): the Selenium
    # standalone image listens on 4444 inside the container, so this mapping
    # only works when port has its default value of 4445 -- verify before
    # calling with a non-default port.
    system(paste0('sudo docker run -d -p ',port,":",port-1,' selenium/standalone-', browser))
    Sys.sleep(6)
  }
  # Connect to the (now running) Selenium server; the sleeps give the
  # container time to become ready.
  if(browser=="chrome"){
    remDr <- remoteDriver(port=as.integer(port),browserName="chrome")
    Sys.sleep(10)
    remDr$open()
    Sys.sleep(5)
  }else{
    remDr <- remoteDriver(port=as.integer(port),browserName="firefox")
    Sys.sleep(10)
    remDr$open()
    Sys.sleep(5)
  }
  # Smoke-test navigation before handing the driver back to the caller.
  remDr$navigate("https://www.duckduckgo.com")
  Sys.sleep(1)
  return(remDr)
}
|
c8d288a27252cdf7dbda21dc8ceb4ca7f3975716
|
6329d08d30f8bff1ec7c9b75d59e32e708df047e
|
/tests/testthat.R
|
54960609006b567f0e62831f6be10810c300fe7a
|
[] |
no_license
|
vjcitn/pogos
|
44289491dc9ff7827541d2fab8fa217a63025bf2
|
b9541ec71ff8c8b012dedc8c9b3d1398e6380198
|
refs/heads/master
| 2023-02-13T01:22:59.602385
| 2023-02-05T20:37:19
| 2023-02-05T20:37:19
| 103,438,764
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point: executing this file runs every test under
# tests/testthat/ for the pogos package (e.g. during R CMD check).
library(testthat)
library(pogos)
test_check("pogos")
|
a75358b1c0b8415be5e7a5003adf1c676b528691
|
ab5089dfb654aa5dd230b93d46686506426c05af
|
/man/r2d3.Rd
|
a2a69267d24c1420da368699426bd1b115b78a2a
|
[
"BSD-3-Clause"
] |
permissive
|
rstudio/r2d3
|
a6c5cc3d4f5da06819c8ea41e4a559f8a60ec7b2
|
becfb81989c7fabfe79dee2dde999190025d4ba3
|
refs/heads/main
| 2023-08-21T20:27:09.447322
| 2021-11-18T21:31:50
| 2021-11-18T21:31:50
| 126,084,978
| 489
| 121
|
NOASSERTION
| 2022-02-25T17:45:41
| 2018-03-20T21:31:01
|
R
|
UTF-8
|
R
| false
| true
| 2,264
|
rd
|
r2d3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render.R
\name{r2d3}
\alias{r2d3}
\title{D3 visualization}
\usage{
r2d3(
data,
script,
css = "auto",
dependencies = NULL,
options = NULL,
d3_version = c("6", "5", "4", "3"),
container = "svg",
elementId = NULL,
width = NULL,
height = NULL,
sizing = default_sizing(),
viewer = c("internal", "external", "browser")
)
}
\arguments{
\item{data}{Data to be passed to D3 script.}
\item{script}{JavaScript file containing the D3 script.}
\item{css}{CSS file containing styles. The default value "auto" will use any CSS file
located alongside the script file with the same stem (e.g. "barplot.css" would be
used for "barplot.js") as well as any CSS file with the name "styles.css".}
\item{dependencies}{Additional HTML dependencies. These can take the form of paths to
JavaScript or CSS files, or alternatively can be fully specified dependencies created
with \link[htmltools:htmlDependency]{htmltools::htmlDependency}.}
\item{options}{Options to be passed to D3 script.}
\item{d3_version}{Major D3 version to use, the latest minor version is automatically
picked.}
\item{container}{The 'HTML' container of the D3 output.}
\item{elementId}{Use an explicit element ID for the widget (rather than an
automatically generated one). Useful if you have other JavaScript that needs to
explicitly discover and interact with a specific widget instance.}
\item{width}{Desired width for output widget.}
\item{height}{Desired height for output widget.}
\item{sizing}{Widget sizing policy (see \link[htmlwidgets:sizingPolicy]{htmlwidgets::sizingPolicy}).}
\item{viewer}{"internal" to use the RStudio internal viewer pane for output; "external"
to display in an external RStudio window; "browser" to display in an external
browser.}
}
\description{
Visualize data using a custom D3 visualization script
}
\details{
In order to scope CSS styles when multiple widgets are rendered, the Shadow DOM and
the webcomponents polyfill is used; this feature can be turned off by setting the
\code{r2d3.shadow} option to \code{FALSE}.
}
\examples{
library(r2d3)
r2d3(
data = c (0.3, 0.6, 0.8, 0.95, 0.40, 0.20),
script = system.file("examples/barchart.js", package = "r2d3")
)
}
|
c283c03168e766c3b7e9fe5ca9a8cc7edf0e0393
|
84b0b8e4ad2fd017ea7e14b5689fa32662140345
|
/example_installPackage.R
|
7734e2d551d1a6648bca5922237e9010cae45b77
|
[] |
no_license
|
gddickinson/R_code
|
23e2b0713f942370f7c7cfc32a88a75626302a93
|
8393a94ee37eacd013118743a70c7b506c66d235
|
refs/heads/master
| 2021-11-25T22:07:21.861994
| 2021-11-23T03:07:15
| 2021-11-23T03:07:15
| 56,209,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
example_installPackage.R
|
# Example script: install two CRAN packages and load them.
install.packages("rafalib")
library(rafalib)
install.packages("swirl")
library(swirl)
# Start the interactive swirl tutorial in the current R session.
swirl()
|
096e36e6fe7521fc0ab70e2388c8b2d20071ad79
|
6e96ceacd5a6d4f66fc982f512a527732d1d3f38
|
/R/DBI-object.R
|
1347364251e784652b97e1e1e608fcf6d709eec2
|
[] |
no_license
|
carlganz/pool
|
334ea11eee9043688cc6ec7e26d1dd88fdc38c28
|
3073b629ddd5cb35561134d34ffd5f57a2f314fe
|
refs/heads/master
| 2021-01-02T23:02:28.348880
| 2017-07-04T15:10:16
| 2017-07-04T15:10:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,101
|
r
|
DBI-object.R
|
#' @include DBI.R
NULL
#' DBIObject methods.
#'
#' Pool object wrappers around DBIObject methods. Each wrapper checks a
#' connection out of the pool, delegates to the corresponding DBI generic,
#' and guarantees the connection is returned via \code{on.exit()}. See
#' \code{\link[DBI]{dbDataType}}, \code{\link[DBI]{dbGetInfo}}
#' and \code{\link[DBI]{dbIsValid}} for the original
#' documentation.
#'
#' @name DBI-object
NULL
#' @param dbObj,obj,... See \code{\link[DBI]{dbDataType}}.
#' @export
#' @rdname DBI-object
setMethod("dbDataType", "Pool", function(dbObj, obj, ...) {
  # Borrow a connection only for the duration of this call; on.exit returns
  # it even if dbDataType() errors.
  connection <- poolCheckout(dbObj)
  on.exit(poolReturn(connection))
  DBI::dbDataType(connection, obj, ...)
})
#' @export
#' @rdname DBI-object
setMethod("dbGetInfo", "Pool", function(dbObj, ...) {
  # A connection is checked out only to report the class of pooled objects;
  # all other fields come straight from the pool's own state.
  pooledObj <- poolCheckout(dbObj)
  on.exit(poolReturn(pooledObj))
  list(class = is(dbObj),
       valid = dbObj$valid,
       minSize = dbObj$minSize,
       maxSize = dbObj$maxSize,
       idleTimeout = dbObj$idleTimeout,
       pooledObjectClass = is(pooledObj)[1],
       numberFreeObjects = dbObj$counters$free,
       numberTakenObjects = dbObj$counters$taken)
})
#' @export
#' @rdname DBI-object
setMethod("dbIsValid", "Pool", function(dbObj, obj, ...) {
  # Reports validity of the POOL itself; the `obj` argument (part of the DBI
  # generic's signature) is intentionally ignored here.
  dbObj$valid
})
|
cb18a2471c64cfcad869c2947fdbbd94b5eb6755
|
403f786c7c85fa551326d1e077bc895fea26e7c9
|
/man/py_eval.Rd
|
f52e49710c509dc9d221bb4d40b12ab1ab118d41
|
[
"Apache-2.0"
] |
permissive
|
rstudio/reticulate
|
81528f898d3a8938433d2d6723cedc22bab06ecb
|
083552cefe51fe61441679870349b6c757d6ab48
|
refs/heads/main
| 2023-08-22T01:41:52.850907
| 2023-08-21T16:19:42
| 2023-08-21T16:19:42
| 81,120,794
| 1,672
| 399
|
Apache-2.0
| 2023-09-13T20:35:47
| 2017-02-06T18:59:46
|
R
|
UTF-8
|
R
| false
| true
| 1,348
|
rd
|
py_eval.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/python.R
\name{py_eval}
\alias{py_eval}
\title{Evaluate a Python Expression}
\usage{
py_eval(code, convert = TRUE)
}
\arguments{
\item{code}{A single Python expression.}
\item{convert}{Boolean; automatically convert Python objects to R?}
}
\value{
The result produced by evaluating \code{code}, converted to an \code{R}
object when \code{convert} is set to \code{TRUE}.
}
\description{
Evaluate a single Python expression, in a way analogous to the Python
\code{eval()} built-in function.
}
\section{Caveats}{
\code{py_eval()} only supports evaluation of 'simple' Python expressions.
Other expressions (e.g. assignments) will fail; e.g.
\if{html}{\out{<div class="sourceCode">}}\preformatted{> py_eval("x = 1")
Error in py_eval_impl(code, convert) :
SyntaxError: invalid syntax (reticulate_eval, line 1)
}\if{html}{\out{</div>}}
and this mirrors what one would see in a regular Python interpreter:
\if{html}{\out{<div class="sourceCode">}}\preformatted{>>> eval("x = 1")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<string>", line 1
x = 1
^
SyntaxError: invalid syntax
}\if{html}{\out{</div>}}
The \code{\link[=py_run_string]{py_run_string()}} method can be used if the evaluation of arbitrary
Python code is required.
}
|
aaefcc7d04a65352a90d6cfa715ae8cb196cde2e
|
13be69f45af55cb89ea2605fc9d72d5eb1985c94
|
/R/util.r
|
54f695fe82fed7da7c1dc7c66af9afa4aa890aeb
|
[] |
no_license
|
AdamWongCH/corpustools
|
2e63ee8c650600f806d52fa4f62b51cdae44e226
|
971606c053aa852f9a41ae771f308d7d3b2e00d8
|
refs/heads/master
| 2020-08-29T03:22:17.642090
| 2017-05-22T06:50:13
| 2017-05-22T06:50:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,125
|
r
|
util.r
|
# Create a progress-counter closure for loops over `n` items.
#
# Each call to the returned zero-argument function increments an internal
# counter and, every `ticks` calls, emits an "i / n" progress message.
# The message goes through message() (stderr), so callers can silence it
# with suppressMessages().
#
# @param n     total number of items expected (shown as the denominator)
# @param i     starting value of the counter (default 0)
# @param ticks emit a progress message every `ticks` increments
# @return a function() that advances the counter and reports progress
verbose_counter <- function(n, i = 0, ticks = 10){
  function() {
    i <<- i + 1
    # Fix: the original used message(cat(...)), which printed via cat() to
    # stdout and then emitted a spurious empty message (cat() returns NULL).
    # Build the text directly in message() instead.
    if (i %% ticks == 0) message(i, " / ", n)
  }
}
# Create an accumulating progress counter for loops that process items in
# variable-sized batches.
#
# Each call to the returned function adds `add` to an internal running total
# and reports "total / n" via message() (stderr), so callers can silence it
# with suppressMessages().
#
# @param n total amount of work expected (shown as the denominator)
# @param i starting value of the accumulator (default 0)
# @return a function(add) that advances the accumulator and reports progress
verbose_sum_counter <- function(n, i = 0){
  function(add) {
    i <<- i + add
    # Fix: the original used message(cat(...)), which printed via cat() to
    # stdout and then emitted a spurious empty message (cat() returns NULL).
    message(i, " / ", n)
  }
}
# Build a "dummy" factor directly from integer codes, skipping the string
# matching that factor() performs.
#
# The input is coerced to integer and used verbatim as the factor's codes;
# the levels are simply "1".."k" where k is the number of distinct non-NA
# codes. An all-NA input yields a factor with zero levels. Note the result
# is only a conventional factor when the codes are consecutive from 1.
#
# @param x vector coercible to integer (codes are taken as-is)
# @return a factor whose codes are as.integer(x)
fast_dummy_factor <- function(x) {
  codes <- as.integer(x)
  n_levels <- length(unique(codes[!is.na(codes)]))
  lvls <- if (n_levels > 0) as.character(seq_len(n_levels)) else character()
  structure(codes, levels = lvls, class = "factor")
}
# Convert x to a factor using the package's compiled C routine, which avoids
# the overhead of base factor() for large character vectors.
#
# Behavior by input:
#   - non-factor with at least one non-NA value: dispatch to the compiled
#     'corpustools_fast_factor' routine with an (optionally empty) level set;
#   - all-NA non-factor: fall back to fast_dummy_factor() (zero-level factor);
#   - already a factor: optionally relabel via levels(x) <- levels.
#
# @param x vector to convert (or an existing factor)
# @param levels optional level set; for an existing factor these are applied
#   as new LABELS via levels<-, not used for matching -- NOTE(review):
#   confirm this relabeling (rather than re-matching) is the intended
#   semantics for pre-existing factors.
# @return a factor
fast_factor <- function(x, levels=NULL) {
  if (!methods::is(x, 'factor')) {
    if (!all(is.na(x))) {
      # Compiled path: the C routine receives character data plus the
      # (possibly empty, type-matched) level vector.
      if (is.null(levels)) levels = vector(class(x), 0)
      x = .Call('corpustools_fast_factor', PACKAGE = 'corpustools', as.character(x), levels)
    } else {
      # All values NA: no matching needed, but still return a factor for
      # consistency with the non-NA path.
      x = fast_dummy_factor(x)
    }
  } else {
    if (length(levels) > 0) levels(x) = levels
  }
  x
}
# Convert a single R color (name or "#RRGGBB" string) to an HSV color string
# with the given alpha.
#
# @param col an R color understood by grDevices::col2rgb(); only the first
#   color is used (col2rgb/rgb2hsv return one column per input color and
#   only the first column's h/s/v components are read here)
# @param alpha opacity in [0, 1] (default 1 = opaque)
# @return a "#RRGGBBAA" hex color string
col_to_hsv <- function(col, alpha = 1) {
  # Fix: the original hard-coded 'red' here instead of using `col`, so every
  # input was converted to red regardless of the argument.
  hsv_col <- grDevices::rgb2hsv(grDevices::col2rgb(col))
  grDevices::hsv(hsv_col[1], hsv_col[2], hsv_col[3], alpha = alpha)
}
|
02cc2d349b3ec7c3fcc74019a8468e59b19ee737
|
0f9fa909a1a2175302f2c8eb405482791145ee74
|
/man/export.Rd
|
a8206690759ce455dad28df98a1e0590db214a2f
|
[] |
no_license
|
jasenfinch/metaboMisc
|
21942aac4a41043b35bfe36cb26f6d79031fc9a6
|
36d6630c151e29fadb687a77f5b946c80293029c
|
refs/heads/master
| 2023-08-08T13:19:36.745124
| 2023-07-21T16:49:37
| 2023-07-21T16:49:37
| 144,775,967
| 0
| 0
| null | 2023-07-21T16:49:38
| 2018-08-14T21:56:31
|
R
|
UTF-8
|
R
| false
| true
| 5,819
|
rd
|
export.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export.R
\name{exportData}
\alias{exportData}
\alias{exportData,Binalysis-method}
\alias{exportData,MetaboProfile-method}
\alias{exportData,AnalysisData-method}
\alias{exportData,Analysis-method}
\alias{exportData,Assignment-method}
\alias{exportSampleInfo}
\alias{exportSampleInfo,Binalysis-method}
\alias{exportSampleInfo,MetaboProfile-method}
\alias{exportSampleInfo,AnalysisData-method}
\alias{exportSampleInfo,Analysis-method}
\alias{exportAccurateData}
\alias{exportAccurateData,Binalysis-method}
\alias{exportPeakInfo}
\alias{exportPeakInfo,MetaboProfile-method}
\alias{exportModellingMetrics}
\alias{exportModellingMetrics,Analysis-method}
\alias{exportModellingImportance}
\alias{exportModellingImportance,Analysis-method}
\alias{exportModelling}
\alias{exportModelling,Analysis-method}
\alias{exportCorrelations}
\alias{exportCorrelations,Analysis-method}
\alias{exportAssignments}
\alias{exportAssignments,Assignment-method}
\alias{exportSummarisedAssignments}
\alias{exportSummarisedAssignments,Assignment-method}
\alias{exportConstruction}
\alias{exportConstruction,Construction-method}
\alias{exportSummarisedConstruction}
\alias{exportSummarisedConstruction,Construction-method}
\alias{export}
\alias{export,Binalysis-method}
\alias{export,MetaboProfile-method}
\alias{export,AnalysisData-method}
\alias{export,Analysis-method}
\alias{export,Assignment-method}
\alias{export,Construction-method}
\title{Export results}
\usage{
exportData(x, outPath = ".", ...)
\S4method{exportData}{Binalysis}(x, outPath = ".")
\S4method{exportData}{MetaboProfile}(x, outPath = ".")
\S4method{exportData}{AnalysisData}(x, outPath = ".", idx = "name", prefix = "analysis")
\S4method{exportData}{Analysis}(x, outPath = ".", type = "raw", idx = "name")
\S4method{exportData}{Assignment}(x, outPath = ".")
exportSampleInfo(x, outPath = ".", ...)
\S4method{exportSampleInfo}{Binalysis}(x, outPath = ".")
\S4method{exportSampleInfo}{MetaboProfile}(x, outPath = ".")
\S4method{exportSampleInfo}{AnalysisData}(x, outPath = ".", prefix = "analysis")
\S4method{exportSampleInfo}{Analysis}(x, outPath = ".", type = "raw")
exportAccurateData(x, outPath = ".")
\S4method{exportAccurateData}{Binalysis}(x, outPath = ".")
exportPeakInfo(x, outPath = ".")
\S4method{exportPeakInfo}{MetaboProfile}(x, outPath = ".")
exportModellingMetrics(x, outPath = ".")
\S4method{exportModellingMetrics}{Analysis}(x, outPath = ".")
exportModellingImportance(x, outPath = ".")
\S4method{exportModellingImportance}{Analysis}(x, outPath = ".")
exportModelling(x, outPath = ".")
\S4method{exportModelling}{Analysis}(x, outPath = ".")
exportCorrelations(x, outPath = ".")
\S4method{exportCorrelations}{Analysis}(x, outPath = ".")
exportAssignments(x, outPath = ".")
\S4method{exportAssignments}{Assignment}(x, outPath = ".")
exportSummarisedAssignments(x, outPath = ".")
\S4method{exportSummarisedAssignments}{Assignment}(x, outPath = ".")
exportConstruction(x, outPath = ".")
\S4method{exportConstruction}{Construction}(x, outPath = ".")
exportSummarisedConstruction(x, outPath = ".")
\S4method{exportSummarisedConstruction}{Construction}(x, outPath = ".")
export(x, outPath = ".", ...)
\S4method{export}{Binalysis}(x, outPath = ".")
\S4method{export}{MetaboProfile}(x, outPath = ".")
\S4method{export}{AnalysisData}(x, outPath = ".", idx = "name", prefix = "analysis")
\S4method{export}{Analysis}(x, outPath = ".", type = "raw", idx = "name")
\S4method{export}{Assignment}(x, outPath = ".")
\S4method{export}{Construction}(x, outPath = ".")
}
\arguments{
\item{x}{S4 object of class \code{Binalysis}, \code{MetaboProfile}, \code{Analysis} or \code{Assignment},}
\item{outPath}{directory path to export to.}
\item{...}{arguments to pass to relevant method}
\item{idx}{sample information column name to use as sample IDs}
\item{prefix}{file name prefix description}
\item{type}{data type to extract. \code{raw} or \code{pre-treated}}
}
\value{
A character vector of exported file paths.
}
\description{
Export data tables from \code{Binalysis},\code{MetaboProfile}, \code{Analysis} and \code{Assignment} classes.
}
\examples{
## Retrieve file paths and sample information for example data
files <- metaboData::filePaths('FIE-HRMS','BdistachyonEcotypes')[1:2]
info <- metaboData::runinfo('FIE-HRMS','BdistachyonEcotypes')[1:2,]
## Perform spectral binning
analysis <- binneR::binneRlyse(files,
info,
parameters = binneR::detectParameters(files))
## Export spectrally binned data
export(analysis,outPath = tempdir())
## Perform data pre-treatment and modelling
p <- metabolyseR::analysisParameters(c('pre-treatment','modelling'))
metabolyseR::parameters(p,'pre-treatment') <- metabolyseR::preTreatmentParameters(
list(occupancyFilter = 'maximum',
transform = 'TICnorm')
)
metabolyseR::parameters(p,'modelling') <- metabolyseR::modellingParameters('anova')
metabolyseR::changeParameter(p,'cls') <- 'day'
analysis <- metabolyseR::metabolyse(metaboData::abr1$neg[,1:200],
metaboData::abr1$fact,
p)
## Export pre-treated data and modelling results
export(analysis,outPath = tempdir())
## Perform molecular formula assignment
future::plan(future::sequential)
p <- assignments::assignmentParameters('FIE-HRMS')
assignments <- assignments::assignMFs(assignments::feature_data,p)
## Export molecular formula assignment results
export(assignments,outPath = tempdir())
## Perform consensus structural classification
structural_classifications <- construction::construction(assignments)
## Export consensus structural classification results
export(structural_classifications,outPath = tempdir())
}
|
4d68648122c1f60a8307b380ab6b813d7b8002d8
|
e09d229dd1ad18879fb051e4cb7d97c1475f49aa
|
/man/trackr_timepoint.Rd
|
752c05b6def6a4ff4fdda576c97fb327a51ddc19
|
[
"MIT"
] |
permissive
|
hamishgibbs/rtrackr
|
15bc922c8f8dfb765ee5b5da80df66b84eb16b16
|
2a353b73f8507e96c71c32c1ea557cfc04f9c0b2
|
refs/heads/master
| 2022-11-11T17:35:52.513669
| 2020-06-20T12:19:33
| 2020-06-20T12:19:33
| 271,510,902
| 1
| 0
|
NOASSERTION
| 2020-06-12T14:45:06
| 2020-06-11T09:54:51
|
R
|
UTF-8
|
R
| false
| true
| 943
|
rd
|
trackr_timepoint.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trackr_timepoint.R
\name{trackr_timepoint}
\alias{trackr_timepoint}
\title{trackr_timepoint}
\usage{
trackr_timepoint(
dataframe,
trackr_dir = NULL,
timepoint_message = NULL,
log_data = TRUE,
suppress_success = FALSE
)
}
\arguments{
\item{dataframe}{A data.frame, the data to be logged.}
\item{trackr_dir}{A string, path to store trackr log files.}
\item{timepoint_message}{A string (optional), a message to identify the timepoint - similar to a git commit message.}
\item{log_data}{A boolean (optional), output a full dataset log with each trackr file. Default is "TRUE"}
\item{suppress_success}{A boolean (optional), suppress success messages. Default is "FALSE".}
}
\value{
A data.frame with an updated trackr_id column. Trackr log and data log files are written into the trackr_dir.
}
\description{
Log a timepoint in the data processing chain.
}
|
5fb360e3947ea0a5bc68d46ccbf232752a1984b7
|
8dd89b265cfbb974f40a4b0a5727fe8f0ecc8e5b
|
/man/download_healthdata_dailyrevision.Rd
|
3a95ff85f487a845294b994ac4d409cb719e37f4
|
[] |
no_license
|
reichlab/covidData
|
aa5fbadd032d1bb937011fac84cb53bb87660d7d
|
c306b3f6a3f5f37922723661101540065d7fd0c0
|
refs/heads/master
| 2023-08-19T07:25:44.765179
| 2023-08-19T00:24:22
| 2023-08-19T00:24:22
| 277,713,248
| 9
| 10
| null | 2022-11-09T20:27:43
| 2020-07-07T04:06:58
|
R
|
UTF-8
|
R
| false
| true
| 788
|
rd
|
download_healthdata_dailyrevision.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/healthdata_download.R
\name{download_healthdata_dailyrevision}
\alias{download_healthdata_dailyrevision}
\title{Download daily revision data at a specific issue date}
\usage{
download_healthdata_dailyrevision(issue_date, healthdata_dailyrevision_history)
}
\arguments{
\item{issue_date}{character issue date (i.e. report date) in format 'yyyy-mm-dd'}
\item{healthdata_dailyrevision_history}{a data.frame with hosp time series data update history}
}
\value{
data.frame with columns state, date,
previous_day_admission_adult_covid_confirmed,
previous_day_admission_pediatric_covid_confirmed,
and previous_day_admission_influenza_confirmed
}
\description{
Download daily revision data at a specific issue date
}
|
e2184f92117c51049abde38aae229d24490f75fd
|
0a23144af0f50b7039909476279c3e95aff27f32
|
/library/gpboost/function/interpret/gpb.plot.importance.R
|
9d3b72ed0f60e3900c18998402fd89e66005abdc
|
[] |
no_license
|
delta0726/r-hierarchical_model
|
5d9fed43df7a1e214c7c9277f13759620ae0d5fc
|
050d813fa1d1dc2a35929cd162c8da896280ca9e
|
refs/heads/master
| 2023-04-11T07:09:26.989051
| 2021-04-07T14:33:22
| 2021-04-07T14:33:22
| 349,565,116
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,123
|
r
|
gpb.plot.importance.R
|
# ***********************************************************************************************
# Function : gpb.plot.importance
# Objective : Plot computed feature importances as a bar chart
# Created by: Owner
# Created on: 2021/03/28
# URL : https://www.rdocumentation.org/packages/gpboost/versions/0.5.0/topics/gpb.plot.importance
# ***********************************************************************************************

# <Overview>
# - Plots computed feature importances (Gain / Cover / Frequency) as a bar chart

# <Syntax>
# gpb.plot.importance(tree_imp, top_n = 10L, measure = "Gain",
#                     left_margin = 10L, cex = NULL)

# <Arguments>
# - tree_imp    : data.table object returned by gpb.importance()
# - top_n       : number of top features to display
# - measure     : importance measure to plot (Gain / Cover / Frequency)
# - left_margin :
# - cex         :

# <Contents>
# 0 Setup
# 1 Inspect data
# 2 Build model
# 3 Variable importance analysis

# 0 Setup -------------------------------------------------------------------------

# Libraries
library(tidyverse)
library(gpboost)

# Load the example dataset shipped with gpboost
data(agaricus.train, package = "gpboost")

# 1 Inspect data -------------------------------------------------------------------

# Wrap the sparse feature matrix and labels in a gpb.Dataset
train <- agaricus.train
dtrain <- train$data %>% gpb.Dataset(label = train$label)

# 2 Build model -------------------------------------------------------------------

# Training parameters for a small binary-classification demo model
params <-
  list(objective = "binary",
       learning_rate = 0.1,
       min_data_in_leaf = 1L,
       min_sum_hessian_in_leaf = 1.0)

# Train for a handful of boosting rounds (enough to produce importances)
model <-
  gpb.train(params = params,
            data = dtrain,
            nrounds = 5L)

# 3 Variable importance analysis -----------------------------------------------------------------

# Compute per-feature importance as percentages
tree_imp <- model %>% gpb.importance(percentage = TRUE)

# Inspect the importance table and its class (a data.table)
tree_imp %>% print()
tree_imp %>% class()

# Plot the top-5 features by Gain
tree_imp %>%
  gpb.plot.importance(top_n = 5L, measure = "Gain")
|
0b1dda128b022922340ead5490f887db14d6953b
|
36c06c757ad713d2ae124d64c9372e7d6a5c4a42
|
/man/gradient.Rd
|
6a3b8079e83f00e402ca0a50ea5fdff64c4b169a
|
[] |
no_license
|
torekleppe/RAutoDiff
|
45044cef10a0fb953e4cfe86b70a7fc1f8d50728
|
28568fb1132cd7449d906adcd9fc1c9379a0c5b9
|
refs/heads/master
| 2023-04-15T13:36:35.871089
| 2021-04-23T06:44:54
| 2021-04-23T06:44:54
| 351,431,449
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 683
|
rd
|
gradient.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rad.R
\name{gradient}
\alias{gradient}
\title{Get gradient from scalar AD type}
\usage{
gradient(y)
}
\arguments{
\item{x}{An ADtype or AD_matrix type (overloaded also for numeric and matrix types)}
}
\value{
The gradient of the (scalar) AD type
}
\description{
Get gradient from scalar AD type
}
\examples{
# define some function
f <- function(x){
M <- matrix(x,2,2)
return(sum(solve(M,c(-1,2))))
}
# run function with regular variables
print(f(c(1,-1,2,4)))
# run function with second order AD-variables
x <- independent2(c(1,-1,2,4))
y <- f(x)
print(value(y))
print(gradient(y))
print(hessian(y))
}
|
19512efae792bfba62d7781f3258c7f94ef50287
|
9ad4b4acb8bd2b54fd7b82526df75c595bc614f7
|
/misc/PB T Cell Proportion.R
|
1c8536ad1bdc1b4dba42b880a5b0bb436fad569a
|
[] |
no_license
|
sylvia-science/Ghobrial_EloRD
|
f27d2ff20bb5bbb90aa6c3a1d789c625540fbc42
|
041da78479433ab73335b09ed69bfdf6982e7acc
|
refs/heads/master
| 2023-03-31T14:46:27.999296
| 2021-04-02T15:09:49
| 2021-04-02T15:09:49
| 301,811,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,285
|
r
|
PB T Cell Proportion.R
|
# Get mean num of PB T cells and mean proportion
# Remove junk cells and take only baseline PBMCs
data_harmony_run_label_remove = data_harmony_run_label[,data_harmony_run_label$`Sample Type` == 'PBMC']
data_harmony_run_label_remove = data_harmony_run_label_remove[,!(Idents(data_harmony_run_label_remove) %in% c('Remove', '13','30'))]
data_harmony_run_label_remove = data_harmony_run_label_remove[,data_harmony_run_label_remove$Treatment == 'baseline']
Idents(data_harmony_run_label_remove) = as.character(Idents(data_harmony_run_label_remove))
# Add CD8 T cells to CD4 T cells
idents = as.character(Idents(data_harmony_run_label_remove))
idents[idents == 'CD8+ T-cell'] = 'T-cell'
Idents(data_harmony_run_label_remove) = idents
#plot = DimPlot(data_harmony_run_label_remove,pt.size = 0.7, reduction = "umap",label = TRUE,label.size = 8)
#print(plot)
num_Cells = table(Idents(data_harmony_run_label_remove), data_harmony_run_label_remove$sample)
write.csv(as.data.frame.matrix(num_Cells), '/disk2/Projects/EloRD/Output/Harmony/AllSamples_PBMC/Batch_Sample_Kit/Cluster/PCA40/res3/Data/num_Cells.csv')
num_Cells
mean_num_T_Cell = mean(num_Cells['T-cell',])
proportions = num_Cells['T-cell',]/colSums(num_Cells)
colSums(num_Cells)
mean_proportion_T_Cell = mean(proportions)
|
c0d3bbadd7896ee86305dd5c8443faf666b25840
|
9ee9957c4aa96ec14f64009fa3cb81ce21739e9b
|
/R/functions/import.data.R
|
d945c9e367c497276187d7c0c66772fc6c089b4e
|
[] |
no_license
|
andrebrujah/ElsaPredictiveModeling
|
5461c24433ffaecfbd56a6d39c41776b9d2dd2e1
|
e1bb5fb9cc95959de372a9a0975832d09f961d77
|
refs/heads/master
| 2021-01-17T06:50:29.955937
| 2016-08-01T03:41:03
| 2016-08-01T03:41:03
| 50,808,885
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,496
|
r
|
import.data.R
|
############################################################################
####################### functions/import.data.R ############################
#
# Funções para importação dos dados. A definição de quais variáveis são
# categóricas ou numéricas é feita através de arquivos externos contendo os
# nomes das variáveis. O nome do arquivo é passado como parâmetro.
# Também é feita uma limpeza no conjunto de dados. Definições de variáveis a
# remover ou à selecionar, se for o caso, são feitas através de arquivos de
# texto externos, também.
#
############################################################################
## Import the data and perform selection/cleaning based on external files
## listing variable names. Definitions of which variables are categorical or
## numeric come from text files whose paths are passed as parameters, as do
## the lists of variables to remove.
##
## Args:
##   dataset.file: path to the '$'-separated raw data file.
##   save.output: if TRUE, save the cleaned dataset via save() to output.filename.
##   output.filename: destination used when save.output is TRUE.
##   verbose: NOTE(review) -- accepted but currently unused in this function.
##   remove.women.vars: if TRUE, drop all variables whose names contain "mula".
##   foldertoremove: folder of text files listing variables to remove; passed
##                   to remove_variables_in_folder() (defined elsewhere).
##   factorial.filename: text file listing categorical variable names.
##   orderedfactor.filename: text file listing ordered-categorical variable names.
##   numeric.filename: text file listing numeric variable names.
##
## Returns: a cleaned data.frame, with the class variable a_dm recoded
## (0 and 1 swapped so the positive class is coded 0).
import.data <- function(dataset.file = "data/dataset.csv", save.output = FALSE, output.filename = "output/dataset",
                        verbose = TRUE, remove.women.vars = TRUE, foldertoremove = "data/var/vars_to_remove/",
                        factorial.filename = "data/var/categoricas.txt", orderedfactor.filename = "data/var/categoricas_ordenadas.txt",
                        numeric.filename = "data/var/numericas.txt")
{
  # Codes treated as missing values in the raw file
  missing.codes <- c(" ","", ".D", ".PULO", "P", "B", "S", "Q", "R", "F", "N", "C", "A", "T", "I", "L", "D", "V", "U", "E");
  # missing.codes = c();
  dataset <- read.table(dataset.file, header=TRUE, sep = "$", strip.white = TRUE,
                        comment.char = "", quote = "", dec=".", allowEscapes = FALSE,
                        na.strings = missing.codes, fileEncoding = "windows-1252");
  # Lowercase variable names to simplify later lookups
  names(dataset) <- tolower(names(dataset));
  # Remove cases with no class information (a_dm is the target variable)
  rows_na <- which(is.na(dataset$a_dm));
  if (length(rows_na) > 0)
  {
    dataset <- dataset[-rows_na,];
  }
  # Swap values 0 and 1 of the class variable, because some algorithms assume
  # the positive class value is 0 (originally it is 1).
  # NOTE(review): if rows_zero is empty, dataset$a_dm[-rows_zero] selects
  # nothing, so the 1 -> 0 recode would silently not happen -- confirm the
  # data always contains both classes.
  rows_zero <- which(dataset$a_dm == 0);
  dataset$a_dm[rows_zero] <- 1;
  dataset$a_dm[-rows_zero] <- 0;
  ### Handling of women-specific variables
  if (remove.women.vars)
  {
    # Select variables related to women ("mula" prefix)
    mula.vars <- grep("mula", names(dataset));
    # For now remove these variables; later one could consider building
    # separate models for women and for men
    if (length(mula.vars) > 0)
    {
      dataset <- dataset[,-mula.vars];
    }
  }
  else
  {
    # One observation of variable mula5a (natural menopause) is filled in
    # incorrectly: it should be 1 (yes) or 0 (no), but was filled with the
    # age at which the event occurred.
    index.obs <- which(dataset$mula5a == 56);
    if (length(index.obs) > 0)
    {
      dataset$mula5a[index.obs] <- 1;
    }
  }
  if (length(foldertoremove) > 0)
  {
    # Drop variables listed in the removal folder (helper defined elsewhere)
    dataset <- remove_variables_in_folder(foldertoremove, dataset);
  }
  # Define categorical / numeric / text variables
  ### CATEGORICAL
  factorial.nomes <- read.table(factorial.filename);
  factorial.indices <- which(names(dataset) %in% factorial.nomes$V1);
  # Convert the variables that should be categorical to factor
  for (ind in factorial.indices)
  {
    # Replace "M" codes with the negative value; for now assume the negative
    # value for all variables is 0
    rows.m <- which(dataset[,ind] == "M");
    if (length(rows.m) > 0)
    {
      dataset[,ind] <- as.character(dataset[,ind]);
      dataset[rows.m, ind] <- 0;
    }
    dataset[,ind] <- as.factor(dataset[,ind]);
  }
  # ORDERED FACTOR
  if (file.exists(orderedfactor.filename))
  {
    orderedfactorial.nomes <- read.table(orderedfactor.filename);
    orderedfactorial.indices <- which(names(dataset) %in% orderedfactorial.nomes$V1);
    for (ind in orderedfactorial.indices)
    {
      dataset[,ind] <- as.ordered(dataset[,ind]);
    }
  }
  ### NUMERIC
  if (file.exists(numeric.filename))
  {
    numericas.nomes <- read.table(numeric.filename);
    numericas.indices <- which(names(dataset) %in% numericas.nomes$V1);
    for (ind in numericas.indices)
    {
      dataset[,ind] <- as.numeric(dataset[,ind]);
    }
  }
  ## Check whether any variable appears in both sets.
  ## NOTE(review): numericas.indices only exists if numeric.filename exists;
  ## this line errors otherwise -- verify the file is always present.
  categoricas_em_numericas <- factorial.indices[which(factorial.indices %in% numericas.indices)];
  if (length(categoricas_em_numericas) > 0)
  {
    var_names <- names(dataset[,categoricas_em_numericas]);
    cat("\n\nERRO: Variaveis categoricas e numericas ao mesmo tempo: \n");
    print(var_names);
  }
  if (save.output)
  {
    save(dataset, file=output.filename);
  }
  return(dataset);
}
## Subset a dataset to the variables listed in a text file (one name per
## line), always keeping the class column a_dm.
##
## Args:
##   dataset: data.frame to subset; expected to contain an "a_dm" column.
##   var.filename: path to a text file whose first column lists variable names.
##   verbose: if TRUE, report variables that were not found and whether the
##            dataset was left unmodified.
##
## Returns: the subset data.frame (columns found in the file, plus a_dm), or
## the original dataset unchanged when none of the listed variables exist.
subset.var.from.file <- function(dataset, var.filename = "AnaliseVariaveis/fatores_literatura/subconjuntos/sem_laboratoriais.txt", verbose = TRUE)
{
  var.names <- read.table(var.filename);
  # Names are stored lowercase throughout the project
  var.names <- tolower(var.names$V1);
  base.indices <- which(names(dataset) %in% var.names);
  # BUG FIX: the original computed var.names[-which(var.names %in% names(dataset))],
  # which returns an EMPTY vector when no variable matches (which() yields
  # integer(0) and x[integer(0)] selects nothing), so the "not found" report
  # was wrong precisely when everything was missing. setdiff() covers all cases.
  var_not_found <- setdiff(var.names, names(dataset));
  if (length(var_not_found) > 0 && verbose)
  {
    cat("\nAs seguintes variaveis nao foram encontradas: \n");
    print(var_not_found);
  }
  ind_class <- which(names(dataset) == "a_dm")
  if (length(base.indices) > 0)
  {
    dataset <- dataset[,c(base.indices, ind_class)];
  }
  else
  {
    if (verbose) cat("\nConjunto de dados nao modificado pois nenhuma variavel foi encontrada\n");
  }
  return(dataset)
}
|
86203bf603dec0c2ee1e0ab24cf20c9667257bac
|
c28e41f60c74442d9fd22c5067f427b6f3828f14
|
/funs/fun.grep.R
|
31fa36f234db2175198c4d34e7e81de79d9b6cfc
|
[
"MIT"
] |
permissive
|
elifesciences-publications/CNApp
|
e832496b9bb2653581a9bd3e78fc25e215618afb
|
2974581a508b2db5bae35dc76ba275ba14e570c6
|
refs/heads/master
| 2020-12-19T04:24:48.985593
| 2020-01-17T20:39:51
| 2020-01-17T20:39:51
| 235,619,834
| 0
| 0
| null | 2020-01-22T16:53:36
| 2020-01-22T16:53:35
| null |
UTF-8
|
R
| false
| false
| 330
|
r
|
fun.grep.R
|
fun_grep <- function(x, z){
  # Count, for each term in z, how many elements of x equal it exactly.
  # HINT: use it as the function in an 'apply' call over a matrix.
  #
  # Args:
  #   x: a vector (e.g. a matrix row/column supplied by apply()).
  #   z: a vector of terms to be counted (the original comment called this
  #      'y' by mistake).
  #
  # Returns: a named numeric vector of counts, one entry per term in z.
  #
  # Fixes vs. original: iterate with vapply instead of 1:length(z), which
  # mis-iterated over c(1, 0) and grew the result vector when z was empty.
  ans <- vapply(z, function(term) length(which(x == term)), numeric(1),
                USE.NAMES = FALSE)
  names(ans) <- z
  ans
}
|
efaa51b88986825897c2609196847f7759bdf5f4
|
84af651242bc11422fed9a5b0e064f2bc4f8a4f5
|
/man/klmer.Rd
|
8ccb91fb023d509e211f4d35a275ad8004eccb74
|
[] |
no_license
|
YangLeeeee/RSPPlme4
|
f923061f8b9961c21caf36b0341223337c988c0c
|
810ac6fe01360d8715e1fc77edff1d711757bab5
|
refs/heads/master
| 2022-06-18T04:27:58.289732
| 2018-03-15T01:06:35
| 2018-03-15T01:06:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,302
|
rd
|
klmer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/klmer.R
\name{klmer}
\alias{klmer}
\title{function to fit mixed effects model to k functions at a set distance}
\usage{
klmer(formula, k, data, weights, na.action = "na.omit", ...)
}
\arguments{
\item{formula}{A one-sided formula describing the model to be fit, following
the lme4 syntax. See \code{\link[lme4]{lmer}}}
\item{k}{A list of Kfunctions.}
\item{data}{The covariates.}
\item{weights}{The weights vector, probably generated by KfuncWeightsCalc.}
\item{na.action}{How to deal with missing values.
See \code{\link[stats]{na.action}}}
\item{...}{Other arguments to \code{\link[lme4]{lmer}}}
}
\value{
An object of class kfunclmer.
}
\description{
function to fit mixed effects model to k functions at a set distance
}
\examples{
library(spatstat)
pppx <- replicate(50, rpoispp(runif(1, 10, 100)), simplify=FALSE)
r <- seq(0, 0.25, 0.05)
k <- lapply(pppx, Kest, r=r, correction='border')
k <- sapply(k, function(x) x[['border']][2])
weights <- lapply(pppx, kfuncWeightsCalc, r=r,
correction="border", type="nx_A")
weights <- sapply(weights, function(x) x[[2]])
dat <- data.frame(x=runif(50, 0, 10), gr = sample(1:10, 50, replace=TRUE))
formula = ~ x + (1|gr)
klmer(~x + (1|gr), k=k, data=dat, weights=weights)
}
|
f1dea7d04ac1a9da73469a35e6e1d516b7aa89d6
|
aeb8ac419da1d200d4f9a34aec30a895a98ee973
|
/03_R_Codes/08_NLP/02_Naive_Baye's_Classification/01_NaiveBayes.R
|
600131905157345e503dd7047b832cd40ae33459
|
[] |
no_license
|
wenki1990/Data_Science_1
|
61327bd26396075b6170d31ca241f52ae689761b
|
96396cfcb5f7ef124952323a6e5f1798b93c4d06
|
refs/heads/master
| 2021-06-12T07:59:39.396661
| 2021-03-12T14:14:36
| 2021-03-12T14:14:36
| 153,059,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,384
|
r
|
01_NaiveBayes.R
|
# Naive Bayes SMS spam classifier (tm + e1071).
# Workflow: load labelled SMS data -> clean the text corpus -> wordcloud EDA
# -> build document-term matrices over selected vocabularies -> train and
# evaluate three Naive Bayes variants.
# NOTE(review): hard-coded Windows working directory; adjust before running
# on another machine.
setwd('F:\\Library\\Analytics Path\\02-R\\02-R DataSets\\Sentiment Analysis and Navie Baye')
sms_raw<-read.csv('sms_spam.csv')
head(sms_raw)
View(sms_raw)
# Target label (ham/spam) as a factor
sms_raw$type<-as.factor(sms_raw$type)
str(sms_raw)
library(tm)
library(NLP)
# Build a corpus from the message text and normalize it:
# lowercase, strip punctuation/numbers/stopwords, collapse whitespace
sms_corpus<-Corpus(VectorSource(sms_raw$text))
clean_corpus<-tm_map(sms_corpus,tolower)
clean_corpus<-tm_map(clean_corpus,removePunctuation)
clean_corpus<-tm_map(clean_corpus,removeNumbers)
clean_corpus<-tm_map(clean_corpus,removeWords,stopwords('english'))
clean_corpus<-tm_map(clean_corpus,stripWhitespace)
inspect(clean_corpus[1:5])
sms_dtm<-DocumentTermMatrix(clean_corpus)
# Train/test split on fixed row ranges (rows 1-4169 train, rest test);
# NOTE(review): assumes the file rows are not ordered by class -- confirm.
sms_raw_train<-sms_raw[1:4169,]
sms_raw_test<-sms_raw[4170:5559,]
sms_dtm_train<-sms_dtm[1:4169,]
sms_dtm_test<-sms_dtm[4170:5559,]
sms_corpus_train<-clean_corpus[1:4169]
sms_corpus_test<-clean_corpus[4170:5559]
#EDA
library(wordcloud)
wordcloud(clean_corpus,min.freq = 30,max.words = 50,colors = brewer.pal(8,'Dark2'))
#finding most differentiating terms in spam and ham
spam<-sms_raw[sms_raw$type=='spam',]
ham<-sms_raw[sms_raw$type=='ham',]
wordcloud(spam$text,max.words = 50,colors = brewer.pal(8,'Dark2'))
#most freq words in spam->free,prize,mobile,claim,text,reply,stop,
#text,txt,won,cash,win,send
wordcloud(ham$text,max.words = 50,colors = brewer.pal(8,'Dark2'))
#most freq words in ham->can,will,come,know,like,good,time,dont,ill,love,sorry,
#just,going,home,one,want,need
#Developing Document term matrix only with most differentiating words in
# spam and ham (vocabulary hand-picked from the wordclouds above)
myTerms<-c('free','prize','mobile','claim','text','can','will','come','know','like',
           'good','time','love')
sms_train<-DocumentTermMatrix(sms_corpus_train,list(dictionary = myTerms))
sms_test<-DocumentTermMatrix(sms_corpus_test,list(dictionary = myTerms))
class(sms_train)
# Convert raw term counts to presence/absence factors (NO/YES), which is the
# representation e1071::naiveBayes expects for categorical predictors
convert_counts<-function(x){
  x<-ifelse(x>0,1,0)
  x<-factor(x,levels = c(0,1),labels = c('NO','YES'))
}
sms_train<-apply(sms_train,2,convert_counts)
sms_test<-apply(sms_test,2,convert_counts)
class(sms_test)#matrix
library(e1071)
# Model 1: small hand-picked vocabulary
sms_classifier<-naiveBayes(sms_train,sms_raw_train$type)#matrix is coerced into
#df
sms_test_pred<-predict(sms_classifier,sms_test)
table(sms_raw_test$type,sms_test_pred)
## Building DocumentTermMatrix with exhaustive terms
# Model 2: extended hand-picked vocabulary
myTerms2 = c(myTerms,'reply','stop','txt','won','cash','win','send','sorry',
             'just','going','home','one','want','need')
myTerms2
sms_train2<-DocumentTermMatrix(sms_corpus_train,list(dictionary=myTerms2))
sms_test2<-DocumentTermMatrix(sms_corpus_test,list(dictionary = myTerms2))
convert_counts<-function(x){
  x<-ifelse(x>0,1,0)
  x<-factor(x,levels = c(0,1),labels = c('NO','YES'))
}
sms_train2<-apply(sms_train2,2,convert_counts)
sms_test2<-apply(sms_test2,2,convert_counts)
sms_classifier2<-naiveBayes(sms_train2,sms_raw_train$type)
sms_test_pred2<-predict(sms_classifier2,sms_test2)
table(sms_raw_test$type,sms_test_pred2)
# Full-vocabulary matrices (built but see note below)
sms_train3<-DocumentTermMatrix(sms_corpus_train)
sms_test3<-DocumentTermMatrix(sms_corpus_test)
sms_train3<-apply(sms_dtm_train,2,convert_counts)
sms_test3<-apply(sms_dtm_test,2,convert_counts)
sms_test3[1:5,1:5]
# Model 3: Laplace smoothing.
# NOTE(review): this still trains/predicts on sms_train2/sms_test2, not on
# the full-vocabulary sms_train3/sms_test3 built just above -- confirm intent.
sms_classifier3<-naiveBayes(sms_train2,sms_raw_train$type,laplace = 50)
sms_test_pred3<-predict(sms_classifier3,sms_test2)
table(sms_raw_test$type,sms_test_pred3)
|
3d7856a93082a26510f59114adf6adcc1dfbb7bd
|
8faa2869f1496461af2180dfb496e0887fa7f722
|
/man/selectScenes.Rd
|
ecfa3077daf555e3c89a0d6e289cd7de3cb5bc07
|
[] |
no_license
|
yangxhcaf/timeSyncR
|
ea1cd34c48fe4bf32709215ae3e14e883b6fd945
|
14bfc1e3aec929a0b29e2fc102f29fe75d926c8f
|
refs/heads/master
| 2020-07-01T15:24:46.632996
| 2017-10-25T19:56:25
| 2017-10-25T19:56:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,001
|
rd
|
selectScenes.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/selectScenes.R
\name{selectScenes}
\alias{selectScenes}
\title{Select scenes from directory}
\usage{
selectScenes(x, targ, padding = NULL, verbose = TRUE)
}
\arguments{
\item{x}{The object whose extent will be checked. It can be a raster, spatial object, or an extent object}
\item{targ}{Either a character vector of image filenames or an object of type list containing images to be tested}
\item{padding}{Numeric. Additional area surrounding the extent to be included. Units depend on the projection of x}
\item{verbose}{Logical. Send status reports to the console?}
}
\value{
Either a character vector of filenames with overlapping extents if \code{is.character(targ)}, or a list of raster objects whose extents overlap with \code{x} if \code{targ} is a spatial object.
}
\description{
Select scenes (filenames) from a directory with extents overlapping that of a given spatial object
}
\author{
Ben DeVries
}
|
8600842aa91626b6932c274eb476851748a569f9
|
78b8fee81df8d494a8e890837ff81afa4552ac8b
|
/ensemble/man/optParams.Rd
|
1aaf561a14786db54bdbe67de8af646c5a303d2a
|
[] |
no_license
|
joshbrowning2358/Ensemble_Building_Code
|
0dffef930d68146561e3e37e3833d5d3985f61a8
|
7b7d536f64081737717c05b274d8f5e915a67149
|
refs/heads/master
| 2023-02-17T18:32:22.888144
| 2014-09-19T15:18:44
| 2014-09-19T15:18:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,128
|
rd
|
optParams.Rd
|
\name{optParams}
\alias{optParams}
\title{
Optimize model parameters
}
\description{
This function is designed to optimize the tuning parameters to a particular data mining model by building many models. Note that it may be extremely slow, but should give good estimates for the optimal tuning parameters (by trying many combinations).
}
\usage{
optParams(func, form = NULL, data = NULL, x = NULL, y = NULL, nTrain = c(100, 1000, 10000), nValid = nTrain, replications = rep(30, length(nTrain)), optFunc = function(pred, actual) {
mean((pred - actual)^2)
}, optArgs = list(), optVals = rep(5, length(optArgs)), optRed = rep(0.7, length(optArgs)), predFunc = predict, constArgs = list(), coldStart = 10, seed = 321)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{func}{
The data mining function to be optimized.
}
\item{form}{
Either form and d OR x and y should be supplied. form supplies the formula to be used for fitting the model.
}
\item{data}{
Either form and d OR x and y should be supplied. d is a dataframe to be used for fitting the model.
}
\item{x}{
Either form and d OR x and y should be supplied. x is a matrix, dataframe, or numeric vector containing the independent variables for fitting.
}
\item{y}{
Either form and d OR x and y should be supplied. y is a numeric vector containing the dependent variable for fitting.
}
\item{nTrain}{
The number of observations to be randomly sampled at each iteration (to build training models).
}
\item{nValid}{
The number of observations to be randomly sampled at each iteration (to measure error of the trained models).
}
\item{replications}{
Specifies how many iterations should be performed at each optimization step. Typically 30 should be a good amount to ensure a good optimum is found, but decreasing this can help improve computation time.
}
\item{optFunc}{
A function accepting two arguments: pred and actual. From these two numeric vectors, the optFunc should provide a performance measure to be minimized.
}
\item{optArgs}{
%% ~~Describe \code{optArgs} here~~
}
\item{optVals}{
%% ~~Describe \code{optVals} here~~
}
\item{optRed}{
%% ~~Describe \code{optRed} here~~
}
\item{predFunc}{
%% ~~Describe \code{predFunc} here~~
}
\item{constArgs}{
%% ~~Describe \code{constArgs} here~~
}
\item{coldStart}{
%% ~~Describe \code{coldStart} here~~
}
\item{seed}{
%% ~~Describe \code{seed} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
e7ae58fc822e225af0864f3019029aabe86b89f7
|
759392dff9b5f70c9424d87f6552f52a5a122534
|
/man/RToCausataNames.Rd
|
577dadcba781069343d03497e46d8b1f392a2866
|
[] |
no_license
|
meantrix/Causata
|
fd88ffa3e6d6a8f6b54f9be4758e1243a9434027
|
62271eb1445a6b92efcc5d151a762cddc2690e06
|
refs/heads/master
| 2022-02-24T07:04:06.178043
| 2013-07-18T00:00:00
| 2013-07-18T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,662
|
rd
|
RToCausataNames.Rd
|
\name{RToCausataNames}
\alias{RToCausataNames}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Converts R-friendly causata column names to the corresponding Causata system name
}
\description{
Converts R-friendly causata column names to the corresponding Causata system name
}
\usage{
RToCausataNames(name.vector)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{name.vector}{
A character vector of column names.
}
}
\details{
Causata variables follow two naming conventions. The first is found in data exported from within Causata using the
"R Formated CSV" option:
\code{variable.name_Time.Domain_id} where id is a number, e.g. \code{variable.name_Time.Domain_123}
The second convention is found in data exported from the SQL interface:
\code{variable.name__Time.Domain}
Example conversions:
\code{variable.name__Time.Domain} becomes \code{variable-name$Time Domain}
\code{variable.name_Time.Domain_123} is unchanged.
Variables that do not conform to these conventions will be mapped to \code{"No Causata Name"} and a warning will be triggered.
}
\value{
An character vector of mapped variable names.
}
\author{
David Barker <support@causata.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{CausataData}}, \code{\link{CausataVariable}}, \code{\link{CausataToRNames}}.
}
\examples{
RToCausataNames(c("variable.name__Time.Domain", "variable.name_Time.Domain_123"))
RToCausataNames("bad-name-doesn't fit convention")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
% __ONLY ONE__ keyword per line
|
414d4bb98fd24c58bf17a06257edb0d019c146fb
|
db377b98ae482c97a225d8532ffedff88010aabb
|
/man/makeSMOTEWrapper.Rd
|
66a407892b9573a91d12c1bdfb4f3c406bba54f4
|
[
"BSD-2-Clause"
] |
permissive
|
JiaHaobo/mlr
|
d0a568480d6495c506c2dc72bd89618281fed3ce
|
17d7eac68433b5e37bc4c118d1a9056c5e4cc497
|
refs/heads/master
| 2021-01-19T11:20:27.365236
| 2017-04-11T15:27:00
| 2017-04-11T15:27:00
| 87,954,613
| 1
| 0
| null | 2017-04-11T16:10:10
| 2017-04-11T16:10:10
| null |
UTF-8
|
R
| false
| true
| 2,777
|
rd
|
makeSMOTEWrapper.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SMOTEWrapper.R
\name{makeSMOTEWrapper}
\alias{makeSMOTEWrapper}
\title{Fuse learner with SMOTE oversampling for imbalancy correction in binary classification.}
\usage{
makeSMOTEWrapper(learner, sw.rate = 1, sw.nn = 5L, sw.standardize = TRUE,
sw.alt.logic = FALSE)
}
\arguments{
\item{learner}{[\code{\link{Learner}} | \code{character(1)}]\cr
The learner.
If you pass a string the learner will be created via \code{\link{makeLearner}}.}
\item{sw.rate}{[\code{numeric(1)}]\cr
Factor to oversample the smaller class. Must be between 1 and \code{Inf},
where 1 means no oversampling and 2 would mean doubling the class size.
Default is 1.}
\item{sw.nn}{[\code{integer(1)}]\cr
Number of nearest neighbors to consider.
Default is 5.}
\item{sw.standardize}{[\code{logical(1)}]\cr
Standardize input variables before calculating the nearest neighbors
for data sets with numeric input variables only. For mixed variables
(numeric and factor) the gower distance is used and variables are
standardized anyway.
Default is \code{TRUE}.}
\item{sw.alt.logic}{[\code{logical(1)}]\cr
Use an alternative logic for selection of minority class observations.
Instead of sampling a minority class element AND one of its nearest
neighbors, each minority class element is taken multiple times (depending
on rate) for the interpolation and only the corresponding nearest neighbor
is sampled.
Default is \code{FALSE}.}
}
\value{
[\code{\link{Learner}}].
}
\description{
Creates a learner object, which can be
used like any other learner object.
Internally uses \code{\link{smote}} before every model fit.
Note that observation weights do not influence the sampling and are simply passed
down to the next learner.
}
\seealso{
Other wrapper: \code{\link{makeBaggingWrapper}},
\code{\link{makeClassificationViaRegressionWrapper}},
\code{\link{makeConstantClassWrapper}},
\code{\link{makeCostSensClassifWrapper}},
\code{\link{makeCostSensRegrWrapper}},
\code{\link{makeDownsampleWrapper}},
\code{\link{makeDummyFeaturesWrapper}},
\code{\link{makeFeatSelWrapper}},
\code{\link{makeFilterWrapper}},
\code{\link{makeImputeWrapper}},
\code{\link{makeMulticlassWrapper}},
\code{\link{makeMultilabelBinaryRelevanceWrapper}},
\code{\link{makeMultilabelClassifierChainsWrapper}},
\code{\link{makeMultilabelDBRWrapper}},
\code{\link{makeMultilabelNestedStackingWrapper}},
\code{\link{makeMultilabelStackingWrapper}},
\code{\link{makeOverBaggingWrapper}},
\code{\link{makePreprocWrapperCaret}},
\code{\link{makePreprocWrapper}},
\code{\link{makeRemoveConstantFeaturesWrapper}},
\code{\link{makeTuneWrapper}},
\code{\link{makeUndersampleWrapper}},
\code{\link{makeWeightedClassesWrapper}}
}
|
58cfad6fe37c0791446e98ef4de7c96dc43b9760
|
d0981a02d8ae7974f0f6013fc08c6e74445969d4
|
/lang/R/R/Interface.R
|
2b88e3801bd90e3bb23b7d5f2fd7c56a63774587
|
[
"CC-BY-4.0"
] |
permissive
|
airr-community/airr-standards
|
7c115e1c9ea926d5bdb8389bcf1e7f4a10632817
|
a98d307a190fc03143fbf2d3d20966d647da28f8
|
refs/heads/master
| 2023-09-01T08:58:22.038847
| 2023-08-29T20:03:47
| 2023-08-29T20:03:47
| 100,383,740
| 37
| 23
|
CC-BY-4.0
| 2023-08-28T16:30:49
| 2017-08-15T14:06:27
|
Python
|
UTF-8
|
R
| false
| false
| 26,067
|
r
|
Interface.R
|
#### Read TSV ####
#' Read AIRR tabular data
#'
#' \code{read_tabular} reads a tab-delimited (TSV) file containing tabular AIRR records.
#'
#' @param file input file path.
#' @param base starting index for positional fields in the input file.
#' If \code{"1"}, then these fields will not be modified.
#' If \code{"0"}, then fields ending in \code{"_start"} and \code{"_end"}
#' are 0-based half-open intervals (python style) in the input file
#' and will be converted to 1-based closed-intervals (R style).
#' @param schema \code{Schema} object defining the output format.
#' @param aux_types named vector or list giving the type for fields that are not
#' defined in \code{schema}. The field name is the name, the value
#' the type, denoted by one of \code{"c"} (character), \code{"l"} (logical),
#' \code{"i"} (integer), \code{"d"} (double), or \code{"n"} (numeric).
#' @param ... additional arguments to pass to \link[readr]{read_delim}.
#'
#' @return A \code{data.frame} of the TSV file with appropriate type and position conversion
#' for fields defined in the specification.
#'
#' @seealso
#' See \link{Schema} for the AIRR schema object definition.
#' See \link{write_tabular} for writing AIRR data.
#'
#' @examples
#' # Get path to the rearrangement-example file
#' file <- system.file("extdata", "rearrangement-example.tsv.gz", package="airr")
#'
#' # Load data file
#' df <- read_rearrangement(file)
#'
#' @export
read_tabular <- function(file, schema, base=c("1", "0"), aux_types=NULL,...) {
    # Check arguments
    base <- match.arg(base)
    # Map schema type names to readr's single-letter column parser codes
    parsers <- c("character"="c", "logical"="l", "integer"="i", "double"="d", "numeric"="n")
    # Read just the header row to learn which columns are present in the file
    header <- names(suppressMessages(readr::read_tsv(file, n_max=1)))
    schema_fields <- intersect(names(schema), header)
    # Schema-defined columns get their declared type; everything else
    # defaults to character
    cast <- setNames(lapply(schema_fields, function(f) parsers[schema[f]$type]), schema_fields)
    cast <- c(cast, list(.default = col_character()))
    if(!is.null(aux_types)){
        # Caller-supplied types for columns not covered by the schema;
        # silently ignore entries for columns absent from the file
        aux_types <- aux_types[names(aux_types) %in% header]
        aux_cols <- setNames(lapply(aux_types, function(f) parsers[f]), names(aux_types))
        cast <- c(cast, aux_cols)
    }
    types <- do.call(readr::cols, cast)
    # Read file; "", "NA" and "None" are all treated as missing
    data <- suppressMessages(readr::read_tsv(file, col_types=types, na=c("", "NA", "None"), ...))
    # Validate file (schema violations surface as warnings; result unused)
    valid_data <- validate_tabular(data, schema=schema)
    # Adjust indexes: a 0-based half-open interval [s, e) becomes the 1-based
    # closed interval [s+1, e], so only the *_start columns are shifted
    if (base == "0") {
        start_positions <- grep("_start$", names(data), perl=TRUE)
        if (length(start_positions) > 0) {
            data[, start_positions] <- data[, start_positions] + 1
        }
    }
    return(data)
}
#' @details
#' \code{read_rearrangement} reads an AIRR TSV containing Rearrangement data.
#'
#' @rdname read_tabular
#' @export
read_rearrangement <- function(file, base=c("1", "0"), ...) {
    # Thin convenience wrapper: read_tabular fixed to the Rearrangement schema.
    read_tabular(file, schema=RearrangementSchema, base=base, ...)
}
#' @details
#' \code{read_alignment} reads an AIRR TSV containing Alignment data.
#'
#' @rdname read_tabular
#' @export
read_alignment <- function(file, base=c("1", "0"), ...) {
    # Deprecated entry point: emit a deprecation message, then delegate to
    # the generic reader with the Alignment schema.
    msg <- paste("read_alignment is deprecated and will be removed in a future release.",
                 "Use read_tabular with the argument schema=AlignmentSchema instead.",
                 "See help(\"Deprecated\")",
                 sep="\n")
    .Deprecated(msg=msg)
    read_tabular(file, base=base, schema=AlignmentSchema, ...)
}
#### Read YAML/JSON ####
#' Read an AIRR Data Model file in YAML or JSON format
#'
#' \code{read_airr} loads a YAML or JSON file containing AIRR Data Model records.
#'
#' @param file path to the input file.
#' @param format format of the input file. Must be one of \code{"auto"}, \code{"yaml"}, or
#' \code{"json"}. If \code{"auto"} (default), the format will be
#' detected from the \code{file} extension.
#' @param validate run schema validation if \code{TRUE}.
#' @param model if \code{TRUE} validate only AIRR DataFile defined objects. If \code{FALSE}
#' attempt validation of all objects in \code{data}.
#' Ignored if \code{validate=FALSE}
#'
#' @return A named nested \code{list} contained in the AIRR Data Model with the top-level
#' names reflecting the individual AIRR objects.
#'
#' @seealso
#' See \link{Schema} for the AIRR schema definition objects.
#' See \link{write_airr} for writing AIRR Data Model records in YAML or JSON format.
#'
#' @examples
#' # Get path to the Reportoire and GermlineSet example files
#' f1 <- system.file("extdata", "repertoire-example.yaml", package="airr")
#' f2 <- system.file("extdata", "germline-example.json", package="airr")
#'
#' # Load data files
#' repertoire <- read_airr(f1)
#' germline <- read_airr(f2)
#'
#' @export
read_airr <- function(file, format=c("auto", "yaml", "json"), validate=TRUE, model=TRUE) {
    # Resolve the requested format; "auto" falls back to the file extension
    format <- match.arg(format)
    if (format == "auto") {
        format <- tolower(tools::file_ext(file))
    }
    # Dispatch to the format-specific reader
    records <- switch(format,
                      yaml = read_airr_yaml(file, validate=validate, model=model),
                      json = read_airr_json(file, validate=validate, model=model),
                      stop("Unrecognized file extension ", format, "; must be either .json or .yaml."))
    # Return
    return(records)
}
# Read an AIRR YAML file
#
# \code{read_airr_yaml} loads a YAML file containing AIRR Data Model records.
#
# @param file path to the YAML input file.
# @param validate run schema validation if \code{TRUE}.
# @param model if \code{TRUE} validate only AIRR DataFile defined objects. If \code{FALSE}
# attempt validation of all objects in \code{data}.
# Ignored if \code{validate=FALSE}
#
# @return A named nested \code{list} contained in the AIRR Data Model with the top-level
# names reflecting the individual AIRR objects.
#
# @seealso
# See \link{Schema} for the AIRR schema definition objects.
# See \link{write_airr_yaml} for writing AIRR data in YAML format.
#
# @examples
# # Get path to the repertoire-example file
# file <- system.file("extdata", "repertoire-example.yaml", package="airr")
#
# # Load data file
# repr <- read_airr_yaml(file)
read_airr_yaml <- function(file, validate=TRUE, model=TRUE) {
    # Parse the YAML file into a named nested list (the AIRR Data Model shape)
    data <- yaml::read_yaml(file)
    # Validation: warnings are thrown for AIRR compliance failures;
    # the boolean result is intentionally unused
    if (validate) {
        valid <- validate_airr(data, model=model)
    }
    return(data)
}
# Read an AIRR JSON file
#
# \code{read_airr_json} loads a JSON file containing AIRR Data Model records.
#
# @param file path to the JSON input file.
# @param validate run schema validation if \code{TRUE}.
# @param model if \code{TRUE} validate only AIRR DataFile defined objects. If \code{FALSE}
# attempt validation of all objects in \code{data}.
# Ignored if \code{validate=FALSE}
#
# @return A named nested \code{list} contained in the AIRR Data Model with the top-level
# names reflecting the individual AIRR objects.
#
# @seealso
# See \link{Schema} for the AIRR schema object definition.
# See \link{write_airr_json} for writing AIRR data in JSON format.
#
# @examples
# # Get path to the rearrangement-example file
# file <- system.file("extdata", "germline-example.json", package="airr")
#
# # Load data file
# repr <- read_airr_json(file)
read_airr_json <- function(file, validate=TRUE, model=TRUE) {
    # Deserialize JSON keeping the nested AIRR Data Model shape: plain
    # vectors are simplified, but matrices/data frames are not
    parse_opts <- list(simplifyVector=TRUE,
                       simplifyMatrix=FALSE,
                       simplifyDataFrame=FALSE,
                       flatten=FALSE)
    records <- do.call(jsonlite::fromJSON, c(list(file), parse_opts))
    # Validation: warnings are thrown for AIRR compliance failures
    if (validate) {
        valid <- validate_airr(records, model=model)
    }
    return(records)
}
#### Write TSV ####
#' Write an AIRR tabular data
#'
#' \code{write_tabular} writes a TSV containing AIRR tabular records.
#'
#' @param data \code{data.frame} of Rearrangement data.
#' @param file output file name.
#' @param base starting index for positional fields in the output file.
#' Fields in the input \code{data} are assumed to be 1-based
#' closed-intervals (R style).
#' If \code{"1"}, then these fields will not be modified.
#' If \code{"0"}, then fields ending in \code{_start} and \code{_end}
#' will be converted to 0-based half-open intervals (python style)
#' in the output file.
#' @param schema \code{Schema} object defining the output format.
#' @param ... additional arguments to pass to \link[readr]{write_delim}.
#'
#' @return NULL
#'
#' @seealso
#' See \link{Schema} for the AIRR schema object definition.
#' See \link{read_tabular} for reading to AIRR files.
#'
#' @examples
#' # Get path to the rearrangement-example file
#' file <- system.file("extdata", "rearrangement-example.tsv.gz", package="airr")
#'
#' # Load data file
#' df <- read_rearrangement(file)
#'
#' # Write a Rearrangement data file
#' outfile <- file.path(tempdir(), "output.tsv")
#' write_tabular(df, outfile, schema=RearrangementSchema)
#'
#' @export
write_tabular <- function(data, file, schema, base=c("1", "0"), ...) {
    # Capture the caller's argument expressions for the warning message below
    data_name <- deparse(substitute(data))
    schema_name <- deparse(substitute(schema))
    # Check arguments
    base <- match.arg(base)
    # Fill in missing required columns with NA so the output is schema-complete
    missing_cols <- setdiff(schema@required, names(data))
    if (length(missing_cols) > 0) {
        data[, missing_cols] <- NA
    }
    # Order columns: schema-defined fields first (in schema order), then extras
    ordering <- c(intersect(names(schema), names(data)),
                  setdiff(names(data), names(schema)))
    data <- data[, ordering]
    # Adjust indexes: 1-based closed intervals [s, e] become 0-based
    # half-open intervals [s-1, e), so only the *_start columns shift
    if (base == "0") {
        start_positions <- grep("_start$", names(data), perl=TRUE)
        if (length(start_positions) > 0) {
            data[, start_positions] <- data[, start_positions] - 1
        }
    }
    # Validate while collecting the validator's warning messages.
    # BUG FIX: the previous code used suppressWarnings() and then read
    # names(warnings()). warnings() reflects last.warning, which is only
    # updated at top level, so the collected messages were empty or stale.
    # withCallingHandlers captures each warning's message directly.
    validation_msgs <- character(0)
    valid <- withCallingHandlers(
        validate_tabular(data, schema),
        warning = function(w) {
            validation_msgs <<- c(validation_msgs, conditionMessage(w))
            invokeRestart("muffleWarning")
        }
    )
    if (!valid) {
        err_msg <- paste0(data_name, " is not a valid ", schema_name, "\n")
        err_msg <- paste(err_msg, paste(validation_msgs, collapse="\n"))
        warning(err_msg)
    }
    # Write logical fields as "T"/"F" (AIRR TSV convention); values that are
    # not TRUE/FALSE (e.g. NA) are left as-is after character conversion
    logical_fields <- names(which(sapply(schema@properties,
                                         '[[', "type") == "logical"))
    logical_fields <- intersect(colnames(data), logical_fields)
    if (length(logical_fields) > 0) {
        for (log_field in logical_fields) {
            logical_values <- data[[log_field]] %in% c(TRUE, FALSE)
            data[[log_field]] <- as.character(data[[log_field]])
            if (any(logical_values)) {
                data[[log_field]][logical_values] <- c("T", "F")[match(data[[log_field]][logical_values],
                                                                       c("TRUE", "FALSE"))]
            }
        }
    }
    # Write TSV with empty strings for missing values
    write_tsv(data, file, na="", ...)
}
#' @details
#' \code{write_rearrangement} writes a \code{data.frame} containing AIRR Rearrangement data to TSV.
#'
#' @rdname write_tabular
#' @export
write_rearrangement <- function(data, file, base=c("1", "0"), ...) {
    # Delegate to the generic tabular writer with the Rearrangement schema
    write_tabular(data, file, schema=RearrangementSchema, base=base, ...)
}
#' @details
#' \code{write_alignment} writes a \code{data.frame} containing AIRR Alignment data to TSV.
#'
#' @rdname write_tabular
#' @export
write_alignment <- function(data, file, base=c("1", "0"), ...) {
    # Emit a deprecation notice pointing users at the generic writer
    deprecation_lines <- c(
        "write_alignment is deprecated and will be removed in a future release.",
        "Use write_tabular with the argument schema=AlignmentSchema instead.",
        "See help(\"Deprecated\")")
    .Deprecated(msg=paste(deprecation_lines, collapse="\n"))
    # Still perform the write for backward compatibility
    write_tabular(data, file, base=base, schema=AlignmentSchema, ...)
}
#### Write YAML/JSON ####
#' Write AIRR Data Model records to YAML or JSON files
#'
#' \code{write_airr} writes a YAML or JSON file containing AIRR Data Model records.
#'
#' @param data \code{list} containing AIRR Model Records.
#' @param file output file name.
#' @param format format of the output file. Must be one of \code{"auto"}, \code{"yaml"}, or
#' \code{"json"}. If \code{"auto"} (default), the format will be
#' detected from the \code{file} extension.
#' @param validate run schema validation prior to write if \code{TRUE}.
#' @param model if \code{TRUE} validate and write only AIRR DataFile defined objects.
#' If \code{FALSE} attempt validation and write of all objects in \code{data}.
#'
#' @seealso
#' See \link{Schema} for the AIRR schema definition objects.
#' See \link{read_airr} for reading to AIRR Data Model files.
#'
#' @examples
#' # Get path to the repertoire-example file
#' file <- system.file("extdata", "repertoire-example.yaml", package="airr")
#'
#' # Load data file
#' repertoire <- read_airr(file)
#'
#' # Write a Rearrangement data file
#' outfile <- file.path(tempdir(), "output.yaml")
#' write_airr(repertoire, outfile)
#'
#' @export
write_airr <- function(data, file, format=c("auto", "yaml", "json"), validate=TRUE, model=TRUE) {
    # Validate the format argument against the allowed choices
    format <- match.arg(format)
    # Infer the output format from the file extension when requested
    if (format == "auto") { format <- tolower(tools::file_ext(file)) }
    # Dispatch to the format-specific writer; any other extension is an error
    switch(format,
           yaml=write_airr_yaml(data, file, validate=validate, model=model),
           json=write_airr_json(data, file, validate=validate, model=model),
           stop("Unrecognized file extension ", format, "; must be either .json or .yaml."))
}
# Write an AIRR yaml
#
# \code{write_airr_yaml} writes a yaml containing AIRR formatted records.
#
# @param data object containing Repertoire data.
# @param file output file name.
# @param validate run schema validation prior to write if \code{TRUE}.
# @param model if \code{TRUE} validate only AIRR DataFile defined objects. If \code{FALSE}
# attempt validation of all objects in \code{data}.
#
# @seealso
# See \link{Schema} for the AIRR schema object definition.
# See \link{read_airr_yaml} for reading to AIRR files.
#
# @examples
# # Get path to the rearrangement-example file
# file <- system.file("extdata", "repertoire-example.yaml", package="airr")
#
# # Load data file
# repr <- read_airr(file)
#
# # Write a Rearrangement data file
# outfile <- file.path(tempdir(), "output.yaml")
# write_airr_yaml(repr, outfile)
write_airr_yaml <- function(data, file, validate=TRUE, model=TRUE) {
    # Optionally run AIRR schema validation before writing; failures only
    # produce warnings and do not abort the write
    if (validate) {
        validate_airr(data, model=model)
    }
    # When model=TRUE, keep only objects defined by the AIRR DataFile schema
    if (model) {
        keep <- names(data) %in% names(DataFileSchema@properties)
        data <- data[keep]
    }
    # Serialize to YAML
    yaml::write_yaml(data, file)
}
# Write an AIRR json
#
# \code{write_airr_json} writes a JSON file containing AIRR formatted records.
#
# @param data object containing Repertoire data.
# @param file output file name.
# @param validate run schema validation prior to write if \code{TRUE}.
#
# @return NULL
#
# @seealso
# See \link{Schema} for the AIRR schema object definition.
# See \link{read_airr_json} for reading to AIRR files.
#
# @examples
# # Get path to the rearrangement-example file
# file <- system.file("extdata", "germline-example.json", package="airr")
#
# # Load data file
# repr <- read_airr(file)
#
# # Write a Rearrangement data file
# outfile <- file.path(tempdir(), "output.json")
# write_airr_json(repr, outfile)
write_airr_json <- function(data, file, validate=TRUE, model=TRUE) {
    # Optionally run AIRR schema validation before writing; failures only
    # produce warnings and do not abort the write
    if (validate) {
        validate_airr(data, model=model)
    }
    # When model=TRUE, keep only objects defined by the AIRR DataFile schema
    if (model) {
        keep <- names(data) %in% names(DataFileSchema@properties)
        data <- data[keep]
    }
    # Serialize to JSON: scalars unboxed, NULL and NA both written as null
    out <- jsonlite::toJSON(data, auto_unbox=TRUE, null="null", na="null")
    write(out, file)
}
#### Validation ####
#' Validate tabular AIRR data
#'
#' \code{validate_tabular} validates compliance of the contents of a \code{data.frame}
#' to the AIRR standards.
#'
#' @param data \code{data.frame} of tabular data to validate.
#' @param schema \code{Schema} object defining the data standard of the table.
#'
#' @return Returns \code{TRUE} if the input \code{data} is compliant and
#' \code{FALSE} if not.
#'
#' @examples
#' # Get path to the rearrangement-example file
#' file <- system.file("extdata", "rearrangement-example.tsv.gz", package="airr")
#'
#' # Load data file
#' df <- read_rearrangement(file)
#'
#' # Validate a data.frame against the Rearrangement schema
#' validate_rearrangement(df)
#'
#' @export
validate_tabular <- function(data, schema) {
    # Assume compliance until a check fails
    valid <- TRUE
    # All schema-required fields must be present as columns
    missing_fields <- setdiff(schema@required, names(data))
    if (length(missing_fields) > 0) {
        valid <- FALSE
        warning(paste("Warning: File is missing AIRR mandatory field(s):",
                      paste(missing_fields, collapse = ", ")))
    }
    # sequence_id checks: values must be unique and should be non-empty
    if ("sequence_id" %in% colnames(data)) {
        dup_ids <- duplicated(data[["sequence_id"]])
        if (any(dup_ids)) {
            valid <- FALSE
            warning(paste("Warning: sequence_id(s) are not unique:",
                          paste(data[["sequence_id"]][dup_ids], collapse = ", ")))
        }
        # Empty ids are reported but deliberately do not fail validation
        empty_rows <- which(data[["sequence_id"]] %in% c("None", "", NA))
        if (length(empty_rows) > 0) {
            # TODO
            # valid <- FALSE
            warning(paste("Warning: sequence_id is empty for row(s):",
                          paste(empty_rows, collapse = ", ")))
        }
    }
    # Logical fields must contain only TRUE/FALSE; offending rows are
    # reported with a warning (without failing validation, matching the
    # original behavior)
    field_types <- sapply(schema@properties, '[[', "type")
    logical_fields <- intersect(colnames(data),
                                names(which(field_types == "logical")))
    for (log_field in logical_fields) {
        not_logical <- data[[log_field]] %in% c(TRUE, FALSE) == FALSE
        if (any(not_logical)) {
            warning(paste("Warning:", log_field, "is not logical for row(s):",
                          paste(which(not_logical), collapse = ", ")))
        }
    }
    return(valid)
}
#' @details
#' \code{validate_rearrangement} validates the standards compliance of AIRR Rearrangement
#' data stored in a \code{data.frame}
#'
#' @rdname validate_tabular
#' @export
validate_rearrangement <- function(data) {
    # Rearrangement validation is tabular validation against the standard
    # Rearrangement schema
    validate_tabular(data, schema = RearrangementSchema)
}
#' Validate an AIRR Data Model nested list representation
#'
#' \code{validate_airr} validates the fields in a named nested list representation of the
#' AIRR Data Model. Typically, generating by reading of JSON or YAML formatted AIRR files.
#'
#' @param data \code{list} containing records of an AIRR Data Model objected imported from
#' a YAML or JSON representation.
#' @param model if \code{TRUE} validate only AIRR DataFile defined objects. If \code{FALSE}
#' attempt validation of all objects in \code{data}.
#' @param each if \code{TRUE} return a logical vector with results for each object in \code{data}
#' instead of a single \code{TRUE} or \code{FALSE} value.
#'
#' @return Returns \code{TRUE} if the input \code{data} is compliant with AIRR standards and
#' \code{FALSE} if not. If \code{each=TRUE} is set, then a vector with results for each
#' each object in \code{data} is returned instead.
#'
#' @seealso
#' See \link{Schema} for the AIRR schema definitions.
#' See \link{read_airr} for loading AIRR Data Models from a file.
#' See \link{write_airr} for writing AIRR Data Models to a file.
#'
#' @examples
#' # Get path to the rearrangement-example file
#' f1 <- system.file("extdata", "repertoire-example.yaml", package="airr")
#' f2 <- system.file("extdata", "germline-example.json", package="airr")
#'
#' # Load data file
#' repertoire <- read_airr(f1)
#' germline <- read_airr(f2)
#'
#' # Validate a single record
#' validate_airr(repertoire)
#'
#' # Return validation for individual objects
#' validate_airr(germline, each=TRUE)
#'
#' @export
validate_airr <- function(data, model=TRUE, each=FALSE) {
    # This is a wrapper function to allow recursive validation of the different entries in yaml file
    # Directly calling validate_entry does not work, because the function
    # validate_entry also needs to work for recursive calling of reference schemes
    # Iterate through objects in input data, accumulating one named logical
    # result per top-level object
    valid_sum <- logical()
    for (n in names(data)) {
        # 'Info' and 'DataFile' are metadata sections, not record objects
        if (n %in% c('Info', 'DataFile')) { next }
        entry <- data[[n]]
        if (is.null(entry)) { next }
        # Check for non-DataFile objects
        if (model && !(n %in% names(DataFileSchema@properties))) {
            warning('Skipping validation of non-DataFile object: ', n)
            next
        }
        # Load schema: prefer the bundled AIRRSchema set; otherwise try to
        # load it dynamically, yielding NULL on failure
        if (n %in% names(AIRRSchema)) {
            schema <- AIRRSchema[[n]]
        } else {
            schema <- tryCatch(load_schema(n), error=function(e) NULL)
        }
        # Fail invalid schema
        if (is.null(schema)) {
            warning('Unrecognized schema: ', n)
            valid <- FALSE
        } else {
            # Recursively validate all entries
            # NOTE(review): this assumes 'entry' is a list of records, each
            # validated individually against the schema — confirm for
            # single-record objects.
            valid <- sapply(entry, validate_entry, schema=schema)
        }
        # Store check result. Note: append() prepends here, so the names in
        # the returned vector are in reverse iteration order.
        valid_sum <- append(setNames(all(valid), n), valid_sum)
    }
    # Data only valid if all entries valid; with each=TRUE return the
    # per-object vector instead
    if (!each) { valid_sum <- all(valid_sum) }
    return(valid_sum)
}
# Validation function for a single entry in the yaml file
#
# @param entry AIRR data in a nested list structure
# @param schema Schema definition object
# @returns TRUE or FALSE
# Validation function for a single entry in the yaml file
#
# @param entry   AIRR data in a nested list structure.
# @param schema  Schema definition object for this entry.
# @return TRUE if the entry is compliant, FALSE otherwise (with warnings
#         describing each failure).
validate_entry <- function(entry, schema) {
    schema_name <- schema@definition
    valid <- TRUE
    # Check all required fields exist
    missing_fields <- setdiff(schema@required, names(entry))
    if (length(missing_fields) > 0) {
        valid <- FALSE
        warning(paste("Warning:", schema_name, "object is missing AIRR mandatory field(s):",
                      paste(missing_fields, collapse = ", "), "\n"))
    }
    # Loop through all fields in the entry and check whether they refer to
    # other schemas
    for (f in names(entry)) {
        # Hoist the field definition so the S4 `[` accessor runs once
        field_def <- schema[f]
        field_type <- field_def[["type"]]
        # get the reference scheme(s), if any
        reference_schemes <- field_def$ref
        # Case 1: no declared type on the 1st level; the field may be a
        # direct reference to another schema.
        # is.null() must be tested BEFORE is.na(): is.na(NULL) returns
        # logical(0), which is an error on the left of || in R >= 4.3.
        if (is.null(field_type) || is.na(field_type)) {
            if (!is.null(reference_schemes)) {
                v <- validate_entry(entry[[f]], schema=reference_schemes)
                if (!v) { valid <- FALSE }
            }
        # Case 2: an array field with one or several reference schemas.
        # Every array element is validated against every reference schema.
        } else if (field_type == "array" && !is.null(reference_schemes)) {
            n_schemes <- length(reference_schemes)
            n_array_entries <- length(entry[[f]])
            for (n_ref in seq_len(n_schemes)) {
                for (n_array in seq_len(n_array_entries)) {
                    v <- validate_entry(entry[[f]][[n_array]],
                                        schema = reference_schemes[[n_ref]])
                    if (!v) { valid <- FALSE }
                }
            }
        # Case 3: the entry's class must match the declared type.
        # inherits() is used instead of class(x) != type because class()
        # can return more than one element (length > 1 conditions error in
        # modern R) and inherits() handles multi-class objects correctly.
        } else if (!inherits(entry[[f]], field_type)) {
            # A class mismatch may still be acceptable:
            # (a) the field is nullable and the value is NULL
            nullable <- field_def[["x-airr"]][["nullable"]]
            # if not specified, the field is treated as nullable
            if (is.null(nullable)) { nullable <- TRUE }
            if (!(nullable && is.null(entry[[f]]))) {
                # (b) array fields are represented as plain R vectors
                if (!(field_type == "array" && is.vector(entry[[f]]))) {
                    # (c) numeric fields may have been parsed as integers
                    if (!(field_type == "numeric" && is.numeric(entry[[f]]))) {
                        valid <- FALSE
                        warning(paste("Warning:", schema_name, "entry does not have the required type",
                                      field_type, ":", f, "\n"))
                    }
                }
            }
        }
    }
    # return to indicate whether entries are valid
    return(valid)
}
|
2bab137fe3152f34abca1063ea01d9350af4070b
|
58724d750895403a1b0c94cfbc0fad061c77670b
|
/pkg/retistruct/man/ReconstructedCountSet.Rd
|
99fc36526c40be52aa54c09e70b6ea784adb5336
|
[] |
no_license
|
ZeitgeberH/retistruct
|
8052826b91d5321f3eb42a88062b6688bac1afbd
|
0f7ca278dc57fef6c25fc75370a49753399ffb8c
|
refs/heads/master
| 2020-07-01T09:41:10.305635
| 2019-07-31T15:33:52
| 2019-07-31T15:34:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 688
|
rd
|
ReconstructedCountSet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReconstructedCountSet.R
\docType{data}
\name{ReconstructedCountSet}
\alias{ReconstructedCountSet}
\title{ReconstructedCountSet class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
ReconstructedCountSet
}
\value{
A \code{ReconstructedCountSet} object. This contains the following fields:
\item{\code{DVflip}}{\code{TRUE} if the raw data is flipped in
the dorsoventral direction}
\item{\code{side}}{The side of the eye ("Left" or "Right")}
\item{\code{dataset}}{File system path to dataset}
}
\description{
ReconstructedCountSet class
}
\author{
David Sterratt
}
\keyword{datasets}
|
0a15117e4ed38cf7db4d49791b57db3da9d020f5
|
3b26ab6bc88a47dfef383d4937558e4bd44da506
|
/man/quickin.Rd
|
0e2ca87a8c3376859d57952b27391bc4f4973aaa
|
[
"MIT"
] |
permissive
|
SMBaylis/fishSim
|
affafad3915dad24057895d1b0708bc53dd206bd
|
2f98c4545780d4d42f63dd169fb9902c61d0c614
|
refs/heads/master
| 2021-08-02T18:07:06.651542
| 2021-07-23T06:17:11
| 2021-07-23T06:17:11
| 144,930,871
| 3
| 2
|
MIT
| 2021-02-15T01:28:04
| 2018-08-16T03:17:48
|
R
|
UTF-8
|
R
| false
| true
| 1,365
|
rd
|
quickin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fishSim_dev.R
\name{quickin}
\alias{quickin}
\title{Quick lookup of CKMR-relevant relationships}
\usage{
quickin(inds, max_gen = 2)
}
\arguments{
\item{inds}{an 'indiv' matrix, as from 'mort()', with some
individuals marked as 'captured'}
\item{max_gen}{the maximum depth to look up relatives, in
generations. max_gen = 2 is sufficient for relatives used in
CKMR}
}
\description{
quickin performs quick lookup of the kinships directly relevant to
close-kin mark-recapture. It returns a list of eight character
arrays, with each array holding one kinship in one pair of animals
per row.
}
\details{
The named relationship classes (in list order) are:
POPs: Parent-offspring pairs. One is the other's parent.
HSPs: Half-sibling pairs. The individuals share one parent.
FSPs: Full-sibling pairs. The individuals share two parents.
GGPs: Grandparent-grandoffspring pairs. One is the other's grandparent.
HTPs: Half-thiatic pairs. One individual's grandparent is the other individual's parent.
FTPs: Full-thiatic pairs. Two of one individual's grandparents are the other individual's
parents.
HCPs: Half-cousin pairs. The individuals share one grandparent.
FCPs: Full-cousin pairs. The individuals share two grandparents.
}
\seealso{
[fishSim::findRelatives()]
[fishSim::capture()]
}
|
f49b09216fd8a0deb90a3090c7ac9b055e3129d0
|
c16e93230eef744aef141adcb6620c45ab50a721
|
/multibandsBFAST/man/valitable.Rd
|
f742b1f80268acf549a050267a23e1dd32087bff
|
[] |
no_license
|
mengluchu/multibandsBFAST
|
0ccae505f8e64884ace80a6d35d3e8508459c3e9
|
eaefc4074d31f1febb41394c52c0b2e4d0ce9409
|
refs/heads/master
| 2020-12-25T14:59:08.706074
| 2017-03-29T10:16:56
| 2017-03-29T10:16:56
| 66,471,940
| 1
| 1
| null | 2016-10-21T11:03:47
| 2016-08-24T14:38:30
|
R
|
UTF-8
|
R
| false
| true
| 639
|
rd
|
valitable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validationsimple.R
\name{valitable}
\alias{valitable}
\title{validation}
\usage{
valitable(cx2, oridensetime, oritemplate, EarlyDateIsCommission = T, totalp,
nofchange, colmWith = 2)
}
\arguments{
\item{cx2}{validation chart, dataframe with true date in the first row}
\item{oridensetime}{array time}
\item{oritemplate}{a matrix of the location of each time stamp of each time series, used to compare times across observations}
}
\value{
dataframe with confusion matrix and time delay. can use stargazer to generate a table
}
\note{
adapted from Eliakim's code
}
|
0b189e14fa8383eab479af0a8efb497516b95a7b
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/robmixglm/man/print.outlierTest.Rd
|
cd210d5bd96d00aabd42fd6ad81f5c2d8b934c04
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 576
|
rd
|
print.outlierTest.Rd
|
\name{print.outlierTest}
\alias{print.outlierTest}
\alias{summary.outlierTest}
\alias{print.summary.outlierTest}
\title{Print an outlierTest object}
\description{
Print an outlierTest object.
}
\usage{
\method{print}{outlierTest}(x, \ldots)
}
\arguments{
\item{x}{outlierTest object}
\item{\ldots}{further arguments (not currently used)}
}
\examples{
\donttest{
library(MASS)
data(forbes)
forbes.robustmix <- robmixglm(bp~pres, data = forbes, cores = 1)
summary(forbes.robustmix)
print(outlierTest(forbes.robustmix, cores = 1))
}
}
\author{Ken Beath}
\keyword{methods}
|
a70b98de0f6c71879c4ae6577bacb9e8f62700d7
|
b94bde90fdb3e38483293d906c0b5f0669af647e
|
/simsem/R/tagHeaders-methods.R
|
114e6419e6e1aecde26161c8aedb9da81bca2542
|
[] |
no_license
|
pairach/simsem
|
c2da13f31af4b8ed986647320090bbd9edc0c400
|
8194f63851ed0c0dbd447726988b0a58619ec43a
|
refs/heads/master
| 2020-12-25T01:50:53.664082
| 2012-05-29T21:38:06
| 2012-05-29T21:38:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,066
|
r
|
tagHeaders-methods.R
|
# tagHeaders: This element will add names in each element of a vector or will add row and columns names of a matrix with variable or factor names
setMethod("tagHeaders", signature = "VirtualRSet", definition = function(object) {
    # Determine which dimensions are relevant for this model type:
    # ny/nx = number of endogenous/exogenous observed variables,
    # ne/nk = number of endogenous/exogenous factors.
    ny <- NULL
    nx <- NULL
    nk <- NULL
    ne <- NULL
    modelType <- object@modelType
    if (modelType == "CFA") {
        ne <- ncol(object@LY)
        ny <- nrow(object@LY)
    } else if (modelType == "Path") {
        ny <- nrow(object@PS)
    } else if (modelType == "Path.exo") {
        nx <- ncol(object@GA)
        ny <- nrow(object@PS)
    } else if (modelType == "SEM") {
        ne <- ncol(object@LY)
        ny <- nrow(object@LY)
    } else if (modelType == "SEM.exo") {
        ne <- ncol(object@LY)
        ny <- nrow(object@LY)
        nk <- ncol(object@LX)
        nx <- nrow(object@LX)
    }
    # Build the name vectors ("y1", "y2", ...) in one vectorized call
    # instead of growing character vectors inside loops. seq_len() is also
    # safe when a count is zero, where the original 1:n would have yielded
    # c(1, 0).
    makeNames <- function(prefix, n) {
        if (is.null(n)) NULL else paste(prefix, seq_len(n), sep = "")
    }
    names.y <- makeNames("y", ny)
    names.x <- makeNames("x", nx)
    names.e <- makeNames("e", ne)
    names.k <- makeNames("k", nk)
    # Path models label parameter matrices with observed-variable names;
    # all other model types use factor names where applicable.
    isPath <- modelType %in% c("Path", "Path.exo")
    if (!isNullObject(object@LY)) {
        colnames(object@LY) <- names.e
        rownames(object@LY) <- names.y
    }
    if (!isNullObject(object@TE)) {
        colnames(object@TE) <- names.y
        rownames(object@TE) <- names.y
    }
    if (!isNullObject(object@PS)) {
        if (isPath) {
            colnames(object@PS) <- names.y
            rownames(object@PS) <- names.y
        } else {
            colnames(object@PS) <- names.e
            rownames(object@PS) <- names.e
        }
    }
    if (!isNullObject(object@BE)) {
        if (isPath) {
            colnames(object@BE) <- names.y
            rownames(object@BE) <- names.y
        } else {
            colnames(object@BE) <- names.e
            rownames(object@BE) <- names.e
        }
    }
    if (!isNullObject(object@TY)) {
        names(object@TY) <- names.y
    }
    if (!isNullObject(object@AL)) {
        if (isPath) {
            names(object@AL) <- names.y
        } else {
            names(object@AL) <- names.e
        }
    }
    if (!isNullObject(object@LX)) {
        colnames(object@LX) <- names.k
        rownames(object@LX) <- names.x
    }
    if (!isNullObject(object@TD)) {
        colnames(object@TD) <- names.x
        rownames(object@TD) <- names.x
    }
    if (!isNullObject(object@PH)) {
        if (isPath) {
            colnames(object@PH) <- names.x
            rownames(object@PH) <- names.x
        } else {
            colnames(object@PH) <- names.k
            rownames(object@PH) <- names.k
        }
    }
    if (!isNullObject(object@GA)) {
        if (isPath) {
            colnames(object@GA) <- names.x
            rownames(object@GA) <- names.y
        } else {
            colnames(object@GA) <- names.k
            rownames(object@GA) <- names.e
        }
    }
    if (!isNullObject(object@TX)) {
        names(object@TX) <- names.x
    }
    if (!isNullObject(object@KA)) {
        if (isPath) {
            names(object@KA) <- names.x
        } else {
            names(object@KA) <- names.k
        }
    }
    if (!isNullObject(object@TH)) {
        colnames(object@TH) <- names.y
        rownames(object@TH) <- names.x
    }
    return(object)
})
|
3dda7d18f04feaae4f6408ed23ac471e874f3577
|
f2ecedf2b1a39abc178ba6e31f6fb7e1f28f0e99
|
/man/ap_textplot.Rd
|
882746c19435840d92bac9339d00c1aa169a4652
|
[] |
no_license
|
cekehe/rappp
|
02ae1bee6b6112b210ea85d29cefe606178afbe7
|
256d983ff1e07d5635446f9b3a0d62fca9858729
|
refs/heads/master
| 2022-06-07T11:54:44.324202
| 2022-05-16T13:37:43
| 2022-05-16T13:37:43
| 192,409,000
| 0
| 3
| null | 2022-05-16T13:37:50
| 2019-06-17T19:47:53
|
R
|
UTF-8
|
R
| false
| true
| 851
|
rd
|
ap_textplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ap_textplot.R
\name{ap_textplot}
\alias{ap_textplot}
\title{Display text information in a graphics plot.}
\usage{
ap_textplot(object, halign = "center", valign = "center", cex, cmar = 1.5, ...)
}
\description{
This function displays text output in a graphics window.
It is the equivalent of 'print' except that the output is displayed as a plot.
}
\details{
Based on \code{\link[gplots:textplot]{textplot()}} with slightly altered code.
Altered code found online (https://gist.github.com/johncolby/1482973).
Additional alteration done by CH, including making working methods, see code.
The input should be one of classes \code{matrix}, \code{data.frame}, \code{vectors} longer than 1,
single \code{character} string, single \code{integer} or single \code{numeric} value.
}
|
836f62fa2d1da347e5b2494f10a4c0655aa52e36
|
16886bf71c969197ecdeb76f5bdfbf8291ac4410
|
/R/mapping.R
|
7409a99b67db77b017970a29e2590edbed3cdecc
|
[] |
no_license
|
mapping-elections/mappingelections
|
298d4838172e14bc3fa241f35424d95f606985cd
|
5841ddf796572f142f8e103bb7b98705aaebb585
|
refs/heads/master
| 2021-01-23T12:32:37.432586
| 2019-07-23T14:59:33
| 2019-07-23T14:59:33
| 93,166,371
| 0
| 0
| null | 2018-07-26T21:40:50
| 2017-06-02T13:07:19
|
R
|
UTF-8
|
R
| false
| false
| 16,826
|
r
|
mapping.R
|
#' Map elections data
#'
#' @param data An \code{sf} object with elections data returned by
#' \code{\link{join_to_spatial}}.
#' @param congress The number of the Congress. If \code{NULL}, it will be
#' guessed from the data.
#' @param projection If not provided, then the best state plane projection will
#' be guessed using the \code{\link[USAboundaries]{state_plane}} function from
#' the \code{USAboundaries} package. If \code{NULL}, then leaflet's default
#' Web Mercator projection will be used. To use a custom projection, provide a
#' projection/CRS object returned by the \code{\link[leaflet]{leafletCRS}}
#' function in the \code{leaflet} package.
#' @param state Override the guessing of the state from the data passed in.
#' @param congressional_boundaries Draw Congressional district boundaries in
#' addition to county boundaries?
#' @param state_boundaries Draw state boundaries in addition to county
#' boundaries?
#' @param cities Number of largest cities to draw. Pass \code{FALSE} to not draw
#' any cities.
#' @param width The width of the map in pixels or percentage. Passed on to
#' \code{\link[leaflet]{leaflet}}.
#' @param height The height of the map in pixels or percentage. Passed on to
#' \code{\link[leaflet]{leaflet}}.
#'
#' @rdname map_elections
#'
#' @examples
#' map_data <- get_county_map_data("meae.congressional.congress19.nc.county")
#' map_counties(map_data)
#'
#' @importFrom dplyr ends_with
#' @export
map_counties <- function(data, congress = NULL, projection = NULL,
                         congressional_boundaries = TRUE,
                         state_boundaries = FALSE,
                         cities = 4L,
                         state = NULL, width = "100%", height = "600px") {
  stopifnot(is.logical(congressional_boundaries),
            is.numeric(cities) || cities == FALSE)
  # Determine which state to map: either as passed in, or the state that
  # occurs most often in the data
  if (is.null(state)) {
    statename_to_filter <- most_common_state(data$state_terr)
  } else {
    statename_to_filter <- state
  }
  # Translate the state name into its postal abbreviation
  state_to_filter <- USAboundaries::state_codes %>%
    dplyr::filter(state_name == statename_to_filter) %>%
    dplyr::pull(state_abbr)
  # If no Congress number was supplied, take the first non-missing value
  # found in the data
  if (is.null(congress)) {
    congress <- unique(stats::na.omit(data$congress))[1]
  }
  if (congressional_boundaries) {
    # Get the Congressional data now if needed.
    # NOTE(review): district == -1 rows are excluded — presumably
    # placeholder/unmapped districts; confirm against histcongress docs.
    congress_sf <- histcongress %>%
      dplyr::filter(statename %in% statename_to_filter,
                    startcong <= congress,
                    congress <= endcong,
                    district != -1)
  }
  # Calculate the bounding box. Sometimes the state/counties are bigger;
  # sometimes the Congressional district is bigger.
  bbox_counties <- as.list(sf::st_bbox(data))
  if (congressional_boundaries) {
    bbox <- as.list(sf::st_bbox(congress_sf))
    # Now get the biggest bounding box
    bbox$xmin <- min(bbox$xmin, bbox_counties$xmin)
    bbox$ymin <- min(bbox$ymin, bbox_counties$ymin)
    bbox$xmax <- max(bbox$xmax, bbox_counties$xmax)
    bbox$ymax <- max(bbox$ymax, bbox_counties$ymax)
  } else {
    # If we are not using Congressional boundaries, just use the counties boundaries
    bbox <- bbox_counties
  }
  # Now add a minimum padding based on the size of the state
  # (15% of the extent, but never less than 0.45 degrees)
  lng_pad <- max((bbox$xmax - bbox$xmin) * 0.15, 0.45)
  lat_pad <- max((bbox$ymax - bbox$ymin) * 0.15, 0.45)
  bbox$xmin <- bbox$xmin - lng_pad
  bbox$xmax <- bbox$xmax + lng_pad
  bbox$ymin <- bbox$ymin - lat_pad
  bbox$ymax <- bbox$ymax + lat_pad
  if (is.null(projection)) {
    # Use the state plane projection
    projection <- make_leaflet_crs(state_to_filter)
  } else {
    stopifnot(inherits(projection, "leaflet_crs"))
  }
  # One fill color per county, based on the winning party and its margin
  colors <- poli_chrome(dplyr::as_data_frame(data))
  # Instantiate the map with the data and the projection
  map <- leaflet::leaflet(data, width = width, height = height,
                          options = leaflet::leafletOptions(
                            crs = projection,
                            zoomControl = FALSE, dragging = TRUE,
                            minZoom = 7, maxZoom = 12
                          ))
  # Set the maximum bounds of the map
  map <- map %>%
    leaflet::setMaxBounds(bbox$xmin, bbox$ymin, bbox$xmax, bbox$ymax)
  # Draw the county polygons with dashed gray borders and party-colored fills
  map <- map %>%
    leaflet::addPolygons(
      # layerId = "county",
      stroke = TRUE,
      smoothFactor = 1,
      color = "#bbb",
      opacity = 1,
      weight = 2,
      dashArray = "5, 5",
      fillOpacity = 1,
      fillColor = colors,
      label = label_maker(leaflet::getMapData(map)),
      labelOptions = leaflet::labelOptions(direction = "auto"),
      popup = popup_maker(leaflet::getMapData(map))
      # popup = ~popup_maker(county = tools::toTitleCase(tolower(name)),
      #                      federalist = federalist_vote,
      #                      demrep = demrep_vote,
      #                      other = other_vote,
      #                      fed_percent = federalist_percentage,
      #                      demrep_percent = demrep_percentage,
      #                      oth_percent = other_percentage)
    )
  # Optionally overlay solid state boundary lines
  if (state_boundaries) {
    state_names <- USAboundaries::state_codes %>%
      dplyr::filter(state_abbr %in% state_to_filter)
    state_sf <- USAboundaries::us_states(map_date = unique(data$map_date),
                                         resolution = "high",
                                         states = state_names$state_name)
    map <- map %>%
      leaflet::addPolygons(
        data = state_sf,
        # layerId = "state",
        stroke = TRUE,
        smoothFactor = 1,
        color = "#222",
        opacity = 1,
        weight = 3,
        fill = FALSE
      )
  }
  # Optionally overlay solid Congressional district boundary lines
  if (congressional_boundaries) {
    map <- map %>%
      leaflet::addPolygons(
        data = congress_sf,
        stroke = TRUE,
        smoothFactor = 1,
        color = "#222",
        opacity = 1,
        weight = 3,
        fill = FALSE
      )
  }
  # Optionally mark the largest cities (per state) as circle markers
  if (cities > 0) {
    # Round the map date to the nearest census-style decade, clamped to 1790
    decade <- round(as.integer(format(data$map_date, "%Y")) / 10) * 10
    decade <- unique(stats::na.omit(decade))
    # NOTE(review): if the data spans more than one decade, 'decade' has
    # length > 1 and this scalar condition would error — confirm inputs
    # always cover a single decade.
    if (decade < 1790L) decade <- 1790L
    city_locations <- USAboundaries::us_cities(map_date = decade) %>%
      dplyr::filter(state_abbr %in% state_to_filter,
                    population > 100) %>%
      dplyr::group_by(state_abbr) %>%
      dplyr::top_n(cities, population)
    if (nrow(city_locations) > 0) {
      map <- map %>%
        leaflet::addCircleMarkers(data = city_locations,
                                  stroke = TRUE, color = "#333", opacity = 1, weight = 1.5,
                                  fill = TRUE, fillColor = "#eaf945", fillOpacity = 1,
                                  radius = 5,
                                  label = ~city)
    }
  }
  # Return the leaflet widget
  map
}
#' Map the Congressional data for the nation
#'
#' @param data An \code{sf} object with elections data returned by
#' \code{\link{join_to_spatial}}.
#' @param congress The number of the Congress. If \code{NULL}, it will be
#' guessed from the data.
#' @param congressional_boundaries Draw Congressional district boundaries in
#' addition to county boundaries?
#' @param state_boundaries Draw state boundaries in addition to county
#' boundaries?
#' @param width The width of the map in pixels or percentage. Passed on to
#' \code{\link[leaflet]{leaflet}}.
#' @param height The height of the map in pixels or percentage. Passed on to
#' \code{\link[leaflet]{leaflet}}.
#' @examples
#' map_data <- get_national_map_data("meae.congressional.congress01.national.county")
#' map_national(map_data)
#' @export
map_national <- function(data, congress = NULL,
                         congressional_boundaries = TRUE,
                         state_boundaries = FALSE,
                         width = "100%", height = "800px") {
  # NOTE(review): state_boundaries is accepted but its implementation below
  # is commented out, so the argument currently has no effect — confirm
  # whether this is intentional.
  stopifnot(is.logical(congressional_boundaries))
  # If no Congress number was supplied, take the first non-missing value
  # found in the data
  if (is.null(congress)) {
    congress <- unique(stats::na.omit(data$congress))[1]
  }
  if (congressional_boundaries) {
    # Get the Congressional data now if needed
    congress_sf <- histcongress %>%
      dplyr::filter(startcong <= congress,
                    congress <= endcong,
                    district != -1)
  }
  # Calculate the bounding box. Sometimes the state/counties are bigger;
  # sometimes the Congressional district is bigger.
  bbox_counties <- as.list(sf::st_bbox(data))
  if (congressional_boundaries) {
    bbox <- as.list(sf::st_bbox(congress_sf))
    # Now get the biggest bounding box
    bbox$xmin <- min(bbox$xmin, bbox_counties$xmin)
    bbox$ymin <- min(bbox$ymin, bbox_counties$ymin)
    bbox$xmax <- max(bbox$xmax, bbox_counties$xmax)
    bbox$ymax <- max(bbox$ymax, bbox_counties$ymax)
  } else {
    # If we are not using Congressional boundaries, just use the counties boundaries
    bbox <- bbox_counties
  }
  # Now add a minimum padding based on the size of the state
  # (5% of the extent, but never less than 0.40 degrees)
  lng_pad <- max((bbox$xmax - bbox$xmin) * 0.05, 0.40)
  lat_pad <- max((bbox$ymax - bbox$ymin) * 0.05, 0.40)
  bbox$xmin <- bbox$xmin - lng_pad
  bbox$xmax <- bbox$xmax + lng_pad
  bbox$ymin <- bbox$ymin - lat_pad
  bbox$ymax <- bbox$ymax + lat_pad
  # Fixed national projection: USA Contiguous Albers Equal Area Conic
  projection <- leaflet::leafletCRS(
    crsClass = "L.Proj.CRS",
    code = "ESRI:102003",
    proj4def = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs",
    resolutions = 2^(20:0)
  )
  # One fill color per county, based on the winning party and its margin
  colors <- poli_chrome(dplyr::as_data_frame(data))
  # Instantiate the map with the data and the projection
  map <- leaflet::leaflet(data, width = width, height = height,
                          options = leaflet::leafletOptions(
                            crs = projection,
                            zoomControl = FALSE, dragging = TRUE,
                            minZoom = 7, maxZoom = 12
                          ))
  # Set the maximum bounds of the map
  map <- map %>%
    leaflet::setMaxBounds(bbox$xmin, bbox$ymin, bbox$xmax, bbox$ymax)
  # Draw the county polygons with dashed gray borders and party-colored fills
  map <- map %>%
    leaflet::addPolygons(
      # layerId = "county",
      stroke = TRUE,
      smoothFactor = 1,
      color = "#bbb",
      opacity = 1,
      weight = 2,
      dashArray = "5, 5",
      fillOpacity = 1,
      fillColor = colors,
      label = label_maker(leaflet::getMapData(map), states = TRUE),
      labelOptions = leaflet::labelOptions(direction = "auto"),
      popup = popup_maker(leaflet::getMapData(map))
    )
  # if (state_boundaries) {
  #   state_names <- USAboundaries::state_codes %>%
  #     dplyr::filter(state_abbr %in% state_to_filter)
  #   state_sf <- USAboundaries::us_states(map_date = unique(data$map_date),
  #                                        resolution = "high",
  #                                        states = state_names$state_name)
  #   map <- map %>%
  #     leaflet::addPolygons(
  #       data = state_sf,
  #       # layerId = "state",
  #       stroke = TRUE,
  #       smoothFactor = 1,
  #       color = "#222",
  #       opacity = 1,
  #       weight = 3,
  #       fill = FALSE
  #     )
  # }
  # Optionally overlay solid Congressional district boundary lines
  if (congressional_boundaries) {
    map <- map %>%
      leaflet::addPolygons(
        data = congress_sf,
        stroke = TRUE,
        smoothFactor = 1,
        color = "#222",
        opacity = 1,
        weight = 3,
        fill = FALSE
      )
  }
  # Return the leaflet widget
  map
}
# Compute one fill color per row of the map data. The winning party (largest
# *_percentage column) picks the RColorBrewer palette via `pal_mapping`, and
# the winning share (binned into quintiles of [0, 1]) picks the shade.
poli_chrome <- function(df) {
  # Keep only the party-share columns; replace_with_zero() fills missing
  # shares so max.col()/max() are well defined.
  pct <- df %>%
    dplyr::select(dplyr::ends_with("_percentage")) %>%
    dplyr::mutate_all(replace_with_zero)
  winner_col <- colnames(pct)[max.col(pct)]
  winner <- stringr::str_replace(winner_col, "_percentage", "")
  share <- apply(pct, 1, max)
  palettes <- pal_mapping[winner]
  bin <- cut(share, breaks = seq(0, 1, 0.2), labels = FALSE)
  unname(purrr::map2_chr(palettes, bin, get_color))
}
# Look up shade `i` (1..5) in a 5-step RColorBrewer palette `pal`.
# A missing position (e.g. share of exactly 0, which cut() maps to NA)
# renders as white.
get_color <- function(pal, i) {
  if (is.na(i)) {
    "#FFFFFF"
  } else {
    RColorBrewer::brewer.pal(5, pal)[i]
  }
}
#' @importFrom stringr str_c
# Build hover labels such as "Fairfax County, Virginia / District 3", one per
# row of the map data. SC and LA are not divided into counties, so the
# " County" suffix is suppressed for them. When `states = FALSE` the state
# name is omitted entirely.
label_maker <- function(df, states = FALSE) {
  if (any(unique(df$state_abbr) %in% c("SC", "LA"))) {
    county_label <- ""
  } else {
    county_label <- " County"
  }
  labels <- vector("character", nrow(df))
  for (i in seq_len(nrow(df))) {
    row <- df[i, ]
    county <- str_c(tools::toTitleCase(tolower(row$name)), county_label)
    if (!is.na(row$state_name)) {
      state <- str_c(", ", row$state_name)
    } else {
      state <- NULL
    }
    # Multiple districts are stored comma-separated; pluralize the word.
    # ifelse() (not if/else) is deliberate: it yields NA rather than erroring
    # when row$district is NA.
    district_word <- ifelse(stringr::str_detect(row$district, ", "),
                            "Districts", "District")
    district <- str_c(district_word, " ", row$district)
    if (is.na(district)) {
      district <- NULL
    } else if (district == "District At-large") {
      district <- "At-large district"
    }
    if (states) {
      if (is.null(district)) {
        label <- str_c(county, state)
      } else {
        # Fix: removed a leftover sprintf("%s, %s / %s", ...) here whose
        # result was immediately overwritten (and which doubled the comma,
        # since `state` already starts with ", ").
        label <- str_c(county, state, " / ", district)
      }
    } else {
      label <- str_c(county, district, sep = " / ")
    }
    labels[i] <- label
  }
  labels
}
#' @importFrom stringr str_c
# Build the HTML click-popup for each row of the map data: a bold
# county/state header, the district line, then one line per party with vote
# share (and raw votes when available). Parties with no returns (NA
# percentage) are returned as NULL by votes_to_popup() and silently dropped
# by str_c().
popup_maker <- function(df) {
  # SC and LA are not divided into counties, so drop the " County" suffix.
  if (any(unique(df$state_abbr) %in% c("SC", "LA"))) {
    county_label <- ""
  } else {
    county_label <- " County"
  }
  popups <- vector("character", nrow(df))
  for (i in seq_len(nrow(df))) {
    row <- df[i, ]
    if (!is.na(row$state_name)) {
      state <- str_c(", ", row$state_name)
    } else {
      state <- NULL
    }
    county <- str_c("<b>", tools::toTitleCase(tolower(row$name)), county_label,
                    state, "</b><br/>")
    # NOTE(review): this reads row$districts (plural) while label_maker()
    # reads row$district -- confirm both columns exist in the map data.
    districts <- str_c("District: ", row$districts, "<br/>")
    if (is.na(districts)) districts <- NULL
    federalists <- votes_to_popup("Federalists", row$federalist_percentage,
                                  row$federalist_vote)
    antifeds <- votes_to_popup("Anti-Federalists", row$antifederalist_percentage,
                               row$antifederalist_vote)
    demreps <- votes_to_popup("Democratic-Republicans", row$demrep_percentage,
                              row$demrep_vote)
    chesapeake <- votes_to_popup("Chesapeake", row$chesapeake_percentage,
                                 row$chesapeake_vote)
    potomac <- votes_to_popup("Potomac", row$potomac_percentage,
                              row$potomac_vote)
    repfacs <- votes_to_popup("Republican faction", row$repfac_percentage,
                              row$repfac_vote)
    adamsclay <- votes_to_popup("Adams/Clay supporters", row$adamsclay_percentage,
                                row$adamsclay_vote)
    jacksonian <- votes_to_popup("Jacksonian supporters", row$jacksonian_percentage,
                                 row$jacksonian_vote)
    caucus <- votes_to_popup("Caucus", row$caucus_percentage,
                             row$caucus_vote)
    anticaucus <- votes_to_popup("Anti-Caucus", row$anticaucus_percentage,
                                 row$anticaucus_vote)
    others <- votes_to_popup("Unaffiliated or other parties", row$other_percentage,
                             row$other_vote)
    # Counties whose returns were only reported at the district level get a
    # disclaimer appended to the popup.
    if (!is.na(row$county_source) && row$county_source == "district") {
      disclaimer <- "<br/><span class='county-disclaimer'>County-level returns are not available for this county, so party percentages for the district as a whole have been displayed.</span>"
    } else {
      disclaimer <- NULL
    }
    popup <- str_c(county, districts, federalists, antifeds, demreps,
                   adamsclay, jacksonian, caucus, anticaucus, repfacs,
                   chesapeake, potomac, others, disclaimer, sep = "\n")
    popups[i] <- popup
  }
  popups
}
#' @importFrom stringr str_c
# Format one party's popup line, e.g. "Federalists: 52.3% (1,204 votes)<br/>".
# Returns NULL when the party has no returns (NA percentage); the raw vote
# count is appended only when it is available.
votes_to_popup <- function(party, percentage, vote) {
  if (is.na(percentage)) return(NULL)
  pct_label <- str_c(party, ": ", round(percentage * 100, 1), "%")
  vote_label <- if (!is.na(vote)) {
    str_c(" (", prettyNum(vote, big.mark = ","), " votes)")
  } else {
    ""
  }
  str_c(pct_label, vote_label, "<br/>")
}
# Build a Leaflet CRS for a state's State Plane projection, looking up the
# EPSG code and proj4 string via USAboundaries::state_plane().
make_leaflet_crs <- function(state) {
  leaflet::leafletCRS(
    crsClass = "L.Proj.CRS",
    code = sprintf("ESRI:%s", USAboundaries::state_plane(state, type = "epsg")),
    proj4def = USAboundaries::state_plane(state, type = "proj4"),
    resolutions = 2^(20:0)
  )
}
# Mapping of party identifiers (as they appear in the *_percentage column
# names) to RColorBrewer sequential palette names used by poli_chrome().
pal_mapping <- c(
  "federalist"     = "Greens",
  "antifederalist" = "Oranges",
  "demrep"         = "Purples",
  "repfac"         = "Oranges",
  "adamsclay"      = "Reds",
  "jacksonian"     = "Blues",
  "other"          = "RdPu",
  "potomac"        = "Blues",
  "chesapeake"     = "Reds",
  "caucus"         = "Reds",
  "anticaucus"     = "Blues"
)
|
2cd63a325c5d48dc9b870bf937fb948d1314d5b6
|
4630a28100fbb60d6dbaf71540c0547346760bc3
|
/R/utilities.R
|
680719424871503d5e3852390371ece46f39c932
|
[] |
no_license
|
Bioconductor/BiocManager
|
e202aa74fb2db70cbfed2295958c88d416209d3f
|
125d50a723caaea36d3c27d241f78f7d96e2a3d7
|
refs/heads/devel
| 2023-09-01T01:22:18.656330
| 2023-08-21T20:11:04
| 2023-08-21T20:11:04
| 33,965,307
| 74
| 23
| null | 2023-09-08T13:39:13
| 2015-04-15T01:04:01
|
R
|
UTF-8
|
R
| false
| false
| 2,794
|
r
|
utilities.R
|
.is_CRAN_check <-
    function()
{
    ## TRUE when running under `R CMD check`'s example evaluation: the
    ## session is non-interactive and "CheckExEnv" is on the search path.
    on_check_path <- "CheckExEnv" %in% search()
    on_check_path && !interactive()
}
.is_character <-
    function(x, na.ok = FALSE, zchar = FALSE)
{
    ## Is `x` a character vector, with NAs allowed only when `na.ok` and
    ## empty strings allowed only when `zchar`?
    if (!is.character(x))
        return(FALSE)
    if (!na.ok && anyNA(x))
        return(FALSE)
    zchar || all(nzchar(x))
}
## Is `x` a length-1 character value passing .is_character()'s checks?
.is_scalar_character <- function(x, na.ok = FALSE, zchar = FALSE)
    .is_character(x, na.ok, zchar) && length(x) == 1L
## Is `x` a single TRUE/FALSE (NA allowed only when `na.ok`)?
.is_scalar_logical <- function(x, na.ok = FALSE)
{
    if (!is.logical(x) || length(x) != 1L)
        return(FALSE)
    na.ok || !is.na(x)
}
## Prompt the user with `msg` until one of the `allowed` strings is typed
## (comparison is case-sensitive), then return it lower-cased. In
## non-interactive sessions no prompt is possible, so decline with "n".
.getAnswer <- function(msg, allowed)
{
    if (!interactive())
        return("n")
    repeat {
        cat(msg)
        answer <- readLines(n = 1)
        if (answer %in% allowed)
            return(tolower(answer))
    }
}
## Wrap each element of `x` in plain ASCII single quotes (unlike
## base::sQuote(), which may emit directional Unicode quotes).
.sQuote <- function(x)
    paste0("'", as.character(x), "'")
.url_exists <-
    function(url)
{
    ## TRUE when a single character can be read from `url`; FALSE on any
    ## error (unreachable host, bad URL, ...). Connection warnings are
    ## suppressed.
    probe <- function()
        identical(nchar(.inet_readChar(url, 1L)), 1L)
    suppressWarnings(
        tryCatch(probe(), error = function(...) FALSE)
    )
}
.msg <-
    function(
        fmt, ...,
        width=getOption("width"), indent = 0, exdent = 2, wrap. = TRUE
    )
    ## Use this helper to format all error / warning / message text.
    ## `fmt` and `...` are sprintf()-style; when `wrap.` the text is
    ## re-flowed to `width` with the given indents and joined with newlines.
{
    txt <- sprintf(fmt, ...)
    if (wrap.) {
        ## Fix: reuse `txt` instead of re-evaluating sprintf(fmt, ...) a
        ## second time (the original computed it twice).
        txt <- strwrap(txt, width = width, indent = indent, exdent = exdent)
        txt <- paste(txt, collapse = "\n")
    }
    txt
}
.message <-
    function(..., call. = FALSE, domain = NULL, appendLF=TRUE)
{
    ## call. = FALSE provides compatibility with .stop(), but is ignored
    ## Fix: forward `domain` to message(); it was hard-coded to NULL,
    ## silently discarding the argument (cf. .packageStartupMessage, which
    ## forwards it correctly).
    message(.msg(...), domain = domain, appendLF = appendLF)
    invisible(TRUE)
}
.packageStartupMessage <-
    function(..., domain = NULL, appendLF = TRUE)
{
    ## Startup-time variant of .message(): formatted via .msg() and
    ## suppressible with suppressPackageStartupMessages().
    txt <- .msg(...)
    packageStartupMessage(txt, domain = domain, appendLF = appendLF)
    invisible(TRUE)
}
.stop <-
    function(..., call.=FALSE)
{
    ## Raise an error with .msg()-formatted text; the offending call is
    ## omitted from the message by default.
    txt <- .msg(...)
    stop(txt, call. = call.)
}
.warning <-
    function(..., call.=FALSE, immediate.=FALSE)
{
    ## Raise a warning with .msg()-formatted text; invisibly TRUE so the
    ## helper can be used as the last expression of a function.
    txt <- .msg(...)
    warning(txt, call. = call., immediate. = immediate.)
    invisible(TRUE)
}
isDevel <-
    function()
{
    ## Is the in-use Bioconductor version the current devel version?
    .version_bioc("devel") == version()
}
isRelease <-
    function()
{
    ## Is the in-use Bioconductor version the current release version?
    .version_bioc("release") == version()
}
## testthat helper functions
.skip_if_misconfigured <-
    function()
{
    ## Skip the current test when the installed R / Bioconductor pairing is
    ## invalid according to .version_validity(); the validity error message
    ## is folded into the skip reason.
    bioc_version <- version()
    status <- tryCatch(
        .version_validity(bioc_version),
        error = function(err) conditionMessage(err)
    )
    if (!isTRUE(status)) {
        testthat::skip(sprintf(
            "mis-configuration, R %s, Bioconductor %s",
            getRversion(), bioc_version
        ))
    }
}
.skip_if_BiocVersion_not_available <-
    function()
{
    ## Skip the current test when the BiocVersion package is not installed.
    ## system.file(package = ) is a cheap single-package lookup; the
    ## previous rownames(installed.packages()) scanned every installed
    ## package (?installed.packages explicitly warns this is slow and
    ## recommends system.file()/find.package() for existence checks).
    if (!nzchar(system.file(package = "BiocVersion")))
        testthat::skip("BiocVersion not installed")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.