blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6cb2fa74bc7c57c4f1d8e48c0c923fe55638ef89 | dc0df3319907e4615c94a8b5e15ae7c7a9a6d025 | /R/print.codebook.R | 11dd7bc24f9847ed41936d87fb890c76895ee5ca | [] | no_license | tkuehner/codebookr | 8cf3a9ba21c74f451d65d4558b37ae612e621ea4 | b03d8f4d74aaf685afb435d95a4263a12a917ebb | refs/heads/master | 2020-12-02T17:42:43.847035 | 2017-07-05T22:36:09 | 2017-07-05T22:36:09 | 96,416,325 | 1 | 0 | null | 2017-07-06T10:01:48 | 2017-07-06T10:01:48 | null | UTF-8 | R | false | false | 2,489 | r | print.codebook.R | ##' Print an S3 object of class \code{codebook}
##'
##' \code{print.codebook} prints a summary of a \code{codebook} object,
##' as returned by \code{read_codebook}: the file it was read from, any
##' renamed variables, variable labels, factor levels and limits for
##' continuous variables. (\code{read_codebook} reads a code book stored
##' as a \code{csv} file and returns an S3 object of class \code{codebook}.)
##'
##' @aliases codebook
##'
##' @param x object of class \code{codebook}
##' @param extra logical: whether to print extra information. Default: FALSE
##' @param ... extra arguments passed to specific printing functions
##'
##' @seealso \code{\link{read_codebook}}
##' @author Peter Baker \email{pete@@petebaker.id.au}
##' @examples
##' file.copy(system.file('demoFiles', 'data1_codebook.csv',
##' package='codebookr'), 'data1_codebook.csv')
##' data1_codebook <- read_codebook("data1_codebook.csv",
##' column_names = list(variable_levels = "Factor.Levels",
##' variable_original = "Old.Variable",
##' min = "Min", max = "Max"))
##' print(data1_codebook)
##' @export
print.codebook <-
    function(x, extra = FALSE, ...)
{
    ## Validate input: use inherits() rather than comparing class(x)
    ## directly -- objects may legitimately carry more than one class,
    ## in which case class(x) != "codebook" would warn/misbehave.
    if (!inherits(x, "codebook"))
        stop(paste0("Object '", deparse(substitute(x)),
                    "' not of class 'codebook'"))
    cat("Codebook:", deparse(substitute(x)), "\n\n")
    ## Provenance: source file, read timestamp and column-name mapping.
    if (!is.null(x$file_info)){
        file_info <- x$file_info
        cat("Codebook read from file:", file_info$codebook_filename,
            "\nRead at:", file_info$codebook_read_time, "\nColumn names:\n")
        print(file_info$column_names)
    }
    ## Renamed variables are only shown on request. '&&' (scalar,
    ## short-circuiting) is the correct operator for an if() condition,
    ## not the elementwise '&' used previously.
    if (extra && !is.null(x$renamed_variables)){
        cat("Renamed Variables:\n")
        print(x$renamed_variables)
    }
    if (!is.null(x$variable_labels)){
        cat("\nVariable Labels:\n")
        print(x$variable_labels)
    }
    if (!is.null(x$factor_levels)){
        cat("\nFactor Levels:\n")
        print(x$factor_levels)
    }
    if (!is.null(x$limits_continuous)){
        cat("\nLimits for Continuous Variables:\n")
        print(x$limits_continuous)
    }
    if (extra && !is.null(x$data_management_plan)){
        cat("\nData Management Plan details:\n")
        print(x$data_management_plan, ...)
    }
    ## Print methods conventionally return their argument invisibly so
    ## the object can be piped/assigned after printing.
    invisible(x)
}
## data1_codebook <- read_codebook("../inst/demoFiles/data1_codebook.csv",
## column_names = list(variable_levels = "Factor.Levels",
## variable_original = "Old.Variable",
## min = "Min", max = "Max"))
## x <- data1_codebook
## x
## print(x, extra = TRUE)
|
96dd7f865992d352ac8c3ca674d44847e8075e31 | f227ab831d162ce779731b7ad5b0bf39e951b4c6 | /cachematrix.R | 3c0a85b75fd13c402e50e0a11b2d4e4859aa1fad | [] | no_license | lyudmil/cache-matrix-inverse | 172aa56abb823758f98430e1b961e05f087777c5 | 2d975e7a71cc6a677b4e6028737e6f63d69fe790 | refs/heads/master | 2020-12-25T10:41:53.219608 | 2015-02-19T10:39:11 | 2015-02-19T10:39:11 | 30,979,475 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,709 | r | cachematrix.R | ## These functions provide a more performant way to invert matrices by
## caching previously computed values.
## makeCacheMatrix turns a regular matrix into an object with an inverse() method
## The inverse() method on the returned object will only calculate the inverse the
## first time and cache the value. Any subsequent calls to inverse() will return
## the cached value without redoing the calculation.
## Parameters:
## (1) matrixData: The matrix object to turn into a caching matrix.
## (2) invertMatrix: The function that calculates the inverse.
## This is only used in unit tests in order to test the caching.
## Most callees should use the default, which is solve.
makeCacheMatrix <- function(matrixData = matrix(), invertMatrix = solve) {
  ## Memoized inverse: NULL until the first call to inverse().
  cachedInverse <- NULL

  ## Accessor for the wrapped matrix.
  asMatrix <- function() {
    matrixData
  }

  ## Serve the cached inverse when present; otherwise compute it once
  ## via invertMatrix() and remember it in the enclosing environment.
  inverse <- function() {
    if (!is.null(cachedInverse)) {
      return(cachedInverse)
    }
    cachedInverse <<- invertMatrix(matrixData)
  }

  list(asMatrix = asMatrix, inverse = inverse)
}
## cacheSolve inverts a matrix, caching its value.
## Since it expects to invert a caching matrix, it just delegates to its inverse() method.
## Parameters:
## (1) cachingMatrix: The matrix to inverse.
## This should be an object with an inverse() function that returns the matrix inverse.
## See makeCacheMatrix to find out how to easily construct such an object.
cacheSolve <- function(cachingMatrix) {
    ## Delegate to the caching object's own inverse() method; per
    ## makeCacheMatrix(), the inverse is computed on first use and served
    ## from the cache on every subsequent call.
    cachingMatrix$inverse()
}
|
d6fb15620530f112e6b820490b7d0751ca4d1d3c | 5830b1560094b3fb09f7bd3ffb4dbc1a3834210a | /reporting/ResourceConsumptionReport.r | 47dfa1b0edcf7e38109c6a79ff2820aff5c96bfb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | elkhan-shahverdi/streaming-benchmarks | 66ec62265f23d35f334c65c98e962d45870e40f1 | 9ff9f1854f9bc41a16e6c92c47226e7a32372a6c | refs/heads/master | 2021-07-21T17:25:35.102969 | 2020-04-22T18:12:45 | 2020-04-22T18:12:45 | 142,678,511 | 9 | 4 | null | null | null | null | UTF-8 | R | false | false | 13,041 | r | ResourceConsumptionReport.r | ######################################################################################################################################
########################## Benchmark Resource Consumption ##########################
######################################################################################################################################
## Generate one resource-consumption PDF per TPS step, comparing engines.
##
## For each throughput step i (actual rate = tps * i) this reads per-second
## CPU and memory statistics for every engine in `engines` from local
## benchmark result folders, averages the load over the 10 stream nodes and
## the 5 Kafka nodes, and writes TPS_<rate>_RESOURCE_LOAD.pdf with four
## loess-smoothed panels (stream CPU / stream memory / Kafka CPU / Kafka
## memory), coloured by engine.
##
## Args:
##   engines   - character vector of engine names (result sub-folders).
##   tps       - base transactions-per-second step size.
##   duration  - run length used in the folder name
##               TPS_<n>_DURATION_<duration> -- unit not visible here.
##   tps_count - number of TPS steps to process (one PDF each).
##
## NOTE(review): relies on ggplot2, dplyr, scales, a trim() helper and
## multiplot(), all loaded/defined elsewhere; paths are hard-coded to the
## author's machine. The .cpu/.mem files are parsed by fixed character
## positions (substr), so their format is assumed stable -- confirm
## against the stat-collection scripts.
generateResourceConsumptionReport <- function(engines, tps, duration, tps_count){
  for(i in 1:tps_count) {
    ## Accumulators: one row per (engine, node, second).
    memoryUsage= NULL
    cpuUsage= NULL
    kafkaMemoryUsage= NULL
    kafkaCpuUsage= NULL
    for(eng in 1:length(engines)){
      engine = engines[eng]
      TPS = toString(tps*i)
      reportFolder = paste("/Users/sahverdiyev/Desktop/EDU/THESIS/stream-benchmarking/result/", sep = "")
      sourceFolder = paste("/Users/sahverdiyev/Desktop/EDU/THESIS/stream-benchmarking/result/", engine, "/TPS_", TPS,"_DURATION_",toString(duration),"/", sep = "")
      #Get the stream servers cpu and memory consumption statistics
      for(x in 1:10) {
        streamCpu = read.table(paste(sourceFolder, "stream-node-0", x,".cpu",sep=""),header=F,stringsAsFactors=F,sep=',')
        streamMem = read.table(paste(sourceFolder, "stream-node-0", x,".mem",sep=""),header=F,stringsAsFactors=F,sep=',')
        ## One sample per file line, so the line index is the time axis.
        SecondsCpu <- 1:length(streamCpu$V1)
        SecondsMem <- 1:length(streamMem$V1)
        ## Memory usage = used/total * 100, both parsed from fixed columns.
        dfCpu <- data.frame(engine, paste("Node " , x, sep=""), as.numeric(trim(substr(streamCpu$V1, 9, 14))), SecondsCpu)
        dfMemory <- data.frame(engine, paste("Node " , x, sep=""), as.numeric(trim(substr(streamMem$V3, 2, 10)))*100/as.numeric(trim(substr(streamMem$V1, 11, 19))), SecondsMem)
        names(dfCpu) <- c("ENGINE","NODE","USAGE", "TIME")
        names(dfMemory) <- c("ENGINE","NODE","USAGE", "TIME")
        cpuUsage <- rbind(cpuUsage, dfCpu)
        memoryUsage <- rbind(memoryUsage, dfMemory)
      }
      #Get the kafka servers cpu and memory consumption statistics
      for(x in 1:5) {
        kafkaCpu = read.table(paste(sourceFolder, "kafka-node-0", x,".cpu",sep=""),header=F,stringsAsFactors=F,sep=',')
        kafkaMem = read.table(paste(sourceFolder, "kafka-node-0", x,".mem",sep=""),header=F,stringsAsFactors=F,sep=',')
        SecondsCpu <- 1:length(kafkaCpu$V1)
        SecondsMem <- 1:length(kafkaMem$V1)
        dfCpu <- data.frame(engine, paste("Node " , x, sep=""), as.numeric(trim(substr(kafkaCpu$V1, 9, 14))), SecondsCpu)
        dfMemory <- data.frame(engine, paste("Node " , x, sep=""), as.numeric(trim(substr(kafkaMem$V3, 2, 10)))*100/as.numeric(trim(substr(kafkaMem$V1, 11, 19))), SecondsMem)
        names(dfCpu) <- c("ENGINE","NODE","USAGE", "TIME")
        names(dfMemory) <- c("ENGINE","NODE","USAGE", "TIME")
        kafkaCpuUsage <- rbind(kafkaCpuUsage, dfCpu)
        kafkaMemoryUsage <- rbind(kafkaMemoryUsage, dfMemory)
      }
    }
    ## Collapse over nodes: mean USAGE per (engine, second).
    names(cpuUsage) <- c("ENGINE","NODE","USAGE", "TIME")
    names(memoryUsage) <- c("ENGINE", "NODE","USAGE","TIME")
    cpuUsage <- cpuUsage %>% group_by(ENGINE, TIME) %>% summarise(USAGE=mean(USAGE))
    memoryUsage <- memoryUsage %>% group_by(ENGINE, TIME) %>% summarise(USAGE=mean(USAGE))
    names(kafkaCpuUsage) <- c("ENGINE","NODE","USAGE", "TIME")
    names(kafkaMemoryUsage) <- c("ENGINE", "NODE","USAGE","TIME")
    kafkaCpuUsage <- kafkaCpuUsage %>% group_by(ENGINE, TIME) %>% summarise(USAGE=mean(USAGE))
    kafkaMemoryUsage <- kafkaMemoryUsage %>% group_by(ENGINE, TIME) %>% summarise(USAGE=mean(USAGE))
    ## Panel 1: stream-node CPU. Legend suppressed on the first three
    ## panels; panel 4 carries the shared legend.
    p1 <- ggplot(data=cpuUsage, aes(x=TIME, y=USAGE, group=ENGINE, colour=ENGINE)) +
      scale_y_continuous(breaks= pretty_breaks()) +
      geom_smooth(method="loess", se=F, size=0.5) +
      guides(fill=FALSE) +
      labs(x="Time (seconds)", y="CPU load percentage") +
      #subtitle=paste("Stream Servers CPU load with", toString(tps*i*10), "TPS")) +
      theme(plot.title = element_text(size = 8, face = "plain"),
            plot.subtitle = element_text(size = 7, face = "plain"),
            text = element_text(size = 6, face = "plain"),
            legend.position="none")
    ## Panel 2: stream-node memory.
    p2 <- ggplot(data=memoryUsage, aes(x=TIME, y=USAGE, group=ENGINE, colour=ENGINE)) +
      geom_smooth(method="loess", se=F, size=0.5) +
      scale_y_continuous(breaks= pretty_breaks()) +
      guides(fill=FALSE) +
      labs(x="Time (seconds)", y="Memory load percentage") +
      #subtitle=paste("Stream Servers Memory load with", toString(tps*i*10), "TPS")) +
      theme(plot.title = element_text(size = 8, face = "plain"),
            plot.subtitle = element_text(size = 7, face = "plain"),
            text = element_text(size = 6, face = "plain"),
            legend.position="none")
    ## Panel 3: Kafka-node CPU.
    p3 <- ggplot(data=kafkaCpuUsage, aes(x=TIME, y=USAGE, group=ENGINE, colour=ENGINE)) +
      scale_y_continuous(breaks= pretty_breaks()) +
      geom_smooth(method="loess", se=F, size=0.5) +
      guides(fill=FALSE, size=1) +
      labs(x="Time (seconds)", y="CPU load percentage") +
      #subtitle=paste("Kafka Servers CPU load with", toString(tps*i*10), "TPS")) +
      theme(plot.title = element_text(size = 8, face = "plain"),
            plot.subtitle = element_text(size = 7, face = "plain"),
            text = element_text(size = 6, face = "plain"),
            legend.position="none")
    ## Panel 4: Kafka-node memory, with the shared legend anchored in the
    ## bottom-right corner on a translucent background.
    p4 <- ggplot(data=kafkaMemoryUsage, aes(x=TIME, y=USAGE, group=ENGINE, colour=ENGINE)) +
      geom_smooth(method="loess", se=F, size=0.5) +
      scale_y_continuous(breaks= pretty_breaks()) +
      guides(fill=FALSE, size=1) +
      labs(x="Time (seconds)", y="Memory load percentage") +
      #subtitle=paste("Kafka Servers Memory load with", toString(tps*i*10), "TPS")) +
      theme(plot.title = element_text(size = 8, face = "plain"),
            plot.subtitle = element_text(size = 7, face = "plain"),
            text = element_text(size = 6, face = "plain"),
            legend.justification = c(1, 0),
            legend.background = element_rect(fill=alpha('white', 0.4)),
            legend.position = c(1, 0),
            legend.key.height=unit(0.7,"line"),
            legend.key.width=unit(0.5,"line"),
            legend.box.margin=margin(c(3,3,3,3)),
            legend.text=element_text(size=rel(1.0)))
    ## 2 x 2 layout via the external multiplot() helper.
    pdf(paste(reportFolder, "TPS_",TPS,"_RESOURCE_LOAD", ".pdf", sep = ""), width = 6, height = 6)
    multiplot(p1, p2, p3, p4, cols = 2)
    dev.off()
  }
}
## Resource consumption for ONE engine across all TPS steps.
##
## Unlike generateResourceConsumptionReport(), the accumulators live
## outside the loop, so all TPS levels end up in the same data frames and
## are drawn as separate colour groups in a single four-panel PDF.
##
## NOTE(review): the output file name reuses TPS, which after the loop
## holds only the *last* step's rate, even though the figure covers every
## step -- confirm this naming is intentional. Same external dependencies
## and hard-coded paths as the function above.
generateResourceConsumptionReportByTps <- function(engine, tps, duration, tps_count){
  ## Accumulators shared across all TPS steps: one row per (TPS, node, second).
  memoryUsage= NULL
  cpuUsage= NULL
  kafkaMemoryUsage= NULL
  kafkaCpuUsage= NULL
  for(i in 1:tps_count) {
    TPS = toString(tps*i)
    reportFolder = paste("/Users/sahverdiyev/Desktop/EDU/THESIS/stream-benchmarking/result/", engine, "/", sep = "")
    sourceFolder = paste("/Users/sahverdiyev/Desktop/EDU/THESIS/stream-benchmarking/result/", engine, "/TPS_", TPS,"_DURATION_",toString(duration),"/", sep = "")
    #Get the stream servers cpu and memory consumption statistics
    for(x in 1:10) {
      streamCpu = read.table(paste(sourceFolder, "stream-node-0", x,".cpu",sep=""),header=F,stringsAsFactors=F,sep=',')
      streamMem = read.table(paste(sourceFolder, "stream-node-0", x,".mem",sep=""),header=F,stringsAsFactors=F,sep=',')
      ## One sample per file line; line index doubles as the time axis.
      SecondsCpu <- 1:length(streamCpu$V1)
      SecondsMem <- 1:length(streamMem$V1)
      ## Memory usage = used/total * 100, parsed from fixed columns.
      dfCpu <- data.frame(engine, TPS, as.numeric(trim(substr(streamCpu$V1, 9, 14))), SecondsCpu)
      dfMemory <- data.frame(engine, TPS, as.numeric(trim(substr(streamMem$V3, 2, 10)))*100/as.numeric(trim(substr(streamMem$V1, 11, 19))), SecondsMem)
      names(dfCpu) <- c("ENGINE","TPS","USAGE", "TIME")
      names(dfMemory) <- c("ENGINE","TPS","USAGE", "TIME")
      cpuUsage <- rbind(cpuUsage, dfCpu)
      memoryUsage <- rbind(memoryUsage, dfMemory)
    }
    #Get the kafka servers cpu and memory consumption statistics
    for(x in 1:5) {
      kafkaCpu = read.table(paste(sourceFolder, "kafka-node-0", x,".cpu",sep=""),header=F,stringsAsFactors=F,sep=',')
      kafkaMem = read.table(paste(sourceFolder, "kafka-node-0", x,".mem",sep=""),header=F,stringsAsFactors=F,sep=',')
      SecondsCpu <- 1:length(kafkaCpu$V1)
      SecondsMem <- 1:length(kafkaMem$V1)
      dfCpu <- data.frame(engine, TPS, as.numeric(trim(substr(kafkaCpu$V1, 9, 14))), SecondsCpu)
      dfMemory <- data.frame(engine, TPS, as.numeric(trim(substr(kafkaMem$V3, 2, 10)))*100/as.numeric(trim(substr(kafkaMem$V1, 11, 19))), SecondsMem)
      names(dfCpu) <- c("ENGINE","TPS","USAGE", "TIME")
      names(dfMemory) <- c("ENGINE","TPS","USAGE", "TIME")
      kafkaCpuUsage <- rbind(kafkaCpuUsage, dfCpu)
      kafkaMemoryUsage <- rbind(kafkaMemoryUsage, dfMemory)
    }
  }
  ## Collapse over nodes: mean USAGE per (TPS level, second).
  names(cpuUsage) <- c("ENGINE","TPS","USAGE", "TIME")
  names(memoryUsage) <- c("ENGINE", "TPS","USAGE","TIME")
  cpuUsage <- cpuUsage %>% group_by(TPS, TIME) %>% summarise(USAGE=mean(USAGE))
  memoryUsage <- memoryUsage %>% group_by(TPS, TIME) %>% summarise(USAGE=mean(USAGE))
  names(kafkaCpuUsage) <- c("ENGINE","TPS","USAGE", "TIME")
  names(kafkaMemoryUsage) <- c("ENGINE", "TPS","USAGE","TIME")
  kafkaCpuUsage <- kafkaCpuUsage %>% group_by(TPS, TIME) %>% summarise(USAGE=mean(USAGE))
  kafkaMemoryUsage <- kafkaMemoryUsage %>% group_by(TPS, TIME) %>% summarise(USAGE=mean(USAGE))
  ## Panel 1: stream CPU, one colour per TPS level; each panel carries its
  ## own small legend in the bottom-right corner.
  p1 <- ggplot(data=cpuUsage, aes(x=TIME, y=USAGE, group=TPS, colour=TPS)) +
    scale_y_continuous(breaks= pretty_breaks()) +
    geom_smooth(method="loess", se=F, size=0.5) +
    guides(fill=FALSE) +
    labs(x="Time (seconds)", y="CPU load percentage") +
    #subtitle=paste("Stream Servers CPU load with", toString(tps*i*10), "TPS")) +
    theme(plot.title = element_text(size = 8, face = "plain"),
          plot.subtitle = element_text(size = 7, face = "plain"),
          text = element_text(size = 6, face = "plain"),
          legend.justification = c(1, 0),
          legend.position = c(1, 0),
          legend.key.height=unit(0.5,"line"),
          legend.key.width=unit(0.5,"line"),
          legend.box.margin=margin(c(3,3,3,3)),
          legend.text=element_text(size=rel(0.5)))
  ## Panel 2: stream memory.
  p2 <- ggplot(data=memoryUsage, aes(x=TIME, y=USAGE, group=TPS, colour=TPS)) +
    geom_smooth(method="loess", se=F, size=0.5) +
    scale_y_continuous(breaks= pretty_breaks()) +
    guides(fill=FALSE) +
    labs(x="Time (seconds)", y="Memory load percentage") +
    #subtitle=paste("Stream Servers Memory load with", toString(tps*i*10), "TPS")) +
    theme(plot.title = element_text(size = 8, face = "plain"),
          plot.subtitle = element_text(size = 7, face = "plain"),
          text = element_text(size = 6, face = "plain"),
          legend.justification = c(1, 0),
          legend.position = c(1, 0),
          legend.key.height=unit(0.5,"line"),
          legend.key.width=unit(0.5,"line"),
          legend.box.margin=margin(c(3,3,3,3)),
          legend.text=element_text(size=rel(0.5)))
  ## Panel 3: Kafka CPU.
  p3 <- ggplot(data=kafkaCpuUsage, aes(x=TIME, y=USAGE, group=TPS, colour=TPS)) +
    scale_y_continuous(breaks= pretty_breaks()) +
    geom_smooth(method="loess", se=F, size=0.5) +
    guides(fill=FALSE, size=1) +
    labs(x="Time (seconds)", y="CPU load percentage") +
    #subtitle=paste("Kafka Servers CPU load with", toString(tps*i*10), "TPS")) +
    theme(plot.title = element_text(size = 8, face = "plain"),
          plot.subtitle = element_text(size = 7, face = "plain"),
          text = element_text(size = 6, face = "plain"),
          legend.justification = c(1, 0),
          legend.position = c(1, 0),
          legend.key.height=unit(0.5,"line"),
          legend.key.width=unit(0.5,"line"),
          legend.box.margin=margin(c(3,3,3,3)),
          legend.text=element_text(size=rel(0.5)))
  ## Panel 4: Kafka memory.
  p4 <- ggplot(data=kafkaMemoryUsage, aes(x=TIME, y=USAGE, group=TPS, colour=TPS)) +
    geom_smooth(method="loess", se=F, size=0.5) +
    scale_y_continuous(breaks= pretty_breaks()) +
    guides(fill=FALSE, size=1) +
    labs(x="Time (seconds)", y="Memory load percentage") +
    #subtitle=paste("Kafka Servers Memory load with", toString(tps*i*10), "TPS")) +
    theme(plot.title = element_text(size = 8, face = "plain"),
          plot.subtitle = element_text(size = 7, face = "plain"),
          text = element_text(size = 6, face = "plain"),
          legend.justification = c(1, 0),
          legend.position = c(1, 0),
          legend.key.height=unit(0.5,"line"),
          legend.key.width=unit(0.5,"line"),
          legend.box.margin=margin(c(3,3,3,3)),
          legend.text=element_text(size=rel(0.5)))
  ## 2 x 2 layout via the external multiplot() helper.
  pdf(paste(reportFolder, "TPS_",TPS,"_RESOURCE_LOAD", ".pdf", sep = ""), width = 6, height = 6)
  multiplot(p1, p2, p3, p4, cols = 2)
  dev.off()
}
|
bd03f64c997c9401027f2f62d040b7fe83482afb | d311812591389299ab2a0d24b52b392c32efe887 | /R/get_effect_variances.R | f9a9243bac0e4a91f620dda969f6b3c9f93b9b10 | [] | no_license | bestwpw/margins | e6421f7910977bfb8e2083c769c4202164feca6f | 427f0b20bf79f2bb12bdaba35689688dbdd992cf | refs/heads/master | 2017-11-30T21:23:40.074525 | 2016-08-18T10:04:00 | 2016-08-18T10:04:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,062 | r | get_effect_variances.R | get_effect_variances <-
function(data = data,
         model = model,
         which = all.vars(model[["terms"]])[-1], # which mes do we need variances of
         type = c("response", "link", "terms"),
         ## BUG FIX: the default used to be `vcov = vcov(model)`, which is
         ## self-referential -- default arguments are evaluated in the
         ## function's own frame, so looking up `vcov` finds the formal
         ## being evaluated and raises "promise already under evaluation"
         ## whenever the caller omits `vcov`. Qualifying the call as
         ## stats::vcov() keeps the same default value without recursion.
         vcov = stats::vcov(model),
         vce = c("delta", "simulation", "bootstrap"),
         iterations = 50L, # if vce == "bootstrap" or "simulation"
         method = c("simple", "Richardson", "complex"), # passed to marginal_effects()
         ...) {
    ## match.arg() for the enumerated arguments
    type <- match.arg(type)
    method <- match.arg(method)
    vce <- match.arg(vce)
    ## Callers may pass a vcov *function*; reduce it to a matrix here.
    if (is.function(vcov)) {
        vcov <- vcov(model)
    }
    if (vce == "delta") {
        # default method: analytic delta-method variances
        # (delta_once is defined elsewhere in this package)
        variances <- delta_once(data = data, model = model, type = type, vcov = vcov, method = method)
    } else if (vce == "simulation") {
        # copy model for quick use in estimation
        tmpmodel <- model
        tmpmodel$model <- NULL # remove data from model for memory
        # simulate coefficient vectors from their asymptotic MVN distribution
        coefmat <- MASS::mvrnorm(iterations, coef(model), vcov)
        # estimate the AME from each simulated coefficient vector
        effectmat <- apply(coefmat, 1, function(coefrow) {
            tmpmodel[["coefficients"]] <- coefrow
            colMeans(marginal_effects(data, model = tmpmodel, type = type, method = method))
        })
        # variance of the simulated AMEs
        variances <- apply(effectmat, 1, var, na.rm = TRUE)
    } else if (vce == "bootstrap") {
        # AME for one bootstrap resample of the rows of `data`
        bootfun <- function() {
            s <- sample(seq_len(nrow(data)), nrow(data), TRUE)
            colMeans(marginal_effects(model = model, data = data[s,], type = type, method = method), na.rm = TRUE)
        }
        # bootstrap the data and take the variance of bootstrapped AMEs
        variances <- apply(replicate(iterations, bootfun()), 1, var, na.rm = TRUE)
    }
    return(variances)
}
|
f18b73b405bd048ccd626b66a361170f4a54bb37 | 589b566fc6d258a5c342cfc76782371b1681e4f3 | /elitism.R | 12072af780b72edeb399338479c50c507a44e552 | [] | no_license | Fozefy/GeneticAlgorithm | c36061e7d098649ee480d25f37c145ef07f331ab | db2f3ce9ad58a0ef5aa65a51a8c74afa5a5d0609 | refs/heads/master | 2016-09-06T07:06:36.741214 | 2016-03-08T21:31:22 | 2016-03-08T21:31:22 | 12,627,009 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,770 | r | elitism.R | #### Elitism
#### Elitism factory: bind an elitism strategy and its settings into a
#### closure that needs only the population, suitable for calling once
#### per generation.
select.elite.population.fgen <- function(elite.fn, elite.size, maximizing, ...) {
  extract_elites <- function(pop) {
    elite.fn(pop = pop, elite.size = elite.size, maximizing = maximizing, ...)
  }
  extract_elites
}
#note: elite.selection only works with integer or floating point fitness ordered by <= or >=
## Select up to elite.size genetically-distinct top organisms from a
## population. Only works with integer/floating-point fitness ordered by
## the "organismList" comparison methods defined below.
##
## Args:
##   pop        - population object; organisms read from pop@organisms$values.
##   elite.size - number of distinct elites wanted (default 1).
##   maximizing - TRUE to treat larger fitness as better.
##   pop.fit    - unused here; kept for interface compatibility.
##   verbose    - emit a message when duplicates prevent filling the list.
elite.selection <- function(pop, elite.size = 1, maximizing = TRUE, pop.fit = NULL, verbose = FALSE){
  ## Rank organisms best-first; sort() dispatches through the
  ## "organismList" S3 comparison methods.
  sortedOrganisms <- pop@organisms$values
  class(sortedOrganisms) <- "organismList"
  sortedOrganisms <- sort(sortedOrganisms, decreasing = maximizing)
  ## Seed the elite list with the single best organism.
  eliteList <- c(sortedOrganisms[[1]])
  ## Also guard on length: with only one organism, 2:length(...) would
  ## count DOWN (2, 1) and index out of bounds.
  if (elite.size > 1 && length(sortedOrganisms) > 1)
  {
    elitesFilled <- 1
    for (i in 2:length(sortedOrganisms))
    {
      ## Skip genetic duplicates of organisms already selected.
      foundMatch <- FALSE
      for (j in 1:elitesFilled)
      {
        if (identical(eliteList[[j]]@chromosome$genes, sortedOrganisms[[i]]@chromosome$genes))
        {
          foundMatch <- TRUE
          break
        }
      }
      if (!foundMatch)
      {
        elitesFilled <- elitesFilled + 1
        eliteList[[elitesFilled]] <- sortedOrganisms[[i]]
      }
      if (elitesFilled == elite.size) break
      ## BUG FIX: was print(cat(...)), which printed NULL because cat()
      ## returns NULL; message() is the right channel for diagnostics.
      else if (i == length(sortedOrganisms) && isTRUE(verbose))
        message("Too many duplicates in population to generate ", elite.size,
                " elites, Returning: ", elitesFilled)
    }
  }
  return(eliteList)
}
#Used for sortation
#Used for sortation: subset while keeping the "organismList" class.
`[.organismList` <- function(x, i) {
  ## Strip the class so plain list subsetting runs, then re-tag the result.
  plain <- unclass(x)
  structure(plain[i], class = "organismList")
}
## Order organism lists by the fitness value of their leading organism.
`>.organismList` <- function(e1, e2) {
  lhs.fit <- e1[[1]]@fitness$value
  rhs.fit <- e2[[1]]@fitness$value
  lhs.fit > rhs.fit
}
## Two organism lists tie when their leading organisms' fitness match.
`==.organismList` <- function(e1, e2) {
  lhs.fit <- e1[[1]]@fitness$value
  rhs.fit <- e2[[1]]@fitness$value
  lhs.fit == rhs.fit
}
f34b23d1d24cd45c6bc6edba64b5782827cadc6c | 2a3f50853c6e3b404329e98076428f60629cca75 | /R/Scripts/KddPaper062014/SwitchingAREventInference.r | 8f8751b54e5793066775d4c00ac520ddc215b3cf | [] | no_license | wmchad/NetworkResearch | dc9d8f210325e58fa27c2bf52ad9c4dc4a917d0b | 9826d2442f291238c4d89bdca4046e93ee05b77e | refs/heads/master | 2021-01-17T06:34:14.150982 | 2014-09-08T19:15:36 | 2014-09-08T19:15:36 | 18,338,256 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,777 | r | SwitchingAREventInference.r | require("bcp");
setwd("c:/Code/NetworkResearch/R/Analysis/");
source("SwitchingAR.r");
setwd("c:/Code/NetworkResearch/R/Functions/Data/");
source("TowerFunctions.r");
## Extract three call-volume series around an event day:
##   day      - counts on the event day itself
##   interval - counts within +/- `interval` days of the event
##   month    - the full series (every row of `data`)
GetTimeSeries <- function(data, day, interval) {
  calls <- data$TotalDiffCalls
  in.window <- data$Day >= day - interval & data$Day <= day + interval
  list(day      = calls[data$Day == day],
       interval = calls[in.window],
       month    = calls)
}
## Get data for the events
bombing1Calls <- GetTowerAggregatedCalls( 790, 11, 12 );
bombing2Calls <- GetTowerAggregatedCalls( 915, 11, 12 );
inaug1Calls <- GetTowerAggregatedCalls( 183, 11, 12 );
inaug2Calls <- GetTowerAggregatedCalls( 261, 11, 7 );
## Get various datasets:
bombing1.y <- GetTimeSeries( bombing1Calls, 6, 5 );
bombing2.y <- GetTimeSeries( bombing2Calls, 7, 5 );
inaug1.y <- GetTimeSeries( inaug1Calls, 15, 5 );
inaug2.y <- GetTimeSeries( inaug2Calls, 23, 5 );
events.y <- NULL;
events.y$day <- c(inaug1.y$day, bombing1.y$day, inaug2.y$day, bombing2.y$day);
events.y$day <- events.y$day/max(events.y$day);
events.y$interval <- c(inaug1.y$interval, bombing1.y$interval,
inaug2.y$interval, bombing2.y$interval);
events.y$interval <- events.y$interval/max(events.y$interval);
events.y$month <- c(inaug1.y$month, bombing1.y$month,
inaug2.y$month, bombing2.y$month);
events.y$month <- events.y$month/max(events.y$month);
## Analysis - concatenated events
samplerResults <- NULL;
samplerResults$events <- NULL;
samplerResults$bombing <- NULL;
nBurn <- 2000;
nSamp <- 2000;
verbose <- TRUE;
nAnnounce <- 10;
setwd("c:/Code/NetworkResearch/R/Scripts/KddPaper062014/Results");
K <- 3;
hypers <- list(alpha=1/K, a=.1, b=.001, sigma2mu=1, sigma2a=1);
Rprof();
samplerResults.bombing.day.3s <- SwitchingARSampler( hypers, K, bombing1.y$day,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
summaryRprof();
Rprof(NULL);
save(samplerResults.bombing.day.3s, file="results.bombing.day.3s.rdata");
samplerResults.bombing.interval.3s <- SwitchingARSampler( hypers, K, bombing1.y$interval,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.bombing.interval.3s, file="results.bombing.interval.3s.rdata");
samplerResults.events.day.3s <- SwitchingARSampler( hypers, K, events.y$day,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.events.day.3s, file="results.events.day.3s.rdata");
samplerResults.events.interval.3s <- SwitchingARSampler( hypers, K, events.y$interval,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.events.interval.3s, file="results.events.interval.3s.rdata");
K <- 5;
hypers <- list(alpha=1/K, a=.1, b=.001, sigma2mu=1, sigma2a=1);
samplerResults.bombing.day.5s <- SwitchingARSampler( hypers, K, bombing1.y$day,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.bombing.day.5s, file="results.bombing.day.5s.rdata");
samplerResults.bombing.interval.5s <- SwitchingARSampler( hypers, K, bombing1.y$interval,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.bombing.interval.5s, file="results.bombing.interval.5s.rdata");
samplerResults.events.day.5s <- SwitchingARSampler( hypers, K, events.y$day,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.events.day.5s, file="results.events.day.5s.rdata");
samplerResults.events.interval.5s <- SwitchingARSampler( hypers, K, events.y$interval,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.events.interval.5s, file="results.events.interval.5s.rdata");
K <- 10;
hypers <- list(alpha=1/K, a=.1, b=.001, sigma2mu=1, sigma2a=1);
samplerResults.bombing.day.10s <- SwitchingARSampler( hypers, K, bombing1.y$day,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.bombing.day.10s, file="results.bombing.day.10s.rdata");
samplerResults.bombing.interval.10s <- SwitchingARSampler( hypers, K, bombing1.y$interval,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.bombing.interval.10s, file="results.bombing.interval.10s.rdata");
samplerResults.events.day.10s <- SwitchingARSampler( hypers, K, events.y$day,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.events.day.10s, file="results.events.day.10s.rdata");
samplerResults$events$interval.10s <- SwitchingARSampler( hypers, K, events.y$interval,
nBurn, nSamp, verbose=verbose,
nAnnounce=nAnnounce );
save(samplerResults.events.interval.10s, file="results.events.interval.10s.rdata");
## Changepoint Detection ----
## Bayesian changepoint analysis (bcp package) of the normalized
## concatenated event-day series built above.
bcpResults.events.day <- bcp(events.y$day, w0=1, p0=.0005,
                             return.mcmc=TRUE, burnin=500, mcmc=5000);
## Step function from one hand-picked MCMC draw (iteration 2105):
## cumulative changepoint count rescaled into [-1, 0] so it can share an
## axis with the normalized volume curve.
bcpstates <- (cumsum(bcpResults.events.day$mcmc.rhos[,2105]) /
              sum(bcpResults.events.day$mcmc.rhos[,2105])) - 1;
bcpnames="1";
df.bcp <- data.frame(Time=1:length(events.y$day), Volume=events.y$day,
                     States=bcpstates,
                     Prob=bcpResults.events.day$posterior.prob/2 - 1);
setwd("C:/Data/NetworkResearch/Results");
save(bcpResults.events.day, file="bcpResults.events.day.rdata");
## Diagnostic overlay: volume (black), sampled states (blue), posterior
## changepoint probability (green), all on one axis.
ggsave("ChangepointResults.png", height=6, width=10,
       plot=ggplot(df.bcp, aes(Time, Volume)) + geom_line() +
         geom_line(aes(y=States), color="blue") +
         geom_line(aes(y=Prob), color="darkgreen"));
## Summary layout: four 288-sample event days side by side with gaps
## (offsets 0, 320, 640, 960); states drawn slightly below the traces.
df.events.day <- data.frame(time=c(1:288, (1:288) + 320, (1:288) + 640, (1:288) + 960),
                            event=c(rep("inaug1", 288), rep("bombing1", 288),
                                    rep("inaug2", 288), rep("bombing2", 288)),
                            volume=events.y$day / max(events.y$day),
                            cpstate=bcpstates - .2,
                            cpnames=bcpnames);
## NOTE(review): the first eventStarts/eventEnds pair is dead code -- it
## is immediately overwritten by the hand-tuned boundaries that follow.
eventStarts <- c(0, 90, 100, 461, 466, 807, 816, 1093, 1095);
eventEnds <- c(90, 100, 320+11*12+9, 466, 807, 816, 1093, 1095, 1250)
eventStarts <- c(0, 91, 118, 462, 466, 815, 854, 1095, 1100);
eventEnds <- c(91, 118, 462, 466, 815, 854, 1095, 1100, 1250)
## Summary figure: coloured bands mark the detected segments beneath the
## per-event volume traces and the changepoint step function.
cpSummaryPlot <- ggplot() +
  annotate("rect", xmin=eventStarts, xmax=eventEnds, ymin=-Inf, ymax=-.2,
           fill=c("green", "#FDDC00", "green", "red", "green",
                  "blue", "green", "purple", "green"), alpha=0.4) +
  geom_line(data=df.events.day, aes(x=time, y=volume, group=event), color="blue") +
  geom_step(data=df.events.day, aes(x=time, y=cpstate, group=event)) +
  theme(axis.title=element_blank(), axis.text=element_blank(),
        axis.ticks=element_blank()) +
  scale_x_continuous(breaks=c(300, 620, 940), minor_breaks=NULL, limits=c(0, 1250)) +
  scale_y_continuous(breaks=NULL)
## NOTE(review): the first setwd() is immediately overridden by the second.
setwd("C:/Data/NetworkResearch/Results");
setwd("c:/Code/NetworkResearch/R/Scripts/KddPaper062014/Plots");
ggsave("ChangepointSummaryPlot2.png", height=5, width=10, plot=cpSummaryPlot);
## Quick interactive base-graphics checks: states / posterior probability
## over the data, plus a few individual MCMC draws.
plot(events.y$day, type="l", ylim=c(-1, 1))
lines(bcpstates, type="s", col="blue");
lines(bcpResults.events.day$posterior.prob/2 - 1, col="green");
plot(cumsum(bcpResults.events.day$mcmc.rhos[,51]), type="s")
lines(cumsum(bcpResults.events.day$mcmc.rhos[,52]), type="s")
lines(cumsum(bcpResults.events.day$mcmc.rhos[,550]), type="s", lty=2)
## State plots ----
## Relabel the best sample's hidden states by occupancy (the most-visited
## state becomes 1) and plot them beneath the event traces.
setwd("c:/Code/NetworkResearch/R/Scripts/KddPaper062014/Results");
load("results.events.day.10s.long.rdata");
bestSample.events.day.long.10s <- samplerResults.events.day.long.10s$bestSample;
tempz <- bestSample.events.day.long.10s$z;
temptable <- table(tempz);
newOrder <- order(table(tempz), decreasing=TRUE);
## Two-phase relabel via a +100 offset so already-renamed states cannot
## collide with labels that are still to be renamed.
for ( i in 1:length(newOrder) ) {
  tempz[tempz==as.numeric(names(temptable)[newOrder[i]])] <- 100 + i;
}
tempz <- tempz - 100;
## Posterior switch frequency: fraction of the 2000 samples with a state
## change at each time step (computed here but not used in the plot below).
counts <- rep(0, length(samplerResults.events.day.long.10s$samples[[1]]$z)-1);
for ( sample in samplerResults.events.day.long.10s$samples ) {
  counts <- counts + as.numeric(sample$z[-1]!=sample$z[-length(sample$z)]);
}
counts <- c(0, counts);
counts <- counts / 2000;
## Same 4 x 288 side-by-side layout as the changepoint figure; the 10
## relabelled states are squeezed into [-0.9, 0] under the volume traces.
df.events.day.long.10s <- data.frame(time=c(1:288, (1:288) + 320, (1:288) + 640, (1:288) + 960),
                                     event=c(rep("inaug1", 288), rep("bombing1", 288),
                                             rep("inaug2", 288), rep("bombing2", 288)),
                                     volume=events.y$day / max(events.y$day),
                                     state=tempz/10-1);
## Hand-tuned segment boundaries for the coloured annotation bands.
eventStarts <- c(0, 91, 139, 463, 468, 808, 867, 1095, 1098);
eventEnds <- c(91, 139, 463, 468, 808, 867, 1095, 1098, 1250)
arHmmSummaryPlot <- ggplot() +
  annotate("rect", xmin=eventStarts, xmax=eventEnds, ymin=-Inf, ymax=-.2,
           fill=c("green", "#FDDC00", "green", "red", "green",
                  "#FDDC00", "green", "red", "green"), alpha=0.4) +
  geom_line(data=df.events.day.long.10s, aes(x=time, y=volume, group=event), color="blue") +
  geom_step(data=df.events.day.long.10s, aes(x=time, y=state, group=event)) +
  theme(axis.title=element_blank(), axis.text=element_blank(),
        axis.ticks=element_blank()) +
  scale_x_continuous(breaks=c(300, 620, 940), minor_breaks=NULL, limits=c(0, 1250)) +
  scale_y_continuous(breaks=NULL)
setwd("C:/Data/NetworkResearch/Results");
ggsave( "ArHmm10stateSummaryPlot.png", height=5, width=10,
        plot=arHmmSummaryPlot);
## Month-long stuff
## K <- 3
## hypers <- list(alpha=1/K, a=.1, b=.001, sigma2mu=1, sigma2a=1);
## samplerResults.events.month.3s <- SwitchingARSampler( hypers, K, events.y$month,
## nBurn, nSamp, verbose=verbose,
## nAnnounce=nAnnounce );
## save(samplerResults.events.month.3s, file="results.events.month.3s.rdata");
##
## samplerResults.bombing.month.3s <- SwitchingARSampler( hypers, K, bombing1.y$month,
## nBurn, nSamp, verbose=verbose,
## nAnnounce=nAnnounce );
## save(samplerResults.bombing.month.3s, file="results.bombing.month.3s.rdata");
##
##
## K <- 5
## hypers <- list(alpha=1/K, a=.1, b=.001, sigma2mu=1, sigma2a=1);
## samplerResults.events.month.5s <- SwitchingARSampler( hypers, K, events.y$month,
## nBurn, nSamp, verbose=verbose,
## nAnnounce=nAnnounce );
## save(samplerResults.events.month.5s, file="results.events.month.5s.rdata");
##
## samplerResults.bombing.month.5s <- SwitchingARSampler( hypers, K, bombing1.y$month,
## nBurn, nSamp, verbose=verbose,
## nAnnounce=nAnnounce );
## save(samplerResults.bombing.month.5s, file="results.bombing.month.5s.rdata");
##
##
## K <- 10
## hypers <- list(alpha=1/K, a=.1, b=.001, sigma2mu=1, sigma2a=1);
## samplerResults$events$month.10s <- SwitchingARSampler( hypers, K, events.y$month,
## nBurn, nSamp, verbose=verbose,
## nAnnounce=nAnnounce );
## save(samplerResults.events.month.10s, file="results.events.month.10s.rdata");
##
## samplerResults.bombing.month.10s <- SwitchingARSampler( hypers, K, bombing1.y$month,
## nBurn, nSamp, verbose=verbose,
## nAnnounce=nAnnounce );
## save(samplerResults.bombing.month.10s, file="results.bombing.month.10s.rdata");
|
135f7fb0de594eb759469305275627ee2a0c1158 | da2676b6fd629acdcad0bf955b0643a27407d1ce | /descriptives/ui.R | a9600b09bf595f55f7e6390b0d577c247c6b2788 | [] | no_license | DataKind-SF/datadive_201503_techsoup-global | 728e5a0ee69e5b29d91af8021df7a44ecd3b0c34 | 966f94d27bd522638c11582a13baca2f9fd49b1f | refs/heads/master | 2016-09-10T21:16:06.661971 | 2015-04-07T04:56:01 | 2015-04-07T04:56:01 | 31,853,990 | 0 | 12 | null | 2015-04-24T19:14:45 | 2015-03-08T15:10:51 | R | UTF-8 | R | false | false | 1,657 | r | ui.R | library(shiny)
library(gplots)
## Large base font for the ggplot figures rendered by the server.
## NOTE(review): theme_set()/theme_bw() come from ggplot2 and .ts.data is
## the transaction table -- both are assumed to be loaded by the server /
## global script, since neither is defined in this file.
theme_set(theme_bw(base_size = 24))
## Shiny UI: a title, a global date-range filter defaulting to the full
## span of the data, and one "Descriptives" tab of plots. Output ids
## (h_* / gr_*) must match the render functions in server.R.
shinyUI(fixedPage(
  titlePanel("TechSoup Scratch"),
  div(class="row",
      column(6,dateRangeInput("date_range",label = h4("Transactions between"),start=min(.ts.data$Transaction_Date),end=max(.ts.data$Transaction_Date)))),
  tabsetPanel(
    tabPanel("Descriptives",
             ## Histograms, two per row.
             div(class="row",
                 column(6,plotOutput("h_log_budget")),
                 column(6,plotOutput("h_log_liscenses"))
             ),
             div(class="row",
                 column(6,plotOutput("h_n_items_per_org")),
                 column(6,plotOutput("h_log_n_items_per_vendor"))
             ),
             div(class="row",
                 column(6,plotOutput("h_log_n_licenses_per_org")),
                 column(6,plotOutput("h_log_n_licenses_per_vendor"))
             ),
             div(class="row",
                 column(6,plotOutput("h_log_n_orders_per_org")),
                 column(6,plotOutput("h_log_n_orders_per_vendor"))
             ),
             div(class="row",
                 column(6,plotOutput("h_org_subtype_per_type"))
             ),
             ## Relationship plots.
             div(class="row",
                 column(6,plotOutput("gr_log_budget_value")),
                 column(6,plotOutput("gr_log_budget_revenue"))
             ),
             div(class="row",
                 column(6,plotOutput("gr_n_org_per_budget"))
             )
    )
)))
|
2ee3a412cbb397240204ec4aab4fe647570fd38a | 054b849dd5ea11ad33471fb85ffeafbd9f2a57ec | /R/MI.R | b4a933f37333a5719ddac7a510041d9af3c444a5 | [] | no_license | kwlee58/class201402 | cc05b23d5fe0531541525b44d83a02fb0807cbd1 | 3723f190fb9b72d0efaa633c9fb31ae283dccd4d | refs/heads/master | 2021-01-10T08:01:47.160002 | 2016-11-22T22:57:47 | 2016-11-22T22:57:47 | 43,969,478 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,760 | r | MI.R | MI1402.2<-read.table("MI1402_2.txt",header=T,sep="")
## Multiple-Intelligences scoring for section 2 (MI1402.2 is read above).
## Each of the eight scales is the sum of its seven items; the
## questionnaire cycles with period 8, so scale k uses Q_k, Q_(k+8), ...,
## Q_(k+48).
music.score.2<-MI1402.2$Q_1+MI1402.2$Q_9+MI1402.2$Q_17+MI1402.2$Q_25+MI1402.2$Q_33+MI1402.2$Q_41+MI1402.2$Q_49
body.score.2<-MI1402.2$Q_2+MI1402.2$Q_10+MI1402.2$Q_18+MI1402.2$Q_26+MI1402.2$Q_34+MI1402.2$Q_42+MI1402.2$Q_50
logic.score.2<-MI1402.2$Q_3+MI1402.2$Q_11+MI1402.2$Q_19+MI1402.2$Q_27+MI1402.2$Q_35+MI1402.2$Q_43+MI1402.2$Q_51
spatial.score.2<-MI1402.2$Q_4+MI1402.2$Q_12+MI1402.2$Q_20+MI1402.2$Q_28+MI1402.2$Q_36+MI1402.2$Q_44+MI1402.2$Q_52
verbal.score.2<-MI1402.2$Q_5+MI1402.2$Q_13+MI1402.2$Q_21+MI1402.2$Q_29+MI1402.2$Q_37+MI1402.2$Q_45+MI1402.2$Q_53
## BUG FIX: a stray first assignment of people.score.2 reused the verbal
## items (Q_5, Q_13, ...) and was immediately overwritten; the dead line
## has been removed and only the Q_6-series definition kept.
people.score.2<-MI1402.2$Q_6+MI1402.2$Q_14+MI1402.2$Q_22+MI1402.2$Q_30+MI1402.2$Q_38+MI1402.2$Q_46+MI1402.2$Q_54
self.score.2<-MI1402.2$Q_7+MI1402.2$Q_15+MI1402.2$Q_23+MI1402.2$Q_31+MI1402.2$Q_39+MI1402.2$Q_47+MI1402.2$Q_55
nature.score.2<-MI1402.2$Q_8+MI1402.2$Q_16+MI1402.2$Q_24+MI1402.2$Q_32+MI1402.2$Q_40+MI1402.2$Q_48+MI1402.2$Q_56
## One row per student: ID plus the eight scale scores.
MI.score.2<-data.frame(MI1402.2$ID, music.score.2,body.score.2,logic.score.2,spatial.score.2,verbal.score.2,people.score.2,self.score.2,nature.score.2)
#MI.names<-c("musical","bodily-kinesthetic","logical-nathematical","visual-spatial","verbal-lingusitic","interpersonal","intrapersonal","naturalistic")
## MI.names (and the Korean labels MI.names.kr) come from a companion
## script -- presumably the section-1 analysis; verify they are loaded.
dimnames(MI.score.2)[[2]]<-c("ID",MI.names)
## Rank the eight intelligences per student; apply() over rows returns
## one column per student, i.e. an 8 x n matrix of orderings.
MI.order.2<-apply(MI.score.2[,-1],1,order,decreasing=TRUE)
## BUG FIX: this previously indexed with MI.order (a leftover object from
## the section-1 script), silently ignoring the ranking computed on the
## line above; it now uses MI.order.2. Column-major vectorization plus
## byrow=TRUE puts each student's ranking in its own row.
MI.sort.2<-matrix(MI.names.kr[MI.order.2],ncol=8,byrow=TRUE,dimnames=list(MI.score.2$ID,1:8))
MI.sort.2[,1:3]
## 64 students in this section; attach IDs and rank columns 1..8.
MI.sort.2.df<-data.frame(MI1402.2$ID,MI.sort.2,row.names=1:64)
dimnames(MI.sort.2.df)[[2]]<-c("ID",1:8)
## plyr::join with the class roll (loaded elsewhere) to attach names.
MI.sort.2.full<-join(class.roll[,1:2],MI.sort.2.df,by="ID")
MI.sort.2.full[,1:5] |
c981cb40c1b047021926703479f7682c7198f2f7 | 8bde7be4bf935e9e38991f62dabe81542cc3139c | /flow/dimensionality_reduction/pca.R | d374512e1ae9235849e6a528349b2a6f7041ebaf | [] | no_license | burtonbiomedical/IIGeeks | a338437901198bb5ff29309fb9de2c7695309b91 | 5b8bcc39f27a140cc16d7991917df29d7e0b8093 | refs/heads/master | 2020-04-17T12:17:50.792062 | 2019-01-22T15:53:57 | 2019-01-22T15:53:57 | 166,574,679 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,089 | r | pca.R | ### PRINCIPLE COMPONENT ANALYSIS ###
# Exploratory PCA of flow-cytometry marker intensities read from SEPSIS.csv.
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail loudly here.
require(stats)
require(ggplot2)
require(lattice)
require(dplyr)
#Read in data
data <- data.frame(read.csv("SEPSIS.csv", header=TRUE, sep=","))
# Work on a random subsample of 100k rows to keep PCA/plotting tractable.
# NOTE(review): the name `sample` masks base::sample() from here on.
sample <- sample_n(data, 100000)
# Marker channels used as PCA input columns.
channels <- c("CD57", "CD161", "CD3", "CCR7", "VA7.2", "CD8",
              "Vdelta2", "CD45RA", "PanGD", "CD4", "CD27")
#Perform PCA with prcomp
# scale=TRUE standardizes each channel first (partial-matches prcomp's
# `scale.` argument).
pca <- prcomp(sample %>% select(channels), scale=TRUE)
#Calculate the amount of variation contributed from each component
pca_var <- pca$sdev^2
pca_var_per <- round(pca_var/sum(pca_var)*100, 2)
# Scree plot of percent variance per component. (The "Principle" spelling is
# part of the user-facing label, so it is left as-is here.)
barplot(pca_var_per, main="Scree Plot",
        xlab="Principle Component",
        ylab="Percentage Variation")
# Quick base-graphics view of the first two principal components.
plot(pca$x[,1], pca$x[,2])
#3D Plot with overlay
# Attach the first two PC scores to the subsample for ggplot overlays below.
sample$PC1 <- pca$x[,1]
sample$PC2 <- pca$x[,2]
#plot_ly(data = pca_data, x=~X, y=~Y, z=~Z, type="scatter3d", color=~label) %>% add_markers()
# NOTE(review): `pca_data` (and its X/Y/label columns) is never created in
# this script, so this call fails as written; it likely predates the
# `sample`-based plot below.
ggplot(pca_data, aes(x=X, y=Y, color=label)) + geom_point() + xlab("PC1") + ylab("PC2")
# NOTE(review): assumes the data has `cell_type` and `CCR7` columns — confirm
# against SEPSIS.csv.
ggplot(sample, aes(x=PC1, y=PC2)) +
  geom_point(aes(color=cell_type,fill=CCR7),stroke=0.5, shape=21) +
  xlab("PC1") + ylab("PC2")
|
76d376969232e81e72e35a709fad98e1e9742f4d | 4582eb19bfc245bbe20ffa305279fbb545e54e3b | /man/download_meta-deprecated.Rd | 387b14d89ea52fd8d61d6f4becb345cd22031f55 | [] | no_license | edwindj/cbsodataR | a9045d130d9138fc40000b284b7a64e39bdd5af4 | e66ceeccca5d62c03f54b44ed3b69d0feaacf7ef | refs/heads/master | 2021-07-07T18:42:04.153201 | 2021-05-31T22:00:39 | 2021-05-31T22:00:39 | 34,798,294 | 31 | 13 | null | 2022-09-23T14:49:12 | 2015-04-29T14:24:37 | R | UTF-8 | R | false | true | 1,201 | rd | download_meta-deprecated.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download-meta.R
\name{download_meta-deprecated}
\alias{download_meta-deprecated}
\alias{download_meta}
\title{Dumps the meta data into a directory}
\usage{
download_meta(
id,
dir = id,
...,
verbose = FALSE,
cache = FALSE,
base_url = getOption("cbsodataR.base_url", BASE_URL)
)
}
\arguments{
\item{id}{Id of CBS open data table (see \code{\link[=cbs_get_toc]{cbs_get_toc()}})}
\item{dir}{Directory in which data should be stored.
By default it creates a sub directory with the name of the id}
\item{...}{not used}
\item{verbose}{Print extra messages what is happening.}
\item{cache}{Should meta data be cached?}
\item{base_url}{optionally allow to specify a different server. Useful for
third party data services implementing the same protocol.}
}
\value{
meta data object
}
\description{
This method is deprecated in favor of \code{\link[=cbs_download_meta]{cbs_download_meta()}}.
}
\seealso{
Other meta data:
\code{\link{cbs_add_date_column}()},
\code{\link{cbs_add_label_columns}()},
\code{\link{cbs_get_meta}()}
Other download:
\code{\link{cbs_download_data}()},
\code{\link{cbs_download_table}()}
}
|
7817caf9ba6f323ccd0b363e3641aedbe3cdd82d | 8a12f6663116a7448b3c40ae39597bdd99766f84 | /R/traits.R | f44bd579b9698b2be36cb7a9aad1db5f8f46f621 | [] | no_license | patrickCNMartin/lighteR | 50f1751c46a8cecaaa328e278ee6a3d2074488aa | 78211ffa96f64dd0e8b84aae2e53c03804aa393a | refs/heads/master | 2023-02-22T16:26:57.960988 | 2021-01-27T12:52:55 | 2021-01-27T12:52:55 | 263,102,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,214 | r | traits.R | ################################################################################
############################ NPQ analysis ######################################
################################################################################
################################################################################
################################################################################
### Selecting Parameters
################################################################################
################################################################################
#' Extract traits from a seed object - simple trait and model traits
#'
#' When no models have been fitted yet, per-disk summary parameters are taken
#' directly from the measurements (via \code{.extractParam}); otherwise traits
#' are extracted from the fitted models (via \code{.extractModels}). Results
#' are written into the \code{traits} slot of \code{seed}.
#'
#' @param seed a seed object
#' @param measure light measure(s) that traits should be extracted for
#'   (any of "NPQ", "XE", "EF", "OE")
#' @param cores number of cores used for analysis (used by
#'   \code{parallel::mcmapply} when origin-mapped models are present)
#' @return the seed object with extracted traits in its \code{traits} slot
getTraits <- function(seed,measure = c("NPQ","XE","EF","OE"),cores=1){
  # Extracting time points
  if(length(seed@meta.param@timePoints)>0){
    time <- seed@meta.param@timePoints
  } else {
    message("No Time Points have been set - using default 40 - 80")
    time <- c(40,80)
  }
  ## check for models
  ## TRUE when every measure's models slot is empty, i.e. nothing fitted yet.
  models <- sum(unlist(.slotApply(seed@models,length))) == 0
  if(models){
    ## Just checking what I will use to extract param
    ## No need to extract param from measure if some disk have already been filtered
    is.retain.empty<- sum(unlist(.slotApply(seed@retain,length))) == 0
    if(is.retain.empty){
      measures <- seed@measures
    } else {
      measures <- seed@retain
    }
    ## Simple (model-free) traits for all measures at once.
    param <- .slotExtractParam(measures,.extractParam,time)
    seed@traits <- .slotAssign(seed@traits,param)
  } else {
    ## Models exist: prefer the filtered (retained) models when available.
    is.retained.models.empty <-sum(unlist(.slotApply(seed@retained.models,length))) == 0
    if(is.retained.models.empty){
      models <- seed@models
      #models <- slotUnlist(models)
      #models <- slotAssign(seed@models,models)
    } else {
      models <- seed@retained.models
    }
    ## Per-measure record of which model types were fitted (set in meta.param).
    modelType <- seed@meta.param@models
    is.origin.empty <-sum(unlist(.slotApply(seed@origin,length))) == 0
    is.retain.empty<- sum(unlist(.slotApply(seed@retain,length))) == 0
    is.trait.empty<- sum(unlist(.slotApply(seed@traits,length))) == 0
    ## One result slot per requested measure.
    template <- vector("list", length(measure))
    names(template)<-measure
    for(i in seq_along(measure)){
      ## Skip measures whose meta.param entry says models were not computed.
      tmp <- modelType[[measure[i]]]
      if(any(grepl("No", tmp, ignore.case=TRUE))){
        warning(paste(measure[i],"models have not been computed - skipping measure"))
        next()
      }
      ## Pick the measurement table: measures -> retain -> origin (origin
      ## takes precedence when it has been populated).
      if(is.retain.empty){
        plant <- slot(seed@measures,measure[i])
      } else if(!is.retain.empty) {
        plant <- slot(seed@retain, measure[i])
      }
      if(!is.origin.empty){
        plant <- slot(seed@origin, measure[i])
      }
      if(is.trait.empty){
        trait <- NULL
      } else{
        trait <- slot(seed@traits, measure[i])
      }
      mods <- slot(models,measure[i])
      if(is.origin.empty){
        template[[i]] <- .extractModels(mods,plant,trait,time,origin=FALSE,cores)
      } else {
        ## Origin layout: one models/plant pair per origin plant, so map
        ## .extractModels over plants in parallel.
        template[[i]] <- mcmapply(.extractModels,mods,plant,
                                  MoreArgs = list(trait,time,origin=TRUE),mc.cores =cores)
      }
    }
    if(!is.origin.empty){
      ## re-orient df
      ## Per-disk trait vectors can be ragged (skipped model windows); pad the
      ## shorter ones with NA up to the longest length, then bind into one
      ## data frame per measure. nmax holds the min/max observed lengths.
      #browser()
      template <- lapply(template, function(tmp){
        if(!is.null(tmp)){
          nmax <-range(unique(unlist(sapply(tmp, function(x)sapply(x,length)))))
          tmp <- suppressWarnings(lapply(tmp,function(x){
            do.call("rbind",matrix(x,ncol=length(x)))
          }))
          for(i in seq_along(tmp)){
            if(ncol(tmp[[i]])== min(nmax)){
              tmp[[i]]<- cbind(tmp[[i]],rep(NA,max(nmax)-min(nmax)))
            }
          }
          tmp <- do.call("rbind", tmp)
          return(as.data.frame(tmp))
        }else{
          return(data.frame())
        }
      })
    } else {
      ## Non-origin layout: each element is already a list of equal-length
      ## vectors, so a plain rbind suffices.
      for(i in seq_along(template)){
        if(!is.null(template[[i]])){
          template[[i]] <- as.data.frame(do.call("rbind",template[[i]]))
        } else {
          template[[i]] <- data.frame()
        }
      }
    }
    ## Fresh assignment when traits were empty; otherwise append to existing.
    if(is.trait.empty){
      seed@traits <- .slotAssign(seed@traits,template)
    } else {
      seed@traits <- .slotAddTraits(seed@traits,template)
    }
  }
  return(seed)
}
## Compute the simple (model-free) summary parameters for every leaf disk.
##
## @param df data frame with identifier columns (diskID/Zone, or
##   diskID/plot/pedigree/line/stem for origin-mapped data) followed by one
##   numeric column per time point
## @param time c(last high-light time point, last low-light time point);
##   columns 1..time[1] are the high-light phase, time[1]+1..time[2] low light
## @param measure light measure name; for "XE" the induction time is taken at
##   the minimum instead of the maximum
## @return data frame: the identifier columns followed by the 14 parameters
##   named in paramType
.extractParam <- function(df,time,measure){
  ## Names of the summary parameters filled in below.
  paramType <-c("startHighLight","endHighLight","minHighLight","maxHighLight",
                "InductionTime","LinearRate","startLowLight","endLowLight","minLowLight",
                "OverCompTime","RelaxationTime","startPlateau",
                "endPlateau","stableLowLightTime")
  param <- as.data.frame(matrix(0,ncol = length(paramType), nrow = nrow(df)))
  colnames(param) <- paramType
  ## High Light param
  ## Separate the identifier columns (tags) from the time course (dftmp),
  ## then slice the high-light (dfh) and low-light (dfl) phases.
  if(any(colnames(df) %in% c("plot","pedigree","line","stem"))){
    dftmp<- df[,!colnames(df) %in% c("diskID","plot","pedigree","line","stem")]
    tags <-df[,colnames(df) %in% c("diskID","plot","pedigree","line","stem")]
    dfh <- as.matrix(dftmp[,seq(1,time[1])])
    dfl <- as.matrix(dftmp[seq(time[1]+1,time[2])])
    ## Cleaning up for debugging purpose
  } else {
    dftmp<- df[,!colnames(df) %in% c("diskID","Zone")]
    tags<- df[,colnames(df) %in% c("diskID","Zone")]
    dfh <- as.matrix(dftmp[,seq(1,time[1])])
    dfl <- as.matrix(dftmp[seq(time[1]+1,time[2])])
  }
  ## Values at the phase boundaries ("[["(row, k) picks time point k).
  param$startHighLight <- apply(dftmp,1,"[[",1)
  param$endHighLight <- apply(dftmp,1,"[[",time[1])
  param$minHighLight <- apply(dfh,1,min)
  param$maxHighLight <- apply(dfh,1,max)
  ## Induction time: first time point of the high-light extremum
  ## (minimum for "XE", maximum for every other measure).
  if(measure!="XE"){
    param$InductionTime<-apply(dfh,1,function(x){return(which(x==max(x))[1])})
  } else{
    param$InductionTime<-apply(dfh,1,function(x){return(which(x==min(x))[1])})
  }
  ## The remaining parameters are computed by package helpers defined
  ## elsewhere: .rate, .findDip, .findDropTime and .findPlateau.
  param$LinearRate<-apply(dfh,1,.rate)
  param$minLowLight<-apply(dfl,1,min)
  param$startLowLight<-apply(dftmp,1,"[[",time[1]+1)
  param$endLowLight<-apply(dftmp,1,"[[",time[2])
  param$OverCompTime<-apply(dfl,1,.findDip)
  param$RelaxationTime<-apply(dfl,1,.findDropTime)
  ## .findPlateau returns three values per disk; apply stacks them as columns,
  ## so transpose back to one row per disk.
  loc<-t(apply(dfl,1,.findPlateau))
  param$startPlateau<-loc[,1]
  param$endPlateau<-loc[,2]
  param$stableLowLightTime<-loc[,3]
  ## Adding back names
  param <- cbind(tags,param)
  return(param)
}
## Extract traits from the fitted models of one light measure.
##
## @param models list with one element per leaf disk; each element is itself a
##   list of fitted models (length 3 when the response was modelled in three
##   phases, which enables the OverCompTime handling below)
## @param seed measurement data frame for this measure (one row per disk);
##   only its identifier columns and row count are used here
## @param trait previously extracted simple traits (or NULL); the per-zone
##   over-compensation dip time is looked up from it via .quickSelect
## @param time c(last high-light time point, last low-light time point)
## @param origin TRUE when disks have been mapped back to origin plants
##   (different identifier columns; serial mapply instead of mcmapply)
## @param cores cores for parallel::mcmapply (used when origin = FALSE)
## @return list of flattened per-disk trait vectors (see .SelectFitted)
.extractModels <- function(models,seed,trait,time,origin =FALSE,cores=1){
  ## Time extraction
  ## Pick the identifier columns that exist for this data layout.
  ## NOTE(review): both branches compute the identical modelType expression.
  if(origin == TRUE){
    #models <- .orderModels(models)
    modelType <- all(sapply(models, length) == 3)
    zone <- seed[,colnames(seed) %in% c("diskID","plot","pedigree","line","stem")]
    tag <- zone
  } else {
    modelType <- all(sapply(models, length) == 3)
    zone <- seed[,colnames(seed) %in% c("diskID","Zone")]
    tag <- zone
  }
  if(modelType){
    ## Three-phase models: look up each disk's over-compensation dip time in
    ## the simple traits, keyed by the concatenated identifier columns.
    zone <- apply(zone,1,paste,collapse="")
    dip <- .quickSelect(trait,zone)
    ### Taking care of weird over comp times
    ## Clamp the dip time (and dip+1) to the end of the low-light window.
    overcomp <-(time[1]+1) + median(dip)
    overcomp2 <-(time[1]+1) + median(dip) +1
    overcomp[overcomp >time[2]] <- time[2]
    overcomp2[overcomp2 >time[2]] <- time[2]
    ## data.frame() de-duplicates the repeated "OverCompTime" name into
    ## OverCompTime / OverCompTime.1; .SelectFitted relies on that name.
    timeLoc <- data.frame("startHighLight" = rep(1,nrow(seed)),
                          "endHighLight" = rep(time[1],nrow(seed)),
                          "startLowLight" = rep(time[1]+1, nrow(seed)),
                          "OverCompTime" = overcomp,
                          "OverCompTime" = overcomp2,
                          "endLowLight" = rep(time[2],nrow(seed)))
  } else {
    timeLoc <- data.frame("startHighLight" = rep(1,nrow(seed)),
                          "endHighLight" = rep(time[1],nrow(seed)),
                          "startLowLight" = rep(time[1]+1, nrow(seed)),
                          "endLowLight" = rep(time[2],nrow(seed)))
  }
  ## seed split by row
  ## One timeLoc row and one tag vector per disk, paired with that disk's
  ## model list and handed to .SelectFitted.
  if(origin==FALSE){
    timeLoc<- split(timeLoc, seq(nrow(timeLoc)))
    tag <- split(tag,seq(nrow(tag)))
    tag <- lapply(tag,function(x){
      as.character(as.vector(as.matrix(x)))})
    idx <- seq_along(timeLoc)
    models <- mcmapply(.SelectFitted,models,idx,timeLoc,tag,SIMPLIFY = FALSE ,mc.cores=cores)
    models <- lapply(models, unlist)
  } else {
    ## Origin layout: this function is itself mapped over plants upstream
    ## (see getTraits), so iterate serially here.
    timeLoc<- split(timeLoc, seq(nrow(timeLoc)))
    tag <- split(tag, seq(nrow(tag)))
    tag <- lapply(tag,function(x){
      as.character(as.vector(as.matrix(x)))})
    idx <- seq_along(timeLoc)
    models <- mapply(.SelectFitted,models,idx,timeLoc,tag,MoreArgs = list(origin=TRUE),SIMPLIFY= FALSE)
  }
  return(models)
}
## Flatten one disk's list of fitted models into a single vector of identifier
## tags, coefficients and fitted values.
##
## @param model list of fitted model objects for one disk (NA entries mark
##   fits that failed or were filtered out)
## @param idx disk index; used to pick this disk's residuals out of each
##   model's pooled residual vector
## @param time one-row data frame of window boundaries (consecutive column
##   pairs, e.g. startHighLight/endHighLight, define the fitting window of
##   each model in `model`)
## @param tags character vector of identifier values for this disk
## @param origin kept for a uniform call signature; not used in the body
## @return character vector: tags, named coefficients (plus "RSE"), the
##   literal marker "fitted_data", then the named fitted values — c()
##   coerces everything to character
.SelectFitted <- function(model,idx,time,tags,origin=FALSE){
  ### Using filtering function as template
  ## they work in similar ways
  modelLocal <- c()
  coefs <- c()
  ## Pair up consecutive entries of `time`: timeLoc[[m]] = c(start, end)
  ## of the window that model m was fitted on.
  timeLoc <- vector("list", length(model))
  count <- 1
  for(t in seq(1,by=2,length.out =length(model))){
    timeLoc[[count]] <- c(time[t], time[t+1])
    count <- count +1
  }
  ## is.na() on a list flags the elements that are a bare NA (failed fits).
  nas <- is.na(model)
  tag <- c()
  for(mod in seq_along(model)){
    ## Number of time steps covered by this model's window.
    ti <- seq(1,(timeLoc[[mod]][[2]]-timeLoc[[mod]][[1]])+1)
    ## OverCompTime.1 is the de-duplicated second OverCompTime column created
    ## in .extractModels; a window clamped to endLowLight is degenerate: skip.
    if(any(names(timeLoc[[mod]]) %in% "OverCompTime.1")){
      if(timeLoc[[mod]]$OverCompTime.1 ==timeLoc[[mod]]$endLowLight) next()
    }
    if(nas[mod]){
      ## Failed/absent fit: pad with NA fitted values and NA coefficients.
      modelLocal <- c(modelLocal,rep(NA,length(ti)))
      resi <- NA
      coefs <-c(coefs,resi,rep(NA,3))
    } else {
      modelLocal <- c(modelLocal,.extractFittedModel(model[[mod]],ti,names(model)[mod]))
      ## Residuals are pooled across disks (div per time step); pick every
      ## div-th entry starting at this disk's index, then compute a residual
      ## standard error. NOTE(review): the end point length(resi)-(div+idx)
      ## drops the tail of the sequence — confirm this offset is intentional.
      resi <- model[[mod]]$residuals
      div<- length(resi)/length(ti)
      resi <- resi[seq(idx,length(resi)-(div+idx), by=div)]
      resi <- sqrt(sum(resi^2)/(length(resi)-2))
      coefs <- c(coefs,coef(model[[mod]]),resi)
    }
    ## One tag per fitted time step: the model's name with the window index
    ## appended.
    tag <- c(tag, paste0(rep(names(model)[mod],length(ti)),mod))
  }
  ## Skipped windows can leave tags and fitted values at different lengths;
  ## pad the shorter vector with NA before assigning names.
  if(length(tag)< length(modelLocal)){
    tag <- c(tag, rep(NA,length(modelLocal)-length(tag)))
  } else if(length(tag)> length(modelLocal)){
    modelLocal <- c(modelLocal, rep(NA,length(tag)-length(modelLocal)))
  }
  names(modelLocal) <- tag
  ## The residual-standard-error entries were appended unnamed; label them.
  names(coefs)[names(coefs) %in% ""] <- "RSE"
  mods <- c(as.character(tags),coefs,"fitted_data",modelLocal)
  return(mods)
}
|
77a73d8df01bbcc8f1c8142c0ebf37116c7ab264 | c6afcdb5dbee5bc96a5eac133632d1bef908ac98 | /downloadUSDM.R | 9833bea897844271686a3c9a3ba9c6d6ff407b15 | [] | no_license | mcrimmins/range-clim-resilience | 3ee2866ef470fa452f33a231ecc809ec911cf486 | cdb9b04396ad80cb24d51d9c433d90360b139f67 | refs/heads/master | 2020-03-22T16:48:41.826571 | 2018-07-09T23:54:26 | 2018-07-09T23:54:26 | 140,353,065 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,971 | r | downloadUSDM.R | # download all years of USDM shapefiles
# MAC 6/19/18
# Downloads the weekly US Drought Monitor (USDM) shapefile archives year by
# year, rasterizes each week's DM field onto an aggregated PRISM grid, saves
# the resulting stack, and maps one classified week.
#Set years
yr1<-2000
yr2<-2018
# set directories - only needed for CyVerse instances
dir.create("./tmpFiles")
# Download one zip of weekly shapefiles per year and extract it to scratch.
for(i in yr1:yr2){
  # NOTE(review): this paste0(i) result is discarded — probably meant print(i).
  paste0(i)
  # temporarily download year files and then delete
  # download test files; changed to "wget" from "curl"
  # NOTE(review): despite the comment above, the call still uses method="curl".
  print("Downloading Yearly USDM Files")
  download.file(paste0("http://droughtmonitor.unl.edu/data/shapefiles_m//",i,"_USDM_M.zip"), destfile = "./tmpFiles/USDM.zip", method="curl")
  print("Done downloading, extracting files")
  unzip("./tmpFiles/USDM.zip", exdir = "/scratch/crimmins/USDM/files")
  print("Done downloading, extracting files")
  # Clear the temporary download directory before the next year.
  do.call(file.remove, list(list.files("./tmpFiles", full.names = TRUE)))
}
# unzip and process files
library(raster)
library(sp)
library(rgdal)
library(rgeos)
library(maptools)
#library(cleangeo)
# Target grid: the PRISM grid coarsened by a factor of 4 (~16 km, per the
# output file name used below).
prismGrid<-raster("prismGrid.grd")
prismGrid<-aggregate(prismGrid, fact=4, fun=mean)
# Weekly zip names carry the date in characters 6-13 (parsed as %Y%m%d below
# and reused as the shapefile layer suffix).
fileNames<-as.data.frame(list.files("/scratch/crimmins/USDM/files"))
colnames(fileNames)<-"files"
fileNames$date<-as.Date(substr(fileNames$files, 6,13), "%Y%m%d")
fileNames$code<-substr(fileNames$files, 6,13)
# Rasterize each weekly shapefile's DM (drought category) field onto the grid.
for(i in 1:nrow(fileNames)){
  unzip(paste0("/scratch/crimmins/USDM/files/",fileNames$files[i]),
        exdir = "./tmpFiles")
  tempUSDM <- readOGR(dsn = "./tmpFiles", layer = paste0("USDM_",fileNames$code[i]) )
  #tempUSDM <- readShapePoly(paste0("./tmpFiles/USDM_",fileNames$code[i],".shp")) # library maptools
  #tempUSDM <- clgeo_Clean(tempUSDM) # library cleangeo
  # rasterize
  tempGrid <- rasterize(tempUSDM, prismGrid, 'DM', fun='last')
  # Accumulate the weekly layers into one stack. The stack grows inside the
  # loop, which is slow for many layers but keeps per-iteration memory low.
  if (i==1){
    tempGrid2 <- tempGrid
  }else{
    tempGrid2 <- stack(tempGrid2, tempGrid) # brick or stack?
  }
  print(i)
  do.call(file.remove, list(list.files("./tmpFiles", full.names = TRUE)))
}
# save datafiles
save(fileNames, tempGrid2, file = "USDMRaster_2001_2018_16km.RData")
names(tempGrid2)<-fileNames$date
# write out data file
#writeRaster(tempGrid2,filename="USDM2001_2018.grd")
# Analyze/Map Data (problem with rasterized maps 2000-2004)
load("USDMRaster_2001_2018_16km.RData")
library(rasterVis)
library(RColorBrewer)
# get state boundaries
states <- getData('GADM', country='United States', level=1)
names(tempGrid2)<-fileNames$date
# One color per drought class used in `cats` below.
cols <- brewer.pal(5, "YlOrRd")
# Week (layer index) to map.
week<-950
# Turn the chosen week's numeric DM values into a categorical raster and
# attach readable class labels through its raster attribute table (RAT).
classUSDM<-as.factor(tempGrid2[[week]])
rat <- levels(classUSDM)[[1]]
# USDM categories
cats<-c("Dry","Moderate","Severe","Extreme","Exceptional")
rat[["cluster"]]<-cats[1:nrow(rat)]
levels(classUSDM) <- rat
# plot classified map
levelplot(classUSDM, col.regions=cols, par.settings=list(panel.background=list(col="white")),
          margin=FALSE, main=paste0("US Drought Monitor - ",fileNames$date[week]))+
  layer(sp.polygons(states))
# develop USDM like time series of D areas like http://droughtmonitor.unl.edu/Data/Timeseries.aspx
# Add seasons to the time series plots in quarters
# load regions
regions <- readShapePoly("./western_states/western_states")
|
243dfaee358a11ea5b98e71c00dd5bb6bf99c057 | 29d34e3302b71d41d77af715727e963aea119392 | /man/s.GLM.Rd | 594657c1c95e8603aea36d0c07388887ba5c5470 | [] | no_license | bakaibaiazbekov/rtemis | 1f5721990d31ec5000b38354cb7768bd625e185f | a0c47e5f7fed297af5ad20ae821274b328696e5e | refs/heads/master | 2020-05-14T20:21:40.137680 | 2019-04-17T15:42:33 | 2019-04-17T15:42:33 | 181,943,092 | 1 | 0 | null | 2019-04-17T18:00:09 | 2019-04-17T18:00:09 | null | UTF-8 | R | false | true | 6,785 | rd | s.GLM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s.GLM.R
\name{s.GLM}
\alias{s.GLM}
\title{Generalized Linear Model [C, R]}
\usage{
s.GLM(x, y = NULL, x.test = NULL, y.test = NULL, x.name = NULL,
y.name = NULL, family = NULL, interactions = FALSE,
nway.interactions = 0, covariate = NULL, class.method = NULL,
weights = NULL, ipw = TRUE, ipw.type = 2, upsample = FALSE,
upsample.seed = NULL, intercept = TRUE, polynomial = FALSE,
poly.d = 3, poly.raw = FALSE, print.plot = TRUE,
plot.fitted = NULL, plot.predicted = NULL,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
na.action = na.exclude, removeMissingLevels = TRUE,
question = NULL, rtclass = NULL, verbose = TRUE, trace = 0,
outdir = NULL, save.mod = ifelse(!is.null(outdir), TRUE, FALSE), ...)
}
\arguments{
\item{x}{Numeric vector or matrix / data frame of features i.e. independent variables}
\item{y}{Numeric vector of outcome, i.e. dependent variable}
\item{x.test}{(Optional) Numeric vector or matrix / data frame of testing set features
Columns must correspond to columns in \code{x}}
\item{y.test}{(Optional) Numeric vector of testing set outcome}
\item{x.name}{Character: Name for feature set}
\item{y.name}{Character: Name for outcome}
\item{family}{Error distribution and link function. See \code{stats::family}}
\item{interactions}{Logical: If TRUE, include all pairwise interactions. \code{formula = y ~.*.}}
\item{nway.interactions}{Integer: Include n-way interactions. This integer defined the n: \code{formula = y ~^n}}
\item{covariate}{String (optional): Name of column to be included as interaction term in formula, must be factor}
\item{class.method}{String (Optional): Define "logistic" or "multinom" for classification. The only purpose
of this is so you can try \code{nnet::multinom} instead of glm for binary classification}
\item{weights}{Numeric vector: Weights for cases. For classification, \code{weights} takes precedence
over \code{ipw}, therefore set \code{weights = NULL} if using \code{ipw}.
Note: If \code{weight} are provided, \code{ipw} is not used. Leave NULL if setting \code{ipw = TRUE}. Default = NULL}
\item{ipw}{Logical: If TRUE, apply inverse probability weighting (for Classification only).
Note: If \code{weights} are provided, \code{ipw} is not used. Default = TRUE}
\item{ipw.type}{Integer {0, 1, 2}
1: class.weights as in 0, divided by max(class.weights)
2: class.weights as in 0, divided by min(class.weights)
Default = 2}
\item{upsample}{Logical: If TRUE, upsample cases to balance outcome classes (for Classification only)
Caution: upsample will randomly sample with replacement if the length of the majority class is more than double
the length of the class you are upsampling, thereby introducing randomness}
\item{upsample.seed}{Integer: If provided, will be used to set the seed during upsampling.
Default = NULL (random seed)}
\item{intercept}{Logical: If TRUE, fit an intercept term. Default = TRUE}
\item{polynomial}{Logical: if TRUE, run lm on \code{poly(x, poly.d)} (creates orthogonal polynomials)}
\item{poly.d}{Integer: degree of polynomial. Default = 3}
\item{poly.raw}{Logical: if TRUE, use raw polynomials.
Default, which should not really be changed is FALSE}
\item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3}
Takes precedence over \code{plot.fitted} and \code{plot.predicted}}
\item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted}
\item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted.
Requires \code{x.test} and \code{y.test}}
\item{plot.theme}{String: "zero", "dark", "box", "darkbox"}
\item{na.action}{How to handle missing values. See \code{?na.fail}}
\item{removeMissingLevels}{Logical: If TRUE, finds factors in \code{x.test} that contain levels
not present in \code{x} and substitutes with NA. This would result in error otherwise and no
predictions would be made, ending \code{s.GLM} prematurely}
\item{question}{String: the question you are attempting to answer with this model, in plain language.}
\item{rtclass}{String: Class type to use. "S3", "S4", "RC", "R6"}
\item{verbose}{Logical: If TRUE, print summary to screen.}
\item{trace}{Integer: If higher than 0, will print more information to the console. Default = 0}
\item{outdir}{Path to output directory.
If defined, will save Predicted vs. True plot, if available,
as well as full model output, if \code{save.mod} is TRUE}
\item{save.mod}{Logical. If TRUE, save all output as RDS file in \code{outdir}
\code{save.mod} is TRUE by default if an \code{outdir} is defined. If set to TRUE, and no \code{outdir}
is defined, outdir defaults to \code{paste0("./s.", mod.name)}}
\item{...}{Additional arguments}
}
\value{
\link{rtMod}
}
\description{
Train a Generalized Linear Model for Regression or Logistic Regression (Classification) using \code{glm}
If outcome \code{y} has more than two classes, Multinomial Logistic Regression is performed using
\code{nnet::multinom}
}
\details{
A common problem with \code{glm} arises when the testing set contains a predictor with more
levels than those in the same predictor in the training set, resulting in error. This can happen
when training on resamples of a data set, especially after stratifying against a different
outcome, and results in error and no prediction. \code{s.GLM} automatically finds such cases
and substitutes levels present in \code{x.test} and not in \code{x} with NA.
}
\examples{
x <- rnorm(100)
y <- .6 * x + 12 + rnorm(100)/2
mod <- s.GLM(x, y)
}
\seealso{
\link{elevate} for external cross-validation
Other Supervised Learning: \code{\link{s.ADABOOST}},
\code{\link{s.ADDTREE}}, \code{\link{s.BART}},
\code{\link{s.BAYESGLM}}, \code{\link{s.BRUTO}},
\code{\link{s.C50}}, \code{\link{s.CART}},
\code{\link{s.CTREE}}, \code{\link{s.DA}},
\code{\link{s.ET}}, \code{\link{s.EVTREE}},
\code{\link{s.GAM.default}}, \code{\link{s.GAM.formula}},
\code{\link{s.GAMSEL}}, \code{\link{s.GAM}},
\code{\link{s.GBM3}}, \code{\link{s.GBM}},
\code{\link{s.GLMNET}}, \code{\link{s.GLS}},
\code{\link{s.H2ODL}}, \code{\link{s.H2OGBM}},
\code{\link{s.H2ORF}}, \code{\link{s.IRF}},
\code{\link{s.KNN}}, \code{\link{s.LDA}},
\code{\link{s.LM}}, \code{\link{s.MARS}},
\code{\link{s.MLRF}}, \code{\link{s.MXN}},
\code{\link{s.NBAYES}}, \code{\link{s.NLA}},
\code{\link{s.NLS}}, \code{\link{s.NW}},
\code{\link{s.POLYMARS}}, \code{\link{s.PPR}},
\code{\link{s.PPTREE}}, \code{\link{s.QDA}},
\code{\link{s.QRNN}}, \code{\link{s.RANGER}},
\code{\link{s.RFSRC}}, \code{\link{s.RF}},
\code{\link{s.SGD}}, \code{\link{s.SPLS}},
\code{\link{s.SVM}}, \code{\link{s.TFN}},
\code{\link{s.XGBLIN}}, \code{\link{s.XGB}}
}
\author{
Efstathios D. Gennatas
}
\concept{Supervised Learning}
|
a961333cb2c70830412549b8116e897393e8f7a8 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /CENFA/man/parCov.Rd | f875ca3150039ddf443bdd8eb4588d4805ff09d6 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 3,033 | rd | parCov.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parCov.R
\name{parCov}
\alias{parCov}
\alias{parCov,Raster,missing-method}
\alias{parCov,Raster,Raster-method}
\title{Efficient calculation of covariance matrices for Raster* objects}
\usage{
parCov(x, y, ...)
\S4method{parCov}{Raster,missing}(
x,
w = NULL,
sample = TRUE,
progress = FALSE,
parallel = FALSE,
n = 1,
cl = NULL,
keep.open = FALSE
)
\S4method{parCov}{Raster,Raster}(
x,
y,
w = NULL,
sample = TRUE,
progress = FALSE,
parallel = FALSE,
n = 1,
cl = NULL,
keep.open = FALSE
)
}
\arguments{
\item{x}{Raster* object, typically a brick or stack}
\item{y}{NULL (default) or a Raster* object with the same extent and resolution
as \code{x}}
\item{...}{additional arguments, including any of the following:}
\item{w}{optional Raster* object of weights for a weighted covariance matrix}
\item{sample}{logical. If \code{TRUE}, the sample covariance is calculated
with a denominator of $n-1$}
\item{progress}{logical. If \code{TRUE}, messages and progress bar will be
printed}
\item{parallel}{logical. If \code{TRUE} then multiple cores are utilized}
\item{n}{numeric. Number of CPU cores to utilize for parallel processing}
\item{cl}{optional cluster object}
\item{keep.open}{logical. If \code{TRUE} and \code{parallel = TRUE}, the
cluster object will not be closed after the function has finished}
}
\value{
Returns a matrix with the same row and column names as the layers of
\code{x}. If \code{y} is supplied, then the covariances between the layers
of \code{x} and the layers of \code{y} are computed.
}
\description{
\code{parCov} efficiently calculates the covariance of Raster* objects,
taking advantage of parallel processing and pulling data into memory only as
necessary. For large datasets with lots of variables, calculating the covariance
matrix rapidly becomes unwieldy, as the number of calculations required grows
quadratically with the number of variables.
}
\details{
This function is designed to work similarly to the
\code{\link[stats]{cov}} and the \code{\link[raster]{layerStats}}
functions, with two major differences. First, \code{parCov} allows you to
calculate the covariance between two different Raster* objects, whereas
\code{layerStats} does not. Second, \code{parCov} can (optionally) compute
each element of the covariance matrix in parallel, offering a dramatic
improvement in computation time for large Raster* objects.
The raster layer of weights \code{w} should contain raw weights as values,
and should \emph{not} be normalized so that \code{sum(w) = 1}. This is
necessary for computing the sample covariance, whose formula contains
\code{sum(w) - 1} in its denominator.
}
\examples{
mat1 <- parCov(climdat.hist)
# correlation matrix
Z <- parScale(climdat.hist)
mat2 <- parCov(Z)
# covariance between two Raster* objects
mat3 <- parCov(x = climdat.hist, y = climdat.fut)
}
\seealso{
\code{\link[stats]{cov}}, \code{\link[raster]{layerStats}}
}
|
c0f7cc168a218964bc6ca51c96ad31fca626f7d3 | d078cbbe8dcc16be58f840f1d5459a1318d73c38 | /Exercise/Exercise.R | 5e6c43d5b30b9e39a443d089838c4b0039f92d94 | [] | no_license | naponjatusripitak/polisci490 | c8504c654b64088b3a5a5321f87d8206d977093f | dd2db773e069593d1339625256538482e61a4482 | refs/heads/master | 2021-05-05T09:42:10.630595 | 2018-04-08T22:13:05 | 2018-04-08T22:13:05 | 117,893,780 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,527 | r | Exercise.R | # Set working directory
# NOTE(review): a hard-coded setwd() ties the script to one machine/user;
# prefer launching R from the project directory or using relative paths.
setwd("~/polisci490/Exercise")
# Load packages
# Package names required by this exercise script; each is attached (and
# installed on first use) by load.packages() below.
packages <- c("xml2","rvest", "dplyr", "tm", "tidytext", "ggplot2", "SnowballC", "stats", "MASS", "nnet")
# Attach a package by name, installing it first when it is not yet available.
# `x` is a single package name given as a character string; character.only
# makes require()/library() treat it as a name rather than a symbol.
load.packages <- function(x) {
  attached <- require(x, character.only = TRUE)
  if (!attached) {
    # Not installed yet: fetch it (with dependencies) from CRAN, then attach.
    install.packages(x, dependencies = TRUE)
    library(x, character.only = TRUE)
  }
}
lapply(packages, load.packages)

## ---- PCA on USArrests ---------------------------------------------------
USArrests <- USArrests
log.iris <- log(iris[,1:4])
iris.species <- iris[,5]
# prcomp's scaling argument is spelled `scale.`; spell it out rather than
# relying on partial argument matching.
USArrest.PCA <- prcomp(USArrests, center = TRUE, scale. = TRUE)
print(USArrest.PCA)
# Scree plot of the component variances.
plot(USArrest.PCA, type = "l")
library(devtools)
# BUGFIX(modernized): install_github() takes a single "user/repo" spec; the
# old two-argument ("repo", "user") form was removed from devtools.
install_github("vqv/ggbiplot")
library(ggbiplot)
# BUGFIX: a prcomp object has no rownames (rownames(USArrest.PCA) is NULL);
# the state labels live on the original data matrix.
states <- rownames(USArrests)
p <- ggbiplot(USArrest.PCA, obs.scale=1, var.scale=1, groups=states, ellipse=TRUE, circle=TRUE, labels=rownames(USArrests))
p <- p + scale_color_discrete(name='') + theme_bw()

## ---- Linear regression on the Boston housing data ------------------------
Boston <- Boston
# Renamed from `sample` so the variable does not mask base::sample().
train_size <- .5*nrow(Boston)
set.seed(789)
# 50/50 train/test split of the row indices.
data.split <- sample(seq_len(nrow(Boston)), size = train_size)
training <- Boston[data.split,]
test <- Boston[-data.split,]
reg1 <- lm(medv ~., data=training)
# Column 14 is the response (medv); predict from the remaining predictors.
predict.reg1 <- predict(reg1, newdata=test[, -14])
# Test-set mean squared error.
mean((test$medv - predict.reg1)^2)

## ---- Min-max scaling of the predictors (nnet-style input) ----------------
# One-hot indicator matrix for the response.
y <- nnet::class.ind(Boston$medv)
x <- Boston
x$medv <- NULL
# BUGFIX: the column minima/maxima must be computed on `x` (13 predictor
# columns); the original used `Boston` (14 columns), so the center/scale
# vectors did not match ncol(x) and scale() would error.
colmins <- apply(x, 2, min)
colmaxs <- apply(x, 2, max)
BostonScale <- as.data.frame(cbind(y, scale(x, center = colmins, scale = colmaxs - colmins)))
# BUGFIX: replaced a garbled copy-paste remnant (two statements fused on one
# line, previewing an undefined `wineScale` object) with a preview of the
# scaled Boston data.
BostonScale[1:3, 1:6]
|
91fb008a89630d5bbd555533a646a4b421eb5d5d | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/8962_0/rinput.R | 952d972399124f4f8ba639dc0c89091e7689c5a5 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree for this gene family (ape::read.tree, attached above),
# remove its root, and write the unrooted copy alongside the original —
# presumably for codeml, per the repository layout (TODO confirm).
testtree <- read.tree("8962_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8962_0_unrooted.txt")
deff1542bcbd3b5151137a25e7682d2edba46336 | c19f860870dfdd0c8cb4198c3ef62a14ece71c76 | /man/GetSubject.Rd | 30e74bc6db17c63107a07fc2ef5a8610da9003d4 | [
"MIT"
] | permissive | HBGDki/FAIRsimulator | 84933d896d6d93c4c1dad0e348a6db2ccf449618 | 6d303b8c6381a379d5d4475ca8a56a1255940309 | refs/heads/master | 2021-01-20T01:10:38.058287 | 2017-05-21T16:58:37 | 2017-05-21T16:58:37 | 89,228,918 | 0 | 1 | null | 2017-05-02T18:14:49 | 2017-04-24T10:44:09 | HTML | UTF-8 | R | false | true | 577 | rd | GetSubject.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FAIRStudy.R
\name{GetSubject}
\alias{GetSubject}
\title{GetSubject}
\usage{
GetSubject(ID, TRTIndex, AgeAtRand, Cohort, StudyObj)
}
\arguments{
\item{ID}{The subject identification number}
\item{TRTIndex}{The treatment index}
\item{AgeAtRand}{The age at randomization}
\item{Cohort}{A FAIRsimulator \code{cohort} object}
\item{StudyObj}{A FAIRsimulator \code{study} object}
}
\value{
A FAIRsimulator \code{individual} object.
}
\description{
Creates a FAIRsimulator \code{individual} object.
}
|
d3b8dfb59e6ab0c93398f42f2ff7ebb9f0de4a16 | a0c20713538fe09c254acb215c8b140b4f8780be | /Accessing the rows and columns and visualization.R | f360551d333c2c8ff85bd90f46f061c3e3104620 | [] | no_license | Gaurav715/Rprogramming | ef115880e7f11ba9e7338c4d56f5c45a31156e6e | 1d94bd8e39122ecb42ad08f358080fd5baaa466e | refs/heads/master | 2021-04-12T00:42:26.363123 | 2020-03-28T21:21:48 | 2020-03-28T21:21:48 | 249,070,365 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 411 | r | Accessing the rows and columns and visualization.R | x<-c("a","b","c","d","e")
# Print objects to the console (interactive exploration).
x
# NOTE(review): `Games`, `Players` and `Salary` are created elsewhere, not in
# this script — presumably per-season tables with one row per player.
Games
# calling particular columns and rows
Games[1:3,6:10]
Games[c(1,10),]
Games[,c("2008","2009")]
# drop=F keeps the single-cell / single-row result in matrix form instead of
# collapsing it to a vector (style note: prefer FALSE over the alias F).
Games[1,5,drop=F]
Games[1,,drop=F]
# Plot the selected rows of a per-season matrix as overlaid series, one line
# per player, naming them in the legend via the global `Players` vector.
myplot<-function(data,rows){
  # drop=FALSE so a single selected row still comes back as a matrix.
  d<-data[rows,,drop=FALSE]
  # matplot draws one line per column, so transpose to put seasons on x.
  matplot(t(d),type = "b",pch=15:18,col=c(1:4,6))
  legend("bottomleft",inset = 0.001,legend = Players[rows] ,col=c(1:4,6),pch=15:18,horiz = F)
}
myplot(Salary,1:5)
|
87715c4a0ae755d360423b08fc82454197621658 | 8e339bcc1e11a65db22ca0aac7f559c7ceeeb047 | /R/load_latest_forecasts_zoltar.R | a5ebbfacb17129e6c35b50f80762a1a8f055cafc | [] | no_license | eycramer/covidHubUtils | 89d423a954bde981b6348f78f1982e2d641879ec | 61c4819cdf702e431581a865fb39d07f15c39afd | refs/heads/master | 2023-02-03T01:41:28.118397 | 2020-12-21T17:27:08 | 2020-12-21T17:27:08 | 323,137,843 | 0 | 0 | null | 2020-12-21T17:57:44 | 2020-12-20T18:24:44 | null | UTF-8 | R | false | false | 5,193 | r | load_latest_forecasts_zoltar.R | #' Load the most recent forecasts submitted in a time window from zoltar.
#'
#' @param models Character vector of model abbreviations.
#' If missing, forecasts for all models that submitted forecasts
#' meeting the other criteria are returned.
#' @param forecast_dates date vector to load the most recent forecast from
#' @param locations list of valid fips code. Defaults to all locations with
#' available forecasts.
#' @param types character vector specifying type of forecasts to load: “quantile”
#' or “point”. Defaults to c(“quantile”, “point”)
#' @param targets character vector of targets to retrieve, for example
#' c('1 wk ahead cum death', '2 wk ahead cum death'). Defaults to all targets.
#'
#' @return data frame with columns model, forecast_date, location, horizon,
#' temporal_resolution, target_variable, target_end_date, type, quantile, value,
#' location_name, population, geo_type, geo_value, abbreviation
#'
load_latest_forecasts_zoltar <- function(models, forecast_dates, locations,
types, targets){
# validate models
all_valid_models <- get_all_models(source = "zoltar")
if (!missing(models)){
models <- match.arg(models, choices = all_valid_models, several.ok = TRUE)
} else {
models <- all_valid_models
}
# validate locations
all_valid_fips <- covidHubUtils::hub_locations$fips
if (!missing(locations)){
locations <- match.arg(locations, choices = all_valid_fips, several.ok = TRUE)
} else{
locations <- all_valid_fips
}
# validate types
if (!missing(types)){
types <- match.arg(types, choices = c("point", "quantile"), several.ok = TRUE)
} else {
types <- c("point", "quantile")
}
# set up Zoltar connection
zoltar_connection <- zoltr::new_connection()
if(Sys.getenv("Z_USERNAME") == "" | Sys.getenv("Z_PASSWORD") == "") {
zoltr::zoltar_authenticate(zoltar_connection, "zoltar_demo","Dq65&aP0nIlG")
} else {
zoltr::zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"),Sys.getenv("Z_PASSWORD"))
}
# construct Zoltar project url
the_projects <- zoltr::projects(zoltar_connection)
project_url <- the_projects[the_projects$name == "COVID-19 Forecasts", "url"]
# validate targets
all_valid_targets <- zoltr::targets(zoltar_connection, project_url)$name
if (!missing(targets)){
targets <- match.arg(targets, choices = all_valid_targets, several.ok = TRUE)
} else {
targets <- all_valid_targets
}
message("Large queries that span many combinations of forecast dates, models, locations,
and targets can take a long time to process. To reduce run-time of queries,
we encourage users to download a local copy of the COVID-19 Forecast Hub repository
so queries can be run locally: https://github.com/reichlab/covid19-forecast-hub/")
# get all valid timezeros in project
all_valid_timezeros <- zoltr::timezeros(zoltar_connection = zoltar_connection,
project_url = project_url)$timezero_date
# take intersection of forecast_dates and all_valid_timezeros
valid_forecast_dates <- intersect(as.character(forecast_dates),
as.character(all_valid_timezeros))
forecast <- zoltr::do_zoltar_query(zoltar_connection = zoltar_connection,
project_url = project_url,
query_type = "forecasts",
units = locations,
timezeros = valid_forecast_dates,
models = models,
targets = targets,
types = types,
verbose = FALSE)
if (nrow(forecast) ==0){
warning("Warning in do_zotar_query: Forecasts are not available in the given time window.\n Please check your parameters.")
} else {
forecast <- forecast %>%
# only include the most recent forecast submitted in the time window
dplyr::group_by(model) %>%
dplyr::filter(timezero == max(timezero)) %>%
dplyr::ungroup() %>%
# change value and quantile back to double
dplyr::mutate(value = as.double(value),
timezero = as.Date(timezero)) %>%
# keep only required columns
dplyr::select(model, timezero, unit, target, class, quantile, value) %>%
dplyr::rename(location = unit, forecast_date = timezero,
type = class) %>%
# create horizon and target_end_date columns
tidyr::separate(target, into=c("horizon","temporal_resolution","ahead","target_variable"),
remove = FALSE, extra = "merge") %>%
dplyr::mutate(target_end_date = as.Date(
calc_target_end_date(forecast_date, as.numeric(horizon), temporal_resolution)
)) %>%
dplyr::select(model, forecast_date, location, horizon, temporal_resolution,
target_variable, target_end_date, type, quantile, value) %>%
dplyr::left_join(covidHubUtils::hub_locations, by=c("location" = "fips"))
}
return(forecast)
}
|
9002f94ebd631ea811cdcf98ac767eff4db71f48 | d2eda24acceb35dc11263d2fa47421c812c8f9f6 | /man/plot.TS.Rd | 9c000ed0c72fed949bd581da1da83dad49cc024f | [] | no_license | tbrycekelly/TheSource | 3ddfb6d5df7eef119a6333a6a02dcddad6fb51f0 | 461d97f6a259b18a29b62d9f7bce99eed5c175b5 | refs/heads/master | 2023-08-24T05:05:11.773442 | 2023-08-12T20:23:51 | 2023-08-12T20:23:51 | 209,631,718 | 5 | 1 | null | null | null | null | UTF-8 | R | false | true | 578 | rd | plot.TS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source.physics.r
\name{plot.TS}
\alias{plot.TS}
\title{Make TS Plot}
\usage{
plot.TS(
S,
Tmp,
xlim = c(25, 36),
ylim = c(-5, 15),
levels = seq(0, 40, by = 2),
cex.lab = 1,
drawlabels = TRUE,
labels = NULL,
freezing.line = T,
freezing.col = "#00000030",
col.contour = "grey",
lwd = 1,
lty = 1,
xlab = "Practical Salinity",
ylab = "Potential Temperature",
pch = 1,
col = "black",
cex = 1,
main = NULL
)
}
\description{
Make TS Plot
}
\author{
Thomas Bryce Kelly
}
|
9b46dd06af32d8adf3b9d93b71546c070e0b5a3a | a0be475be1571c5d52932e851049e6b74dc71480 | /munge/scrape_quasar.R | 694ccecd5f5da7901fe53d4e18b3aad4f37289a9 | [] | no_license | memebrain/cognitive_load | 02effdeb19c7f704cac44a3fd03a789ca0fe5da6 | 135d0375eb186428ccace78cd15c1bcbae84cdb5 | refs/heads/master | 2016-09-08T02:41:23.474123 | 2013-02-14T00:42:40 | 2013-02-14T00:42:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 967 | r | scrape_quasar.R |
# Scrape QUASAR workload/engagement summary tables from Excel workbooks in ./data.
# NOTE(review): this script never loads the packages it depends on --
# read.xls() comes from gdata and readWorksheetFromFile() from XLConnect;
# presumably they are attached elsewhere before this is sourced. Confirm.

# Everything under ./data; positions 3:5 are assumed to be the workbooks of
# interest -- TODO confirm this positional indexing is stable.
allfiles<-list.files('./data')
interesting<-paste0('./data/',allfiles[3:5])
# Interactively pick a workbook and pull the 'Classification File' sheet.
rawdf<-read.xls(file.choose(), pattern='Classification File',sheet=1, verbose=FALSE, blank.lines.skip=TRUE,header=TRUE,as.is=TRUE)
# Keep rows 2:7 and the five columns of interest, then give them readable names.
cleanQuasar<-rawdf[2:7,c(1,6,7,10,11)]
names(cleanQuasar)<-c('file','AverageLinearEngagement', 'St.Dev.LinearEngagement', 'AverageLinearWorkload', 'St.Dev.LinearWorkload')
# Peek at the first 3 lines of each interesting file.
lines<-sapply(interesting,function(x) readLines(x,3))
datafiles<-dir('./data')
# NOTE(review): datafiles[3] hard-codes the third directory entry -- confirm.
filename<-paste0('./data/',datafiles[3])
sheetrange=c(1:13)
df<-data.frame(workbook=rep(filename,length(sheetrange)),sheet=sheetrange)
# Read one or more sheets (header on row 2, first 11 columns) from a workbook.
workbookloader <- function (filename,sheetrange) {
  readWorksheetFromFile(file=filename,sheet=sheetrange, header=TRUE,startRow=2, endCol= 11)
}
# NOTE(review): `list` here is the base R function, so this loop raises
# "invalid for() loop sequence"; presumably a vector of sheet indices such as
# `sheetrange` was intended -- confirm before running.
for(i in list){
  varname=paste0('./data/','quasar','_coladd_',i)
  datafiles<-dir('./data')
  filename<-paste0('./data/',datafiles[3])
  sheetout<-workbookloader(filename,sheetrange=i)
  save(sheetout,file=varname)
}
|
e9018e6636f17076f67197a958fbd77132817309 | 268165a7f130ea91e80f2fb30e51703bd05ee2a8 | /man/sgmodel.Rd | 89a9f59a2352f31d3ac6162e7ccd99e4e96565f7 | [] | no_license | cran/sgmodel | f4ca5e00ef8d22605c697ee9acd647af9660f4de | 5d6882de69c469fea0aa2443925d73dd6f4055f7 | refs/heads/master | 2020-03-27T03:57:14.032918 | 2020-02-27T11:20:02 | 2020-02-27T11:20:02 | 145,901,110 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,749 | rd | sgmodel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stochastic_growth_model.R
\name{sgmodel}
\alias{sgmodel}
\title{Sgmodel}
\usage{
sgmodel(grid, utiltype, utilparam, A, depre, discount, prod, states, m, rho,
sigma, ...)
}
\arguments{
\item{grid}{A numerical value, the number of capital grid points to consider for k (t). Default value set to 1000.}
\item{utiltype}{The type of preference for the \code{util} function. Can be "log", "CRRA", "CARA", "Cobb-Douglas", "CES". See description of \code{util} for details. Default type set to "log".}
\item{utilparam}{Numerical value, preference parameter for the \code{util} function. See description of \code{util} for details. Default set to 1.}
\item{A}{Numerical value, preference parameter for the \code{util} function. See description of \code{util} for details. Default set to 1.}
\item{depre}{Numerical value for the depreciation parameter. Must be between 0 and 1. Default value set to 1.}
\item{discount}{Numerical value for the discount factor. Must be (strictly) between 0 and 1. Default value set to 0.95.}
\item{prod}{Numerical value for the Cobb-Douglas production function. Must be (strictly) between 0 and 1. Default value set to 0.3.}
\item{states}{Numerical value for the number of states of the Markov process approximating the TFP process. Default value set to 2.}
\item{m}{Numerical value for the \code{Rtauchen} function. See description of \code{Rtauchen} for details. Default value set to 3.}
\item{rho}{Autocorrelation of the TFP AR(1) process, used to approximate the process with a Markov process.}
\item{sigma}{Standard deviation of the white noise in the TFP process, used to approximate the process with a Markov process.}
\item{...}{Additional arguments.}
}
\value{
The function returns a list containing:
\item{Capital grid }{Vector of values for capital.}
\item{Savings }{ Vector of size (\code{grid} x \code{States}) indicating which coordinates of the capital grid are the optimal savings decision.}
\item{Consumption }{Vector of size (\code{grid} x \code{States}) indicating the optimal consumption decisions using the optimal savings decision, and given the capital level of the corresponding coordinate of \code{Capital grid}.}
\item{Z }{States of the TFP process.}
\item{PTM }{The probability transition matrix of the process.}
\item{Production parameter }{The exponent on capital in the Cobb-Douglas production function.}
\item{Utility type }{The type of utility function. See the details of "util" for the available types}
\item{Discount factor }{The discount factor used in the model.}
\item{Depreciation }{The depreciation rate of capital used in the model.}
\item{Rho }{Autocorrelation of the TFP AR(1) process.}
\item{Sigma }{Standard deviation of the white noise in the TFP process.}
}
\description{
The function \code{sgmodel} computes the solutions to a generic stochastic growth model after discretizing the distribution of the stochastic element.
}
\examples{
model <- sgmodel(grid= 100, rho = 0.2, sigma = 0.02)
grid <- 200
utiltype <- "CRRA"
utilparam <- 4
A <- 1
depre <- 0.03
discount <- 0.95
prod <- 0.3
states <- 5
m <- 10
rho <- 0.2
sigma <- 0.02
model <- sgmodel(grid, utiltype, utilparam, A, depre, discount, prod, states, m, rho, sigma)
}
\references{
Tauchen G (1986), Finite state markov-chain approximations to univariate and vector autoregressions.
\emph{Economics letters}, \bold{20}(2), 177--181.
Merton R. C (1971), Optimum consumption and portfolio rules in a continuous-time model.
\emph{Journal of Economic Theory}, \bold{3}(4), 373--413.
URL \url{http://www.sciencedirect.com/science/article/pii/002205317190038X}
}
|
dc462f36c4dbfe6dfeaf5e4f70abbbb4d475f73b | a05a197d4d6e550256009734701abce24aa9e6df | /man/spec.glkDistance.Rd | aa0631f906899a109e3ffaca6eabdd294a689a7d | [] | no_license | Kevin-Jin/TSdist | 4225f343eb7bc870859dea861797cd400474913e | 70b1dbd1beb63fbfe66d625c2de4c45c828b57a6 | refs/heads/master | 2021-01-22T01:05:44.434624 | 2016-05-11T15:40:25 | 2016-05-11T15:40:25 | 38,079,705 | 0 | 0 | null | 2015-06-25T23:26:07 | 2015-06-25T23:26:07 | null | UTF-8 | R | false | false | 2,105 | rd | spec.glkDistance.Rd | \name{spec.glkDistance}
\alias{spec.glkDistance}
\title{
Dissimilarity based on the Generalized Likelihood Ratio Test
}
\description{
The dissimilarity of two numerical series of the same length is calculated based on an adaptation of the generalized likelihood ratio test.
}
\usage{
spec.glkDistance(x, y, ...)
}
\arguments{
\item{x}{
Numeric vector containing the first time series.
}
\item{y}{
Numeric vector containing the second time series.
}
\item{...}{
Additional parameters for the function. See \code{\link[TSclust]{diss.PER}} for more
information.
}
}
\details{
This function simply intends to be a wrapper for the \code{\link[TSclust]{diss.SPEC.GLK}} function of package \pkg{TSclust}. However, in the 1.2.3 version of the \pkg{TSclust} package we have found an error in the call to this function. As such, in this version, the more general \code{\link[TSclust]{diss}} function, designed for distance matrix calculations of time series databases, is used to calculate the spec.glk distance between two series. Once this bug is fixed in the original package, we will update our call procedure.
}
\value{
\item{d}{
The computed distance between the pair of series.
}
}
\references{
Pablo Montero, José A. Vilar (2014). TSclust: An R Package for Time Series
Clustering. Journal of Statistical Software, 62(1), 1-43. URL
http://www.jstatsoft.org/v62/i01/.
}
\author{
Usue Mori, Alexander Mendiburu, Jose A. Lozano.
}
\seealso{
To calculate this distance measure using \code{ts}, \code{zoo} or \code{xts} objects see \code{\link{tsDistances}}. To calculate distance matrices of time series databases using this measure see \code{\link{tsDatabaseDistances}}.
}
\examples{
#The objects example.series1 and example.series2 are two
#numeric series of length 100.
data(example.series1)
data(example.series2)
#For information on their generation and shape see
#help page of example.series.
help(example.series)
#Calculate the ar.mah distance between the two series using
#the default parameters.
spec.glkDistance(example.series1, example.series2)
}
|
3b3d9f412f1271be74f0c8049d76ef6b8588d55b | dfa0394edb42e95fa575c18bc1d28c29b2d5130f | /R Code/Preprocess.R | 89741c60d18a4ca5aa75045e59cbd074f76ce2d6 | [] | no_license | fairypp/ATT_Fall_Case_Competition_Code | d8c79ed6d11871999c71b5fcbe0ebf8d02ca594a | fcd2269e66a3fd884d0076b4e49e37d034ca5db6 | refs/heads/master | 2020-05-23T11:19:49.749668 | 2017-02-20T18:19:46 | 2017-02-20T18:19:46 | 80,385,186 | 0 | 1 | null | 2017-01-31T21:28:08 | 2017-01-30T01:18:04 | R | UTF-8 | R | false | false | 4,806 | r | Preprocess.R | #normalize all review ratings from different platforms to unified scale (1~5)
#for google reviews, review rating is 0~3, we scale rating as follows:
# 0-->1; 1-->3; 2-->4; 3-->5
#use normalized review ratings to determine sentiment: 1~2: negative (-1),
#3: neutral (0), 4~5 positive (1)
#those groundtruth sentiments will be used as the training data to predict the sentiment
#of tweets
att_dallas_review <- read.csv("ATT_dallas_reviews_lvl2.csv")
pos <- which(att_dallas_review$rating>=4)
att_dallas_review[pos,'sentiment'] <- 1
neg <- which(att_dallas_review$rating<=2)
att_dallas_review[neg,'sentiment'] <- -1
neur <- which(att_dallas_review$rating==3)
att_dallas_review[neur,'sentiment'] <- 0
write.csv(att_dallas_review, "ATT_dallas_reviews_lvl2.csv")
spr_dallas_review <- read.csv("Sprint_dallas_reviews_lvl2.csv")
pos <- which(spr_dallas_review$rating>=4)
spr_dallas_review[pos,'sentiment'] <- 1
neg <- which(spr_dallas_review$rating<=2)
spr_dallas_review[neg,'sentiment'] <- -1
neur <- which(spr_dallas_review$rating==3)
spr_dallas_review[neur,'sentiment'] <- 0
write.csv(spr_dallas_review, "Sprint_dallas_reviews_lvl2.csv")
att_dallas_review <- att_dallas_review[0,]
att_dallas_review <- read.csv("Tmobile_dallas_reviews_lvl2.csv")
pos <- which(att_dallas_review$rating>=4)
att_dallas_review[pos,'sentiment'] <- 1
neg <- which(att_dallas_review$rating<=2)
att_dallas_review[neg,'sentiment'] <- -1
neur <- which(att_dallas_review$rating==3)
att_dallas_review[neur,'sentiment'] <- 0
write.csv(att_dallas_review, "Tmobile_dallas_reviews_lvl2.csv")
att_dallas_review <- att_dallas_review[0,]
att_dallas_review <- read.csv("Verizon_dallas_reviews_lvl2.csv")
pos <- which(att_dallas_review$rating>=4)
att_dallas_review[pos,'sentiment'] <- 1
neg <- which(att_dallas_review$rating<=2)
att_dallas_review[neg,'sentiment'] <- -1
neur <- which(att_dallas_review$rating==3)
att_dallas_review[neur,'sentiment'] <- 0
write.csv(att_dallas_review, "Verizon_dallas_reviews_lvl2.csv")
us_gl_review <- read.csv("ATT_US_reviews.csv")
us_gl_review[which(us_gl_review$rating==3),"rating"] <- 5
us_gl_review[which(us_gl_review$rating==2),"rating"] <- 4
us_gl_review[which(us_gl_review$rating==1),"rating"] <- 3
us_gl_review[which(us_gl_review$rating==0),"rating"] <- 1
us_gl_review$sentiment <- 1
pos <- which(us_gl_review$rating>=4)
us_gl_review[pos,'sentiment'] <- 1
neg <- which(us_gl_review$rating<=2)
us_gl_review[neg,'sentiment'] <- -1
neur <- which(us_gl_review$rating==3)
us_gl_review[neur,'sentiment'] <- 0
del <- which(us_gl_review$reviews=="")
if(length(del)>0){
us_gl_review <- us_gl_review[-del,]
}
write.csv(us_gl_review, "ATT_US_reviews.csv")
us_gl_review <- us_gl_review[0,]
us_gl_review <- read.csv("Sprint_US_reviews.csv")
us_gl_review[which(us_gl_review$rating==3),"rating"] <- 5
us_gl_review[which(us_gl_review$rating==2),"rating"] <- 4
us_gl_review[which(us_gl_review$rating==1),"rating"] <- 3
us_gl_review[which(us_gl_review$rating==0),"rating"] <- 1
us_gl_review$sentiment <- 1
pos <- which(us_gl_review$rating>=4)
us_gl_review[pos,'sentiment'] <- 1
neg <- which(us_gl_review$rating<=2)
us_gl_review[neg,'sentiment'] <- -1
neur <- which(us_gl_review$rating==3)
us_gl_review[neur,'sentiment'] <- 0
del <- which(us_gl_review$reviews=="")
if(length(del)>0){
us_gl_review <- us_gl_review[-del,]
}
write.csv(us_gl_review, "Sprint_US_reviews.csv")
us_gl_review <- us_gl_review[0,]
us_gl_review <- read.csv("Tmobile_US_reviews.csv")
us_gl_review[which(us_gl_review$rating==3),"rating"] <- 5
us_gl_review[which(us_gl_review$rating==2),"rating"] <- 4
us_gl_review[which(us_gl_review$rating==1),"rating"] <- 3
us_gl_review[which(us_gl_review$rating==0),"rating"] <- 1
us_gl_review$sentiment <- 1
pos <- which(us_gl_review$rating>=4)
us_gl_review[pos,'sentiment'] <- 1
neg <- which(us_gl_review$rating<=2)
us_gl_review[neg,'sentiment'] <- -1
neur <- which(us_gl_review$rating==3)
us_gl_review[neur,'sentiment'] <- 0
del <- which(us_gl_review$reviews=="")
if(length(del)>0){
us_gl_review <- us_gl_review[-del,]
}
write.csv(us_gl_review, "Tmobile_US_reviews.csv")
us_gl_review <- us_gl_review[0,]
us_gl_review <- read.csv("Verizon_US_reviews.csv")
us_gl_review[which(us_gl_review$rating==3),"rating"] <- 5
us_gl_review[which(us_gl_review$rating==2),"rating"] <- 4
us_gl_review[which(us_gl_review$rating==1),"rating"] <- 3
us_gl_review[which(us_gl_review$rating==0),"rating"] <- 1
us_gl_review$sentiment <- 1
pos <- which(us_gl_review$rating>=4)
us_gl_review[pos,'sentiment'] <- 1
neg <- which(us_gl_review$rating<=2)
us_gl_review[neg,'sentiment'] <- -1
neur <- which(us_gl_review$rating==3)
us_gl_review[neur,'sentiment'] <- 0
del <- which(us_gl_review$reviews=="")
if(length(del)>0){
us_gl_review <- us_gl_review[-del,]
}
write.csv(us_gl_review, "Verizon_US_reviews.csv")
|
2140ac0f9b57778e2b091d247fb4ddbe0aba6717 | 789bd8e74dc9c3bbb73ac1ff15082e0124cca0d0 | /man/getCriticalFeedingLevel.Rd | 380dff91ae41771e973036db8905c15461d6c0d7 | [] | no_license | sizespectrum/mizer | 58e8f41a85c035728498ae99692de1f719552fc2 | d6dbc6bcb2dbbca6c6ecf072caad7a998b54ed8b | refs/heads/master | 2023-06-08T11:42:57.079370 | 2023-06-02T13:45:35 | 2023-06-02T13:45:35 | 5,898,893 | 30 | 31 | null | 2021-09-03T11:53:31 | 2012-09-21T08:51:19 | R | UTF-8 | R | false | true | 543 | rd | getCriticalFeedingLevel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rate_functions.R
\name{getCriticalFeedingLevel}
\alias{getCriticalFeedingLevel}
\title{Get critical feeding level}
\usage{
getCriticalFeedingLevel(params)
}
\arguments{
\item{params}{A MizerParams object}
}
\value{
A matrix (species x size) with the critical feeding level
}
\description{
The critical feeding level is the feeding level at which the food intake is
just high enough to cover the metabolic costs, with nothing left over for
growth or reproduction.
}
|
fe9c6d4d32e4cf73416e6df61cbc7bd27cde6eea | f543e704058be1388bd8aeaae1f28a0b097d6940 | /Driver_SCNAs/example/gene_deal.r | d24f702181c3bd41939f60da3afcfd67dc47e2c7 | [] | no_license | zhouyao-max/Driver_SCNAs | 080b730e2d59cb95320112a22582d0c206442dd0 | 8a452e94222a0a4601de2918df223c90d0fcdda8 | refs/heads/master | 2023-03-27T18:11:54.368778 | 2021-03-29T10:44:51 | 2021-03-29T10:44:51 | 352,605,296 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 366 | r | gene_deal.r | ######To obtain expression profile
###### Keep genes expressed (non-zero) in more than 10% of samples
###### and log2(x + 1)-transform the retained rows.
gene_deal <- function(exp_mat){
  # Filter an expression matrix to adequately-expressed genes, then log-scale.
  #
  # Args:
  #   exp_mat: numeric matrix or data.frame (genes x samples) with gene names
  #     as row names; a value of 0 means "not expressed".
  #
  # Returns:
  #   log2(x + 1)-transformed matrix restricted to genes whose zero count is
  #   below 90% of the number of samples.
  genes <- row.names(exp_mat)
  # Count zeros per gene. na.rm = TRUE matches the original
  # length(which(x == 0)) behaviour of not counting NAs; rowSums replaces the
  # convoluted apply() closure that shadowed an outer accumulator.
  zero_counts <- rowSums(exp_mat == 0, na.rm = TRUE)
  keep <- genes[which(zero_counts < 0.9 * ncol(exp_mat))]
  # drop = FALSE keeps a single kept gene as a 1-row matrix (the original
  # collapsed it to a plain vector for matrix input, yielding a column matrix).
  exp_matrix <- log2(as.matrix(exp_mat[keep, , drop = FALSE]) + 1)
  return(exp_matrix)
}
175f54b68720d30fbec9845fc77d4d3e90633fc3 | 0f110f7055bff1d59cdad676d66f7b780eb3339f | /StochasticGradientDescEx/SGD.R | 327876bab7a18dc0a4a8e6235293007aee3bbdca | [] | no_license | jayantamajumdar/PredictiveModelingCode | 3ecc9a5fcfc6f6c4c1cc62a4f6edb1ccb47d3aa2 | 34a6c6ceea8b46ed1f3613d25beb7b52c25624e6 | refs/heads/master | 2021-01-10T10:37:19.343446 | 2015-06-04T18:36:34 | 2015-06-04T18:36:34 | 36,837,741 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,234 | r | SGD.R | ## Implementation of Stochastic Gradient Descent, without R library, to estimate the parameters of a Ridge Regression
## We use 100 epochs and learning rates of 0.000025, 0.00055 and 0.0075 to find best model
## dataset is derived from https://archive.ics.uci.edu/ml/datasets/Forest+Fires
## data is already partitioned into forestfire-train.csv and forestfire-test.csv
train = read.csv('forestfire-train.csv')
test = read.csv('forestfire-test.csv')
library(ggplot2)
getRMSE <- function(pred, actual) {
error=sqrt(mean((pred-actual)^2))
return(error)
}
addIntercept <- function(mat) {
## add intercept to the matrix
allones= rep(1, nrow(mat))
return(cbind(Intercept=allones, mat))
}
predictSamples <- function(beta, mat) {
## TODO: compute the predicted value using matrix multiplication
## Note that for a single row of mat, pred = sum_i (beta_i * feature_i)
return (mat %*% beta)
}
MAX_EPOCH = 100
## Build sgd function
sgd <- function(learn.rate, lambda, train, test, epoch=MAX_EPOCH) {
## convert the train and test to matrix format
train.mat = as.matrix(train)
test.mat = as.matrix(test)
N = nrow(train.mat)
d = ncol(train.mat)
## standardize the columns of both matrices
for (i in 1:(d-1)){
train.mat[,i]=scale(train.mat[,i])
test.mat[,i]=scale(test.mat[,i])
}
tmat <- addIntercept(train.mat[, -d])
testmat <- addIntercept(test.mat[, -d])
beta = rep(0.5,d)
j = 1
# initialize dataframe to store MSE from our training set
mse.df <- NULL
# predict training residuals
pred_train =predictSamples(beta, tmat)
pred_test = predictSamples(beta, testmat)
tMse = getRMSE(pred_train, train$area)
testMSE = getRMSE(pred_test, test$area)
mse.df <- rbind(mse.df, data.frame(epoch=j, train=tMse, test=testMSE))
# Make 100 passes through training data
while(j < MAX_EPOCH){
j=j+1;
# for each row in the training data
for (n in seq(1:N)){
beta_transpose= t(beta)-learn.rate*((tmat[n,] %*% beta-train.mat[n,d])%*%tmat[n,])
beta=t(beta_transpose)
}
pred_train = predictSamples(beta, tmat)
pred_test = predictSamples(beta, testmat)
tmp_test <- data.frame(pred=pred_test, actual=test$area, type="test")
tmp_train <- data.frame(pred=pred_train, actual=train$area, type="train")
tmp <- rbind(tmp_train, tmp_test)
ggplot(tmp, aes(x=pred, y=actual, color=type)) + theme_bw() + geom_point()
tMse = getRMSE(pred_train, train$area)
testMSE = getRMSE(pred_test, test$area)
mse.df <- rbind(mse.df, data.frame(epoch=j, train=tMse, test=testMSE))
}
return(mse.df)
}
## Plot RMSE vs Epochs to see where our error is minimized for each learning rate
results_0.0075 <- sgd(.0075, .1, train, test, epoch=MAX_EPOCH)
qplot(epoch, test, data = results_0.0075)
results_0.000025 <- sgd(.000025, .1, train, test, epoch=MAX_EPOCH)
qplot(epoch, test, data = results_0.000025)
results_0.00055 <- sgd(.00055, .1, train, test, epoch=MAX_EPOCH)
qplot(epoch, test, data = results_0.00055)
## Minimum RMSE found for each of the learning rates as we continue to make passes through our training data
which.min(results_0.0075$test)
which.min(results_0.00055$test)
which.min(results_0.000025$test)
|
d0e6448df94c8d34f8b6281fc7ceaa155072ac1a | c601c3d466541d7fae1fa83aeb1c12191d8b13a4 | /tests/testthat/test-transaction_functions.R | d383a5383b8fe006bcf3f54e40fb29b757a9ba1c | [] | no_license | phillipdeonatwork/expstudies | c0a4dd57cce88d769e620b1351b36f1931d48c5d | 32cfc5513c9883066e32d37f65ca6f10ffd4df32 | refs/heads/master | 2020-05-28T07:08:08.933024 | 2019-05-26T14:26:46 | 2019-05-26T14:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 983 | r | test-transaction_functions.R | context("test-transaction_functions")
#The top row has no matching date interval in good_PM, the second row has a good date and key,
#the third has a bad key
trans_test <- data.frame(key = c("A", "A", "B"),
trans_date = c(as.Date("1999-01-01"), as.Date("2000-10-15"), as.Date("2000-10-15")),
stringsAsFactors = FALSE)
#Expoected outcome of addStart(good_PM, trans_test)
good_trans_with_start <- data.frame(start_int = as.Date("2000-10-01"),
key = "A",
trans_date = as.Date("2000-10-15"), stringsAsFactors = FALSE)
test_that("Transaction allocation works", {
expect_equal(all.equal(addStart(good_PM, trans_test), good_trans_with_start), TRUE) })
bad_exposures <- good_PY
bad_exposures[1,4] <- as.Date("2000-09-01")
test_that("Error is thrown for modified exposure frame where f_trans has no match", {
expect_error(addStart(bad_exposures, trans_test)) })
|
3ac695f08c3c12094b306c1476de317efb5e46f2 | 8f1463d75905a5190566ff24adb8d1c819c20bf4 | /inst/extdata/testscript.R | fba9fa0eca4aabc476ddf11bc7d4b05b566b2688 | [] | no_license | gdesve/epifield | 57a1f9d8d8d3319c00eefa192a963145c2d95eee | f77b5fc18496588dadd33d9aa79f8eb499dbc225 | refs/heads/master | 2022-01-22T19:25:16.566522 | 2022-01-19T15:04:24 | 2022-01-19T15:04:24 | 137,532,357 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 221 | r | testscript.R | # pour tester certaines functions depuis un script
#sink("inst/extdata/log.txt",append=FALSE)
#sink()
vtest <- c(1,2,3,4)
countif(vtest)
sortBy <- function(a, field) a[order(sapply(a, "[", i = field))]
sortBy(a, "day")
|
9cbec32b118bd4ccc4b863b3a2bc46d91ae3accf | 1f95a32235c1a0a2a0f728dccd1f6661e3ff2903 | /4-from-excel-tables-to-r-data-frames/data-frames-in-r.R | 0e9e51d0990a5dca0e6437d97e2bcb97e1af219f | [] | no_license | summerofgeorge/oreilly-r-powered-excel | cfceb83242cc1600b18c74efcdf840f72fe15a62 | a930b45be2956b32dfad3deeba338d6100b0b645 | refs/heads/master | 2021-06-22T22:37:59.794235 | 2021-03-05T14:16:44 | 2021-03-05T14:16:44 | 203,437,764 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,611 | r | data-frames-in-r.R | # What can the docs tell us
# about data frames?
?data.frame
# Create a base data frame
roster <- data.frame(
name = c("Jack", "Jill", "Billy", "Susie"),
grade = c(90, 85, 62, 97))
# Print and get to know it
roster
# Structure
str(roster)
# Dimensions
dim(roster)
# Number of rows and columns
nrow(roster)
ncol(roster)
# Check out R's built-in datasets
data()
# Let's use iris!
str(iris)
dim(iris)
iris
# Getting to bigger data sets --
# glimpse beginning of data frame instead
head(iris)
# Want to see more?
head(iris, 10)
# Spreadsheet-like viewing environment:
# View()
View(iris)
# Summary statistics
summary(iris)
# I like psych's describe
# for summary statistics
#install.packages("psych")
library(psych)
describe(iris)
# Index a data frame: df[row, column]
# Like Excel INDEX()!
iris[1,1]
iris[2,2]
iris[1:3,1:3]
# Pull non contiguous regions with vectors!
iris[c(5,10,15), c(1,3)]
# Leave arguments blank to get entire rows and columns
# All columns in rows 1:2
iris[1:2,]
# All rows in columns 1:2
iris[ ,1:2]
# Rather than index columns, use $ sign
iris[, 2]
iris$Sepal.Width
# Each column is a vector
is.vector(iris$Sepal.Length)
# Create calculated columns
iris$SepalRatio <- iris$Sepal.Length/iris$Sepal.Width
iris$SepalRatioRoot <- sqrt(iris$SepalRatio)
# Column names are separate from the data
colnames(iris)
is.vector(colnames(iris))
colnames(iris)[c(1,3)]
# Can even re-assign them
colnames(iris) <- c("Sepal.Length","Sepal.Width","Petal.Length",
"Petal.Width", "Species", "Sepal.Ratio", "Sepal.Ratio.Root")
colnames(iris)
|
1dcdc9a363e12c349b6146de7c42c28c7558cd4a | 0761c717ab96d67450d19c71aa1309d006c95eed | /run_analysis.R | 719514dddfed6677e213d17155dd26d94a3a88d6 | [] | no_license | bara811/Data_Cleansing | 481a6f08f8b43f4c4a3e263f91305879d20db4f7 | 667b1af582107e8840a07e0f34999eb616dfe276 | refs/heads/main | 2023-05-02T19:44:00.527986 | 2021-05-30T21:45:29 | 2021-05-30T21:45:29 | 372,320,433 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,612 | r | run_analysis.R | # Load the dplyr package
# Load the dplyr package
library(dplyr)
# Get the list of column names from features.txt
# NOTE(review): the absolute "~/Coursera/..." paths make this script
# machine-specific; consider a relative data directory.
dtNames <- read.table("~/Coursera/Getting_and_Cleansing_Data/Week_4/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/features.txt", header = FALSE)
# Read the file X_test.txt into datatable dtTest
# Read the test subject details in to dtSubTest from the file subject_test.txt
# Read the test activity details in to dtActTest from the file y_test.txt
dtTest <- read.table("~/Coursera/Getting_and_Cleansing_Data/Week_4/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt", header = FALSE)
dtSubTest <- read.table("~/Coursera/Getting_and_Cleansing_Data/Week_4/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt", header = FALSE)
dtActTest <- read.table("~/Coursera/Getting_and_Cleansing_Data/Week_4/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/y_test.txt", header = FALSE)
# Combine the 3 tables together and add a column to label this data as "Test" data
dtTest <- cbind(dtTest, "Test", dtSubTest, dtActTest)
# NOTE(review): names(dtTest)[-3] selects every column name EXCEPT the third
# and recycles "Type" into all of them; it does not label just the new column.
# This is harmless only because names(dtMerged) is fully overwritten below --
# confirm the intended index.
names(dtTest)[-3] <- "Type"
# Read the file X_train.txt into datatable dtTrain
# Read the train subject details in to dtSubTrain from the file subject_train.txt
# Read the train activity details in to dtActTrain from the file y_train.txt
dtTrain <- read.table("~/Coursera/Getting_and_Cleansing_Data/Week_4/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt", header = FALSE)
dtSubTrain <- read.table("~/Coursera/Getting_and_Cleansing_Data/Week_4/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt", header = FALSE)
dtActTrain <- read.table("~/Coursera/Getting_and_Cleansing_Data/Week_4/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/y_train.txt", header = FALSE)
# Combine the 3 tables together and add a column to label this data as "Train" data
dtTrain <- cbind(dtTrain, "Train", dtSubTrain, dtActTrain)
# NOTE(review): same recycled-name issue as for dtTest above.
names(dtTrain)[-3] <- "Type"
# Merge the training and test data sets into one data sets
dtMerged <- rbind(dtTest, dtTrain)
# add the titles of the "Type", "Subject" and "Activity" columns to the dtNames
dtNames[nrow(dtNames)+1,2] <- "Type"
dtNames[nrow(dtNames)+1,2] <- "Subject"
dtNames[nrow(dtNames)+1,2] <- "Activity"
# Assign the column names for dtMerged from dtNames
names(dtMerged) <- dtNames[,2]
# Extract the mean and standard deviation for each measurement
# Put them in a data table called dtMeanSD
dtMeanSD <- select(dtMerged, contains("mean()") | contains("std()") | matches("Type") | matches("Subject") | matches("Activity"))
# Read in the activity names table
dtActivity <- read.table("~/Coursera/Getting_and_Cleansing_Data/Week_4/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/activity_labels.txt", header = FALSE)
# Create a column for the activity names
dtMeanSD['Activity_Name'] <- NA
# Populate the activity names by looping through each row and looking them up in the activity names table (match the activity index)
# NOTE(review): this per-row loop could be done with a single vectorized
# match() on the Activity column; left as-is to avoid behavior changes here.
for (i in 1:nrow(dtMeanSD)){
  dtMeanSD[i, ncol(dtMeanSD)] <- dtActivity[match(dtMeanSD[i,ncol(dtMeanSD)-1],dtActivity[,1]),2]
}
# Create a second, independently tidy data set with the average of each variable for
# each activity and each subject
# Group the data by Subject and Activity_Name and find the mean of each variable.
# Output this to dtSumm
dtSumm <- dtMeanSD %>%
  group_by(Subject, Activity_Name) %>%
  summarise(across(where(is.numeric), mean))
# Write the per-subject / per-activity averages out (empty field separator).
write.table(dtSumm, file = "Summary_Grouped_Data.txt", sep = "")
e83c23e954686531a9d76c1e57eff6aed0d5b9c0 | 0872b355956686bd4e68384d3b83f342ec02bd31 | /aDataBase/server/02-tissues-snd-srv/tissuesSndRecieveSelectorValues.R | 45ff065a36885b63eebca6aeaba33cdf4d7d9378 | [] | no_license | smartscalpel/MSui | ff211af5a0f8eb54cf9bcf488e81a8b06f11e74f | 8de2d77ecd368834c50ec717137f28569d025ac0 | refs/heads/master | 2021-06-08T11:49:55.908612 | 2019-07-16T11:44:42 | 2019-07-16T11:44:42 | 161,582,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,474 | r | tissuesSndRecieveSelectorValues.R | tissuesSndRecieveSelectorValues <- function(label, patientId, location, diagnosisSelector, gradeSelector, coords, timeSelector) {
tissueData <- c("null", "null", "null", "null", "null", "null", "null")
names(tissueData) <- c("label", "patient", "location", "diagnosis", "grade", "dt", "coords")
# label non empty string
tissueData["label"] <- paste("'", label, "'", sep = "")
# patient is non empty integer
tissueData["patient"] <- patientId
# Location Selector
if (location != "" & ! is.null(location)) {
tissueData["location"] <- paste("'", location, "'", sep = "")
}
# Diagnosis Selector
tissueData["diagnosis"] <- diagnosisDictionary$id[match(diagnosisSelector(), diagnosisDictionary$name)]
# Grade Selector
if (gradeSelector[[1]]() != "null") {
tissueData["grade"] <- gradeSelector[[2]]()
}
# Time Selector
if (timeSelector[[1]]() != "null") {
tissueData["dt"] <- paste("'", as.Date(timeSelector[[2]](), format = "yyyy-mm-dd"), "'", sep = "")
}
# Coords Selector
if (coords != "" & ! is.null(coords)) {
tissueData["coords"] <- paste("'", coords, "'", sep = "")
}
return(tissueData)
}
|
11730aceab4a3cf383ac0c6bafbf26843808b716 | a3f70fb3752ea0c8b1984875364e3cabdfdab009 | /examples/Ruby/ruby-multivariate-normal.R | 6a25cc8187df3d59d3a827300fd9b3891ac183fe | [
"BSD-3-Clause"
] | permissive | floidgilbert/jsr223 | 05bd5a2f6394ab0e67d66f943adf207a96470e1a | 69459b33f9435f5fdeeaf5af06382ae79acb888a | refs/heads/master | 2021-11-24T22:40:06.305979 | 2021-11-05T20:49:06 | 2021-11-05T20:49:06 | 119,085,194 | 10 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,420 | r | ruby-multivariate-normal.R | # Demonstrate the use of Apache Commons Mathematics library.
# http://commons.apache.org/proper/commons-math/
library("jsr223")
# Include both the JRuby script engine and the Apache Commons Mathematics
# libraries in the class path. Specify the paths seperately in a character
# vector.
class.path = c(
"lib/jruby-complete-9.1.2.0.jar",
"lib/commons-math3-3.6.1.jar"
)
engine <- ScriptEngine$new("ruby", class.path)
# Define the means vector and covariance matrix that will be used to create the
# bivariate normal distribution.
engine$means <- c(0, 2)
engine$covariances <- diag(1, nrow = 2)
# Import the class and create a new object from the class.
engine %@% "
java_import org.apache.commons.math3.distribution.MultivariateNormalDistribution
$mvn = MultivariateNormalDistribution.new($means, $covariances)
"
# This line would throw an error. JRuby supports 'invokeMethod' for
# native Ruby objects, but not for Java objects.
#
## engine$invokeMethod("mvn", "sample")
# Instead, use script...
engine %~% "$mvn.sample()"
## [1] 0.3279374 0.8652296
# ...or wrap the method in a function.
engine %@% "
def sample()
return $mvn.sample()
end
"
engine$invokeFunction("sample")
## [1] 0.2527757 1.1942332
# Take three samples.
replicate(3, engine$invokeFunction("sample"))
## [,1] [,2] [,3]
## [1,] 0.9924368 -1.295875 0.2025815
## [2,] 2.5145855 2.128243 1.1666272
engine$terminate()
|
c8be45e872f2d9e6c1fd2298e60c04ce36d7fcc8 | 1e6b7b9682e5a5d51df135a8a644273d7cb7108b | /fittinglevy/R/get_stabledist_variates.R | 3a63338d3433c95bd7b7cb82d28346ef716d459a | [] | no_license | Orbis-Amadeus-Oxford/Amadeus-Datawork | 96b1eaa00464ad2676b14927d9468c0136ef501d | 094d1c48384dbd514437ad08979aa7ce1add5b08 | refs/heads/master | 2020-04-01T21:03:32.278584 | 2019-09-25T14:13:50 | 2019-09-25T14:13:50 | 153,635,496 | 0 | 3 | null | 2020-02-24T15:29:57 | 2018-10-18T14:15:40 | R | UTF-8 | R | false | false | 948 | r | get_stabledist_variates.R | # Wrapper function for generating artificial Levy alpha stable samples for parametric bootstrap
# Bootstrap the package manager: install pacman on first use, then load the
# modelling dependencies in one call (p_load installs any that are missing).
# NOTE(review): stabledist is used below via stabledist::rstable() but is not
# listed here — confirm it is guaranteed to be installed elsewhere.
if (!'pacman' %in% installed.packages()[,'Package']) install.packages('pacman', repos='http://cran.r-project.org')
pacman::p_load(boot,dplyr,StableEstim)
get_stabledist_variates <- function(dat, est_levy_qt = NULL) {
  # Draw a Levy alpha-stable sample of the same length as `dat`, for use as a
  # parametric-bootstrap surrogate of the original series.
  #
  # dat:         numeric vector; the original series the surrogate mimics
  # est_levy_qt: optional list of the four stable parameters
  #              (alpha, beta, gamma, delta); estimated with Levy_fun_QT(dat)
  #              when not supplied
  #
  # Returns a numeric vector of random variates (the artificial distribution).
  params <- if (is.null(est_levy_qt)) Levy_fun_QT(dat) else est_levy_qt
  stabledist::rstable(n     = length(dat),
                      alpha = params[[1]],
                      beta  = params[[2]],
                      gamma = params[[3]],
                      delta = params[[4]])
}
|
4481598f683e4c519aca6eeb6b9ed81a85a0f171 | 7ae32e1eca55a53c30aca997ffc18e4b5ae450ba | /R/DR.R | 5d1d397017ecb494233a033dbc960cf999eea77e | [] | no_license | WaverlyWei/heteroEffect | 27701e902e51604acc7a205c9c99bf0259ce8008 | ee90d71ad5712caaba581df7aa58c4ddd6be1930 | refs/heads/master | 2022-12-03T08:52:08.211348 | 2020-08-18T04:24:50 | 2020-08-18T04:24:50 | 288,345,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,427 | r | DR.R | #'
#' This function implements the doubly robust estimator
#'
#' Receives the fitted objects returned from \code{ModelFit()};
#' \code{ModelFit} takes care of weighting, so no additional weighting is
#' needed here.
#'
#' @param type Either \code{"general"} or \code{"finite"}; target-population
#'   flavour of the estimator.
#' @param sublabel Boolean (0/1) vector flagging membership in the subgroup.
#' @param weighted Logical; if \code{TRUE}, use the observation weights stored
#'   in \code{modFit}.
#' @param modFit Fitted model object from \code{ModelFit()}.
#' @return drPsi: doubly robust estimate of the target parameter
#' @import ranger
#' @export
CausalEffect <- function(type = c("general", "finite"),
                         sublabel = NULL,
                         weighted = FALSE,
                         modFit) {
  # Doubly robust estimate of the (subgroup) treatment effect, built from the
  # fitted propensity-score and outcome models in `modFit` (as returned by
  # ModelFit(), which already handles any observation weighting).
  #
  # type:     "general" or "finite"; both use the same correction formula here.
  # sublabel: 0/1 vector flagging subgroup membership; NULL means everyone.
  # weighted: if TRUE, use the observation weights stored in modFit$weight.
  # modFit:   list with psEst (e1, e0), outEst (columns "mu", "mu0", "mu1"),
  #           D, Y and — when weighted — weight.
  #
  # Returns drPsi, the doubly robust point estimate.
  #
  # match.arg() resolves the default length-2 `type` vector; the original
  # compared the full vector with `==` inside `if`, which errors in modern R.
  type <- match.arg(type)
  # extract model objects
  psEst <- modFit$psEst
  outEst <- modFit$outEst
  D <- modFit$D
  Y <- modFit$Y
  # propensity scores for treatment (e1) and control (e0)
  e1 <- psEst$e1
  e0 <- psEst$e0
  # With no subgroup label, estimate over the full sample (all-ones mask).
  if (is.null(sublabel)) {
    sublabel <- rep(1, length(Y))
  }
  # Plug-in outcome-regression estimates of the two arm means.
  if (weighted) {
    weight <- modFit$weight
    u1 <- weighted.mean(sublabel * outEst[, "mu1"], na.rm = TRUE, w = weight)
    u0 <- weighted.mean(sublabel * outEst[, "mu0"], na.rm = TRUE, w = weight)
  } else {
    u1 <- mean(sublabel * outEst[, "mu1"], na.rm = TRUE)
    u0 <- mean(sublabel * outEst[, "mu0"], na.rm = TRUE)
  }
  init <- u1 - u0
  # Inverse-propensity augmentation term.
  correction <- sublabel * (D / e1 - (1 - D) / e0) * (Y - outEst[, "mu"]) +
    outEst[, "mu1"] - outEst[, "mu0"] - (u1 - u0)
  # Apply the correction once, respecting `weighted`.  (Previously the branch
  # result was unconditionally overwritten by a weighted.mean() whose weights
  # were NA whenever weighted = FALSE, so the unweighted path errored out.)
  if (weighted) {
    drPsi <- init + weighted.mean(correction, w = weight)
  } else {
    drPsi <- init + mean(correction)
  }
  drPsi
}
|
c605cfdad36af0d92ea07c951f8da92656063e1a | 14d978463ccd66c6ce6e4a1c2f80197a2a11bf15 | /Interval Estimation/estimation_of_mean_diff1.R | 4751943248b5a951cbcecced51f7afb517d987d9 | [] | no_license | shoaibrayeen/R | 57940229bbb6525058b715649f1bee12348bcd97 | 0b4b870a62cb2de5b89182ca5c9e704702270275 | refs/heads/master | 2022-10-13T01:31:21.907175 | 2022-10-02T18:38:01 | 2022-10-02T18:38:01 | 108,674,965 | 4 | 3 | null | 2022-10-02T18:38:02 | 2017-10-28T18:44:50 | R | UTF-8 | R | false | false | 712 | r | estimation_of_mean_diff1.R | estDiffMean1<-function(avg1,avg2,pVar1,pVar2,n1,n2,alpha) {
# (1 - alpha) z-interval for the difference of two population means when both
# population variances are known.  Returns c(lower, upper).
# NOTE: all arguments must already be numeric; the interactive script below
# reads raw readline() strings, which must be converted with as.numeric().
temp<-(pVar1/n1) + (pVar2/n2) # variance of the mean difference
temp<-sqrt(temp) #calculation avg population SD (standard error of the difference)
qvalue<-qnorm(alpha/2) # lower-tail z critical value; negative for alpha < 1
temp<-temp*qvalue # signed margin of error (negative), so "+" below gives the lower bound
interval<-c(avg1-avg2 + temp,avg1-avg2-temp) #calculation interval: c(lower, upper)
return(interval) #returning interval value
}
# Read the inputs interactively.  readline() returns character strings, so
# each value is converted with as.numeric() before use — the original passed
# raw strings into estDiffMean1(), which errors on non-numeric arithmetic.
avg1 <- as.numeric(readline("Average Mean of Sample for 1st sample : "))
pVar1 <- as.numeric(readline("Population Variance for 1st sample: "))
n1 <- as.numeric(readline("Sample Size for 1st sample: "))
avg2 <- as.numeric(readline("Average Mean of Sample for 2nd sample: "))
pVar2 <- as.numeric(readline("Population Variance for 2nd sample: "))
n2 <- as.numeric(readline("Sample Size for 2nd sample: "))
alpha <- as.numeric(readline("Level of Significance : "))
estDiffMean1(avg1, avg2, pVar1, pVar2, n1, n2, alpha)
|
ef30a0080de22627338f6eceb22fcb7efbf90979 | ddd3559b1f1e58944d679bcbaa813e8453eafc62 | /man/map_coloring.Rd | e79d1c27e84c3eb633bfd5bcc4ad6838b07a2d89 | [] | no_license | cran/tmaptools | 8a7ce9d8ba47cc792cadf8dfe3f231f0db57f961 | d37013ad904edcc5c0dab4db1b6ad561694a64b2 | refs/heads/master | 2021-07-25T01:47:09.925914 | 2021-01-19T19:30:02 | 2021-01-19T19:30:02 | 77,929,207 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,219 | rd | map_coloring.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_coloring.R
\name{map_coloring}
\alias{map_coloring}
\title{Map coloring}
\usage{
map_coloring(
x,
algorithm = "greedy",
ncols = NA,
minimize = FALSE,
palette = NULL,
contrast = 1
)
}
\arguments{
\item{x}{Either a shape (i.e. a \code{\link[sf:sf]{sf}} or \code{SpatialPolygons(DataFrame)} (\code{sp} package) object), or an adjacency list.}
\item{algorithm}{currently, only "greedy" is implemented.}
\item{ncols}{number of colors. By default it is 8 when \code{palette} is undefined. Else, it is set to the length of \code{palette}}
\item{minimize}{logical that determines whether \code{algorithm} will search for a minimal number of colors. If \code{FALSE}, the \code{ncols} colors will be picked by a random procedure.}
\item{palette}{color palette.}
\item{contrast}{vector of two numbers that determine the range that is used for sequential and diverging palettes (applicable when \code{auto.palette.mapping=TRUE}). Both numbers should be between 0 and 1. The first number determines where the palette begins, and the second number where it ends. For sequential palettes, 0 means the brightest color, and 1 the darkest color. For diverging palettes, 0 means the middle color, and 1 both extremes. If only one number is provided, this number is interpreted as the endpoint (with 0 taken as the start).}
}
\value{
If \code{palette} is defined, a vector of colors is returned, otherwise a vector of color indices.
}
\description{
Color the polygons of a map such that adjacent polygons have different colors
}
\examples{
if (require(tmap) && packageVersion("tmap") >= "2.0") {
data(World, metro)
World$color <- map_coloring(World, palette="Pastel2")
qtm(World, fill = "color")
# map_coloring used indirectly: qtm(World, fill = "MAP_COLORS")
data(NLD_prov, NLD_muni)
tm_shape(NLD_prov) +
tm_fill("name", legend.show = FALSE) +
tm_shape(NLD_muni) +
tm_polygons("MAP_COLORS", palette="Greys", alpha = .25) +
tm_shape(NLD_prov) +
tm_borders(lwd=2) +
tm_text("name", shadow=TRUE) +
tm_format("NLD", title="Dutch provinces and\nmunicipalities", bg.color="white")
}
}
|
50e6bc6a071c23e78d02f95cc55f5bbfc389a555 | 6e779770ac08aa0331f797f3825ab81c9f76a595 | /server.r | 122700ede4a3edd30cae08b01b6f43fb9c458bfb | [] | no_license | ssomasekar/Developing-data-products | 614a1471f3731e842d10027843b02b37654f83c1 | 11bc7f98fa9bd9604ba52a39af59ccb27c89406d | refs/heads/master | 2021-01-10T03:44:08.431304 | 2015-11-23T00:53:05 | 2015-11-23T00:53:05 | 46,686,299 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 422 | r | server.r | library(shiny)
data("ChickWeight")
# Shiny server: renders one plot output, re-evaluated whenever input$idx
# changes.
shinyServer(
  function(input, output) {
    output$plot1 <- renderPlot({
      ## Render a line chart
      # NOTE(review): input$idx is used as a column selector into ChickWeight,
      # so it is presumably one of the data set's column names — confirm
      # against the choices offered in ui.r.
      plot(ChickWeight[,input$idx], main = c(input$idx," of the chick on a certain diet"), type = "s", xlab = "Weight", ylab = "# of Chicks")
      meanVal <- mean(ChickWeight[,input$idx])
      # Horizontal reference line at the column mean.
      abline(h=meanVal, col = "red")
      # Assigning a second output from inside renderPlot: meanVal is re-rendered
      # together with the plot each time input$idx changes.
      output$meanVal <- renderPrint(meanVal)
    })
  }
)
|
f1d2578841a2ba0c740919d7105b1ea729ab8683 | f681257722365f95a24cfbe26310345b50a4a4f6 | /tests/testthat.R | bf56d744606e2a5426f73d548703592d9842ba6a | [
"MIT"
] | permissive | idiv-biodiversity/lcvplants | ac3008d1a0ec7a19211e670905a7056442082724 | 4521372afff25f0f8e7f43a2f825277dc6a1c5d4 | refs/heads/master | 2023-05-23T22:01:33.748106 | 2022-11-07T14:48:18 | 2022-11-07T14:48:18 | 215,979,088 | 14 | 4 | NOASSERTION | 2021-09-14T16:56:14 | 2019-10-18T08:37:16 | R | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
library(lcvplants)
# Discover and run every testthat test for the lcvplants package.
test_check("lcvplants")
|
10ce0f57a89cdc6ae02dbce51fa9ee6202a58830 | b24627582a26dfde8256981f4374cd36cae7f1da | /starter/5/validationCurve_solution.R | 9ac4eaa5a5516cc19ddaa5414a1b116beb790880 | [] | no_license | faridcher/ml-course | a91f7e88cff09413b7c65b6cdcaedd93b041cc9a | ed04e18e0ee285f78d131886a86923dfefc1f635 | refs/heads/master | 2023-03-30T07:53:49.970749 | 2021-04-10T21:55:16 | 2021-04-10T21:55:16 | 56,297,188 | 44 | 22 | null | 2021-03-29T07:13:44 | 2016-04-15T06:38:30 | R | UTF-8 | R | false | false | 2,679 | r | validationCurve_solution.R | validationCurve <- function(X, y, Xval, yval) {
  #VALIDATIONCURVE Generate the train and validation errors needed to
  #plot a validation curve that we can use to select lambda
  #   [lambda_vec, error_train, error_val] <- ...
  #       VALIDATIONCURVE(X, y, Xval, yval) returns the train
  #       and validation errors (in error_train, error_val)
  #       for different values of lambda. You are given the training set (X,
  #       y) and validation set (Xval, yval).
  #
  # Selected values of lambda (you should not change this)
  lambda_vec <- c(0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10)
  # You need to return these variables correctly.
  error_train <- rep(0, length(lambda_vec))
  error_val <- rep(0,length(lambda_vec))
  # ----------------------- YOUR CODE HERE -----------------------
  # Instructions: Fill in this function to return training errors in
  #               error_train and the validation errors in error_val. The
  #               vector lambda_vec contains the different lambda parameters
  #               to use for each calculation of the errors, i.e,
  #               error_train[i], and error_val[i] should give
  #               you the errors obtained after training with
  #               lambda <- lambda_vec[i]
  #
  # Note: You can loop over lambda_vec with the following:
  #
  #       for (i in 1:length(lambda_vec)) {
  #             lambda <- lambda_vec[i]
  #             # Compute train / val errors when training linear
  #             # regression with regularization parameter lambda
  #             # You should store the result in error_train[i]
  #             # and error_val[i]
  #             ....
  #
  #       }
  #
  #
  for (i in 1:length(lambda_vec)) {
    lambda <- lambda_vec[i]
    # Fit theta with the current regularization strength.
    theta <- trainLinearReg(X, y, lambda)
    # Set to zero when calculating error since already been trained
    lambda <- 0
    # We are calc J as error not as cost. So, lambda should not be included when
    # calculating error for thetas that have been trained, else it will be biased
    # Refer to 2.1 of Exercise 5, error is computed without lambda
    e_train <- linearRegCostFunction(X, y, lambda)(theta)
    e_val <- linearRegCostFunction(Xval, yval, lambda)(theta) # J over all CV set for new set of theta
    # Accumulating error from i=1:m
    # NOTE(review): this discards the preallocated vectors above and grows the
    # result with rbind(), producing a one-column matrix rather than a vector;
    # indexed assignment (error_train[i] <- e_train) would be the cleaner form.
    if (i == 1)
    {
      error_train <- e_train
      error_val <- e_val
    }
    else
    {
      error_train <- rbind(error_train, e_train)
      error_val <- rbind(error_val, e_val)
    }
  }
  # Return the lambda grid alongside the accumulated errors.
  list(lambda_vec = lambda_vec, error_train = error_train, error_val = error_val)
  # ----------------------------------------------------------------------------
}
5f8ea5e1b0fd67e3f686879939da7bec396876a9 | 0969a8a76b2361bd1301b61503392debe2e454e3 | /Rproject/Rcode/project_package/proj.GR.CHC.package/man/plotGR_splined_compa.Rd | 011997f1f0575f5a050e11a1636017c57111b587 | [] | no_license | daliagachc/GR_chc | 9a85b133919863769e9d655c2e7c63fa3676177a | c40d7a28e18f1cea4c940af44151e48c8926bf55 | refs/heads/master | 2020-06-01T04:00:06.653784 | 2019-06-13T05:33:04 | 2019-06-13T05:33:04 | 190,622,843 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 698 | rd | plotGR_splined_compa.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_plotGR_splined_compa.R
\name{plotGR_splined_compa}
\alias{plotGR_splined_compa}
\title{Plot GR splined}
\usage{
plotGR_splined_compa(splined_rep_df, npfevent_size_frame)
}
\arguments{
\item{splined_rep_df}{Dataframe of evaluated splined functions}
\item{npfevent_size_frame}{data frame containing the growing modes}
}
\value{
a ggplot object
}
\description{
plots GR obtained by the splined approach. Binning is specific (can be found within the fun)
}
\details{
npfevent_size_frame only used as mask to check which trajectories to plot.
}
\examples{
plotGR_splined_compa(splined_rep_df,npfevent_size_frame)
}
|
3ee4a467c0d75c5d031c2cdd76426e271537ae59 | dd8132404e8c7b028cb13cba904c50aace01c6a7 | /swt/src/spc/c1.u/c1_lex.r | 665ce1f577ab7b4cdebe1d4246470b558ad1b6d9 | [] | no_license | arnoldrobbins/gt-swt | d0784d058fab9b8b587f850aeccede0305d5b2f8 | 2922b9d14b396ccd8947d0a9a535a368bec1d6ae | refs/heads/master | 2020-07-29T09:41:19.362530 | 2019-10-04T11:36:01 | 2019-10-04T11:36:01 | 209,741,739 | 15 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,414 | r | c1_lex.r | # getsym --- get a symbol from the input
# getsym: advance the one-symbol lookahead window.  The previously fetched
# lookahead (Nsymbol/Nsymptr/Nsymlen/Nsymline/Nsymtext, shared via the
# common block in c1_com.r.i) becomes the current symbol (Symbol/...);
# then a fresh lookahead token is read, with '#' preprocessor lines handled
# and identifiers resolved against the keyword/macro table.
subroutine getsym
   include "c1_com.r.i"
   untyped info (IDSIZE)
   integer lookup
   # Shift the lookahead into the "current symbol" slots.
   Symbol = Nsymbol
   Symptr = Nsymptr
   Symlen = Nsymlen
   Symline = Nsymline
   if (Nsymlen > 0)
      call scopy (Nsymtext, 1, Symtext, 1)
   else
      Symtext (1) = EOS
   # Keep fetching raw tokens until one survives preprocessing/macro
   # expansion and can serve as the new lookahead.
   repeat {
      call gettok
      if (Nsymbol == '#'c) {
         call c_preprocessor
         next
      }
      # An identifier may be a user macro (expand and retry) or a reserved
      # keyword (replace the generic IDSYM with its keyword code).
      if (Nsymbol == IDSYM && lookup (Nsymtext, info, Keywd_tbl) == YES)
         if (IDTYPE (info) == DEFIDTYPE) {
            call invoke_macro (info)
            next
         }
         else if (IDTYPE (info) == KEYWDIDTYPE)
            Nsymbol = IDPTR (info)
         else
            FATAL ("Undefined IDTYPE in Nsymbol"p)
      break
   }
   DBG (1, call display_symbol (" Symbol='*s'"s, Symbol)
   DB      call print (ERROUT, "        ^*i  '*s'(*i)*n"p, Symptr, Symtext, Symlen))
   return
   end
# gettok --- get next token from input stream
#
# Scans the raw character stream (ngetch) and classifies the next token into
# Nsymbol/Nsymtext/Nsymlen/Nsymline: identifiers, numeric literals, quoted
# strings/chars, and the one-, two- and three-character operators.  Comments
# and newlines are consumed silently.  The local "procedure" definitions
# after the final return are SWT Ratfor internal procedures used above
# (declared "forward" at the top).
subroutine gettok
   include "c1_com.r.i"
   integer i
   integer ctoi, gctoi, index, mapdn
   longint val
   longint gctol
   character c
   procedure analyze_number forward
   procedure check_becomes_op (c, sym) forward
   procedure check_double_op (c, sym1, sym2) forward
   Nsymptr = LAMBDA
   repeat { # until a symbol is found
      Nsymlen = 0
      Nsymtext (1) = EOS
      Nsymline = Line_number (Level)
      # Skip leading blanks and tabs.
      repeat
         ngetch (c)
      until (c ~= ' 'c && c ~= HT)
      select (c)
      when (SET_OF_LETTERS, '_'c, '$'c) {
         # Identifier: letters, digits, '$' and '_' after a leading
         # letter/'_'/'$'.  The -m option folds identifiers to lower case.
         while (IS_LETTER (c) || IS_DIGIT (c) || c == '$'c || c == '_'c) {
            Nsymlen += 1
            Nsymtext (Nsymlen) = c
            ngetch (c)
         }
         call putback (c)
         Nsymtext (Nsymlen + 1) = EOS
         if (ARG_PRESENT (m))
            call mapstr (Nsymtext, LOWER)
         Nsymbol = IDSYM
         break
      } # end of identifier processing
      when (SET_OF_DIGITS) { # Number
         analyze_number
         break
      } # end of number processing
      when ('.'c) { # Number or .
         # A '.' followed by a digit starts a real constant; otherwise it is
         # the '.' operator.
         ngetch (c)
         if (IS_DIGIT (c)) {
            call putback (c)
            c = '.'c
            analyze_number
         }
         else {
            call putback (c)
            Nsymbol = '.'c
         }
         break
      }
      when ('"'c, "'"c) { # quoted strings
         if (c == '"'c)
            Nsymbol = STRLITSYM
         else
            Nsymbol = CHARLITSYM
         call collect_quoted_string (c, Nsymtext, Nsymlen)
         break
      }
      when ('>'c) { # > >< >= >> >>=
         ngetch (c)
         if (c == '<'c)
            Nsymbol = NESYM
         else if (c == '='c)
            Nsymbol = GESYM
         else if (c == '>'c) {
            ngetch (c)
            if (c == '='c)
               Nsymbol = RSHIFTAASYM
            else {
               Nsymbol = RSHIFTSYM
               call putback (c)
            }
         }
         else {
            Nsymbol = '>'c
            call putback (c)
         }
         break
      }
      when ('<'c) { # < <> <= << <<=
         ngetch (c)
         if (c == '>'c)
            Nsymbol = NESYM
         else if (c == '='c)
            Nsymbol = LESYM
         else if (c == '<'c) {
            ngetch (c)
            if (c == '='c)
               Nsymbol = LSHIFTAASYM
            else {
               Nsymbol = LSHIFTSYM
               call putback (c)
            }
         }
         else {
            Nsymbol = '<'c
            call putback (c)
         }
         break
      }
      when ('='c) { # = ==
         check_becomes_op ('='c, EQSYM)
         break
      }
      when ('!'c) { # ! !=
         check_becomes_op ('!'c, NESYM)
         break
      }
      when ('/'c) { # /* */ / /=
         # "/*" starts a comment: swallow through the matching "*/" and
         # restart the token scan; otherwise '/' or '/='.
         ngetch (c)
         if (c == '*'c) {
            ngetch (c)
            repeat {
               while (c ~= '*'c && c ~= EOF)
                  ngetch (c)
               if (c == EOF) {
                  SYNERR ("Missing trailing comment delimiter"p)
                  break
               }
               ngetch (c)
            } until (c == '/'c)
            next
         }
         call putback (c)
         check_becomes_op ('/'c, DIVAASYM)
         break
      }
      when ('-'c) { # -> - -- -=
         ngetch (c)
         if (c == '>'c)
            Nsymbol = POINTSTOSYM
         else {
            call putback (c)
            check_double_op ('-'c, DECSYM, SUBAASYM)
         }
         break
      }
      when ('+'c) { # + ++ +=
         check_double_op ('+'c, INCSYM, ADDAASYM)
         break
      }
      when ('&'c) { # & && &=
         check_double_op ('&'c, SANDSYM, ANDAASYM)
         break
      }
      when ('|'c) { # | || |=
         check_double_op ('|'c, SORSYM, ORAASYM)
         break
      }
      when ('*'c) { # * *=
         check_becomes_op ('*'c, MULAASYM)
         break
      }
      when ('%'c) { # % %=
         check_becomes_op ('%'c, REMAASYM)
         break
      }
      when ('^'c) { # ^ ^=
         check_becomes_op ('^'c, XORAASYM)
         break
      }
      when (NEWLINE)
         next
      else { # single_character symbol
#        if (c == '\'c) {   # check if '\' at end of line
#           ngetch (c)
#           if (c == NEWLINE)
#              next
#           call putback (c)
#           c = '\'c
#           }
         Nsymbol = c
         Nsymtext (1) = c
         Nsymtext (2) = EOS
         Nsymlen = 1
         break
      }
   } # repeat until a symbol is found
   DBG (2, call display_symbol ("Nsymbol='*s'"s, Nsymbol)
   DB      call print (ERROUT, "        ^*i  '*s'(*i)*n"p, Nsymptr, Nsymtext, Nsymlen))
   return
# collect_integer --- collect the digits of an integer
# Appends digits valid in `radix` onto Nsymtext, leaving the first
# non-digit in `c` for the caller.
   procedure collect_integer (radix) {
      integer radix
      local x; integer x
      repeat {
         if (radix <= 10)
            x = c - '0'c + 1
         else
            x = index ("0123456789abcdef"s, mapdn (c))
         if (1 > x || x > radix)
            break
         Nsymtext (Nsymlen + 1) = c # Strange order for efficiency
         Nsymlen += 1
         ngetch (c)
      }
      Nsymtext (Nsymlen + 1) = EOS
   }
# convert_integer --- convert an integer symbol to the specified radix
# Parses Nsymtext into `val`; complains if trailing junk remains.
   procedure convert_integer (radix) {
      integer radix
      local i; integer i
      i = 1
      val = gctol (Nsymtext, i, radix)
      if (Nsymtext (i) ~= EOS)
         SYNERR ("Illegal character in integer constant"p)
   }
# return_integer --- convert 'val' back to characters and determine
#                    data type
# An explicit 'l'/'L' suffix forces a long literal; otherwise the size is
# chosen from the magnitude of `val`.
   procedure return_integer (radix) {
      integer radix
      if (c == 'l'c || c == 'L'c) {
         Nsymbol = LONGLITSYM
         ngetch (c)
      }
      # NOTE(review): 'L' here can never match (consumed by the branch
      # above); presumably 'S' was intended for an explicit short suffix
      # — confirm against the language spec.
      else if (c == 's'c || c == 'L'c) {
         Nsymbol = SHORTLITSYM
         ngetch (c)
      }
      else if (radix == 10)
         if (val > MAXSHORT)
            Nsymbol = LONGLITSYM
         else
            Nsymbol = SHORTLITSYM
      else
         if (val > MAXUNSIGNED)
            Nsymbol = LONGLITSYM
         else
            Nsymbol = SHORTLITSYM
      call ltoc (val, Nsymtext, MAXTOK)
   }
# analyze_number --- collect an integer/real number
# Handles decimal, octal (leading 0), hex (0x...), explicit-radix (<n>r...)
# and floating-point ('.', 'e'/'E' exponent) literals.
   procedure analyze_number {
      collect_integer (10)
      select (c)
      when ('r'c, 'R'c) { # radix specified
         convert_integer (10)
         if (val < 2 | val > 16) {
            SYNERR ("Radix must be between 2 and 16"p)
            val = 16
         }
         ngetch (c)
         Nsymlen = 0
         collect_integer (val)
         convert_integer (val)
         return_integer (16)
      }
      when ('x'c, 'X'c) {
         if (Nsymtext (1) == '0'c && Nsymtext (2) == EOS) {
            ngetch (c)
            Nsymlen = 0
            collect_integer (16)
            convert_integer (16)
         }
         else {
            SYNERR ("Illegal hexadecimal constant"p)
            convert_integer (10)
         }
         return_integer (16)
      }
      when ('.'c, 'e'c, 'E'c) {
         # Floating-point literal: optional fraction, optional signed
         # exponent; the text is kept verbatim in Nsymtext.
         if (c == '.'c) {
            Nsymtext (Nsymlen + 1) = '.'c
            Nsymlen += 1
            ngetch (c)
            collect_integer (10)
         }
         if (c == 'e'c || c == 'E'c) {
            Nsymtext (Nsymlen + 1) = 'e'c
            Nsymlen += 1
            ngetch (c)
            if (c == '-'c || c == '+'c) {
               Nsymtext (Nsymlen + 1) = c
               Nsymlen += 1
               ngetch (c)
            }
            collect_integer (10)
         }
         Nsymbol = DOUBLELITSYM
      }
      else
         # Plain integer: leading '0' selects octal, else decimal.
         if (Nsymtext (1) == '0'c) {
            convert_integer (8)
            return_integer (8)
         }
         else {
            convert_integer (10)
            return_integer (10)
         }
      call putback (c)
   }
# check_becomes_op --- check for a single-character OP or OP=
   procedure check_becomes_op (c, sym) {
      character c
      integer sym
      local d; character d
      ngetch (d)
      if (d == '='c)
         Nsymbol = sym
      else {
         call putback (d)
         Nsymbol = c
      }
   }
# check_double_op --- check for a single character OP, OPOP, or OP=
   procedure check_double_op (c, sym1, sym2) {
      character c
      integer sym1, sym2
      local d; character d
      ngetch (d)
      if (d == c)
         Nsymbol = sym1
      else if (d == '='c)
         Nsymbol = sym2
      else {
         Nsymbol = c
         call putback (d)
      }
   }
   end
# refill_buffer --- refill the input buffer and return first character
#
# Reads the next line of the current include level into Inbuf (leaving room
# for pushback below PBLIMIT).  At end of file, the current file is closed
# and the enclosing include level resumes; when no level remains, EOF is
# returned in `c`.
subroutine refill_buffer (c)
   character c
   include "c1_com.r.i"
   integer getlin
   repeat {
      if (Level < 1) {
         # No input files left: report end of input.
         c = EOF
         Inbuf (PBLIMIT) = EOS
         Ibp = PBLIMIT
         return
      }
      if (getlin (Inbuf (PBLIMIT), Infile (Level)) ~= EOF) {
         Line_number (Level) = Line_number (Level) + 1
         DBG (17, call print (ERROUT, "....*s"s, Inbuf (PBLIMIT)))
         break
      }
      # Current file exhausted: pop one include level and try again.
      call close (Infile (Level))
      Level = Level - 1
      if (Level >= 1) {  # restore the name of the module that  #
                         # included the one we just finished    #
         call scopy (Mem, Fname_table (Level), Module_name, 1)
         call dsfree (Fname_table (Level))
         # and reset the line number to prevent #
         # screwy error message numbering       #
         Symline = Line_number (Level)
      }
   }
   c = Inbuf (PBLIMIT)
   Ibp = PBLIMIT + 1
   return
   end
# putback --- push character back onto input
#
# Stores `c` just below the current buffer pointer so the next ngetch
# re-reads it; the pushback area below PBLIMIT bounds how many characters
# may be pending (overflow is fatal).
subroutine putback (c)
   character c
   include "c1_com.r.i"
   Ibp = Ibp - 1
   if (Ibp >= 1)
      Inbuf (Ibp) = c
   else
      FATAL ("too many characters pushed back"p)
   return
   end
# putback_str --- push string back onto input
#
# Pushes the characters of `str` in reverse order so that subsequent reads
# deliver them front-to-back.
subroutine putback_str (str)
   character str (ARB)
   include "c1_com.r.i"
   integer i
   integer length
   for (i = length (str); i > 0; i = i - 1)
      call putback (str (i))
   return
   end
# putback_num --- push decimal number back onto input
#
# Formats `n` as decimal text via itoc and pushes the digits back onto the
# input stream (via putback_str, so they are read back in order).
subroutine putback_num (n)
   integer n
   integer len
   integer itoc
   character chars (MAXLINE)
   len = itoc (n, chars, MAXLINE)
   chars (len + 1) = EOS
   call putback_str (chars)
   return
   end
# collect_quoted_string --- obtain a quoted string from input
#
# Appends the body of a quoted literal (opened by `quote`, already consumed)
# onto text(tl+1...), advancing `tl`.  Backslash escapes are translated by
# the local analyze_escape procedure.  A newline before the closing quote,
# or overflow past MAXTOK, is a syntax error.
subroutine collect_quoted_string (quote, text, tl)
   character quote, text (ARB)
   integer tl
   include "c1_com.r.i"
   integer gctoi
   character c
   procedure analyze_escape forward
   ngetch (c)
   while (tl < MAXTOK && c ~= NEWLINE && c ~= quote) {
      if (c == '\'c)
         analyze_escape
      else {
         text (tl + 1) = c
         tl += 1
      }
      ngetch (c)
   }
   if (c == NEWLINE)
      SYNERR ("Missing closing quote"p)
   else if (tl >= MAXTOK)
      SYNERR ("Quoted literal too long"p)
   text (tl + 1) = EOS
   return
# analyze_escape --- convert an escaped character in a quoted string
# Handles \n \t \b \r \f, up-to-3-digit octal escapes, and a backslash at
# end of line (line continuation: c is set to EOF so nothing is appended).
   procedure analyze_escape {
      local i; integer i
      local str; character str (5)
      ngetch (c)
      select (c)
      when (NEWLINE)
         c = EOF
      when ('n'c)
         c = NEWLINE
      when ('t'c)
         c = HT
      when ('b'c)
         c = BS
      when ('r'c)
         c = CR
      when ('f'c)
         c = FF
      when ('0'c, '1'c, '2'c, '3'c, '4'c, '5'c, '6'c, '7'c) {
         # Octal escape: gather up to three octal digits, then convert.
         for (i = 1; i <= 3 && '0'c <= c && c <= '7'c; i += 1) {
            str (i) = c
            ngetch (c)
         }
         str (i) = EOS
         call putback (c)
         i = 1
         c = gctoi (str, i, 8)
      }
      if (c ~= EOF) {
         text (tl + 1) = c
         tl += 1
      }
   }
   end
|
8328f36b7a796407a156f99d912abb7a4ba10594 | a3b9e037da87e9822251e77b1a9dda58eea000ad | /man/WL.randProj.test.Rd | 1232a6f009f2ce32e27d7d1858a815584ab33713 | [] | no_license | lingxuez/sLED | 218698569535fcb78b2c1feab4dec19eb583b0bc | 973b72d766a0862f1bbb2677c09ec6f01b25d704 | refs/heads/master | 2021-01-09T07:59:50.183844 | 2017-10-05T13:58:56 | 2017-10-05T13:58:56 | 68,491,244 | 13 | 4 | null | null | null | null | UTF-8 | R | false | true | 1,418 | rd | WL.randProj.test.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation-WL.R
\name{WL.randProj.test}
\alias{WL.randProj.test}
\title{Two-sample covariance test (Wu and Li 2015)}
\usage{
WL.randProj.test(X, Y, nproj = 100, useMC = FALSE, mc.cores = 1)
}
\arguments{
\item{X}{n1 by p matrix, observation of the first population, columns are features}
\item{Y}{n2 by p matrix, observation of the second population, columns are features}
\item{nproj}{number of random projections to use}
\item{useMC}{logical variable indicating whether to use multicore parallelization.
R packages \code{parallel} and \code{doParallel} are required if set to \code{TRUE}.}
\item{mc.cores}{decide the number of cores to use when \code{useMC} is set to \code{TRUE}.}
}
\value{
A list containing the following components:
\item{test.stat}{test statistic}
\item{pVal}{the p-value calculated using the limiting distribution
(max of independent standard normal)}
}
\description{
The two-sample covariance test using random projections
proposed in Wu and Li (2015)
"Tests for High-Dimensional Covariance Matrices Using Random Matrix Projection".
}
\references{
Wu and Li (2015)
"Tests for High-Dimensional Covariance Matrices Using Random Matrix Projection",
arXiv preprint arXiv:1511.01611.
}
\seealso{
\code{Cai.max.test()}, \code{Chang.maxBoot.test()}, \code{LC.U.test()},
\code{Schott.Frob.test()}.
}
|
0d224ac7ef8e606db59ce5d234bce0bdc26f56fa | 0dea415b6391b1ecf3e9a2f7210d09039d8c2409 | /older_stuff/graph_off/feeling_therm_ridges.R | fcbbd77fa34a3c67fc0068ccd3b9ce88c64a1059 | [] | no_license | ryanburge/nd | b9755b9c7f50dc4ea391638c50634e8afcb82e92 | ac9bd778b6480263f6c07d7451f61227a59b0e48 | refs/heads/master | 2021-08-08T19:33:58.137774 | 2020-06-04T19:03:49 | 2020-06-04T19:03:49 | 187,513,068 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,714 | r | feeling_therm_ridges.R | dem <- nd %>% mutate(nonden=car::recode(q3, "1=1; 2:17=0"),
nonden=frcode(nonden==0 ~ "Denominational",
nonden==1 ~ "Non-Denominational")) %>%
group_by(nonden) %>%
mean_ci(q29_1) %>%
na.omit() %>%
mutate(group = "Democrats")
# Per-denomination mean (with CI) of the Republican feeling thermometer
# (q29_2), labelled for later stacking with the Democrat summary.
# NOTE(review): this binding shadows base::rep within the script, and the
# resulting data frame is not used by the plots below — confirm whether it
# feeds a later figure or can be dropped.
rep <- nd %>% mutate(nonden=car::recode(q3, "1=1; 2:17=0"),
                     nonden=frcode(nonden==0 ~ "Denominational",
                                   nonden==1 ~ "Non-Denominational")) %>%
  group_by(nonden) %>%
  mean_ci(q29_2) %>%
  na.omit() %>%
  mutate(group = "Republicans")
# Ridge-density plot of the Democrat feeling thermometer (q29_1) by
# denominational status, saved to dem_therm_ridges.png.
# NOTE(review): the group_by() before ggplot() has no effect on the plot, and
# chaining ggsave() with "+" relies on it saving the last plot as a side
# effect — prefer a separate ggsave() call; confirm before changing.
nd %>% mutate(nonden=car::recode(q3, "1=1; 2:17=0"),
              nonden=frcode(nonden==0 ~ "Denominational",
                            nonden==1 ~ "Non-Denominational")) %>%
  group_by(nonden) %>%
  ggplot(., aes(x = q29_1, y = nonden)) +
  geom_density_ridges_gradient(aes(fill = ..x..), scale =3, size = .03) +
  # scale_fill_gradientn(colours = c("dodgerblue3", "gray", "firebrick3")) +
  theme_gg("Josefin Sans") +
  labs(x = "", y ="", title = "Feeling Thermometer for Democrats") +
  ggsave("dem_therm_ridges.png", type = "cairo-png", width = 10)
# Ridge-density plot of the Republican feeling thermometer (q29_2) by
# denominational status, saved to rep_therm_ridges.png.
# NOTE(review): chaining ggsave() with "+" relies on it saving the last plot
# as a side effect — prefer a separate ggsave() call; confirm before changing.
nd %>% mutate(nonden=car::recode(q3, "1=1; 2:17=0"),
              nonden=frcode(nonden==0 ~ "Denominational",
                            nonden==1 ~ "Non-Denominational")) %>%
  group_by(nonden) %>%
  ggplot(., aes(x = q29_2, y = nonden)) +
  geom_density_ridges_gradient(aes(fill = ..x..), scale =3, size = .03) +
  # scale_fill_gradientn(colours = c("dodgerblue3", "gray", "firebrick3")) +
  theme_gg("Josefin Sans") +
  labs(x = "", y ="", title = "Feeling Thermometer for Republicans") +
  ggsave("rep_therm_ridges.png", type = "cairo-png", width = 10)
55dfe98b4643ee7ccb510e71abde8b841943e05e | 26dea210be60fafab93c89e4bb11d5ff9edeba72 | /04Function/34Create_a_function.R | ee079ba5c44e60f8cbae6362cb3b1ba2a7dec794 | [] | no_license | MomusChao/R | a71df4f7430d644c18f853ad4f06b0838d5545c9 | 014c8e5ec43dc5d02b9faa41b49032ed5c340439 | refs/heads/master | 2021-06-19T02:21:11.297723 | 2020-12-09T22:28:18 | 2020-12-09T22:28:18 | 83,297,248 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 93 | r | 34Create_a_function.R | # create a function
# create a function: add 10 to its argument.
# Like all R arithmetic this is vectorized, so f1(c(1, 2)) gives c(11, 12).
# Uses the idiomatic `<-` assignment and returns the last expression instead
# of a redundant temporary plus explicit return().
f1 <- function(x) {
  x + 10
}
# input a num
f1(10)
1832c5b6fc8e7db3765cc2d79fb43d39fe25beb8 | 187edf294857df8c27ea5a3dcb6093b41d1f32c6 | /R/istack.r | b054bca261a2d6b00e25237ed3a0067f851ddb63 | [
"MIT"
] | permissive | papaemmelab/istack | fba7c8071c619fa9a8e1cd83d4628184ad57eb07 | 713ab603179c662420ca1a8144e5f257d98e8837 | refs/heads/master | 2023-05-30T22:22:04.063222 | 2018-12-15T04:38:45 | 2018-12-15T04:38:45 | 160,869,487 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,326 | r | istack.r | #' Stack custom icons with group coloring
#' @param D Data Frame
#' @param var categorical variable that is to be stacked
#' @param group grouping categorical variable that is to color the icons
#' @param icon link to any image icon on the internet. For coloring to work, it has to be a PNG with a transparent background
#' @param size icon size
#' @param asp icon aspect ratio
#' @param sort logical; if TRUE, order the levels of \code{var} by frequency before stacking
#' @return a ggplot object that can be modified downstream
#' @export
istack = function(D, var, group = NULL, icon, size = 0.03, asp = 1, sort = TRUE) {
  # incase this is a tibble
  D = data.frame(D)
  # giving nicknames: copy the selected columns into fixed names so the
  # plotting code below can refer to them directly
  D['var'] = factor(D[[var]])
  if (!is.null(group)) {
    D['group'] = factor(D[[group]])
  }
  # sort the levels of variable column (least frequent first).  The `sort`
  # argument shadows base::sort, but R's function lookup skips non-function
  # bindings, so the call below still reaches base::sort.
  if (sort) {
    D['var'] = factor(D[['var']], names(sort(table(D[['var']]), decreasing = FALSE)))
  }
  # calculate coordinates: within each level of `var`, number the rows 1..n
  # so each icon gets its own x position; rows are ordered by `group` first
  # so same-colored icons sit next to each other
  D =
    do.call(
      rbind,
      lapply(
        split(D, D['var']),
        function(df) {
          if (!is.null(group)) {
            df = df[order(df['group']),]
          }
          df['n'] = seq(nrow(df))
          return(df)
        }
      )
    )
  D['image'] = icon
  # create the plot; with a grouping column, color the icons and label the legend
  if (!is.null(group)) {
    base = ggplot2::ggplot(data = D, aes(x = n, y = var, color = group)) + ggplot2::labs(color = group)
  } else {
    base = ggplot2::ggplot(data = D, aes(x = n, y = var))
  }
  # one icon per row, with a stripped-down theme and a lightly padded x axis
  p = base +
    ggimage::geom_image(aes(image=image), asp = asp, size = size) +
    ggplot2::theme(
      plot.title = element_text(hjust = 0.5),
      panel.background = element_blank(),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      axis.ticks = element_blank(),
      axis.line = element_blank()
    ) +
    ggplot2::ylab(var) +
    ggplot2::xlab('') +
    ggplot2::scale_x_continuous(breaks = scales::pretty_breaks(4), expand = c(0,0.6))
  return(p)
}
#' Simulate cancer demo dataset
#'
#' Draws a random patient count per disease entry (uniform between
#' round(n/2) and 3*n) and assigns each simulated patient a random treatment.
#' Repeated disease entries weight the mix toward common diagnoses.
#'
#' @param n controls the per-disease patient counts.
#' @return data.frame with columns Disease and Treatment.
#' @export
simulate_cancers <- function(n = 10) {
  diseases <- c(rep("Breast cancer", 3), rep("Prostate cancer", 3),
                "Brain cancer", rep("Colorectal cancer", 2), rep("Colon cancer", 3),
                "Pancreatic cancer", "Thyroid cancer", "Lung cancer",
                "Bladder cancer", "Ovarian cancer", "Sarcoma", "Leukemia")
  treatments <- c("Untreated", rep("Radiotherapy", 3), rep("Chemotherapy", 5),
                  "Targeted therapy")
  # One patient count per disease entry.
  nums <- sample((round(n / 2)):(n * 3), length(diseases), replace = TRUE)
  # rep(..., times = ...) replaces do.call(c, mapply(...)), which errored
  # whenever mapply() simplified its result to a matrix (all counts equal).
  cancers <- data.frame(Disease = rep(diseases, times = nums))
  cancers[["Treatment"]] <- sample(treatments, nrow(cancers), replace = TRUE)
  cancers
}
#' Simulate gym demo dataset
#'
#' Draws a random count per exercise entry (uniform between round(n/2) and
#' 3*n) and assigns each simulated record a random day label.
#'
#' @param n controls the per-exercise counts.
#' @return data.frame with columns exercises and days.
#' @export
simulate_gym <- function(n = 10) {
  exercises <- c(rep("Squat", 3), rep("Bicep", 3),
                 "Stretching", rep("Bench", 2), rep("Legs", 3),
                 "Back", "Dumbbell", "Dead lift",
                 "Pullup", "Weighted pullup", "Hammer")
  days <- c("Monday", rep("Tuesday", 2), "Wednesday",
            "Thursday", "Rest day", rep("No leg day", 4))
  # One record count per exercise entry.
  nums <- sample((round(n / 2)):(n * 3), length(exercises), replace = TRUE)
  # rep(..., times = ...) replaces do.call(c, mapply(...)), which errored
  # whenever mapply() simplified its result to a matrix (all counts equal).
  gym <- data.frame(exercises = rep(exercises, times = nums))
  gym[["days"]] <- sample(days, nrow(gym), replace = TRUE)
  gym
}
dd7ca1d2b317a2bc57f008c85cbf576a3232a6d8 | 90a90a263632b12fa87dfa0bf260e03fca98d47f | /global.R | bf3f477f4a607ff99cda245f406df84f99b0d1db | [] | no_license | yogesh1612/ML_Algos_for_Classification | 81818429e5b936ece73c360f5a9ee29818bb5621 | 7c163e05c667c5b0aee78065199e7bc6508c33ef | refs/heads/main | 2023-05-08T10:49:45.192200 | 2021-06-08T14:43:29 | 2021-06-08T14:43:29 | 371,950,724 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,495 | r | global.R | # Needs to import ROCR package for ROC curve plotting:
library(ROCR)
plot_roc <- function(model0, df0_test) {
  # Plot the binary-classification ROC curve for a fitted caret model and
  # print/annotate its AUC (ROCR-based).  Column 1 of df0_test is the outcome
  # `y`; the remaining columns are the predictors.
  class_probs <- predict(model0, df0_test[, -1], type = "prob")
  roc_pred <- prediction(class_probs[, 2], df0_test$y)
  roc_perf <- performance(roc_pred, "tpr", "fpr")
  auc_value <- performance(roc_pred, measure = "auc")@y.values[[1]]
  print(auc_value)                 # print this below ROC curve in plots tab
  plot(roc_perf, colorize = TRUE)  # display this in output Plots tab
  legend("bottomright",
         bty = "n",
         horiz = FALSE,
         legend = paste0("AUC Score ", round(auc_value, 3)),
         cex = 1)
} # func ends
require(pROC)
plot_roc_multi <- function(model0, df0_test) {
  # Plot multiclass ROC curves (pROC) for a fitted caret model: the first
  # pairwise ROC via plot.roc(), the remaining ones overlaid in distinct
  # colors, annotated with the multiclass AUC.  Column 1 of df0_test is the
  # outcome `y`; the remaining columns are the predictors.
  model_preds <- predict(model0, df0_test[, -1])
  y_test <- as.factor(make.names(df0_test$y))
  a0 <- multiclass.roc(y_test, as.numeric(model_preds))
  # Compute the AUC once (it was previously computed twice).
  auc_val <- auc(a0)
  print(auc_val) # print for display
  # plotting: first pairwise curve, then overlays
  rs <- a0[['rocs']]
  n1 <- length(unique(y_test))
  plot.roc(rs[[1]])
  legend("bottomright",
         bty = "n",
         horiz = FALSE,
         legend = paste0("AUC Score ", round(auc_val, 3)),
         cex = 1)
  # Plain loop instead of sapply(): the iteration is purely for the
  # side effect of drawing, and the sapply() return value was discarded.
  for (i in 2:n1) {
    lines.roc(rs[[i]], col = i)
  }
  invisible(NULL)
} # func ends
#'--- choosing between binary and multiclass ROC --- '
plot_roc_gen <- function(model0, df0_test) {
  # Dispatch to the multiclass or binary ROC plot based on how many
  # distinct outcome classes df0_test$y contains.
  n_classes <- length(unique(df0_test$y))
  if (n_classes > 2) {
    plot_roc_multi(model0, df0_test) # works. Whew.
  } else {
    plot_roc(model0, df0_test)       # works easy
  }
} # func ends
# runfunc --- train the selected classifier with caret and return it together
# with the train/test split.
#
# df0:               data frame whose FIRST column is the outcome `y`
#                    (the logistic branch indexes df0_train[, 1] as y, and
#                    the other branches fit the formula y ~ .).
# kfoldcv_ui:        number of cross-validation folds.
# train_propn_ui:    proportion of rows used for training.
# pred_data:         currently unused; kept for interface compatibility.
# model_selected_ui: "lg_reg", "svm", "nb" or "nn".
# svm_type:          when model_selected_ui == "svm", one of
#                    "SVM_linear_fixed", "SVM_linear_grid", "SVM_RBF",
#                    "SVM_polynomial".
#
# Returns list(fitted_model, df0_train, df0_test).
runfunc <- function(df0,
                    kfoldcv_ui = 5,
                    train_propn_ui = 0.7,
                    pred_data = NULL,
                    model_selected_ui = "lg_reg",
                    svm_type = NULL) {
  # --- basic data prep ---
  # Load the modelling libraries up front: createDataPartition() below comes
  # from caret, so caret must be attached BEFORE it is called (the original
  # loaded caret only after the split, which fails unless caret happened to
  # be attached already).  library() also errors loudly on a missing package,
  # unlike require().
  library(rsample)
  library(caret)
  set.seed(123)
  # The outcome must be a factor with syntactically valid level names for
  # caret's classProbs = TRUE option; numeric outcomes get a "y_" prefix.
  if (is.numeric(df0$y)) {
    df0$y <- as.factor(paste0("y_", df0$y))
  } else {
    df0$y <- as.factor(df0$y)
  }
  # Stratified train/test split on y.
  inTrain <- createDataPartition(y = df0$y, p = train_propn_ui, list = FALSE)
  df0_train <- df0[inTrain, ]
  df0_test <- df0[-inTrain, ]
  # --- common train_control setup for caret ---
  train_control <- trainControl(method = "repeatedcv",
                                number = kfoldcv_ui,
                                repeats = 3,
                                classProbs = TRUE,
                                savePredictions = TRUE)
  # --- logistic regression ---
  if (model_selected_ui == "lg_reg") {
    set.seed(1045)
    logit.CV <- train(x = df0_train[, -1], y = df0_train[, 1],
                      method = "glm",
                      family = "binomial",
                      trControl = train_control)
    return(list(logit.CV, df0_train, df0_test))
  }
  # --- SVM variants (all center/scale the predictors) ---
  if (model_selected_ui == "svm") {
    set.seed(1045)
    if (svm_type == "SVM_linear_fixed") {
      # Linear kernel with caret's default cost.
      set.seed(1045)
      svm2 <- train(y ~ ., data = df0_train, method = "svmLinear",
                    trControl = train_control,
                    preProcess = c("center", "scale"))
      return(list(svm2, df0_train, df0_test))
    }
    if (svm_type == "SVM_linear_grid") {
      # Linear kernel with an explicit grid over the cost C.
      set.seed(1045)
      svm3 <- train(y ~ ., data = df0_train, method = "svmLinear",
                    trControl = train_control,
                    preProcess = c("center", "scale"),
                    tuneGrid = expand.grid(C = seq(0, 3, length = 25)))
      return(list(svm3, df0_train, df0_test))
    }
    if (svm_type == "SVM_RBF") {
      # Non-linear SVM using radial basis functions.
      set.seed(1045)
      svm3a <- train(y ~ ., data = df0_train, method = "svmRadial",
                     trControl = train_control,
                     preProcess = c("center", "scale"),
                     tuneLength = 10)
      return(list(svm3a, df0_train, df0_test))
    }
    if (svm_type == "SVM_polynomial") {
      # Non-linear SVM using polynomial bases.
      set.seed(1045)
      svm4 <- train(y ~ ., data = df0_train, method = "svmPoly",
                    trControl = train_control,
                    preProcess = c("center", "scale"),
                    tuneLength = 4)
      return(list(svm4, df0_train, df0_test))
    }
    # Fail fast instead of silently returning NULL on a typo.
    stop("Unknown svm_type: ", svm_type)
  }
  # --- naive Bayes ---
  if (model_selected_ui == "nb") {
    set.seed(1045)
    # Separate trainControl for cross-validation in classification.
    nb_trControl <- trainControl(method = "cv",
                                 number = kfoldcv_ui,
                                 classProbs = TRUE,
                                 summaryFunction = multiClassSummary)
    system.time({
      set.seed(1045)
      suppressWarnings(
        nb_fit <- train(y ~ .,
                        data = df0_train,
                        method = "nb",
                        trControl = nb_trControl,
                        metric = "ROC")
      )
    })
    return(list(nb_fit, df0_train, df0_test))
  }
  # --- single-hidden-layer neural net ---
  if (model_selected_ui == "nn") {
    set.seed(1045)
    nnet_trControl <- trainControl(method = "cv",
                                   number = kfoldcv_ui,
                                   classProbs = TRUE,
                                   summaryFunction = multiClassSummary)
    set.seed(123)
    system.time({
      set.seed(1045)
      nnet_fit <- train(y ~ .,
                        data = df0_train,
                        method = "nnet",
                        trControl = nnet_trControl,
                        metric = "ROC",
                        tuneGrid = expand.grid(size = c(10), decay = c(0.1)))
    })
    return(list(nnet_fit, df0_train, df0_test))
  }
  # Fail fast instead of silently returning NULL on an unknown model.
  stop("Unknown model_selected_ui: ", model_selected_ui)
}
## Scatter plot of the first two principal components of the numeric
## columns of X, coloured by the class label y.
##
## y: class labels (numeric labels are prefixed with "y_" so they form
##    discrete groups rather than a colour gradient)
## X: data frame / tibble of predictors; non-numeric columns are dropped
##    because princomp() requires numeric input.
## Side effect: draws the plot on the active graphics device.
pca_plot <- function(y, X) {
  # Bug fix: removed the original's no-op self-assignments (y = y; X = X)
  if (is.numeric(y)) {
    y <- as.character(paste0('y_', y))
  }
  X_num <- X %>% dplyr::select(where(is.numeric))
  # Scores of the first two components, PCA on the correlation matrix
  pc_scores <- princomp(X_num, cor = TRUE)$scores[, 1:2]
  plot_df <- data.frame(y = y, x1 = pc_scores[, 1], x2 = pc_scores[, 2])
  p <- ggplot(data = plot_df, aes(x = x1, y = x2, colour = factor(y))) +
    geom_point(size = 4, shape = 19, alpha = 0.6) +
    xlab("PCA compt1") + ylab("PCA compt 2")
  plot(p)
}
|
e1bb2132ae75ee076cb2e8149f65b70b6a37b722 | eb6f8b652a5c600f5f84d67b2e643fb6fe29cd17 | /36_fit_prefintensity_sptemp.R | cfe07c5d7b7cdc8d41ab4dd0c81d7b0b67be1961 | [] | no_license | jkbest2/spatq_sims | b528ab6a81f3a42473700bfa295247152f0186a0 | e2f4c1e65a32ce437e878717773dd41ff3574c6e | refs/heads/master | 2022-08-15T05:01:25.337849 | 2022-05-18T21:33:29 | 2022-05-18T21:33:29 | 225,442,641 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,486 | r | 36_fit_prefintensity_sptemp.R | ## If script is run in the REPL interactively, use the local `spatq` package and
## manually set a range of replicates to fit, because they were probably not
## passed as command line arguments when R was started. Otherwise (typically on
## Hyak via SLURM), use the installed version of `spatq` and read in the
## replicate numbers from the command line arguments.
## Interactive runs use the development copy of spatq and a hard-coded
## replicate range; batch runs (e.g. via SLURM on Hyak) load the
## installed package and read the range from the command line.
if (interactive()) {
  devtools::load_all("~/dev/spatq", helpers = FALSE)
  repl_arg <- c(1, 5)
} else {
  library(spatq)
  repl_arg <- as.numeric(commandArgs(trailingOnly = TRUE))
}
library(tidyverse)
## Where are we working?
root_dir <- "."
## Which simulation study are we fitting?
study <- "prefintensity"
## What range of replicates are going to be fit?
repls <- repl_arg[1]:repl_arg[2]
## How many years to fit?
max_T <- 15
## Tune the optimization routine (maximum evaluations / iterations
## passed as `control` to spatq_fit below)
optcontrol <- list(eval.max = 1000L, iter.max = 750L)
## Names of the operating models
opmods <- 1:6
## Names of the estimation models
estmods <- "survey_spt"
## List all possible combinations of OM/EM in given replicate range
## Build the table of every OM x EM x replicate fit to perform, one row
## per combination, including the result-file path and the per-row
## subsetting and parameter specifications.
specify_fits <- function(study, repls, opmods, estmods, root_dir = ".") {
  # Make sure the result directories exist before building file paths
  create_res_dir(study, repls)
  res_paths <- all_res_file_paths(study, repls, opmods, estmods, root_dir)
  # One row per estimation model / operating model / replicate
  spec_tbl <- cross_df(list(estmod = estmods, opmod = opmods, repl = repls))
  spec_tbl <- mutate(spec_tbl,
                     study = study,
                     Rdata = res_paths$rdata,
                     sub_df = map(estmod, specify_subset),
                     estd = map(estmod, estmod_pars),
                     root_dir = root_dir)
  spec_tbl
}
## Which still need to be fit?
## Subset of the fit specification whose result file does not exist yet.
fits_todo <- function(fit_spec, result_root = "prefintensity/results") {
  already_done <- file.exists(fit_spec$Rdata)
  fit_spec[!already_done, ]
}
## How many observations to use from each ?
## Observation-subsetting rule for an estimation model: keep only the
## survey vessel (vessel_idx = 2) with no row cap (n = 0).  Unknown
## model names yield NULL.
specify_subset <- function(estmod) {
  if (identical(estmod, "survey_spt")) {
    data.frame(vessel_idx = 2, n = 0)
  } else {
    NULL
  }
}
## Specify which parameters to estimate for each estimation model; don't
## estimate catchability parameters if using a single survey vessel.
## Translate an estimation-model name into its spatq parameter
## specification.  Unknown names return invisible NULL, matching the
## no-match behaviour of switch() in the original.
estmod_pars <- function(estmod) {
  if (!identical(estmod, "survey_spt")) {
    return(invisible(NULL))
  }
  # Spatial and spatiotemporal fields for numbers density only;
  # catchability terms all fixed off
  specify_estimated(beta = TRUE,
                    gamma = FALSE,
                    omega = list(omega_n = TRUE,
                                 omega_w = FALSE),
                    epsilon = list(epsilon_n = TRUE,
                                   epsilon_w = FALSE),
                    lambda = FALSE,
                    eta = FALSE,
                    phi = FALSE,
                    psi = FALSE,
                    kappa_map =
                      c(1, NA, 1, NA, NA, NA, NA, NA),
                    obs_lik = 1L)
}
## Specification table of the OM/EM/replicate combinations that still
## lack a saved result file.
fit_list <- fits_todo(specify_fits(study = study,
                                   repls = repls,
                                   opmods = opmods,
                                   estmods = estmods,
                                   root_dir = "."))
## Iterate over rows to fit each model
for (idx in seq_len(nrow(fit_list))) {
  spec <- spatq_simstudyspec(as.list(fit_list[idx, ]))
  # Assemble data and starting values for this replicate/operating model
  setup <- spatq_simsetup(repl = spec$repl,
                          study,
                          spec$opmod,
                          spec$sub_df[[1]],
                          max_T = max_T,
                          root_dir = root_dir,
                          index_step = 1,
                          spec_estd = spec$estd[[1]])
  obj <- spatq_obj(setup,
                   runSymbolicAnalysis = TRUE,
                   normalize = TRUE,
                   silent = TRUE)
  fit <- tryCatch({
    ## Fit with large number of iterations and do it twice so more likely to
    ## reach optimum. Previous fits have ended early and/or with large
    ## gradient components, and many of these did not have PD Hessians
    fit <- spatq_fit(obj = obj, control = optcontrol)
    fit <- spatq_fit(obj = obj, fit = fit, control = optcontrol)
    fit},
    error = function(e) list(fail = TRUE))
  # Each extraction falls back to list(fail = TRUE) so one failed
  # replicate does not abort the rest of the loop
  lpb <- tryCatch(
    gather_nvec(obj$env$last.par.best),
    error = function(e) list(fail = TRUE))
  rep <- tryCatch(
    report_spatq(obj),
    error = function(e) list(fail = TRUE))
  sdr <- tryCatch(
    sdreport_spatq(obj),
    error = function(e) list(fail = TRUE))
  save_fit(spec, fit, lpb, rep, sdr)
  save_index(spec, sdr, feather = FALSE)
}
|
99dfcf9840b58bff8421c3fb0597b4ba678b2b22 | 2ec6726b083ec743e7f221bbb8e2a397c8d6b152 | /RProgramming/week4/rankhospital.R | e707ca96f1bd2c7909bd0270c27324bdae75fb60 | [] | no_license | salma-rodriguez/DataScienceTraining | bc697a23d21762bf13baf5113b0a53bee8253dc1 | a5297d975380e860c46c73f9230e0e7c9dc3f5cd | refs/heads/master | 2020-12-31T05:10:34.942826 | 2020-04-23T03:38:51 | 2020-04-23T03:38:51 | 58,333,024 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,904 | r | rankhospital.R | source("best.R")
## description
## Name of the hospital at rank `num` ("best", "worst", or an integer)
## within `state` for the 30-day mortality rate of `outcome`.
## Returns NA (character) when the rank exceeds the number of hospitals.
rankhospital <- function(state, outcome, num = "best")
{
  ret <- NA
  data <- read.csv("outcome-of-care-measures.csv",
                   colClasses = "character")
  states <- unique(data$State)                                ## valid states
  outcomes <- c("heart attack", "heart failure", "pneumonia") ## valid outcomes
  ## Check that state and outcome are valid
  if (!(state %in% states))
    stop("invalid state")
  if (!(outcome %in% outcomes))
    stop("invalid outcome")
  ## Mortality-rate column name, e.g. "...Rates.from." + capitalised
  ## outcome (simpleCap() is sourced from best.R)
  base <- "Hospital.30.Day.Death..Mortality..Rates.from."
  simple <- simpleCap(outcome)
  s <- sprintf("%s%s", base, simple)
  ## Order by mortality rate ("Not Available" becomes NA and sorts last),
  ## ties broken alphabetically by hospital name
  dat <- data[order(suppressWarnings(as.numeric(data[[s]])),
                    data$Hospital.Name), ]
  fl1 <- dat[[s]] != "Not Available"  ## keep usable rates
  fl2 <- dat$State == state           ## keep the requested state
  tmp <- data.frame(Hospital.Name = dat$Hospital.Name[fl1 & fl2],
                    Rate = dat[[s]][fl1 & fl2],
                    State = dat$State[fl1 & fl2])
  if (identical(num, "best")) {
    ret <- tmp$Hospital.Name[1]
  } else if (identical(num, "worst")) {
    ret <- tmp$Hospital.Name[nrow(tmp)]
  } else {
    ## Bug fix: compare the rank against the number of hospitals in the
    ## state (nrow(tmp)), not the whole data set (nrow(data)), and coerce
    ## `num` so a character rank still indexes numerically.
    rank <- as.numeric(num)
    if (rank <= nrow(tmp)) {
      ret <- tmp$Hospital.Name[rank]
    }
  }
  as.character(ret)
}
|
f3a92fcfaea5602e51a3f7fff57dd6352397dbac | 86243a1fee0721a5abe79b98fa85b94bb23843a9 | /GlobalFun.R | df8faf590324a83a15f03ba487ccb6418abc8523 | [] | no_license | Okenfor/DSCapstone_YieldChallenge | 10081a67ff9150540f8d0d804b306e12a5a8051e | acc8147235aa6f85deaeda19ced5202c68675d42 | refs/heads/master | 2021-01-10T15:55:42.948520 | 2015-11-23T22:59:22 | 2015-11-23T22:59:22 | 46,670,612 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,303 | r | GlobalFun.R | filterByFreq <- function(docTermMatrix, prob) {
freqs <- (slam::col_sums(docTermMatrix))
qt <- quantile(freqs, probs = c(prob))
idxFreqs <- freqs > qt
}
###
#word cloud and other analysis
#http://www.rdatamining.com/examples/text-mining
#https://rstudio-pubs-static.s3.amazonaws.com/31867_8236987cf0a8444e962ccd2aec46d9c3.html
## Word cloud of the most frequent terms of a document-term matrix,
## keeping only terms above the 75th-percentile column frequency.
## Draws on the active graphics device.
getWordCloud <- function(df.terms, title){
  # filterByFreq() returns a logical keep-mask over the columns
  keep <- filterByFreq(df.terms, .75)
  df.terms <- df.terms[, keep]
  require(wordcloud)
  # Total count per term, most frequent first
  term.freqs <- sort(colSums(df.terms), decreasing = TRUE)
  # Fixed seed so the cloud layout is reproducible
  set.seed(1234)
  cloud.cols <- brewer.pal(6, "Dark2")
  wordcloud(names(term.freqs), main = title, term.freqs,
            max.words = 100, rot.per = 0.2, colors = cloud.cols)
}
## Bar chart of the 50 most frequent terms (frequency > 50) of a
## document-term matrix; returns the ggplot object.
getWordHist <- function(df.terms, title){
  term.freqs <- colSums(df.terms)
  freq.df <- data.frame(word = names(term.freqs), freq = term.freqs)
  # Keep common terms only, sorted most-frequent first
  freq.df <- freq.df[freq.df$freq > 50, ]
  freq.df <- freq.df[order(freq.df$freq, decreasing = TRUE), ]
  # Freeze the factor levels in sorted order so the bars keep that order
  freq.df$word <- factor(freq.df$word, as.character(freq.df$word))
  require(ggplot2)
  ggplot(data = freq.df[1:50, ], aes(word, freq)) +
    geom_bar(stat = "identity") +
    theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
    ggtitle(title)
}
######################
#clustering
#http://michael.hahsler.net/SMU/CSE7337/install/tm.R
## do document clustering
## Hierarchical clustering of the *terms* of a document-term matrix:
## cosine-distance Ward dendrogram over terms with total frequency > 3,
## cut into 5 groups drawn with red boxes on the active device.
plotTreeCluster <- function(df.terms){
  # Drop rare terms (total frequency <= 3)
  df.terms.freqs <- colSums(df.terms)
  df.terms <- df.terms[,names(df.terms.freqs[df.terms.freqs>3])]
  m <- as.matrix(df.terms)
  rownames(m) <- 1:nrow(m)
  ### don't forget to normalize the vectors so Euclidean makes sense
  # Scale each document (row) vector to unit Euclidean length
  norm_eucl <- function(m) m/apply(m, MARGIN=1, FUN=function(x) sum(x^2)^.5)
  m_norm <- norm_eucl(m)
  ## hierarchical clustering
  library(proxy)
  library(cluster)
  ### this is going to take 4-ever (O(n^2))
  distances <- dist(t(m_norm), method="cosine") ##warning: t() is the transpose function, we look for clustering words not documents
  hc <- hclust(d=distances, method="ward.D2")
  plot(hc, hang=-1)
  groups <- cutree(hc, k=5) # "k=" defines the number of clusters you are using
  rect.hclust(hc, k=5, border="red") # draw dendogram with red borders around the 5 clusters
  #cl <- cutree(hc, 30)
  #table(cl)
}
############################### |
b5207d49a11fb02beedbe41ff83a36966ad98726 | cfc7df2e5ccd9373da696034819852d15c22f53c | /Analysis3/createGraphs.R | 2255b216598c909ddcc0e75411aecddfd4862616 | [] | no_license | dororo1225/GazeCommunication1 | 16e77113163f2a29823cd30e82e6c128f8d936be | e00262ccece5957b62d0a8fd1aab7fd2d0683be1 | refs/heads/master | 2020-04-13T01:04:11.145165 | 2019-09-29T08:34:08 | 2019-09-29T08:34:08 | 162,863,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,290 | r | createGraphs.R | #### CREATE GRAPHS
library(tidyverse)
library(rstan)
library(loo)
library(ggmcmc)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# load data
# Observation data: one row per session; ID_pair is a numeric pair index
# derived from the pair name
read_csv("Data_processed.csv", col_types = "cdiiiidcdi") %>%
  mutate(ID_pair = as.numeric(as.factor(Name))) -> df
read_csv("Data_locomotion.csv", col_types = "cdcdi") %>%
  mutate(ID_pair = as.numeric(as.factor(Name))) -> df_locomotion
# prepare data list for stan model
# Prediction grid: 40 distances spanning the observed range, crossed with
# every pair and age, then joined to the locomotion data
expand.grid(avg_Distance = seq(min(df$avg_Distance), max(df$avg_Distance), length = 40),
            Name = unique(df$Name),
            AgeinMonths = unique(df$AgeinMonths),
            stringsAsFactors = FALSE) %>%
  as_tibble() %>%
  left_join(df_locomotion, by = c("Name", "AgeinMonths")) %>%
  mutate(id = row_number()) -> df_predict
# Data list passed to the Stan model: observed counts plus the
# prediction-grid covariates
data <- list(N = nrow(df),
             N_pair = length(unique(df$Name)),
             N_bout = df$N_Bout,
             Y = df$N_Infant,
             X1 = df$AgeinMonths,
             X3 = df$avg_Distance,
             ID_pair = df$ID_pair,
             N_predict = nrow(df_predict),
             X1_predict = df_predict$AgeinMonths,
             X3_predict = df_predict$avg_Distance,
             ID_pair_predict = df_predict$ID_pair)
# model fitting
## model_selected.stan
# NOTE(review): `form` is assigned but never used in the visible code;
# presumably a label for the selected formula -- confirm.
form <- c("Age + Dist")
model <- stan_model(file = 'StanModels/model_selected.stan')
# 4 chains x 6000 iterations (1000 warmup, thin 2); draws are also
# written to StanResults/ via sample_file
fit <- sampling(model,
                data = data,
                chains = 4,
                iter = 6000,
                warmup = 1000,
                thin = 2,
                refresh = 0,
                sample_file = "StanResults/model_selected.csv",
                seed = 12345)
# Posterior summary of the regression and variance parameters
# NOTE(review): kable() needs knitr attached, which is not done here -- confirm
summary(fit, pars = c("beta0", "beta1", "beta3", "sigma_session", "sigma_pair"))$summary %>%
  signif(digits = 3) %>% kable()
# Reload the fit from the per-chain CSV sample files written above
fit <- read_stan_csv(csvfiles = sprintf("StanResults/model_selected_%s.csv", 1:4))
# figure S5
# Posterior mean (EAP) and 95% credible interval of every predicted
# quantity q[i], joined back onto the prediction grid via the index
# extracted from the parameter name
ggs(fit) %>%
  filter(str_detect(.$Parameter, pattern = "q")) %>%
  group_by(Parameter) %>%
  summarise(EAP = mean(value),
            q_975 = quantile(value, probs = 0.975),
            q_025 = quantile(value, probs = 0.025)) %>%
  mutate(id = as.numeric(str_extract(.$Parameter, pattern = "[[:digit:]]+"))) %>%
  left_join(df_predict, by = "id") -> df_fit
# Ribbon = 95% credible interval, line = posterior mean, points =
# observed proportions sized by bout count, faceted by pair x age
df_fit %>%
  ggplot(aes(x = avg_Distance)) +
  geom_ribbon(aes(ymax = q_975, ymin = q_025), fill = "grey75") +
  geom_line(aes(y = EAP, color = AgeinMonths), lwd = 1.2) +
  geom_point(data = df, aes(y = N_Infant / N_Bout, color = AgeinMonths, size = N_Bout), alpha = 0.8) +
  facet_grid(Name~AgeinMonths) +
  labs(x = "distance (m)", y = "proportion", color = "age in months", size = "Num EC Bout") +
  guides(size = FALSE) +
  scale_color_viridis_c() +
  scale_fill_viridis_c() +
  scale_x_continuous(breaks = seq(0, 2, by = 0.5), limits = c(0, 2.2)) +
  theme_bw() +
  theme(panel.grid = element_blank(),
        axis.title = element_text(face = "bold", size = 18),
        axis.text = element_text(color = "black"),
        strip.text = element_text(face = "bold", size = 18),
        legend.title = element_text(face = "bold", size = 18))
ggsave(width = 16.4, height = 9.5, file = "figS5.pdf", dpi = 300)
# Session Information
# Print platform details and the attached packages as a markdown table
devtools::session_info() %>% {
  print(.$platform)
  .$packages %>% dplyr::filter(`*` == "*") %>%
    knitr::kable(format = "markdown")
}
f28ceea0adb08460af9ea53b8b2fd1bdd10537da | c8d5e5e8eae8057746f3d72be582c30c1007f285 | /R/interpolate.R | ccf48ef9e665a53056717829f8856696a82320a0 | [] | no_license | cran/hydroToolkit | 2c3e3a42fe1cc1f745d5760642d02bf87bf6ea54 | 260c3b1b33cf195317f23d7c5635e6348bbb2472 | refs/heads/master | 2022-07-07T09:56:47.244814 | 2020-05-16T09:00:02 | 2020-05-16T09:00:02 | 264,451,856 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,726 | r | interpolate.R | #' Interpolation
#'
#' @description This functions applies interpolation to fill in missing (or non-recorded) values.
#'
#' @param df data frame with two columns: 'Date' or 'POSIXct' class in the first column and a numeric variable in the second one.
#' @param miss_table data frame with three columns: first and last date of interpolation (first and second column respectively). The last and third column, is a numeric with the number of steps to interpolate. See \link{report_miss_data}.
#' @param threshold numeric variable with the maximum number of dates in which to apply the interpolation.
#' @param method string with the interpolation method. In this version only 'linear' method is allowed.
#'
#' @return A data frame with date and the interpolated numeric variable.
#'
#' @importFrom stats approx na.omit
#'
#' @export
#'
#' @examples
#' # Create BDHI hydro-met station
#' guido <- create_hydroMet(class_name = 'BDHI')
#'
#' # List with meteorological variables (slots in BDHI's object)
#' cargar <- list('precip', 'Qmd', 'Qmm')
#'
#' # Now assign as names the files
#' hydro_files <- list.files( system.file('extdata', package = "hydroToolkit"), pattern = 'Guido' )
#' names(cargar) <- hydro_files
#'
#' # Build the object with the met records
#' guido <- build_hydroMet(obj = guido, slot_list = cargar,
#' path = system.file('extdata', package = "hydroToolkit") )
#'
#' # Get mean daily discharge and report miss data
#' Qmd <- get_hydroMet(obj = guido, name = 'Qmd')[[1]]
#' miss <- report_miss_data(df = Qmd)
#'
#' # Now interpolate miss values
#' Qmd_fill <- interpolate(df = Qmd, miss_table = miss, threshold = 5, method = "linear")
#'
# Mirar en curso Metodos Matematicos => U1: interpolacion => cuadratica, Lagrange y Newton (hay script de R)
interpolate <- function(df, miss_table, threshold, method = 'linear'){
  #******************************************
  # Argument validation.
  # Bug fix: invalid input previously *returned* the message string
  # instead of signalling an error, which violated the documented
  # contract of returning a data frame.
  #******************************************
  # df: two-column data frame, Date/POSIXct then numeric
  if (!is.data.frame(df)) {
    stop('df must be of class data frame')
  }
  if (ncol(df) != 2) {
    stop('df should have two columns')
  }
  if (class(df[, 1])[1] != 'Date' && class(df[, 1])[1] != 'POSIXct') {
    stop('df[ , 1] must be of class Date or POSIXct')
  }
  if (!is.numeric(df[, 2])) {
    stop('df[ , 2] must be of class numeric')
  }
  # Drop the trailing summary row that report_miss_data() appends
  # (it contains NA, so na.omit() removes it)
  miss_table <- na.omit(miss_table)
  # miss_table: first/last date of each gap and its length in steps
  if (!is.data.frame(miss_table)) {
    stop('miss_table must be of class data frame')
  }
  if (class(miss_table[, 1])[1] != 'Date' && class(miss_table[, 1])[1] != 'POSIXct') {
    stop('miss_table[ , 1] must be of class Date or POSIXct')
  }
  if (class(miss_table[, 2])[1] != 'Date' && class(miss_table[, 2])[1] != 'POSIXct') {
    stop('miss_table[ , 2] must be of class Date or POSIXct')
  }
  if (!is.numeric(miss_table[, 3])) {
    stop('miss_table[ , 3] must be of class numeric')
  }
  # threshold: single numeric upper bound on the gap length
  if (length(threshold) != 1) {
    stop('threshold should be of length one')
  }
  if (!is.numeric(threshold)) {
    stop('threshold should be of class numeric')
  }
  # method: only linear interpolation is implemented
  if (method != 'linear') {
    stop('In this version only linear interpolation is allowed')
  }
  #******************************************
  # Interpolation
  #******************************************
  # Rows of miss_table describing gaps short enough to fill
  gap_rows <- which(miss_table[, 3] <= threshold)
  if (length(gap_rows) == 0) {
    stop('There are no gaps where to interpolate. Check threshold argument!')
  }
  out <- df[, 2]
  for (i in seq_along(gap_rows)) {
    i1 <- which(df[, 1] == miss_table[gap_rows[i], 1]) # first missing position
    i2 <- which(df[, 1] == miss_table[gap_rows[i], 2]) # last missing position
    # Skip gaps touching either end of the series: there is no observed
    # anchor value on one side to interpolate from
    if (i1 != 1 && i2 != nrow(df)) {
      j1 <- i1 - 1  # last observed value before the gap
      j2 <- i2 + 1  # first observed value after the gap
      # approx() drops NA y-values, so only the two anchors are used
      out[i1:i2] <- approx(x = j1:j2, y = df[j1:j2, 2], xout = i1:i2)$y
    }
  }
  df_out <- data.frame(Date = df[, 1], out = out)
  df_out
}
abd0384256b7d8cd6895f79afafa71b324258526 | 661f547a94c05cd9c40a8829689ef8e62d0d470f | /man/GPois.Rd | ba4b0c3d80e21332c418d0da456902e7a82f0f7b | [] | no_license | cran/ProDenICA | a71088a4e200229b3ed9ee5a79a604d0f233c1a1 | 7cd748e3ec158186f313822d59786c69158e2754 | refs/heads/master | 2022-03-09T15:16:31.004307 | 2022-02-21T07:50:23 | 2022-02-21T07:50:23 | 17,681,613 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,570 | rd | GPois.Rd | \name{GPois}
\alias{GPois}
\title{
Fit a tilted Gaussian density via a Poisson GAM}
\description{
This is a contrast method for \code{ProDenICA}. It fits a tilted
Gaussian density estimate by multiplying the Gaussian density by an
exponential tilt function using a cubic smoothing spline
}
\usage{
GPois(x, df = 6, B = 500, order = 1, widen = 1.2, density.return = FALSE, ...)
}
\arguments{
\item{x}{vector of real values}
\item{df}{degrees of freedom for the smoothing-spline fit; default is 6}
\item{B}{number of grid points for density estimate; default is 500}
\item{order}{A robustness parameter to avoid responding to outliers in
\code{x}. The range of \code{x} is estimated by the \code{order}th
and \code{n-order+1}th order statistics. Default is \code{order=1}}
\item{widen}{an expansion factor to widen the range of \code{x};
default is \code{widen=1.2}}
\item{density.return}{logical variable, with default \code{FALSE}. If
\code{density.return=TRUE}, the estimated density is returned}
\item{\dots}{additional arguments to GAM; typically not used}
}
\details{See Section 14.7.4
of 'Elements of Statistical Learning (Hastie, Tibshirani and Friedman,
2009, 2nd Edition)' for details}
\value{
a list with components
\item{Gs}{estimated contrast function, which is the log of the tilting
function, evaluated at the original values of \code{x}. \code{mean(Gs)}
is a measure of negentropy}
\item{gs}{estimated first derivative of \code{Gs} at \code{x}}
\item{gps}{estimated second derivative of \code{Gs} at \code{x}}
\item{density}{if \code{density.return=TRUE}, a list with components
\code{$x} the grid of B values of \code{x}, and \code{$y} the estimated
density.}
}
\references{
Hastie, T. and Tibshirani, R. (2003) \emph{Independent Component Analysis
through Product Density Estimation} in \emph{Advances in Neural Information
Processing Systems 15} (Becker, S. and Obermayer, K., eds), MIT Press,
Cambridge, MA. pp 649-656\cr
Hastie, T., Tibshirani, R. and Friedman, J. (2009) Elements of
Statistical Learning (2nd edition), Springer.\cr
\url{https://hastie.su.domains/ElemStatLearn/printings/ESLII_print12_toc.pdf}
}
\author{
Trevor Hastie and Rob Tibshirani
}
\seealso{
\code{ProDenICA}, \code{G1} and \code{G0}
}
\examples{
p=2
### Can use letters a-r below for dist
dist="n"
N=1024
A0<-mixmat(p)
s<-scale(cbind(rjordan(dist,N),rjordan(dist,N)))
x <- s \%*\% A0
fit=ProDenICA(x,Gfunc=GPois, whiten=TRUE, density=TRUE)
par(mfrow=c(2,1))
plot(fit)
}
\keyword{smooth}
\keyword{distribution}
|
efb2254f790fe0f4da828d25a510694d830c2fcc | ac4217536bceb6a9a5c424e79116e064e770e166 | /rankall.R | 39f980760aaab470f9973c031a754690b9846691 | [] | no_license | srikanthvsvasan/DS-programming-assignment3 | 9002462a8a27ae4a1b92e3d940ccbdaae9c350b8 | 33eebef2e145d2344159e50c718de3cc0df43d08 | refs/heads/master | 2021-08-19T14:42:59.445081 | 2017-11-26T17:41:48 | 2017-11-26T17:41:48 | 111,833,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,740 | r | rankall.R | rankall <- function(outcome, num="best") {
debug<-FALSE
# Read the outcome file. Treat "Not Available" as NA
data <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available",
stringsAsFactors = FALSE)
# Initialize the list mapping column numbers for conditions
outcomes<-c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
# Check if 'outcome' argument is valid
if(! outcome %in% names(outcomes)) {
stop("invalid outcome")
}
# Extract only the necessart columns (Name, State, relevant outcome)
data<-data[,c(2,7, outcomes[outcome])]
# Rename the columns for easier manipulation
names(data)<-c("Name", "State", "Outcome")
# Remove NAs
data<-data[complete.cases(data$Outcome),]
# Order based on the outcome and then Name
data<-data[order(data$Outcome, data$Name),]
# Split state wise
data_state_wise<-split(data, data$State)
print(names(data_state_wise))
# Create the result data frame
df_result<-data.frame(matrix(nrow = 0, ncol=2))
# Assign column names to the data frame
names(df_result)<-c("hospital", "state")
lapply(data_state_wise, extract <- function(x) {
if (identical(num, "best")) {
# Index is the top row as that has the least mortality rate.
r_idx = 1
} else if (identical(num, "worst")) {
# Index is the last row as that has the worst mortality rate.
r_idx = nrow(x)
} else {
# Choose the hostpital name in the row indicated by the rank 'num'
r_idx = num
}
df_result<-rbind(df_result, data.frame(hospital = x[r_idx, 1],
state = x[r_idx, 2]))
print(df_result)
})
}
|
d3620bb6a9c995110c69d16633b9ee98189986bc | dac5515028a17981be8e747c2809aaa2ef44d586 | /Data_cleaning/dataformatting_2006.R | 52a3c34bb6581cba93684743a1bcf897121d7788 | [] | no_license | HallettLab/tulare-labpaper | f74234195ab979182c7d45b545f90f24ce910015 | 6754f3fc72fa99df63aba64e119f26c58d1126c6 | refs/heads/master | 2021-09-16T17:43:15.384671 | 2021-08-04T23:55:19 | 2021-08-04T23:55:19 | 191,231,065 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 926 | r | dataformatting_2006.R | library(tidyverse)
library(readxl)
library(stringr)
## GOPHER, COWPIE, BARE, LITTER
## Read the 2006 plant-cover sheet and split it into environmental
## covariates (dat2006env) and long-format species cover (dat2006).
## NOTE(review): `datpath` is not defined in this file -- presumably set
## by a driver script before this one is sourced; confirm.
dat <- read_excel(datpath,
                  sheet = "Plants 2006", skip = 1)
# Re-read just the header row to recover the original column names
datnames <- read_excel(datpath,
                       sheet = "Plants 2006")[0,]
names(dat) = names(datnames)
names(dat)[1:6] = c("year", "Page", "OddEven", "quadrat","transect", "site")
# Environmental / disturbance columns, one row per quadrat
dat2006env <- dat %>%
  select(quadrat, site, transect, year, GOPHER, BARE, ROCK, LITTER, COWPIE)
# Species cover reshaped to long format; unnamed ("X__") species
# columns are dropped, and only the three TH sites are kept
dat2006 <- dat %>%
  select(-c(GOPHER, BARE, ROCK, LITTER, COWPIE, Page, OddEven)) %>%
  gather(species, cover, "X__7":"X__77") %>%
  mutate(dummyspp = substr(species, 1,3)) %>%
  filter(dummyspp != "X__") %>%
  select(-dummyspp) %>%
  # mutate(site = substr(quadrat, 1,2)) %>%
  filter(site == "TH-UBUG" | site == "TH-BG" | site == "TH-BUG") %>%
  mutate(year = 2006)
# Lookup table of quadrat identifiers present in the 2006 data
key2006 <- dat2006 %>%
  select(quadrat, site, year, transect) %>%
  unique()
rm(dat, datnames)
|
87b441e6e9a0f6486e5fd9ff01f7c113b3436914 | 90b8a58129da0472f3f22f4a403d2775f5b48850 | /Rcode/functions/formatdata_final.R | e1fc353453c4f01c52752c29310af130c0a3b1c8 | [] | no_license | trbal/pim_rank | cdf03f3c91eb0ca673e5180d05223721d7d5590b | 2c1438a1cc1e276724883932f93883bf1f393283 | refs/heads/master | 2020-07-06T04:09:30.656541 | 2019-09-07T13:39:46 | 2019-09-07T13:39:46 | 202,887,214 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,676 | r | formatdata_final.R | #load data per trial
load("/root/Desktop/CNSDAN_Thesis_Template_tex/data/dlp-trials.Rdata")
#load data per stimulus
load("/root/Desktop/CNSDAN_Thesis_Template_tex/data/dataNL.RData")
## Number of syllables for the spelling `x`, looked up in the (global)
## dataNL table.  Bug fix: the original ignored its argument and read
## the loop globals `data.block` and `j` instead, so it only worked by
## accident inside that loop.
findnsyl <- function(x){
  dataNL[which(dataNL$spelling == x), "nsyl"]
}
# For blocks 51-58: attach syllable counts, drop per-participant RT
# outliers (robust z-score on median/MAD > 3), and save both the full
# and the aggregated (mean RT per spelling) data sets to .RData files.
for(i in 51:58){
  cat(i,"\n")
  #extracte data from block 1 and remove NA
  data.block <- na.omit(dlp.trials[dlp.trials$block==i,c("spelling","lexicality","rt","participant")])
  data.block$participant <- droplevels(data.block$participant)
  #add nsyl in data.block
  # Look up each trial's syllable count in the dataNL table
  nsyl <- c()
  dat.sp <- data.block$spelling
  for(j in 1:nrow(data.block)){
    cat(i," ",j,"\n")
    nsyl[j] <- dataNL[which(dataNL$spelling == dat.sp[j]),"nsyl"]
  }
  filen <- paste("fullblock",i,".RData", sep="")
  save(data.block,nsyl,file = filen)
  #remove outliers per participant per nsyl
  # `pt` is recomputed from the current data.block on each iteration,
  # so earlier row removals do not invalidate later indices
  for(j in 1:length(levels(data.block$participant))){
    pt <- which(data.block$participant == levels(data.block$participant)[j])
    med <- median(data.block$rt[pt])
    mda <- mad(data.block$rt[pt])
    rmpt <- intersect(pt, which(abs((data.block$rt - med)/mda) > 3))
    if(!identical(rmpt,integer(0))){
      data.block <- data.block[-rmpt,]
      nsyl <- nsyl[-rmpt]
    }
    rownames(data.block) <- c()
  }
  # Mean RT per spelling x lexicality x syllable count
  data.block <- aggregate(x = data.block$rt,by = list(spelling = data.block$spelling, lexicality = data.block$lexicality, nsyl = nsyl),FUN = mean, simplify = TRUE, drop = TRUE)
  names(data.block)[4] <- "rt"
  filen <- paste("aggblock",i,".RData",sep="")
  data.fin <- data.frame(lexicality = data.block$lexicality, nsyl = data.block$nsyl, rt = data.block$rt)
  save(data.fin, file = filen)
}
|
2b534d9361d329541a8c3e48915e4573f56248c3 | d048a3c3f7add7acd3367548b0dcc4dd24a8e60a | /SSC442 Final Code.R | 5ceac7cf026f50c6d64aab6577dabb59fd4f1cd7 | [] | no_license | dlaird6293/RIPJuiceWRLD | 9a515ebb7fc339b740482f326e80bd1413c801d5 | 98de4196b5d53a175a49d86d3e6c4b45f05cd1eb | refs/heads/master | 2020-12-15T01:29:10.370015 | 2020-04-29T16:09:51 | 2020-04-29T16:09:51 | 234,944,637 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,583 | r | SSC442 Final Code.R | ###################################################
# Group 16
#
# SSC442 Final Project
#
# Visuzlization Code for all fiures found below
###################################################
library(ggplot2)
library(readr)
library(dplyr)
library(RColorBrewer)
library(reshape2)
library(gridExtra)
library(scales)
#-------------------------------------Begin Setting Up Data--------------------------------------
df <- read.csv("vgsales.csv")
# Square Enix subset of the full sales data
SQ <- df[df$Publisher=="Square Enix",]
# Region labels and hand-computed Square Enix totals (millions of units)
sl <- c("EU", "Japan", "North America", "Other_Sales")
tot <- c(32.82,49.88,48.65,13.89)
regionsperperson <- c("EU", "Japan", "North America")
# Regional populations scaled to millions (sources cited below)
EU <- 446824564 /1000000
JP <- 126543135/1000000
Na <- 368474072 /1000000
# Units sold per person in each region
salesperperson <- c(32.82/EU,49.88/JP,48.65/Na)
per_person <- data.frame("Location"=regionsperperson, "Sales"=salesperperson)
Saletots <- data.frame("Location"=sl,"Sales"=tot)
# NOTE(review): `sales` is not defined anywhere in this file, so the
# next two lines will error -- presumably it should hold the sales
# column names; confirm.
SQ_Sales <- data.frame(SQ[sales])
Sale_totals <- data.frame(colSums(SQ_Sales))#
#EU population as of 2017 446,824,564 https://en.wikipedia.org/wiki/Demographics_of_the_European_Union
#JP Population as of 2020 126,543,135 https://www.worldometers.info/world-population/japan-population/
#NA Population as of 2020 368,474,072 https://www.worldometers.info/world-population/northern-america-population/
# Per-genre regional totals for all publishers and for Square Enix only,
# melted to long format for the grouped bar charts below
genre_sales = df %>% group_by(Genre) %>% summarise(GlobalSales = sum(Global_Sales),
                                                   NA_Sales = sum(NA_Sales),
                                                   EU_Sales = sum(EU_Sales),
                                                   JP_Sales = sum(JP_Sales))
Enix_sales = SQ %>% group_by(Genre) %>% summarise(GlobalSales = sum(Global_Sales),
                                                  NA_Sales = sum(NA_Sales),
                                                  EU_Sales = sum(EU_Sales),
                                                  JP_Sales = sum(JP_Sales))
Enix_sales = melt(Enix_sales)#Use For the showing of popular genres within SQ Enix
names(Enix_sales) = c('Genre','SaleType','Sale')
genre_sales = melt(genre_sales)#Use For the showing of popular genres
names(genre_sales) = c('Genre','SaleType','Sale')
#FIG 1
# Square Enix sales per genre, grouped by region/sale type
ggplot(data=Enix_sales,aes(x = reorder(Genre, -Sale), y = Sale, fill=SaleType)) +
  geom_bar(stat='identity', position='dodge') +
  theme_bw()+
  theme(axis.text.x = element_text(hjust=1,angle=45),
        plot.title = element_text(hjust=0.5)) + ## center
  ggtitle('Square Enix Sales by Genre') +
  scale_fill_brewer(palette = 'RdYlBu')+xlab("Genre")+
  ylab('Sales')
#FIG2
# Same chart for the whole industry
ggplot(data=genre_sales,aes(x = reorder(Genre, -Sale),y = Sale,fill=SaleType)) +
  geom_bar(stat='identity', position='dodge') +
  theme_bw()+
  theme(axis.text.x = element_text(hjust=1,angle=45),
        plot.title = element_text(hjust=0.5)) + ## center
  ggtitle('All Sales by Genre') +
  scale_fill_brewer(palette='Set1')+xlab("Genre")+
  ylab('Sales')
#FIG 4
# Square Enix total unit sales per region
sale_plot <- Saletots%>%
  ggplot(aes(x=reorder(Location, -Sales),y=Sales))+
  geom_bar(stat="identity",fill="#b38184")+
  ggtitle("Square Enix Total Sales By Region")+
  xlab("Sales By Location")+ylab("Sales (Millions of units)")+
  theme(plot.title = element_text(hjust = 0.5))
sale_plot
#FIG5
# Square Enix unit sales per capita by region
sales_per_person_graph <- per_person%>%
  ggplot(aes(x=reorder(Location, -Sales),y=Sales))+
  geom_bar(stat="identity",fill="#5ac18e")+
  ggtitle("Unit Sales Per Person")+
  xlab("Sales By Location")+ylab("Sales (Units Per Person)")+
  theme(plot.title = element_text(hjust = 0.5))
sales_per_person_graph
# REGRESSION MODEL #
# 50/50 train/test split of the full sales data
# NOTE(review): no set.seed() before sample(), so the split (and every
# RMSE below) changes on each run -- confirm this is intended.
num_obs = nrow(df)
train_index = sample(num_obs, size = trunc(0.50 * num_obs))
train_data = df[train_index, ]
test_data = df[-train_index, ]
# Factor copies of Genre/Platform
# NOTE(review): the models below use the raw columns, not these .f copies
train_data$Genre.f <- factor(train_data$Genre)
train_data$Platform.f <- factor(train_data$Platform)
# Intercept-only, genre-only and platform-only linear models
fit_0 = lm(Global_Sales ~ 1, data = train_data)
fit_1=lm(Global_Sales ~ (Genre), data = train_data)
fit_2=lm(Global_Sales ~ (Platform), data = train_data)
#RMSE Function
# Root-mean-squared error between observed and predicted values.
rmse <- function(actual, predicted) {
  squared_errors <- (actual - predicted)^2
  sqrt(mean(squared_errors))
}
# RMSE of `model`'s predictions on `data` for the response column named
# by the string `response`.
better_rmse <- function(model, data, response) {
  observed <- subset(data, select = response, drop = TRUE)
  predicted_vals <- predict(model, data)
  rmse(actual = observed, predicted = predicted_vals)
}
#train RMSE
print(paste0("Fit 1 Train: ", sqrt(mean((train_data$Global_Sales - predict(fit_1, train_data)) ^ 2))))
print(paste0("Fit 2 Train: ", sqrt(mean((train_data$Global_Sales - predict(fit_2, train_data)) ^ 2))))
# test RMSE
print(paste0("Fit 1 Test: ", sqrt(mean((test_data$Global_Sales - predict(fit_1, test_data)) ^ 2))))
print(paste0("Fit 2 Test: ", sqrt(mean((test_data$Global_Sales - predict(fit_2, test_data)) ^ 2))))
# Our RMSE values are both low and close together in value, so we can
# argue the model is neither overfitted nor underfitted.
model_list = list(fit_1, fit_2)
# Using sapply() to get train RMSE and test RMSE
train_rmse = sapply(model_list, better_rmse, data = train_data, response = "Global_Sales")
test_rmse = sapply(model_list, better_rmse, data = test_data, response = "Global_Sales")
# Get Complexity: number of estimated coefficients excluding the intercept,
# used as the x-axis of the complexity-vs-RMSE plot below.
get_complexity = function(model) {
  n_coefficients <- length(coef(model))
  n_coefficients - 1
}
# Plot train-set RMSE against model complexity (non-intercept coefficient
# count) to eyeball over/under-fitting; y-limits span both RMSE series so
# the overlaid curves share one scale.
model_complex = sapply(model_list, get_complexity)
plot(model_complex, train_rmse, type = "b",
ylim = c(min(c(train_rmse, test_rmse)),
max(c(train_rmse, test_rmse))),
xlab = "Model Complexity Size",
ylab = "RMSE")
# Overlay the test-set RMSE curve in red for comparison.
lines(model_complex, test_rmse, type = "b", col = "red")
# Coefficient tables and fit statistics for each candidate model.
summary(fit_1)
summary(fit_2)
summary(fit_0)
|
7c1d55ee7ef685262874e1ae0b48f7ae0022416a | 23c125fce8877c36c6a30aede40e61f47d807fee | /test.R | 4a7b0afcded29ade9bab7817421a26a5fde00c66 | [] | no_license | Teddysatrio/MarketFit | d80d4ba2f5dfc7b08c79bc93445684c3d5937a7e | b0841c29b96ae6c5b99abdc2176ef3bfbe3bed22 | refs/heads/main | 2023-08-20T23:22:13.311686 | 2021-10-14T07:22:27 | 2021-10-14T07:22:27 | 411,200,511 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 35,115 | r | test.R | library(shiny)
library(shinythemes)
library(tidyverse)
library(cat)
library(plotly)
library(shinydashboard)
# --- UI definition ----------------------------------------------------------
# shinydashboard layout: header only (the dashboard sidebar is disabled); the
# body holds a form (sidebarPanel) describing the proposed package plus a
# main panel with the comparison results filled in by the server.
ui <- dashboardPage(
dashboardHeader(title = "Best Market Fit Offer"),
dashboardSidebar(disable = TRUE),
dashboardBody(
# Input form: each widget's inputId (e.g. "package_price", "SOP") is read by
# the server's reactives.
div(style = 'overflow-x: scroll', sidebarPanel(width = 3,
tags$h3("Input Detail New Package:", style="font-size:30px; font-style:bold; color:black"),
textInput("package_name_detail", label = h3("Package Name :", style="font-size:17px"), ""),
radioButtons("status", label = h2("Status", style="font-size:17px"),
choices = list("Active", "Not Active" ),
),
selectInput("pc", label = h2("Package Category", style="font-size:17px"),
choices = list("Prepaid" , "Postpaid", "Both"),
),
#dateInput("date", label = h3("Launch Date")),
radioButtons("pas", label = h2("Package Activity Status", style="font-size:17px"),
choices = list("Yes" , "No"),
),
numericInput("package_price", label = h2("Package Price", style="font-size:17px"), value = 0),
radioButtons("pt1", label = h2("Package Type 1 (with per day data limit till validity)", style="font-size:17px"),
choices = list("Yes" , "No" ),
),
radioButtons("pt2", label = h2("Package Type 2 (package with unlimited usage data per day)", style="font-size:17px"),
choices = list("Yes" , "No" ),
),
numericInput("package_validity", label = h2("Package Validity", style="font-size:17px"), value = 0),
radioButtons("vas", label = h2("VAS (Value Added Service)", style="font-size:17px"),
choices = list("Yes" , "No" ),
),
radioButtons("tvas", label = h2("Type of VAS", style="font-size:17px"),
choices = list("VAS" , "Yes", "No"),
),
selectInput("pbc", label = h2("Package Business Category", style="font-size:17px"),
choices = list("BB" , "CLM" , "Corporate" , "International" , "My Special Plan", "MySmartVALUE",
"Paket Video Harian", "PUAS Plan", "QOS", "Service Plan", "Simas Plan", "Switch", "VAS", "Voice"),
),
selectInput("category_business", label = h2("Category Business", style="font-size:17px"),
choices = list("Validity + quota", "Roaming" , "VAS" , "Unlimited" , "Voice", "Sms",
"Bonus/promotion"),
),
selectInput("SOP", label = h2("SOP (Scheme of Package)", style="font-size:17px"),
choices = list("1" , "2" , "3" , "4" , "5", "6")
),
# submitButton defers ALL reactive updates until "Check" is pressed.
submitButton("Check"),
),mainPanel(
# Results area: the output IDs here ('result', 'datafungsi', 'datafung_SF',
# 'table') are rendered by the matching output$... bindings in the server.
strong(h1("Market Fit Testing", style="font-style:bold; color:black ")),
fluidRow(
box(h3("Smartfren New Package"),width = 15,status = "primary" ,div(style = 'overflow-x: scroll', DT::dataTableOutput('result')))
),
fluidRow(
box(h3("Comparison"), br(), status = "primary", width = 15 ,
(strong(uiOutput('datafungsi', style="font-size:17px"))
))),
fluidRow(
box(h3("Smartfren Package"),br(), status = "primary", width = 15,
(strong(uiOutput('datafung_SF', style="font-size:17px")))
)),
fluidRow(
box(h3("Competitor Package"),status = "primary",width = 15 ,div(style = 'overflow-x: scroll', DT::dataTableOutput('table')))
),
#plotOutput("bar",height = 500)
)))
)
server <- function(input, output) {
# Assemble the user's form inputs into a one-row data frame describing the
# proposed package; re-evaluates whenever any referenced input changes.
# Note: data.frame()'s default check.names = TRUE converts the quoted names
# with spaces ("Package Name", ...) into dotted names (Package.Name, ...).
Data = reactive({
df <- data.frame("Package Name"=input$package_name_detail, "Status"=input$status, "Package Category"=input$pc, #"Launch Date"=as.character(input$date),
"Package Activity Status"=input$pas, "Package Price"=input$package_price,
"Package Type 1"=input$pt1, "Package Type 2"=input$pt2, "Package Validity"=input$package_validity, "VAS"=input$vas, "Type of VAS"=input$tvas, "Package Business Category"=input$pbc,
"Category Business"=input$category_business, "SOP"=input$SOP)
return(list(df=df))
})
# Echo the proposed package back to the user as a one-row table.
output$result <- DT::renderDataTable({
DT::datatable(Data()$df)
})
# Load the catalogue of existing Smartfren packages from disk.
# Returned as list(SF = <data.frame>) so callers access datamatch_SF()$SF.
datamatch_SF <- reactive({
  list(SF = read.csv("SF.csv", header = TRUE, sep = ";"))
})
# Decide whether the proposed package duplicates an existing Smartfren
# package (same price, validity, VAS flag and SOP) and return an HTML
# fragment for renderText: red when a duplicate exists, green otherwise.
datafungsi_SF <- reactive({
  SF <- datamatch_SF()$SF
  # Vectorised row-wise comparison. The original looped over
  # 1:length(SF[,1]) (which indexes row 1 of an *empty* catalogue via 1:0),
  # grew a flag vector with c() on every iteration, printed debug output
  # each pass, and crashed with if(NA) when any compared cell was NA.
  # isTRUE() maps both FALSE and NA to "not a duplicate".
  already_exists <- isTRUE(any(
    input$package_price == SF$package_price &
      input$package_validity == SF$package_validity &
      input$vas == SF$vas &
      input$SOP == SF$SOP
  ))
  if (already_exists) {
    paste("<font color=\"#FF0000\">","This Package Already Exist","</font>")
  } else {
    paste("<font color=\"#00b300\">","New Package for Smartfren","</font>")
  }
})
# Render the duplicate-check verdict produced by datafungsi_SF().
# print() both logs the HTML string to the console and returns it, so the
# same value reaches the UI.
output$datafung_SF <- renderText({
print(datafungsi_SF())
})
# Load the competitor-package catalogue (OC.csv) and pre-compute the 56
# price-band x business-category slices that datamatch() looks up by name.
#
# Slice naming is kept byte-identical to the original hand-written version
# (56 copy-pasted filter statements) so every datamatch() OC_<n> lookup
# keeps working:
#   * eight price bands: <=20k, 20-50k, 50-75k, 75-100k, 100-150k,
#     150-200k, 200-400k, >400k
#   * band r (1..8) with main category c (1..6, in the order of main_cats
#     below) -> OC_<(r-1)*6 + c>
#   * "Bonus/promotion" in band r -> OC_<48 + r>
datasetInput <- reactive({
  OC <- read.csv(file = 'OC.csv', header = T, sep = ";")
  # Columns shown in the comparison table (same set for every slice).
  keep_cols <- c("Operator", "Package.Name.Purchase", "Package.customer.type",
                 "Package.Price", "Package.Type.1", "Package.Type.2",
                 "Package.Validity", "Value.added.Service.VAS..",
                 "Category.Business", "SOP")
  # Band r covers (price_breaks[r], price_breaks[r + 1]]; -Inf/Inf reproduce
  # the open-ended first (<=20000) and last (>400000) bands.
  price_breaks <- c(-Inf, 20000, 50000, 75000, 100000, 150000, 200000,
                    400000, Inf)
  main_cats <- c("Validity + quota", "Roaming", "VAS", "Unlimited",
                 "Voice", "Sms")
  slice <- function(lo, hi, cat) {
    OC %>%
      filter(Package.Price > lo, Package.Price <= hi,
             Category.Business == cat) %>%
      select(all_of(keep_cols))
  }
  out <- list(OC = OC)
  for (r in seq_len(8)) {
    lo <- price_breaks[r]
    hi <- price_breaks[r + 1]
    for (ci in seq_along(main_cats)) {
      out[[paste0("OC_", (r - 1) * 6 + ci)]] <- slice(lo, hi, main_cats[ci])
    }
    out[[paste0("OC_", 48 + r)]] <- slice(lo, hi, "Bonus/promotion")
  }
  return(out)
})
# Map the proposed package's price and business category onto the matching
# pre-computed competitor slice from datasetInput(). Replaces the original
# ~115-line if/else chain with the same band/category arithmetic that
# datasetInput() uses to name its slices, so the two stay in sync by
# construction.
datamatch <- reactive({
  if (input$package_price == 0) {
    # Original behaviour preserved: print() logs "no data" to the console
    # and returns the string, which becomes b.
    b <- print("no data")
  } else {
    main_cats <- c("Validity + quota", "Roaming", "VAS", "Unlimited",
                   "Voice", "Sms")
    # left.open = TRUE makes each band (lo, hi], matching the original
    # "> lo & <= hi" comparisons: a price of exactly 20000 falls in band 1,
    # 400000 in band 7, anything above 400000 in band 8, and any price
    # below 20000 (including negatives) in band 1.
    band <- findInterval(input$package_price,
                         c(20000, 50000, 75000, 100000, 150000, 200000,
                           400000),
                         left.open = TRUE) + 1
    if (input$category_business == "Bonus/promotion") {
      idx <- 48 + band
    } else {
      idx <- (band - 1) * 6 + match(input$category_business, main_cats)
    }
    b <- datasetInput()[[paste0("OC_", idx)]]
  }
  return(list(b = b))
})
# Show the matching competitor slice (or the "no data" string) as a table.
output$table <- DT::renderDataTable({
DT::datatable(datamatch()$b)
})
# Disabled draft of a per-operator package-count bar chart (see the TODO at
# the bottom of the file). Left commented out by the original author.
# output$bar <- renderPlot({
# color <- c("blue", "red","yellow", "green", "pink")
# our_data <- datamatch()$b
# barplot(height=count(datamatch()$b,datamatch()$b$Operator), names=datamatch()$b$Operator,
# col=color,
# xlab="Operator",
# ylab="Values",
# main="My title",
# ylim=c(0,20)
# )
#})
# Score the proposed package against every competitor row in datamatch()$b.
# Each row contributes up to 4 points (price, validity, VAS, SOP); the row
# counts as "won" when it earns more than 2 points, and the package is
# declared "Fit to the market" when at least 50% of rows are won.
# NOTE(review): when input$package_price == 0, datamatch()$b is the string
# "no data", so datamatch()$b[,1] below raises an "incorrect number of
# dimensions" error -- confirm how the zero-price case should render.
datafungsi <- reactive({
if(#dim(b)[1]==0 #using dimension
length(datamatch()$b[,1])==0){#using length
print("There are No Similiar Competitor Package")
}else{
total_poin = 0;
for(i in 1: length(datamatch()$b[,1])){
temp_poin = 0
#print(b[i,]$Package.Price)
#print(length(b$Package.Price))
# Price point: proposed package is no more expensive than the competitor.
if(input$package_price <= datamatch()$b[i,]$Package.Price){
temp_poin = temp_poin+1
}
else{
temp_poin = temp_poin+0
}
# Validity point: proposed package lasts at least as long as the competitor.
if(input$package_validity >= datamatch()$b[i,]$Package.Validity){
temp_poin = temp_poin+1
}
# VAS point.
# NOTE(review): a "Yes" VAS on the new package scores a point regardless of
# whether the competitor row also offers VAS -- confirm this asymmetry is
# intended.
if(input$vas=="Yes"){
temp_poin = temp_poin +1
}else if(input$vas=="No" & datamatch()$b[i,]$Value.added.Service.VAS..=="No"){
temp_poin = temp_poin +1
}
# SOP point.
# NOTE(review): the final branch awards a point whenever the SOPs merely
# differ (and the competitor's SOP is not 6), so together these branches
# grant the SOP point for almost every row -- verify the intended rules.
if(input$SOP == datamatch()$b[i,]$SOP){
temp_poin = temp_poin +1
}else if(input$SOP == 6){
temp_poin = temp_poin +1
}else if(input$SOP == 3 & datamatch()$b[i,]$SOP ==2){
temp_poin = temp_poin +1
}else if(input$SOP == 2 & datamatch()$b[i,]$SOP ==1){
temp_poin = temp_poin +1
}else if(input$SOP == 3 & datamatch()$b[i,]$SOP ==1){
temp_poin = temp_poin +1
}else if(input$SOP != datamatch()$b[i,]$SOP & datamatch()$b[i,]$SOP !=6){
temp_poin = temp_poin +1
}
# A competitor row is "won" with 3 or 4 points.
if(temp_poin > 2){
total_poin = total_poin +1
}
}
# Build the verdict string with the win percentage; HTML() marks it so
# uiOutput renders the <br> tags instead of escaping them.
tp <- paste0(input$package_name_detail," total point is ", total_poin, " out of ",length(datamatch()$b[,1]))
prcnt <- (total_poin / length(datamatch()$b[,1]))*100
if(total_poin / length(datamatch()$b[,1])>= 0.5){
HTML(paste0(tp, br(), br(),"Fit to the market ",sprintf(prcnt, fmt = '%#.2f'),"%",br(), br(), "Estimated number of customer : Not Available"))
#print(total_poin / length(datamatch()$b[,1])," %")
}else if(total_poin/length(datamatch()$b[,1])<0.5){
HTML(paste0(tp, br(), br(),"Not Fit to the market ",sprintf(prcnt, fmt = '%#.2f'),"%",br(), br(), "Estimated number of customer : Not Available"))
#print(total_poin / length(datamatch()$b[,1])," %")
}
}
})
# Render the market-fit verdict produced by datafungsi(); print() logs the
# string to the console and returns it to the UI.
output$datafungsi <- renderText({
print(datafungsi())
})
}
#maybe kasih bar chart tentang count package per competitor
shinyApp(ui, server) |
f47d1fa92a215575a381f6aa0aa375f281b9da12 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/TSPred/examples/ipeadata_m.Rd.R | bd0719a66927d6016a2fcd63ddae5b8cbb3515d0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 254 | r | ipeadata_m.Rd.R | library(TSPred)
### Name: ipeadata_m
### Title: The Ipea Most Requested Dataset (monthly)
### Aliases: ipeadata_m ipeadata_m.cont
### Keywords: datasets ipeadata time series
### ** Examples
# Load the packaged monthly Ipea dataset, inspect its structure, and plot
# the first column as a time series.
data(ipeadata_m)
str(ipeadata_m)
plot(ts(ipeadata_m[1]))
|
515b0a282c5dc3c9409de3943c434cc7812cb2ba | 739d5a00e7321b875de4196b56c716fb0c38aa84 | /R/TextReuseCorpus.R | 9a45fd2cce6e65cb6662a753cef1b640144d5951 | [
"MIT"
] | permissive | Sandy4321/textreuse | 244d6aa1ddec34c486e54e25bff72ea92f93cd7b | d0aa6a50ced8d05ac1894a5fba01f9b9210160b2 | refs/heads/master | 2021-01-15T11:19:52.421585 | 2015-10-28T15:46:26 | 2015-10-28T15:46:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,087 | r | TextReuseCorpus.R | #' TextReuseCorpus
#'
#' This is the constructor function for a \code{TextReuseCorpus}, modeled on the
#' virtual S3 class \code{\link[tm]{Corpus}} from the \code{tm} package. The
#' object is a \code{TextReuseCorpus}, which is basically a list containing
#' objects of class \code{\link{TextReuseTextDocument}}. Arguments are passed
#' along to that constructor function. To create the corpus, you can pass either
#' a character vector of paths to text files using the \code{paths =} parameter,
#' a directory containing text files (with any extension) using the \code{dir =}
#' parameter, or a character vector of documents using the \code{text = }
#' parameter, where each element in the characer vector is a document. If the
#' character vector passed to \code{text = } has names, then those names will be
#' used as the document IDs. Otherwise, IDs will be assigned to the documents.
#' Only one of the \code{paths}, \code{dir}, or \code{text} parameters should be
#' specified.
#'
#' @details If \code{skip_short = TRUE}, this function will skip very short or
#' empty documents. A very short document is one where there are two few words
#' to create at least two n-grams. For example, if five-grams are desired,
#' then a document must be at least six words long. If no value of \code{n} is
#' provided, then the function assumes a value of \code{n = 3}. A warning will
#' be printed with the document ID of each skipped document.
#'
#' @param paths A character vector of paths to files to be opened.
#' @param dir The path to a directory of text files.
#' @param text A character vector (possibly named) of documents.
#' @param meta A list with named elements for the metadata associated with this
#' corpus.
#' @param progress Display a progress bar while loading files.
#' @param tokenizer A function to split the text into tokens. See
#' \code{\link{tokenizers}}. If value is \code{NULL}, then tokenizing and
#' hashing will be skipped.
#' @param ... Arguments passed on to the \code{tokenizer}.
#' @param hash_func A function to hash the tokens. See
#' \code{\link{hash_string}}.
#' @param keep_tokens Should the tokens be saved in the documents that are
#' returned or discarded?
#' @param keep_text Should the text be saved in the documents that are returned
#' or discarded?
#' @param skip_short Should short documents be skipped? (See details.)
#'
#' @examples
#' dir <- system.file("extdata/legal", package = "textreuse")
#' corpus <- TextReuseCorpus(dir = dir, meta = list("description" = "Field Codes"))
#' # Subset by position or file name
#' corpus[[1]]
#' names(corpus)
#' corpus[["ca1851-match"]]
#'
#' @export
TextReuseCorpus <- function(paths, dir = NULL, text = NULL, meta = list(),
                            progress = interactive(),
                            tokenizer = tokenize_ngrams, ...,
                            hash_func = hash_string,
                            keep_tokens = FALSE,
                            keep_text = TRUE,
                            skip_short = TRUE) {
  # Capture the tokenizer/hash function names for the metadata; a NULL
  # tokenizer means "load only" (no tokenizing or hashing).
  if (!is.null(tokenizer)) {
    assert_that(is.function(tokenizer),
                is.function(hash_func))
    tokenizer_name <- as.character(substitute(tokenizer))
    hash_func_name <- as.character(substitute(hash_func))
    loading_msg <- "Loading, tokenizing, and hashing "
  } else {
    tokenizer_name <- NULL
    hash_func_name <- NULL
    loading_msg <- "Loading "
  }
  # If we get a character vector of documents, use that; otherwise load
  # the files from disk.
  if (!missing(text)) {
    assert_that(missing(paths),
                is.null(dir),
                is.character(text))
    if (progress) {
      len <- length(text)
      message(loading_msg, prettyNum(len, big.mark = ","), " documents.")
      pb <- txtProgressBar(min = 0, max = len, style = 3)
    }
    # Unnamed documents get sequential IDs. seq_along() (not 1:length())
    # keeps this correct for a zero-length input.
    if (is.null(names(text)))
      names(text) <- str_c("doc-", seq_along(text))
    docs <- lapply(seq_along(text), function(i) {
      d <- TextReuseTextDocument(text = text[i],
                                 tokenizer = tokenizer, ...,
                                 hash_func = hash_func,
                                 keep_tokens = keep_tokens,
                                 keep_text = keep_text,
                                 skip_short = skip_short,
                                 meta = list(id = names(text)[i],
                                             tokenizer = tokenizer_name,
                                             hash_func = hash_func_name))
      if (progress) setTxtProgressBar(pb, i)
      d
    })
    if (progress) close(pb)
    names(docs) <- names(text)
  } else {
    # Expand a directory to its files. This is a scalar `if` condition, so
    # use the short-circuiting && rather than the element-wise &.
    if (missing(paths) && !is.null(dir)) {
      assert_that(is.dir(dir))
      paths <- Sys.glob(str_c(dir, "/*"))
    }
    # NOTE(review): this vapply's result is discarded, so unreadable paths
    # are only caught if is.readable() itself signals -- confirm intended.
    vapply(paths, is.readable, logical(1), USE.NAMES = FALSE)
    if (progress) {
      len <- length(paths)
      message(loading_msg, prettyNum(len, big.mark = ","), " documents.")
      pb <- txtProgressBar(min = 0, max = len, style = 3)
    }
    docs <- lapply(seq_along(paths), function(i) {
      d <- TextReuseTextDocument(file = paths[i], tokenizer = tokenizer, ...,
                                 hash_func = hash_func,
                                 keep_tokens = keep_tokens,
                                 keep_text = keep_text,
                                 skip_short = skip_short,
                                 meta = list(tokenizer = tokenizer_name,
                                             hash_func = hash_func_name))
      if (progress) setTxtProgressBar(pb, i)
      d
    })
    if (progress) close(pb)
    names(docs) <- filenames(paths)
  }
  # Filter documents that were skipped because they were too short
  if (skip_short) docs <- Filter(Negate(is.null), docs)
  assert_that(is.list(meta))
  meta$tokenizer <- tokenizer_name
  meta$hash_func <- hash_func_name
  if (!is.null(names(meta))) meta <- sort_meta(meta)
  corpus <- list(documents = docs, meta = meta)
  class(corpus) <- c("TextReuseCorpus", "Corpus")
  corpus
}
#' @export
meta.TextReuseCorpus <- function(x, tag = NULL, ...) {
  # With no tag, return the corpus's whole metadata list; with a tag,
  # return just that element.
  all_meta <- x$meta
  if (!is.null(tag)) {
    return(all_meta[[tag]])
  }
  all_meta
}
#' @export
`meta<-.TextReuseCorpus` <- function(x, tag = NULL, ..., value) {
  # Replace a single metadata element when a tag is given; otherwise
  # replace the whole metadata list (which must itself be a list).
  if (!is.null(tag)) {
    x$meta[[tag]] <- value
    return(x)
  }
  assert_that(is.list(value))
  x$meta <- value
  x
}
#' @export
print.TextReuseCorpus <- function(x, ...) {
  # Console summary: header line, document count, then the corpus metadata
  # via the package-internal pretty_print_metadata() helper.
  cat("TextReuseCorpus\n")
  cat("Number of documents:", length(x), "\n")
  pretty_print_metadata(x)
}
#' @export
length.TextReuseCorpus <- function(x) {
  # A corpus's length is its number of documents.
  n_docs <- length(x$documents)
  n_docs
}
#' @export
`[.TextReuseCorpus` <- function(x, i) {
  # Subset the document list while keeping the corpus wrapper, its class
  # and its metadata intact.
  kept <- x$documents[i]
  x$documents <- kept
  x
}
#' @export
`[[.TextReuseCorpus` <- function(x, i) {
  # Extract a single document by position or by ID.
  docs <- x$documents
  docs[[i]]
}
#' @export
names.TextReuseCorpus <- function(x) {
  # Document IDs are the names of the underlying document list.
  doc_ids <- names(x$documents)
  doc_ids
}
#' @export
`names<-.TextReuseCorpus` <- function(x, value) {
  # Rename the documents; the corpus wrapper itself is unchanged.
  renamed <- x$documents
  names(renamed) <- value
  x$documents <- renamed
  x
}
#' @param x An R object to check.
#' @export
#' @rdname TextReuseCorpus
is.TextReuseCorpus <- function(x) {
  # TRUE when `x` carries the TextReuseCorpus S3 class.
  result <- inherits(x, "TextReuseCorpus")
  result
}
|
e8191a60c3862798ae9a5fe96d27c1905bf7d1e8 | b13af52836ab966bdb7c41553a4558f6ecdb3497 | /02_scripts/02_clean_names.R | 5e9d08cacfa5f5725024192aebe94ed23b54a992 | [] | no_license | mylinhluong/fictional-happiness | efe23601cd017106c2be1cf1c924d9b787f542b4 | 40033264d24043db3cd18737fd227b8c9a19dd71 | refs/heads/master | 2023-01-19T14:06:16.018919 | 2020-11-22T01:02:34 | 2020-11-22T01:02:34 | 233,969,999 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,075 | r | 02_clean_names.R | #this script selects specific variables for self-report (baseline & follow-up) data and IATs, and renames variables
library(dplyr)
#NOTE(review): here() is used below but the here package is not loaded in
#this script -- presumably attached by an earlier script in the pipeline;
#confirm. self_report and IAT are likewise expected to already exist.
#Create a .txt file within the errors folder
clean_names_02 <- file(here("02_scripts","Errors", "02_clean_names.txt"), open = "wt")
#Redirect messages/warnings into the log file so data issues are captured.
sink(clean_names_02, type = "message")
#####selecting columns for self-report data at baseline and follow-up#####
##baseline (baseline columns carry the .x suffix from an earlier join)
self_report_baseline<-self_report%>%
  select(group,breq3_id1:breq3_ext4,selfefficacy_1:selfefficacy_6, attitudes_i1:attitudes_a2, srbai1:srbai4,
         intention1a:intention_strength, pain_side, pain_both,pain_nrsl_v2.x,
         ipaq_sl_job.x:ipaq_sl_semin.x, age_year:sociodems_post)
#View(self_report_baseline)
##follow-up (follow-up columns carry the .y suffix)
self_report_followup<-self_report%>%
  select(group, pain_nrsl_v2.y:ipaq_sl_semin.y)
#View(self_report_followup)
#####re-naming variables for IPAQ#####
#Map raw IPAQ item codes to descriptive physical-activity variable names.
##baseline
self_report_baseline_IPAQ<-select(self_report_baseline,group=group,
                                  PA_JOB_UNPAID_WRK=ipaq_sl_job.x,
                                  PA_WRK_VIG_FREQ= ipaq_sl_ovday.x, PA_WRK_VIG_TIME_HR=ipaq_sl_ovdhrs.x, PA_WRK_VIG_TIME_MIN=ipaq_sl_ovdmin.x,
                                  PA_WRK_MOD_FREQ=ipaq_sl_omday.x, PA_WRK_MOD_TIME_HR=ipaq_sl_omdhrs.x, PA_WRK_MOD_TIME_MIN=ipaq_sl_omdmin.x,
                                  PA_WRK_WALK_FREQ=ipaq_sl_owday.x, PA_WRK_WALK_TIME_HR=ipaq_sl_owdhrs.x, PA_WRK_WALK_TIME_MIN=ipaq_sl_owdmin.x,
                                  PA_TRANS_FREQ=ipaq_sl_tmday.x, PA_TRANS_TIME_HOUR=ipaq_sl_tmdhrs.x, PA_TRANS_TIME_MIN=ipaq_sl_tmdmin.x,
                                  PA_CYCLING_FREQ=ipaq_sl_tbday.x, PA_CYCLING_TIME_HR=ipaq_sl_tbwhrs.x, PA_CYCLING_TIME_MIN=ipaq_sl_tbwmin.x,
                                  PA_TRANS_WALK_FREQ=ipaq_sl_twday.x, PA_TRANS_WALK_TIME_HR=ipaq_sl_twdhrs.x, PA_TRANS_WALK_TIME_MIN=ipaq_sl_twdmin.x,
                                  PA_GARDEN_VIG_FREQ=ipaq_sl_gvday.x, PA_GARDEN_VIG_TIME_HR=ipaq_sl_gvdhrs.x, PA_GARDEN_VIG_TIME_MIN=ipaq_sl_gvmin.x,
                                  PA_GARDEN_MOD_FREQ=ipaq_sl_gmday.x, PA_GARDEN_MOD_TIME_HR=ipaq_sl_gmdhrs.x, PA_GARDEN_MOD_TIME_MIN=ipaq_sl_gmdmin.x,
                                  PA_INSIDE_MOD_FREQ=ipaq_sl_hmday.x, PA_INSIDE_MOD_HR=ipaq_sl_hmdhrs.x, PA_INSIDE_MOD_MIN=ipaq_sl_hmdmin.x,
                                  PA_LEISURE_WALK_FREQ=ipaq_sl_lwday.x, PA_LEISURE_WALK_TIME_HR=ipaq_sl_lwdhrs.x, PA_LEISURE_WALK_TIME_MIN=ipaq_sl_lwdmin.x,
                                  PA_LEISURE_VIG_FREQ=ipaq_sl_lvday.x, PA_LEISURE_VIG_TIME_HR=ipaq_sl_lvdhrs.x, PA_LEISURE_VIG_TIME_MIN=ipaq_sl_lvdmin.x,
                                  PA_LEISURE_MOD_FREQ=ipaq_sl_lmday.x, PA_LEISURE_MOD_TIME_HR=ipaq_sl_lmdhrs.x, PA_LEISURE_MOD_TIME_MIN=ipaq_sl_lmdmin.x)
##follow-up (same mapping, applied to the .y follow-up columns)
self_report_followup_IPAQ<-select(self_report_followup,group=group,
                                  PA_JOB_UNPAID_WRK=ipaq_sl_job.y,
                                  PA_WRK_VIG_FREQ= ipaq_sl_ovday.y, PA_WRK_VIG_TIME_HR=ipaq_sl_ovdhrs.y, PA_WRK_VIG_TIME_MIN=ipaq_sl_ovdmin.y,
                                  PA_WRK_MOD_FREQ=ipaq_sl_omday.y, PA_WRK_MOD_TIME_HR=ipaq_sl_omdhrs.y, PA_WRK_MOD_TIME_MIN=ipaq_sl_omdmin.y,
                                  PA_WRK_WALK_FREQ=ipaq_sl_owday.y, PA_WRK_WALK_TIME_HR=ipaq_sl_owdhrs.y, PA_WRK_WALK_TIME_MIN=ipaq_sl_owdmin.y,
                                  PA_TRANS_FREQ=ipaq_sl_tmday.y, PA_TRANS_TIME_HOUR=ipaq_sl_tmdhrs.y, PA_TRANS_TIME_MIN=ipaq_sl_tmdmin.y,
                                  PA_CYCLING_FREQ=ipaq_sl_tbday.y, PA_CYCLING_TIME_HR=ipaq_sl_tbwhrs.y, PA_CYCLING_TIME_MIN=ipaq_sl_tbwmin.y,
                                  PA_TRANS_WALK_FREQ=ipaq_sl_twday.y, PA_TRANS_WALK_TIME_HR=ipaq_sl_twdhrs.y, PA_TRANS_WALK_TIME_MIN=ipaq_sl_twdmin.y,
                                  PA_GARDEN_VIG_FREQ=ipaq_sl_gvday.y, PA_GARDEN_VIG_TIME_HR=ipaq_sl_gvdhrs.y, PA_GARDEN_VIG_TIME_MIN=ipaq_sl_gvmin.y,
                                  PA_GARDEN_MOD_FREQ=ipaq_sl_gmday.y, PA_GARDEN_MOD_TIME_HR=ipaq_sl_gmdhrs.y, PA_GARDEN_MOD_TIME_MIN=ipaq_sl_gmdmin.y,
                                  PA_INSIDE_MOD_FREQ=ipaq_sl_hmday.y, PA_INSIDE_MOD_HR=ipaq_sl_hmdhrs.y, PA_INSIDE_MOD_MIN=ipaq_sl_hmdmin.y,
                                  PA_LEISURE_WALK_FREQ=ipaq_sl_lwday.y, PA_LEISURE_WALK_TIME_HR=ipaq_sl_lwdhrs.y, PA_LEISURE_WALK_TIME_MIN=ipaq_sl_lwdmin.y,
                                  PA_LEISURE_VIG_FREQ=ipaq_sl_lvday.y, PA_LEISURE_VIG_TIME_HR=ipaq_sl_lvdhrs.y, PA_LEISURE_VIG_TIME_MIN=ipaq_sl_lvdmin.y,
                                  PA_LEISURE_MOD_FREQ=ipaq_sl_lmday.y, PA_LEISURE_MOD_TIME_HR=ipaq_sl_lmdhrs.y, PA_LEISURE_MOD_TIME_MIN=ipaq_sl_lmdmin.y)
#####selecting columns for IATs#####
#Keep the IAT columns of interest and rename the grouping column.
IAT<-IAT%>%
  select(group:expressions.percentcorrect)%>%
  rename(subject=group)
#end of script
#close the error message catching script and save the file
sink(type = "message")
close(clean_names_02)
#Open the .txt file for inspection
readLines(here("02_scripts","Errors", "02_clean_names.txt"))
|
0bacc31597e58fc6cdf2e74e6c4cc8932eb48855 | 131fe3dfba973e0cd9664bfb581b06dbc6d1f300 | /man/israeli_survey.Rd | 04ace04e8e7ff6c384268c3c22b7e0a784557fd2 | [
"MIT"
] | permissive | aczepielik/CrossTabCluster | d61592ec6a78584ee087abd86181f0fa0721d056 | bdb4719502e09641cb38fb163207cebc44b7733a | refs/heads/master | 2022-11-11T00:18:08.822129 | 2020-06-13T14:02:34 | 2020-06-13T14:02:34 | 268,132,246 | 0 | 0 | NOASSERTION | 2020-06-13T13:10:13 | 2020-05-30T17:54:04 | R | UTF-8 | R | false | true | 380 | rd | israeli_survey.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{israeli_survey}
\alias{israeli_survey}
\title{Principal worries of 1554 Israeli survey participants}
\format{The 8x5 contingency table}
\source{
Guttman
}
\usage{
israeli_survey
}
\description{
Principal worries of 1554 Israeli survey participants
}
\keyword{datasets}
|
eac629fd96913f8a2ff5c5e482fe40a384b39354 | bcce8dcb8fb272b7a5bbfa226dd877b53212413b | /predict-admission/explore/explore-flowsheet-IDs.R | 66166a29dcaf9ea70ea17a1a41fd2f81349a37a6 | [
"BSD-3-Clause"
] | permissive | joefarrington/EDcrowding | b04eb9e3c328f7ab2d251e70eb5a6427a4a5566b | 59e664da4ff3db9a21bde89d1e909ea3d829f8ed | refs/heads/master | 2023-05-05T13:59:37.230430 | 2021-05-05T18:20:14 | 2021-05-05T18:20:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 868 | r | explore-flowsheet-IDs.R |
# Load libraries
# ==============
library(DBI)
library(dplyr)
library(tidyverse)
library(lubridate)
library(data.table)
# Set up connection
ctn <- DBI::dbConnect(RPostgres::Postgres(),
host = Sys.getenv("UDS_HOST"),
port = 5432,
user = Sys.getenv("UDS_USER"),
password = Sys.getenv("UDS_PWD"),
dbname = "uds")
# Get data ---------------------------------------------------------
# all patients in ED now
sqlQuery <- "select distinct flowsheet_value_key, flowsheet_row_name
from covid_staging.flowsheet"
sqlQuery %>% gsub('\n','',sqlQuery)
covid_staging_flowsheet_IDs <- data.table(dbGetQuery(ctn, sqlQuery))
# load("~/Devart/covid_staging_flowsheet_IDs.rda")
save(covid_staging_flowsheet_IDs, file = "~/Devart/covid_staging_flowsheet_IDs.rda")
|
14d726627ec56f2747c9d4a216ff946cd440bae5 | 4d252ade9889ca4860102a3a2e76cdee1acf10f9 | /man/Alroy_3timers.Rd | f300bff27496bc75dcd11e92b65817636969e090 | [] | no_license | josteist/Compadre | 78a42e1a7c22b3ff094014cf7d93b959020f985f | 9b59dd2fa7e8f5c307f76dbd2186fc6e1b8cf106 | refs/heads/master | 2021-06-10T23:37:11.999007 | 2021-05-29T06:23:21 | 2021-05-29T06:23:21 | 171,682,480 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 501 | rd | Alroy_3timers.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Alroy_3timers.R
\name{Alroy_3timers}
\alias{Alroy_3timers}
\title{Function to calculate Alroy's three-timer rates of macroevolutionary change}
\usage{
Alroy_3timers(Obs, dts = rep(1, dim(Obs)[2]))
}
\arguments{
\item{Obs}{matrix of observations (taxa by time)}
\item{dts}{vector of interval duration (defaults to 1)}
}
\value{
}
\description{
Function to calculate Alroy's three-timer rates of macroevolutionary change
}
|
9b8c5ea108f8c1ad032002a955db251dea6257e5 | 7224813a0f5d032aed634a637328e87030dd070d | /man/prefabricated_datasets.Rd | eeb25c5d1a8b7d545f7703fe00f3cd304b78eb7f | [
"MIT"
] | permissive | ethanwhite/portalcasting | 3d164472a54e5ae746a668cc848e813e4b795eaf | 03bd42b5a01536a9178e9aeb47046847acda9984 | refs/heads/master | 2023-04-30T20:59:54.178447 | 2022-06-01T00:13:25 | 2022-06-01T00:13:25 | 157,772,725 | 0 | 0 | MIT | 2018-11-15T21:06:31 | 2018-11-15T21:06:31 | null | UTF-8 | R | false | true | 713 | rd | prefabricated_datasets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prefab_rodents_datasets.R
\name{prefabricated_datasets}
\alias{prefabricated_datasets}
\alias{prefab_datasets}
\alias{prefab_dataset_controls}
\title{Provide the Names or Controls for the Prefab Rodent Datasets}
\usage{
prefab_datasets()
prefab_dataset_controls()
}
\value{
\code{prefab_datasets}: \code{character} vector of dataset names. \cr
\code{prefab_dataset_controls}: \code{list} vector of dataset controls. \cr
}
\description{
Create a \code{character} vector of the names of the pre-fabricated (prefab) rodent datasets or a \code{list} of their controls
}
\examples{
prefab_datasets()
prefab_dataset_controls()
}
|
7e9bfa22efb1f099b1fab19cce8f9fcace34049c | 85bb84e6717c8af7097712226831f3f26ca54eb0 | /03/SRCR/TP3/exaustao.R | eb29c8a30e6f4b24d793667cd17bd01338984af9 | [
"MIT"
] | permissive | rgllm/uminho | 8aec0b5b57acb8839b00547a63ecd2e00f323a28 | 1c535a72fcc56a263f301af729ec3039dfbb1e34 | refs/heads/master | 2021-01-17T18:02:30.723706 | 2020-08-16T20:52:21 | 2020-08-16T20:52:21 | 64,925,506 | 2 | 3 | null | null | null | null | ISO-8859-1 | R | false | false | 16,228 | r | exaustao.R | # Import das bibliotecas necessárias
library("neuralnet")
library("hydroGOF")
library("arules")
# Read the data file (normalized, shuffled exhaustion/fatigue dataset).
dataset <- read.csv("C:\\Users\\perei\\Desktop\\exaustao-norm-rand.csv", header=TRUE, sep=";", dec=".")
# Discretization of the attribute values -- used for the report only.
#dataset$Performance.KDTMean <- discretize(dataset$Performance.KDTMean,method="frequency", categories=10)
#dataset$Performance.MAMean <- discretize(dataset$Performance.MAMean,method="frequency", categories=10)
#dataset$Performance.MVMean <- discretize(dataset$Performance.MVMean,method="frequency", categories=10)
#dataset$Performance.TBCMean <- discretize(dataset$Performance.TBCMean,method="frequency", categories=10)
#dataset$Performance.DDCMean <- discretize(dataset$Performance.DDCMean,method="frequency", categories=10)
#dataset$Performance.ADMSLMean <- discretize(dataset$Performance.ADMSLMean,method="frequency", categories=10)
#dataset$Performance.DMSMean <- discretize(dataset$Performance.DMSMean,method="frequency", categories=10)
#dataset$Performance.AEDMean <- discretize(dataset$Performance.AEDMean,method="frequency", categories=10)
#write.csv(dataset, "exaustao-discretizada.csv")
# Learning splits of several sizes, taken from the start of the dataset.
dados1 <- dataset[1:600,] # rows 1-600 (original comment said 633)
dados2 <- dataset[1:422,] # 422 rows
dados3 <- dataset[1:211,] # 211 rows
dados4 <- dataset[1:140,] # 140 rows
# Matching evaluation splits: the remaining rows, up to row 844. These are
# passed as the `treino` argument of exaustao()/task() below, where they
# are used for testing the fitted networks.
treino1 <- dataset[601:844,] # 244 rows (original comment said 211)
treino2 <- dataset[423:844,] # 422 rows
treino3 <- dataset[212:844,] # 633 rows (original comment said 634)
treino4 <- dataset[141:844,] # 704 rows
# Column subsets of the evaluation splits: task + selected metrics + target.
sub41 <-subset(treino1, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","FatigueLevel"))
sub42 <-subset(treino2, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","FatigueLevel"))
sub51 <-subset(treino1, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","Performance.KDTMean","FatigueLevel"))
sub52 <-subset(treino2, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","Performance.KDTMean","FatigueLevel"))
sub61 <-subset(treino1, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","Performance.DMSMean","Performance.ADMSLMean","FatigueLevel"))
sub62 <-subset(treino2, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","Performance.DMSMean","Performance.ADMSLMean","FatigueLevel"))
sub71 <-subset(treino1, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","Performance.DMSMean","Performance.ADMSLMean","Performance.KDTMean","FatigueLevel"))
sub72 <-subset(treino2, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","Performance.DMSMean","Performance.ADMSLMean","Performance.KDTMean","FatigueLevel"))
sub81 <-subset(treino1, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","Performance.DMSMean","Performance.ADMSLMean","Performance.KDTMean","Performance.TBCMean","FatigueLevel"))
sub82 <-subset(treino2, select=c("Performance.Task","Performance.DDCMean","Performance.MAMean","Performance.MVMean","Performance.DMSMean","Performance.ADMSLMean","Performance.KDTMean","Performance.TBCMean","FatigueLevel"))
# Candidate hidden-layer architectures.
hidden1 <- c(60, 40, 20)
hidden2 <- c(120, 80, 40)
hidden3 <- c(6, 4, 2)
hidden4 <- c(10,5)
hidden5 <- c(20)
# Candidate convergence tolerances (thresholds).
threshold1 <- 0.01
threshold2 <- 0.1
# Candidate model formulas: each regresses FatigueLevel (or, for
# formulaTask, Performance.Task) on a different subset of the metrics.
formula1 <- FatigueLevel ~ Performance.KDTMean+Performance.MAMean+Performance.MVMean+Performance.TBCMean+Performance.DDCMean+Performance.DMSMean+Performance.AEDMean+Performance.ADMSLMean+Performance.Task
formula4NNR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean
formula5NNR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean+Performance.KDTMean
formula6NNR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean+Performance.DMSMean+Performance.ADMSLMean
formula7NNR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean+Performance.DMSMean+Performance.ADMSLMean+Performance.KDTMean
formula8NNR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean+Performance.DMSMean+Performance.ADMSLMean+Performance.KDTMean+Performance.TBCMean
formulaTask <- Performance.Task ~ Performance.KDTMean+Performance.MAMean+Performance.MVMean+Performance.TBCMean+Performance.DDCMean+Performance.DMSMean+Performance.AEDMean+Performance.ADMSLMean+FatigueLevel
formula2NR <- FatigueLevel ~ Performance.Task+Performance.MAMean+Performance.KDTMean+Performance.DMSMean
formula3NR <- FatigueLevel ~ Performance.Task+Performance.MAMean+Performance.KDTMean+Performance.DMSMean+Performance.DDCMean
formula4NR <- FatigueLevel ~ Performance.Task+Performance.MAMean+Performance.KDTMean+Performance.DMSMean+Performance.DDCMean+Performance.AEDMean
formula5NR <- FatigueLevel ~ Performance.Task+Performance.MAMean+Performance.KDTMean+Performance.DMSMean+Performance.DDCMean+Performance.AEDMean+Performance.TBCMean
formula6NR <- FatigueLevel ~ Performance.Task+Performance.MAMean+Performance.KDTMean+Performance.DMSMean+Performance.DDCMean+Performance.AEDMean+Performance.TBCMean+Performance.ADMSLMean
formula7NR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean
formula8NR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean+Performance.KDTMean
formula9NR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean+Performance.DMSMean+Performance.ADMSLMean
# FIX: the next two formulas had the typos "Performace.Task" and
# "Perfomance.TBCMean", which reference columns that do not exist in the
# dataset; corrected to the real column names.
formula10NR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean+Performance.KDTMean+Performance.DMSMean+Performance.ADMSLMean
formula11NR <- FatigueLevel ~ Performance.Task+Performance.DDCMean+Performance.MAMean+Performance.MVMean+Performance.TBCMean+Performance.KDTMean+Performance.ADMSLMean+Performance.DMSMean
# Column subsets used with the "NR" formula variants defined above:
# task + selected metrics + target, over the two evaluation splits.
sub71NR <- subset(treino1, select=c("Performance.Task","Performance.MAMean","Performance.DDCMean","Performance.MVMean","FatigueLevel"))
sub72NR <- subset(treino2, select=c("Performance.Task","Performance.MAMean","Performance.DDCMean","Performance.MVMean","FatigueLevel"))
sub81NR <- subset(treino1, select=c("Performance.Task","Performance.MAMean","Performance.DDCMean","Performance.MVMean","Performance.KDTMean","FatigueLevel"))
sub82NR <- subset(treino2, select=c("Performance.Task","Performance.MAMean","Performance.DDCMean","Performance.MVMean","Performance.KDTMean","FatigueLevel"))
sub91NR <- subset(treino1, select=c("Performance.Task","Performance.MAMean","Performance.DDCMean","Performance.MVMean","Performance.DMSMean","Performance.ADMSLMean","FatigueLevel"))
sub92NR <- subset(treino2, select=c("Performance.Task","Performance.MAMean","Performance.DDCMean","Performance.MVMean","Performance.DMSMean","Performance.ADMSLMean","FatigueLevel"))
sub1NR <- subset(treino1, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","FatigueLevel"))
sub2NR <- subset(treino2, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","FatigueLevel"))
sub3NR <- subset(treino1, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","Performance.DDCMean","FatigueLevel"))
sub4NR <- subset(treino2, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","Performance.DDCMean","FatigueLevel"))
sub5NR <- subset(treino1, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","Performance.DDCMean","Performance.AEDMean","FatigueLevel"))
sub6NR <- subset(treino2, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","Performance.DDCMean","Performance.AEDMean","FatigueLevel"))
sub7NR <- subset(treino1, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","Performance.DDCMean","Performance.AEDMean","Performance.TBCMean","FatigueLevel"))
sub8NR <- subset(treino2, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","Performance.DDCMean","Performance.AEDMean","Performance.TBCMean","FatigueLevel"))
sub9NR <- subset(treino1, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","Performance.DDCMean","Performance.AEDMean","Performance.TBCMean","Performance.ADMSLMean","FatigueLevel"))
sub10NR <- subset(treino2, select=c("Performance.Task","Performance.MAMean","Performance.KDTMean","Performance.DMSMean","Performance.DDCMean","Performance.AEDMean","Performance.TBCMean","Performance.ADMSLMean","FatigueLevel"))
# Collapses the normalized fatigue level into a binary indicator:
# 0 (survey levels 1-3, not exhausted) or 0.1 (levels 4-7, exhausted).
determinaExaustao <- function(dataset)
{
  flagged <- dataset
  # Anything above the 0.3 cut-off counts as exhaustion.
  flagged$FatigueLevel <- ifelse(flagged$FatigueLevel > 0.3, 0.1, 0)
  return(flagged)
}
# Maps the normalized fatigue level onto three bands:
# 0   - not exhausted (survey levels 1-3)
# 0.1 - at risk of exhaustion (levels 4-5)
# 0.2 - exhausted (levels 6-7)
escala3niveis <- function(dataset)
{
  banded <- dataset
  lvl <- banded$FatigueLevel
  banded$FatigueLevel <- ifelse(lvl <= 0.3, 0, ifelse(lvl < 0.6, 0.1, 0.2))
  return(banded)
}
# Maps the normalized fatigue level onto four bands:
# 0   - excellent physical/mental state (survey level 1)
# 0.1 - normal state (levels 2-3)
# 0.2 - tired, but not fully exhausted (levels 4-5)
# 0.3 - exhausted (levels 6-7)
escala4niveis <- function(dataset)
{
  banded <- dataset
  lvl <- banded$FatigueLevel
  banded$FatigueLevel <- ifelse(lvl < 0.2, 0,
                         ifelse(lvl < 0.4, 0.1,
                         ifelse(lvl < 0.6, 0.2, 0.3)))
  return(banded)
}
# Trains a neural network with the supplied configuration and evaluates it
# on the hold-out split. Returns the fitted network with three extra fields
# attached: $res (raw compute() output), $prev (rounded predictions) and
# $rmse (error against the observed fatigue levels).
exaustao <- function(dados, treino, hidden, formula, t, algoritmo)
{
  # Fit the network on the learning split.
  net <- neuralnet(formula, dados, hidden = hidden, threshold = t,
                   algorithm = algoritmo, lifesign = "full",
                   linear.output = FALSE)
  # Hold-out predictors: same columns as `treino`, minus the target.
  holdout_inputs <- treino
  holdout_inputs$FatigueLevel <- NULL
  # Run the trained network over the hold-out predictors.
  net$res <- compute(net, holdout_inputs)
  # Pair observed targets with the raw network outputs.
  comparison <- data.frame(atual = treino$FatigueLevel,
                           prev = net$res$net.result)
  # Round the raw outputs to the nearest level.
  net$prev <- round(comparison$prev)
  # Root-mean-square error between observed and predicted levels.
  net$rmse <- rmse(c(treino$FatigueLevel), c(net$prev))
  return(net)
}
# Derives a seven-band fatigue score (0.1 for the highest performance
# totals through 0.7 for the lowest) from the sum of the nine normalized
# performance metrics.
id7niveis <- function(dataset)
{
  scored <- dataset
  total <- scored$Performance.KDTMean + scored$Performance.MAMean +
    scored$Performance.MVMean + scored$Performance.TBCMean +
    scored$Performance.DDCMean + scored$Performance.DMSMean +
    scored$Performance.AEDMean + scored$Performance.ADMSLMean +
    scored$Performance.Task
  # Band the total, from highest totals (0.1) down to lowest (0.7).
  scored$FatigueLevel <- ifelse(total > 7.04, 0.1,
                         ifelse(total > 6.1, 0.2,
                         ifelse(total > 5.16, 0.3,
                         ifelse(total > 4.22, 0.4,
                         ifelse(total > 3.28, 0.5,
                         ifelse(total > 2.34, 0.6, 0.7))))))
  return(scored)
}
# Trains a network that predicts Performance.Task from the remaining
# columns and evaluates it on the hold-out split. Returns the fitted
# network with $res (raw compute() output), $prev (rounded predictions)
# and $rmse attached.
task <- function(dados, treino, hidden, formula, t, algoritmo)
{
  # Fit the network on the learning split (linear output for regression).
  model <- neuralnet(formula, dados, hidden = hidden, threshold = t,
                     algorithm = algoritmo, lifesign = "full",
                     linear.output = TRUE)
  # Hold-out predictors: drop the target column before predicting.
  holdout_inputs <- treino
  holdout_inputs$Performance.Task <- NULL
  model$res <- compute(model, holdout_inputs)
  # Pair observed tasks with the raw outputs, then round to a task id.
  comparison <- data.frame(atual = treino$Performance.Task,
                           prev = model$res$net.result)
  model$prev <- round(comparison$prev)
  # Root-mean-square error between observed and predicted task ids.
  model$rmse <- rmse(c(treino$Performance.Task), c(model$prev))
  return(model)
}
# Queries a trained task-prediction network with one observation: builds a
# one-row frame from the metric values, runs it through the network and
# prints the predicted task rounded to one decimal place.
qualTarefa <- function(net, kdt, ma, mv, tbc, ddc, dms, aed, admsl, exaustion)
{
  query_row <- data.frame(
    Performance.KDTMean = kdt,
    Performance.MAMean = ma,
    Performance.MVMean = mv,
    Performance.TBCMean = tbc,
    Performance.DDCMean = ddc,
    Performance.DMSMean = dms,
    Performance.AEDMean = aed,
    Performance.ADMSLMean = admsl,
    FatigueLevel = exaustion
  )
  prediction <- compute(net, query_row)
  print(round(prediction$net.result, digits = 1))
}
# Results are stored in a variable; to inspect the error run View(res1$rmse).
# Train a task-prediction network on the 422-row split (dados2/treino2) and
# query it with one hand-picked observation.
res1 <- task(dados2, treino2, hidden1, formulaTask, threshold1, "rprop+")
qualTarefa(res1, 0.1, 0.2, 0.5, 0.2, 0.1, 0.2, 0.3, 0.2, 0.3)
# Resultados vão ficar armazenados numa variável. De forma a verificar o valor de rmse, executar View(res1$rmse).
#r <- id7niveis(dataset)
#dr1 <- r[1:422,]
#tr1 <- r[423:844,]
#dr2 <- subset(dr1, select=c("FatigueLevel","Performance.DMSMean", "Performance.TBCMean", "Performance.DDCMean", "Performance.KDTMean"))
#tr2 <- subset(tr1, select=c("FatigueLevel","Performance.DMSMean", "Performance.TBCMean", "Performance.DDCMean", "Performance.KDTMean"))
#formulaA <- FatigueLevel ~ Performance.DMSMean+Performance.TBCMean+Performance.DDCMean+Performance.KDTMean
#res7 <- exaustao(dr2, tr2, hidden2, formulaA, threshold1, "rprop+")
#res1 <- exaustao(dados2, treino2, hidden5, formula1, threshold1, "rprop+")
#ex <- determinaExaustao(dataset) # Novo dataset com exaustão 0 ou 1
#d1 <- ex[1:422,] # Novos dados de aprendizagem
#t1 <- ex[423:844,] # Novos dados de treino
#res2 <- exaustao(d1, t1, hidden2, formula1, threshold1, "rprop+")
#escala3n <- escala3niveis(dataset)
#escala4n <- escala4niveis(dataset)
#d3 <- escala3n[1:422,]
#t3 <- escala3n[423:844,]
#res3 <- exaustao(d3, t3, hidden2, formula1, threshold1, "rprop+")
#escala4n <- escala4niveis(dataset)
#d4 <- escala4n[1:422,]
#t4 <- escala4n[423:844,]
|
2acd02beaaaf0c8c3d965b37ca13c97138aae815 | df15df62c38774a3be118b9b801375e92afbb138 | /qap.R | 45678eddeb929f38471d726be175503d7289caae | [] | no_license | cavieres/lab2_simulated-annealing | c0eee646fd06801773372f9a3d411a95196cc549 | 4acfc7c6a53a432a7cc7d09c668c21c01c94d15c | refs/heads/master | 2021-01-21T19:13:37.780437 | 2017-06-07T05:06:30 | 2017-06-07T05:06:30 | 92,130,913 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 3,049 | r | qap.R | # Algoritmo QAP.
# Para ejecutar script, realizar en consola:
# > setwd("<letra-unidad>:\\<carpeta-proyecto>")
# Ej: setwd("C:\\Users\\cavie\\desarrollo\\lab2_simulated-annealing")
# > Establecer los parametros de configuracion del algoritmo en el archivo "config.yaml".
# > source("qap.R")
# > Costo(c(2, 1, 4, 3))
library(yaml)
config = yaml.load_file("config.yaml")
# Matriz de flujos.
#F <- matrix(c(0, 3, 0, 2, 3, 0, 0, 1, 0, 0, 0, 4, 2, 1, 4, 0), nrow = 4, ncol = 4)
F <- read.table(config$QAP$Flows, header = FALSE)
# Matriz de distancias
#D <- matrix(c(0, 22, 53, 0, 22, 0, 40, 0, 53, 40, 0, 55, 0, 0, 55, 0), nrow = 4, ncol = 4)
D <- read.table(config$QAP$Distances, header = FALSE)
# Ubicacion de instalacion i en arreglo de ubicaciones
Fi = function(i, ubicaciones) {
return(which(ubicaciones == i))
}
# Flujo entre las instalaciones i y j.
f = function(i, j) {
return(F[i, j])
}
# Distancia entre las ubicaciones k y l.
d = function(k, l) {
return(D[k, l])
}
# Costo solucion (funcion objetivo)
# de arreglo de ubicaciones.
Costo = function(ubicaciones) {
costo = 0
for(i in 1:length(ubicaciones)) {
for(j in 1:length(ubicaciones)) {
costo = costo + f(i, j) * d(Fi(i, ubicaciones), Fi(j, ubicaciones))
}
}
return(costo)
}
# Busqueda de vecindad aleatoria por medio de swap.
N = function(s) {
sPrima = s
instalacionInicial = sample(length(sPrima), 1)
if (instalacionInicial == length(sPrima))
instalacionFinal = instalacionInicial - 1
else
instalacionFinal = instalacionInicial + 1
aux = sPrima[instalacionInicial]
sPrima[instalacionInicial] = sPrima[instalacionFinal]
sPrima[instalacionFinal] = aux
return(sPrima)
}
# Generacion de solucion inicial.
s0 = function() {
#return(c(2, 1, 4, 3))
#cargar arreglo de flujos:
flujos = F
#llamar a libreria:
library("plyr")
#ordenar el arreglo de flujos de forma creciente:
#creación del vector(arreglo de flujos):
suma_f<- rep(0, len = nrow(flujos))
resultado=0;
#sumar los flujos:
for(i in 1:nrow(flujos))
{
resultado=0;
for(j in 1:ncol(flujos))
{
resultado <- resultado + flujos[i,j];
}
suma_f[i]=resultado;
}
#ordenar de forma decreciente:
indice_flujos <-order(suma_f,decreasing = TRUE)
#ver valor del arreglo:
suma_f
#cargar arreglo de distancias:
distancias = D
#creación del vector(arreglo de distancias):
suma_d<- rep(0, len = nrow(flujos))
#sumar las distancias:
for(i in 1:nrow(flujos))
{
resultado=0;
for(j in 1:ncol(flujos))
{
resultado <- resultado + distancias[i,j];
}
suma_d[i]=resultado;
}
#ver valor del arreglo:
suma_d
#ordenar de forma creciente para las distancias:
indice_distancias <- order(suma_d)
#generación de la solución óptima:
solucion<- rep(0, len = nrow(flujos))
#arma el arreglo de soluciones:
for(i in 1:nrow(flujos))
{
solucion[indice_distancias[i]]<- indice_flujos[i]
}
return(solucion)
} |
6b1adf0ee02bc9cf421841f5025181a3e863328e | b44b98969edeb5f1dd379eafc59799ffff16d10d | /cachematrix.R | 18913881d474ebd7cefec86fe2904e97dcf3226f | [] | no_license | TrevorDison/ProgrammingAssignment2 | f3d0bba98ed77b0e483cf4aff9fafef1cc814242 | 7f6bff4ce17c3c33ab0efca913e0085ff8337213 | refs/heads/master | 2020-03-25T01:00:04.073317 | 2018-08-02T23:07:11 | 2018-08-02T23:07:11 | 143,217,004 | 0 | 0 | null | 2018-08-01T23:06:30 | 2018-08-01T23:06:29 | null | UTF-8 | R | false | false | 1,368 | r | cachematrix.R | ## First function (makeCacheMatrix) creates a list of four functions that
## can be called to set or get a matrix or its inverse.
## Second function (cacheSolve) calculates or retrieves the inverse depending
## on whether it is already stored in memory.
## Create a list of the functions needed to set/get a matrix and its inverse
## First function stores the matrix (X) in memory and sets N to NULL.
## Second function retrieves the matrix (X) from memory.
## Third function stores the inverse (N) in memory.
## Fourth function retrieves the inverse (N) from memory.
makeCacheMatrix <- function(X = matrix()) {
N <- NULL
set <- function(Y) {
X <<- Y
N <<- NULL
}
get <- function() X
setinverse <- function(inverse) N <<- inverse
getinverse <- function() N
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Calculate or retrieve the inverse matrix
## If the inverse is not null (already exists in memory), then the function
## retrieves the inverse. Otherwise, it calculates and returns the inverse.
cacheSolve <- function(X, ...) {
N <- X$getinverse()
if(!is.null(N)) {
message("getting cached data")
return(N)
}
data <- X$get()
N <- solve(data, ...)
X$setinverse(N)
N
}
|
fc80b05417f69c66a5e01c112894e16008704af3 | 7eb128f9b7899c33d4854009edbd38dd566cba72 | /R Tutorials/text mining/bigramtokenizer.R | e6e6f7bf56817741880771a2b819b71d40ca83b2 | [] | no_license | chengjun/Research | 1149add090ec563f544c4b5a886c01b1392a25d4 | c01e3d2eac2bca74671abb9cd63e1b06e5566fc8 | refs/heads/master | 2021-06-15T16:35:15.005107 | 2019-08-06T09:03:29 | 2019-08-06T09:03:29 | 11,498,113 | 4 | 6 | null | 2021-04-15T09:27:11 | 2013-07-18T08:36:12 | Mathematica | UTF-8 | R | false | false | 232 | r | bigramtokenizer.R | library("RWeka")
library("tm")
data("crude")
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 4))
tdm <- TermDocumentMatrix(crude, control = list(tokenize = BigramTokenizer))
inspect(tdm[340:345,1:10]) |
ba6f11a41e57baea95213111c5657993afbe198e | b82009d15790e7a71dde583da6169f5ef478873e | /finemap/locuszoom_preprocess.R | 88f576c2029f45b925eb28ef64b2cfdedba4434f | [] | no_license | boxiangliu/rpe | 9233e5b0ac6b9761fbb34e44a462b9e6cae87889 | 2125bf34e029d16a484f8530381af25bb9619d2d | refs/heads/master | 2021-10-26T02:32:10.477848 | 2019-04-09T19:31:57 | 2019-04-09T19:31:57 | 51,119,068 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 632 | r | locuszoom_preprocess.R | library(data.table)
library(stringr)
gwas_fn='../data/gwas/Fritsche_2015_AdvancedAMD.txt'
eqtl_fn='../processed_data/rasqual/output/glucose/joint/chr12/ENSG00000135437.5_RDH5.txt'
out_dir='../processed_data/finemap/locuszoom_preprocess/'
if (!dir.exists(out_dir)) {dir.create(out_dir,recursive=TRUE)}
gwas=fread(gwas_fn)
eqtl=fread(eqtl_fn,select=c(1,3,4,5,6,11),col.names=c('gene','chr','pos','ref','alt','chisq'))
eqtl[,pval:=pchisq(chisq,df=1,lower.tail=FALSE)]
eqtl[,chr:=str_replace(chr,'chr','')]
eqtl=merge(eqtl,gwas[,list(chr=Chrom,pos=Pos,Marker)],by=c('chr','pos'))
fwrite(eqtl,sprintf('%s/RHD5.txt',out_dir),sep='\t') |
3d4993cce75397d5aa1389714d806c70f75ad00e | 5bea168054d7ea022a93721a64870701a86f4dff | /weather_population_growth/GAM_weather_pop_growth_ALLweathervars.R | d9299cbe0335e989eccc0157e21447159b0e67e9 | [] | no_license | jjackson-eco/mammal_weather_lifehistory | 43119558b20facb7988b5f83a05239fcf26a7a2f | cd6fb95ac8ae80c6889fe4f4785d17cab7d18375 | refs/heads/master | 2022-07-18T06:08:49.083550 | 2022-06-07T13:59:49 | 2022-06-07T13:59:49 | 250,286,353 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,978 | r | GAM_weather_pop_growth_ALLweathervars.R | ####################################################
## ##
## Global climate and population dynamics ##
## ##
## Annual weather and population growth ##
## ##
## March 16th 2021 ##
## ##
####################################################
# Record-wise regressions linking weather to population growth rates,
# accounting for autocorrelation with GAMM and formal AR(1) time-series analysis.
# Models across all weather variables and spatial scales.
rm(list = ls())
options(width = 100)
library(tidyverse)
library(psych)
library(ggridges)
library(viridis)
library(patchwork)
library(gridExtra)
library(mgcv) # Simon Wood to the rescue again. All Hail
temp_colour <- "#990a80"
precip_colour <- "#287f79"
##__________________________________________________________________________________________________
#### 1. Load data ####
# mammal data
load("../rawdata/mammal.RData")
glimpse(mammal)
# annual weather anomaly - focus on just the mean anomaly in this script at a 5km range
mam_chelsa_annual <- readRDS("data/mam_chelsa_annual.RDS") %>%
dplyr::select(-c(4:6))
glimpse(mam_chelsa_annual)
# Species names to merge
load("../rawdata/GBIF_species_names_mamUPDATE.RData", verbose = TRUE)
##__________________________________________________________________________________________________
#### 2. Joining data ####
# linking to weather data and species names
mammal_weather <- mammal %>%
left_join(., y = mam_chelsa_annual, by = c("ID", "year")) %>%
left_join(., y = dplyr::select(lpd_gbif, Binomial, gbif_species = gbif.species.tree),
by = "Binomial") %>%
mutate(year_s = as.numeric(scale(year)))
glimpse(mammal_weather)
##__________________________________________________________________________________________________
#### 3. GAMs for each variable and scale for each record ####
# 3a. set up iteration data
# Ignoring number of odd days vars for now - they follow a zero inflated pattern
iter_dat <- expand_grid(ID_block = unique(mammal_weather$ID_block),
scale = unique(mammal_weather$scale),
weather_var = colnames(mammal_weather)[25:40])
# 3b. weather coefficients for each variable
pgr_weather_res <- bind_rows(lapply(X = 1:nrow(iter_dat), function(x){
crow = iter_dat[x,]
# current data
cdat = mammal_weather %>%
filter(ID_block == crow$ID_block, scale == crow$scale) %>%
dplyr::select(ID_block, year, ln_abundance,
weather_val = crow$weather_var,
pop_growth_rate)
# record info
rec_info = mammal_weather %>%
filter(ID_block == crow$ID_block, scale == crow$scale) %>%
dplyr::select(2:17) %>%
slice(1)
# model
if(length(which(is.na(cdat$weather_val) == T)) > 0){modcoef = rep(NA,4)}
else{mod_weather = gamm(pop_growth_rate ~
s(year, bs = "tp", k = 5) + weather_val,
data = cdat,
family = gaussian,
correlation = corARMA(form = ~ year, p = 1),
method = "REML")
modcoef = coef(mod_weather$gam)}
# returning data
cat('\r',"Your Job is",round((x/nrow(iter_dat))*100, 0),"% Complete ")
return(tibble(crow, coef_weather = modcoef[2],
rec_info))
}))
# 3c. Adding in weather variable labels
pgr_weather_res <- pgr_weather_res %>%
mutate(weather_var_lab = stringr::str_to_sentence(gsub("_", " ", weather_var))) %>%
mutate(weather_var_lab = gsub("emp", "emperature", weather_var_lab),
weather_var_lab = gsub("recip", "recipitation", weather_var_lab))
##__________________________________________________________________________________________________
#### 4. Density ridge plots for the weather variables ####
# pgr_weather_res <- readRDS("data/pgr_weather/pgr_weather_res.RDS")
# removing very large coefficients
pgr_plotdat_sm <- pgr_weather_res %>%
filter(coef_weather >= -0.2 & coef_weather < 0.2)
pgr_plotdat_lg <- pgr_weather_res %>%
filter(coef_weather >= -5 & coef_weather < 5)
pgr_weath_sm <- ggplot(pgr_plotdat_sm, aes(x = coef_weather, y = weather_var_lab, fill = stat(x))) +
geom_vline(xintercept = 0) +
geom_density_ridges_gradient(scale = 1.1) +
scale_fill_viridis_c(option = "D", guide = F) +
labs(x = "Weather variable coefficient", y = NULL) +
theme_ridges(center_axis_labels = TRUE, font_size = 20, grid = F)
pgr_weath_lg <- ggplot(pgr_plotdat_lg, aes(x = coef_weather, y = weather_var_lab, fill = stat(x))) +
geom_vline(xintercept = 0) +
geom_density_ridges_gradient(scale = 1.1, ) +
scale_fill_viridis_c(option = "D", guide = F) +
labs(x = "Weather variable coefficient", y = NULL) +
theme_ridges(center_axis_labels = TRUE, font_size = 20, grid = F) +
theme(axis.text.y = element_blank())
ggsave(pgr_weath_sm,
filename = "plots/weather_pop_growth/coef_weather_vars.jpeg",
width = 8, height = 11, units = "in", dpi = 400)
##__________________________________________________________________________________________________
#### 5. Spatial scales ####
# 5a. Spatial scales consistent?
sp_res <- pgr_weather_res %>%
dplyr::select(ID_block, scale, coef_weather, weather_var) %>%
pivot_wider(names_from = scale, values_from = coef_weather) %>%
dplyr::select(starts_with("scale"))
jpeg(filename = "plots/weather_pop_growth/scale_weather_coef.jpeg",
width = 7, height = 7, units = "in",res = 400)
pairs.panels(sp_res, smooth = FALSE, lm = TRUE, ellipses = FALSE)
dev.off()
##__________________________________________________________________________________________________
#### 6. Save data ####
saveRDS(pgr_weather_res, file = "data/pgr_weather/pgr_weather_res.RDS")
|
2b40a89c2e9e8e92459e4f8470d9e0f51801e0fc | 6c321997b2237e3432ebc89866e47c5636e8ccde | /man/plotDeltaAUC.Rd | b3407d8dfbc61a418c57917f72c2f18aa88517d9 | [] | no_license | cran/coca | e37d4a524d58e47400158ac4cfea0ea10570038e | 2baeffda08df37be4aa3b0638f99e00869a49a37 | refs/heads/master | 2021-05-16T23:21:41.927083 | 2020-07-06T16:00:09 | 2020-07-06T16:00:09 | 250,513,558 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 848 | rd | plotDeltaAUC.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auc.R
\name{plotDeltaAUC}
\alias{plotDeltaAUC}
\title{Plot area under the curve}
\usage{
plotDeltaAUC(deltaAUC, chosenK = NULL, fileName = "deltaAUC.png")
}
\arguments{
\item{deltaAUC}{Vector of the difference between the area under the curve
between each value K of the number of clusters and K-1. For the smallest
value considered (usually two) this is simply the area under the curve for
that value of cluster number.}
\item{chosenK}{Chosen number of clusters. If specified, a vertical line is
plotted in correspondance of the indicated value. Default is NULL.}
\item{fileName}{name of the png file}
}
\description{
Plot area under the curve for different values of K.
}
\author{
Alessandra Cabassi \email{alessandra.cabassi@mrc-bsu.cam.ac.uk}
}
\keyword{internal}
|
53b360548268858ad747a7559692ccf0fb50636d | 081972542da7a0a5648b4fd73fda277ce7e51ea0 | /R/plotPR.R | 455c1d3998f218a6842c1d878f1208aab913ca92 | [] | no_license | cran/synRNASeqNet | d6353853056cf7e7d23282b93a90d4f0ea4b7323 | 686c05f268ec09a8b07dd599f9ab03f392d87131 | refs/heads/master | 2021-01-18T18:12:48.143611 | 2015-04-07T00:00:00 | 2015-04-07T00:00:00 | 34,254,697 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 205 | r | plotPR.R | plotPR <-
function(piNet, ...){
plot(c(0, piNet[, "Recall"]), c(0, piNet[, "Precision"]), type = "l",
xlab = "recall", ylab = "precision", main = "PR Curve",
xlim = 0:1, ylim = 0:1, ...)
}
|
e4e19974c30d42e46078d3cc1db0a514193522c1 | bce77c018c377461c470f83b2fadbf0b98dc4020 | /ui.R | ea367df3cebf6cdbac4798a8b5b7f09fb7e5ac3c | [] | no_license | dvgodoy/ExploratoryRegression | 7c21c66738656074b534aad98286f891f4fc1d33 | f62d1dc58e7df2a904fc466a660b0bcec9fa721b | refs/heads/master | 2020-05-31T06:23:58.539273 | 2015-01-17T18:31:33 | 2015-01-17T18:31:33 | 29,400,054 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,077 | r | ui.R | shinyUI(
pageWithSidebar(
headerPanel("Exploratory Regression Using MTCARS"),
sidebarPanel(
h2('Response'),
selectInput('response','Select a variable:',
choices=c('mpg','disp','hp','drat','wt','qsec'),
selected="mpg"),
h2('Regressors'),
h4('Be careful not to check the response variable!'),
checkboxInput('intercept','Intercept',TRUE),
checkboxInput('mpg','(mpg) Miles per gallon',FALSE),
checkboxInput('cyl','(cyl) Number of cylinders',FALSE),
checkboxInput('disp','(disp) Displacement (cu.in.)',FALSE),
checkboxInput('hp','(hp) Gross horsepower',FALSE),
checkboxInput('drat','(drat) Rear axle ratio',FALSE),
checkboxInput('wt','(wt) Weight (1,000 lbs)',FALSE),
checkboxInput('qsec','(qsec) 1/4 mile time',FALSE),
checkboxInput('vs','(vs) V/S',FALSE),
checkboxInput('am','(am) Transmission type',FALSE),
checkboxInput('gear','(gear) # of forward gears',FALSE),
checkboxInput('carb','(carb) # of carburetors',FALSE),
submitButton('Submit')
),
mainPanel(
h2('Results of your regression'),
h3('Your model estimates:'),
verbatimTextOutput('fitSummary'),
h3('Coefficients statistically significant at 5%:'),
verbatimTextOutput('signif'),
h3('Your model RMSE:'),
verbatimTextOutput('fitRMSE'),
h3('Actual vs Fitted plot:'),
plotOutput('fittedPlot'),
h2('Residuals Analysis'),
h3('Residual plot:'),
plotOutput('residPlot'),
h3('Shapiro-Wilk Normality Test:'),
verbatimTextOutput('shapiro'),
h4('Obs.: Null hypothesis = normality'),
h4(uiOutput('shapiro.p')),
h3('Breusch-Pagan Heteroskedasticity Test:'),
verbatimTextOutput('breusch'),
h4('Obs.: Null hypothesis = homoskedasticity'),
h4(uiOutput('breusch.p')),
h3('Durbin-Watson Serial Correlation Test:'),
verbatimTextOutput('durbin'),
h4('Obs.: Null hypothesis = no serial correlation'),
h4(uiOutput('durbin.p'))
)
)
)
|
adbd5ddbcb163578c8d8a0d1fb8f2f5ef88f487b | c4cee858764ce7181ca91d4ce6bb05df795c3262 | /Ridge Regression.R | d466a104c2e29e8f0bcaa8d0bb4314748d7d179a | [] | no_license | kunalseth/PredictionAlgorithms | cbbeff21dfe6ed1034855c184a5ba89914494c56 | 1b421d035489f258a5821826dfd918986eac866d | refs/heads/master | 2020-09-22T14:21:56.118631 | 2016-11-22T00:07:22 | 2016-11-22T00:07:28 | 67,657,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,264 | r | Ridge Regression.R | library(leaps)
library(ISLR)
library(glmnet)
set.seed (1)
#2.a
x= rnorm(30)
ep=rnorm(30)
y= 3- 2*x + 3*(x^2) + ep
xpoly=poly(x,7)
x1= rnorm(1000)
ep1=rnorm(1000)
y1= 3- 2*x1 + 3*(x1^2) + ep1
x1poly=poly(x1,7)
grid =10^ seq (10,-2, length =100)
#i
ridge.mod=glmnet(x=xpoly,y=y,alpha=0, lambda = grid)
plot(ridge.mod,xvar="lambda", label=TRUE)
legend('topright', c("1","2","3","4","5","6","7") , bty='n', lty=1,col=c('black', 'red', 'green',' blue', 'light blue', 'pink','black'), cex=.75, lwd=c(2.5,2.5))
#ii
cv.ridgemod <- cv.glmnet(xpoly,y=y,alpha=0)
plot(cv.ridgemod)
best_lambda <- cv.ridgemod$lambda.min
best_lambda
#iii
ridge.pred=predict (ridge.mod ,s=best_lambda,newx=xpoly)
mean(( ridge.pred -y)^2)
rid.out=glmnet (xpoly,y,alpha =0,lambda = best_lambda)
ridge.coef=predict (rid.out ,type ="coefficients",s=best_lambda )
ridge.coef
#b
ridge.pred1=predict(rid.out,s=best_lambda,newx=poly(x1,7))
mean((ridge.pred1 -y1)^2)
#c
set.seed(1)
xl1<-rnorm(30)
el1<-rnorm(30)
yl1<-3-(2*xl1)+(3*(xl1^2))+el1
xlpoly<-poly(xl1,7)
ls.mod<-lm(yl1~xlpoly)
xl2<-rnorm(1000)
el2<-rnorm(1000)
xl2poly<-poly(xl2,7)
yl2<-3-(2*xl2)+(3*(xl2^2))+el2
ls.pred<-predict(ls.mod,newx=xl2poly)
mean((ls.pred-yl2)^2)
|
b76da8a6dec8cabf0544d892bba7dd265478f261 | 0fa7eddbfe97d8368445a0591c846f0ad553f44b | /dasb.r | 208bbc61e9dffcd5875bacac235f202293a8d2ed | [] | no_license | miumiux/Mixed | 2fa4fd7004425180e7c57369d368bfd02ac26dbf | e60696105898d5cec8a326894c1b99a4b6407086 | refs/heads/master | 2021-01-23T06:39:16.264462 | 2013-04-22T22:31:00 | 2013-04-22T22:31:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,964 | r | dasb.r |
dasb <- read.csv("C:/Users/yl2820/Dropbox/GOALS/Fp/Data/DASB_fp_diag.csv")
require(lme4)
require(nlme)
require(reshape2)
require(sm)
#|-----------------------------------------------------------------------
#|DATA PREPARISON
#|-----------------------------------------------------------------------
dat1<-dasb
#Exclude those scans with all entries are NA
dat<-subset(dat1,!is.na(dat1$FFS1)|!is.na(dat1$FFS2)|!is.na(dat1$FFS3)) ###10 subjects excluded
dat<-dat[!is.na(dat$DIAG),]
dat$JJM<-as.factor(dat$JJM)
#Number of diagnosis
table(dat$DIAG)
#A merged version of diagnosis
diag1<-substring(dat$DIAG,1,3)
table(diag1)
dat<-data.frame(dat,diag1)
#Long format data
fdat <- melt(dat,
# ID variables - all the variables to keep but not split apart on
id.vars=c("Study.Number","JJM","DIAG","diag1"),
# The source columns
measure.vars=c("FFS1","FFS2","FFS3"),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="FFS.num", #FFS1,FFS2,FFS3
value.name="FFS"
)
dat<-dat[order(dat$JJM,dat$Study.Number),]
#check for data balance
temp<-split(dat$Study.Number,dat$JJM,drop=T)
temp2<-sapply(temp,function(z){sum(!is.na(z))})
table(temp2)
#|-----------------------------------------------------------------------
#|ANALYSIS
#|-----------------------------------------------------------------------
#-----------------------With single scans-------------------------#
FFS100<-fdat$FFS*100
fdat1<-cbind(fdat,FFS100); fdat1<-fdat1[!is.na(fdat1$FFS100),]
summary(aov(fdat1$FFS100~fdat1$DIAG+fdat1$JJM+fdat1$Study.Number))
#Reduce Diagnosis into more general groups. Not much change in estimation
summary(fm21<-lmer(FFS100~1+diag1+(1|JJM)+(1|Study.Number),data=fdat1,REML=F,na.action=na.omit))
summary(fm11<-lmer(FFS100~1+diag1+(1|Study.Number),data=fdat1,REML=F,na.action=na.omit))
summary(fm0<-lm(FFS~diag1,data=fdat1))
#Test for random effect
anova(fm21,fm11)
anova(fm1,fm0)
intervals(fm2) #C.I for random effect
#-----------------------Multiple scans-------------------------#
#Below subset JJMs with two or more scans, dataset fdat2
temp1<-split(fdat,fdat$JJM)
a<-sapply(temp1,function(z){length(z$FFS)>3}) #Scan # greater than 3, Logic return
temp22<-temp1[a] #Subseting
#List to data frame
fdat2<-data.frame()
for (i in 1:length(temp22))
fdat2<-rbind(fdat2,temp22[[i]])
#Unit=>percentage
FFS100<-fdat2$FFS*100
#Extract complete data
fdat2<-cbind(fdat2,FFS100);
fdat2<-fdat2[!is.na(fdat2$FFS100),]
#Model with random subject
summary(fm2<-lmer(FFS100~1+diag1+(1|JJM)+(1|Study.Number),data=fdat2,REML=F,na.action=na.omit))
# summary(fm2<-lme(FFS100~1+diag1,random=~1|JJM/Study.Number,data=fdat2,method="ML",na.action=na.omit))
#Model with random scan
summary(fm1<-lmer(FFS100~1+diag1+(1|Study.Number),data=fdat2,REML=F,na.action=na.omit))
#Nested model
summary(fm0<-lm(FFS100~1+diag1,data=fdat2))
##Test for random effect (combination of chisq distribution)
anova(fm2,fm1)
anova(fm1,fm0)
#|-----------------------------------------------------------------------
#|SIMULATION TO CHECK MODEL STATBILITY
#|-----------------------------------------------------------------------
#Effective to lmer
set.seed(1243)
#-----------------------Multiple scans-------------------------#
remat <- summary(fm2)@REmat
var<-data.frame(as.numeric(remat[,4])) ###SD actually
row.names(var)<-c("rscan","rjjm","rnoise")
sim.std<-matrix(0,100,3)
sim.fix<-matrix(0,100,length(fm2@fixef))
p<-NULL
for (i in 1:100)
{
#Simulate random effect for scan
sp.JJM<-split(fdat2,fdat2$JJM,drop=T)
sim.JJMl<-lapply(sp.JJM,function(z){cbind(z,rep(rnorm(1,0,var["rjjm",]),nrow(z)))})
sim.JJM<-unsplit(sim.JJMl,fdat2$JJM,drop=T)
#Simulate random effect for scan
sp.SN<-split(fdat2,fdat2$Study.Number,drop=T)
sim.scanl<-lapply(sp.SN,function(z){cbind(z,rep(rnorm(1,0,var["rscan",]),nrow(z)))})
sim.scan<-unsplit(sim.scanl,fdat2$Study.Number,drop=T)
#Simulate noise
sim.noise<-rnorm(nrow(fdat2),0,var["rnoise",])
#fitted Fixed effect
fixef<-as.matrix(fm2@X)%*%as.matrix(fm2@fixef)
#rjjm,rscan,rnoise,fixef
sim<-sim.JJM[,8]+sim.scan[,8]+sim.noise+fixef
new<-cbind(fdat2[,1:5],sim)
sim.fm<-lmer(sim~1+diag1+(1|JJM)+(1|Study.Number),data=new,REML=F,na.action=na.omit)
sim.fm0<-lmer(sim~1+diag1+(1|Study.Number),data=new,REML=F,na.action=na.omit)
p[i]<-anova(sim.fm0,sim.fm)$"Pr(>Chisq)"[2]
sim.std[i,]<-as.numeric(summary(sim.fm)@REmat[,4])
sim.fix[i,]<-sim.fm@fixef
}
#-----------------------With single scans-------------------------#
remat1 <- summary(fm21)@REmat
var1<-data.frame(as.numeric(remat1[,4])) ###SD actually
row.names(var1)<-c("rscan","rjjm","rnoise")
sim.std1<-matrix(0,100,3)
sim.fix1<-matrix(0,100,length(fm21@fixef))
p1<-NULL
for (i in 1:100)
{
#Simulate random effect for scan
sp.JJM<-split(fdat1,fdat1$JJM,drop=T)
sim.JJMl<-lapply(sp.JJM,function(z){cbind(z,rep(rnorm(1,0,var1["rjjm",]),nrow(z)))})
sim.JJM<-unsplit(sim.JJMl,fdat1$JJM,drop=T)
#Simulate random effect for scan
sp.SN<-split(fdat1,fdat1$Study.Number,drop=T)
sim.scanl<-lapply(sp.SN,function(z){cbind(z,rep(rnorm(1,0,var1["rscan",]),nrow(z)))})
sim.scan<-unsplit(sim.scanl,fdat1$Study.Number,drop=T)
#Simulate noise
sim.noise<-rnorm(nrow(fdat1),0,var1["rnoise",])
#fitted Fixed effect
fixef<-as.matrix(fm21@X)%*%as.matrix(fm21@fixef)
#rjjm,rscan,rnoise,fixef
sim<-sim.JJM[,8]+sim.scan[,8]+sim.noise+fixef
new<-cbind(fdat1[,1:5],sim)
sim.fm<-lmer(sim~1+diag1+(1|JJM)+(1|Study.Number),data=new,REML=F,na.action=na.omit)
sim.fm0<-lmer(sim~1+diag1+(1|Study.Number),data=new,REML=F,na.action=na.omit)
p1[i]<-anova(sim.fm0,sim.fm)$"Pr(>Chisq)"[2]
sim.std1[i,]<-as.numeric(summary(sim.fm)@REmat[,4])
sim.fix1[i,]<-sim.fm@fixef
}
sim.std0<-as.data.frame(rbind(cbind(sim.std1,rep(0,100)),cbind(sim.std,rep(1,100))))
colnames(sim.std0)<-c("rscan","rjjm","rnoise","l")
# create value labels
jjm.f <- factor(sim.std0$l, levels= c(0,1),
labels = c("Complete data", "Multi-scans"))
#|-----------------------------------------------------------------------
#|PARAMETER ESTIMATE DENSITY FROM 100 SIMULATION
#|-----------------------------------------------------------------------
name<-c("Scan","JJM","Noise")
par(mfrow=c(3,1))
for(i in 1:3)
{
sm.density.compare(sim.std0[,i],sim.std0$l, xlab="Estimate of RE")
title(main=paste("Simulation of random effect estimate for" ,name[i] ,"(Kernal density)"))
abline(v=as.numeric(remat1[i,4]),col="red")
abline(v=as.numeric(remat[i,4]),col="green")
colfill<-c(2:(2+length(levels(jjm.f))))
legend("topright",levels(jjm.f), fill=colfill)
}
#|-----------------------------------------------------------------------
#|P-VALUE HISTOGRAM FOR TESTING RANDOM SUBJECT FROM 100 SIMULATION
#|-----------------------------------------------------------------------
par(mfrow=c(2,1))
hist(p1,freq=F,main="P-value for test RE of JJM(Complete dataset)")
lines(density(p1),col=2)
hist(p,freq=F,main="P-value for test RE of JJM(Multi-scans)")
lines(density(p),col=2)
#####################################################################
###plot check for constant variance assump#
par(mfrow=c(1,2))
str(summary(lm(varb~meanb)))
#Complete data
aa<-split(fdat1,fdat1$Study.Number,drop=T)
which.max(sapply(aa,function(z){var(z$FFS100)}))
meana<-as.numeric(sapply(aa,function(z){mean(z$FFS100)})[-183])
vara<-as.numeric(sapply(aa,function(z){var(z$FFS100)})[-183])
plot(meana,vara,main="Mean vs. Variability. Complete data")
abline(lm(vara~meana),col=2)
#Multiscan data
bb<-split(fdat2,fdat2$Study.Number,drop=T)
which.max(sapply(bb,function(z){var(z$FFS100)}))
meanb<-as.numeric(sapply(bb,function(z){mean(z$FFS100)})[-10])
varb<-as.numeric(sapply(bb,function(z){var(z$FFS100)})[-10])
plot(meanb,varb,main="Mean vs. Variability. Multi-scan")
abline(lm(varb~meanb),col=2)
|
5610c842dbcd126257197ffe0f8319609bfc3698 | 613c3fd55c48587b0cc813ebf89265892ea92009 | /PBG--Pipeline/PBG--Plots/PBG--MDS/PBG--MDS.R | 0eb59d5d0dd40c28405b06fb7e2d5d8ab2eaf86f | [
"MIT"
] | permissive | HomereAMK/PigeonBreedsGenomics | 88e0d783144bbd8c189c85ad0de90955d3204bb1 | 1a5de6c181880e6da2c87445d21766599ad6711e | refs/heads/main | 2023-07-20T13:40:16.446415 | 2021-09-02T10:55:36 | 2021-09-02T10:55:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,894 | r | PBG--MDS.R | setwd("/Users/hintze/Desktop/PhD\ Action\ Plan/Core\ Projects/Finishing/PBGP--FINAL/Analyses/PBGP--MDS/")
library(optparse)
library(ggplot2)
library(plyr)
library(RColorBrewer)
# Command-line interface ----------------------------------------------------
# Every option the script accepts; parse_args() fills `opt` with the parsed
# values (or the defaults shown here). Note that `-i` defaults to "stdin" and
# `--no_header` / `--debug` are boolean flags.
option_list <- list(make_option(c('-i','--in_file'), action='store', type='character', default="stdin", help='Input file'),
make_option(c('--no_header'), action='store_true', type='logical', default=FALSE, help='Input file has no header'),
make_option(c('--var_excl'), action='store', type='character', default=NULL, help='Variables to exclude from analysis'),
make_option(c('-a','--annot'), action='store', type='character', default=NULL, help='File with indiv annotations'),
make_option(c('--id_column'), action='store', type='numeric', default=1, help='Column to use as ID'),
make_option(c('-L','--in_maj_labels'), action='store', type='character', default=NULL, help='Column from annotation file to use as MAJOR label'),
make_option(c('-l','--in_min_labels'), action='store', type='character', default=NULL, help='Column from annotation file to use as MINOR label'),
make_option(c('-c','--in_colors'), action='store', type='character', default=NULL, help='Column from input file to use as individual colors'),
make_option(c('-s','--plot_size'), action='store', type='numeric', default=1, help='Plot size'),
make_option(c('-t','--plot_title'), action='store', type='character', default=NULL, help='Plot title'),
make_option(c('-x', '--plot_x_limits'), action='store', type='character', default=NULL, help='Comma-sepparated values for plot X-axis limits (eg: "-1,1")'),
make_option(c('-y', '--plot_y_limits'), action='store', type='character', default=NULL, help='Comma-sepparated values for plot Y-axis limits (eg: "-1,1")'),
make_option(c('-o','--out_file'), action='store', type='character', default=NULL, help='Output file'),
make_option(c('--debug'), action='store_true', type='logical', default=FALSE, help='Debug mode')
)
# Parse the actual command line into a named list `opt`.
opt <- parse_args(OptionParser(option_list = option_list))
# NOTE(review): the assignments below clobber whatever parse_args() read from
# the command line -- presumably leftovers from interactive development.
# Remove them (or guard them behind interactive()) before using this script
# as a real CLI tool.
opt$in_file="PBGP--GoodSamples_WithWGSs_NoCrupestris_SNPCalling--Article--Ultra.mds"
opt$annot="PBGP--GoodSamples_WithWGSs_NoCrupestris_SNPCalling--Article--Ultra.annot"
opt$id_column=1
opt$in_maj_labels="Breeds"
opt$out_file="PBGP--GoodSamples_WithWGSs_NoCrupestris_SNPCalling--Article--Ultra_Auto.pdf"
# NOTE(review): the result of this read.table() is discarded (the annotation
# file is re-read with header=TRUE further down) -- looks like a leftover
# interactive sanity check; at top level it only auto-prints the table.
read.table(opt$annot, sep = "\t")
# Echo run configuration -----------------------------------------------------
# Prints every effective parameter so the console log records exactly how
# this run was configured. cat() silently drops NULL values, so unset
# options print as a bare label, identical to the original behavior.
show_param <- function(label, value) {
  cat(label, value, fill = TRUE)
}
show_param('# Input file:', opt$in_file)
show_param('# Input file has header:', !opt$no_header)
show_param('# Excluded variables:', opt$var_excl)
show_param('# Annotations file:', opt$annot)
show_param('# ID column:', opt$id_column)
show_param('# Major label variable:', opt$in_maj_labels)
show_param('# Minor label variable:', opt$in_min_labels)
show_param('# Individual colors:', opt$in_colors)
show_param('# Plot size:', opt$plot_size)
show_param('# Plot title:', opt$plot_title)
show_param('# Plot X-axis limits:', opt$plot_x_limits)
show_param('# Plot Y-axis limits:', opt$plot_y_limits)
show_param('# Out file:', opt$out_file)
show_param('# Debug mode?:', opt$debug)
### Plot axis limits --------------------------------------------------------
# Both --plot_x_limits and --plot_y_limits arrive as one string such as
# "-1,1" (possibly wrapped in quotes or parentheses). parse_limits() strips
# the wrappers, splits on commas, and returns a numeric vector c(low, high)
# for coord_cartesian(). Extracted as a helper to remove the duplicated
# parsing expression that previously appeared once per axis.
parse_limits <- function(spec) {
  as.numeric(gsub("\\(|\\)|\"", "", strsplit(spec, ",", fixed = TRUE)[[1]]))
}
if (!is.null(opt$plot_x_limits))
  opt$plot_x_limits <- parse_limits(opt$plot_x_limits)
if (!is.null(opt$plot_y_limits))
  opt$plot_y_limits <- parse_limits(opt$plot_y_limits)
### Read data ----------------------------------------------------------------
# Main input: tab-separated table, first column used as row names (individual
# IDs), remaining columns are the MDS dimensions. `n` records how many data
# columns there are BEFORE annotation columns are merged in (used later to
# know which columns are numeric dimensions).
cat("# \tReading input file...", fill=TRUE)
data <- read.table(opt$in_file, row.names=1, sep="\t", header=!opt$no_header, stringsAsFactors=FALSE, check.names=FALSE)
n <- ncol(data)
if(opt$debug)
print(data)
# Read annotation file
# Annotations are joined on the ID column (by.x=0 means "match on row names");
# merge() moves the row names into the first result column, so they are
# restored afterwards, and the ID is also kept as a regular column so it can
# be referenced by name as a label variable later.
if(!is.null(opt$annot)){
cat("# \tReading annotations file...", fill=TRUE)
annot <- read.table(opt$annot, sep="\t", header=TRUE, stringsAsFactors=FALSE)
if(opt$debug)
print(annot)
data <- merge(data, annot, by.x=0, by.y=opt$id_column)
# Get rownames back into place
rownames(data) <- data[,1]; data <- data[,-1]
data[colnames(annot)[opt$id_column]] <- rownames(data)
}
# Drop individuals listed in --var_excl (comma-separated IDs, matched against
# the row names of the merged data table).
if (!is.null(opt$var_excl)) {
  cat("# \tExcluding variables...", fill = TRUE)
  opt$var_excl <- unlist(strsplit(opt$var_excl, ","))
  keep_rows <- !(rownames(data) %in% opt$var_excl)
  data <- data[keep_rows, ]
}

# Default the plot title to the input file's base name when none was given.
if (is.null(opt$plot_title)) {
  opt$plot_title <- basename(opt$in_file)
}
# Get Major labels mean location
# Builds the grouping variable used to colour the points:
#  - several label columns may be given comma-separated; they are pasted
#    together with "/" into a single column named "MERGE",
#  - the (merged) label column is re-appended AFTER the numeric columns and
#    coerced to factor (labels may be purely numeric),
#  - `n` is decremented when the label column came from the input file itself
#    (i.e. it was counted among the data columns at read time),
#  - `data_mean` holds per-group means of the first n columns (group
#    centroids) -- presumably used for label placement further down; confirm
#    against the plotting section.
colors <- NULL
if(!is.null(opt$in_maj_labels)){
cat("# Calculating Major labels...", fill=TRUE)
# Merge Major labels
in_maj_labels <- unlist(strsplit(opt$in_maj_labels, ",", fixed=TRUE))
tmp_data <- data[,in_maj_labels[1]]
data[in_maj_labels[1]] <- NULL
if(length(in_maj_labels) > 1){
for (cnt in 2:length(in_maj_labels)){
tmp_data <- paste(tmp_data, data[,in_maj_labels[cnt]], sep="/")
data[in_maj_labels[cnt]] <- NULL
}
opt$in_maj_labels <- "MERGE"
}
# Make sure Major label column is after data
data <- data.frame(data, tmp_data)
colnames(data)[ncol(data)] <- opt$in_maj_labels
# Convert to factor, in case there is a Major label with just numbers
data[,opt$in_maj_labels] <- factor(data[,opt$in_maj_labels])
# If label was in input file, decrease number of data columns
if(is.null(opt$annot) || !opt$in_maj_labels %in% colnames(annot))
n = n - 1
# Get mean value for Major label
data_mean <- ddply(data, opt$in_maj_labels, function(x){colMeans(x[, 1:n], na.rm=TRUE)})
colors <- as.character(opt$in_maj_labels)
}
# If color variable provided, override previous definitions
# (an explicit --in_colors column takes precedence over the major label)
if (!is.null(opt$in_colors))
colors <- as.character(opt$in_colors)
### Plot
# Open a multi-page PDF device; the loop below draws one page per pair of
# MDS dimensions. Page size scales with --plot_size (8x6 inches at size 1).
pdf(opt$out_file, width=opt$plot_size*8, height=opt$plot_size*6)
for(i in 1:(n-1)){
for(j in (i+1):n){
plot <- ggplot(data, aes_string(x=colnames(data)[i], y=colnames(data)[j], colour=colors))
#plot <- ggplot(data[-which(data$NPA_Group %in% c("Non NPA Breed","Feral")),], aes_string(x=colnames(data)[i], y=colnames(data)[j], colour=colors))
#stat_ellipse(type = "norm", linetype = 2)
#plot <- ggplot(aes(text=paste("Sample: ", opt$annot)))
plot <- plot +
labs(x = "Dimension 1", y = "Dimension 2", color = "NPA's Groups") +
theme_bw() +
#geom_hline(aes(yintercept=0), colour="black", size=0.2) +
#geom_vline(aes(xintercept=0), colour="black", size=0.2) +
theme(axis.title.x = element_text(size = 16, color="#000000", face="bold", margin = margin(t = 20, r = 0, b = 0, l = 0)),
axis.title.y = element_text(size = 16, color="#000000", face="bold", margin = margin(t = 0, r = 20, b = 0, l = 0))) +
theme(legend.title=element_text(size=10, face="bold")) +
theme(legend.text=element_text(size=8)) +
theme(panel.background = element_rect(fill = '#F3F3F3')) +
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(),
plot.title=element_text(size=10)) +
theme(axis.line = element_line(colour = "#000000", size = 0.3)) +
theme(panel.border = element_blank()) +
guides(colour=guide_legend(override.aes=list(alpha=1, size=3), ncol=1)) +
coord_cartesian(xlim=opt$plot_x_limits, ylim=opt$plot_y_limits) +
#theme(legend.background = element_rect(fill="#F3F3F3")) +
theme(axis.text.x = element_text(color="#000000", size=7),
axis.text.y = element_text(color="#000000", size=7)) +
theme(axis.ticks.x = element_line(color="#000000", size=0.3), axis.ticks.y = element_line(color="#000000", size=0.3))
# Colors
# if(!is.null(opt$in_colors)){
# plot <- plot + scale_colour_manual(values=data[,colors])
# }else{
if(nrow(data) < 8){
plot <- plot + scale_colour_brewer(palette="Set1")
}else{
plot <- plot + scale_colour_discrete()
}
# }
# Minor labels
if(is.null(opt$in_min_labels)){
plot <- plot + geom_point(alpha=0.7, size=1) +
theme(legend.position="right",
legend.key = element_rect(fill=NA),
legend.title=element_blank())
}else{
plot <- plot + geom_text(aes_string(label=opt$in_min_labels), alpha=0.1, size=1.5)
}
# Major labels
if(!is.null(opt$in_maj_labels))
if(!is.null(opt$in_colors)){
#plot <- plot + geom_text(data=data_mean, aes_string(label=opt$in_maj_labels), size=3, colour="black", show.legend=FALSE)
}else{
#plot <- plot + geom_text(data=data_mean, aes_string(label=opt$in_maj_labels, color=colors), size=3, show.legend=FALSE)
}
print(plot)
}
}
x <- dev.off()
#######################################
# Ordered Breeds factor whose level order drives the legend layout: group
# headers ("Form", "Wattle", ...) are followed by their breeds, with blank
# spacer levels between groups.
# FIX: the spacer level " " was repeated nine times; factor() rejects
# duplicated levels (error in R >= 3.4). Each spacer is now a unique
# run of spaces, which still renders as a blank legend entry.
# FIX: ordered=T -> ordered=TRUE (T is a reassignable variable, not a keyword).
data$Breeds <- factor(data$Breeds, ordered=TRUE, levels=c("Form", "American Giant Homer","American Show Racer","Carneau","Egyptian Swift","King","Lahore","Maltese","Polish Lynx","Racing Homer", "Runt","Show Type Homer", " ",
                      "Wattle", "Barb","Dragoon","English Carrier","Scandaroon","Spanish Barb", "  ",
                      "Croppers & Pouters", "English Pouter","Holle Cropper", "Horseman Pouter","Marchenero Pouter","Pomeranian Pouter", "Pygmy Pouter", "Saxon Pouter","Voorburg Shield Cropper", "   ",
                      "Color", "Archangel","Ice Pigeon","Saxon Monk","Starling","Thuringer Clean Leg", "    ",
                      "Owls & Frills", "African Owl","Italian Owl", "Old German Owl","Oriental Frill", "     ",
                      "Tumblers, Rollers & Flyers", "American Flying Tumbler", "Ancient Tumbler", "Berlin Long-faced Tumbler","Budapest Tumbler","Catalonian Tumbler", "Cumulet", "Danish Tumbler", "English Long-faced Tumbler", "Helmet Medium-faced Crested", "Mookee", "Oriental Roller",
                      "Parlor Roller", "Portuguese Tumbler", "Temescheburger Schecken", "West of England Tumbler", "      ",
                      "Trumpeter", "Altenburg Trumpeter", "English Trumpeter", "Laugher", "       ",
                      "Structure", "Chinese Owl", "Fantail", "Frillback", "Indian Fantail", "Jacobin", "Old Dutch Capuchine", "Schmalkaldener Mohrenkopf", "        ",
                      "Syrian", "Lebanon", "Shakhsharli", "Syrian Dewlap", "         ",
                      "Non-NPA Breeds & Ferals", "Backa Tumbler", "Birmingham Roller", "California Color Pigeon", "Iranian Tumbler", "Mindian Fantail", "Saxon Fairy Swallow", "Ferals"))
# Final publication figure: PCA dimensions 2 vs 3, coloured by breed, point
# shapes taken directly from data$Shape (set outside aes(), so no shape legend).
# NOTE(review): scale_fill_manual appears to have no effect here -- no fill
# aesthetic is mapped; confirm before removing. aes_string() is deprecated in
# current ggplot2 but kept for compatibility with the rest of this script.
ggplot(data, aes_string(x="D2_4.76950849126929", y="D3_4.29958588574251", colour="Breeds")) + geom_point(alpha = 1, size = 2.2, shape = data$Shape) +
  # Grey (#F3F3F3) entries correspond to the header/spacer levels above.
  scale_fill_manual(values=c("#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#b15928", #11
                             "#F3F3F3", "#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", #5
                             "#F3F3F3", "#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", #8
                             "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", #5
                             "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", #4
                             "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", #15
                             "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", #3
                             "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", #7
                             "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", #3
                             "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f"), drop=FALSE) +
  scale_colour_manual(values=c("#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#b15928", #11
                               "#F3F3F3", "#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", #5
                               "#F3F3F3", "#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", #8
                               "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", #5
                               "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", #4
                               "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", #15
                               "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", #3
                               "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", #7
                               "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", #3
                               "#F3F3F3","#F3F3F3", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f"), drop=FALSE) +
  scale_x_continuous("Dimension 2 (4.76%)",
                     breaks = c(-0.025, 0, 0.025, 0.050, 0.075, 0.1),
                     labels = c("-0.025", "0", "0.025", "0.050", "0.075", "0.1"),
                     expand = c(0,0),
                     limits = c(-0.075, 0.115)) +
  scale_y_continuous("Dimension 3 (4.29%)",
                     breaks = c(-0.050, -0.025, 0, 0.025, 0.050, 0.075, 0.1),
                     expand = c(0,0),
                     labels = c("-0.050", "-0.025", "0", "0.025", "0.050", "0.075", "0.1"),
                     limits = c(-0.085, 0.077)) +
  theme(legend.key = element_blank()) +
  theme(legend.title=element_blank()) +
  theme(axis.title.x = element_text(size = 20, color="#000000", face="bold", margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 20, color="#000000", face="bold", margin = margin(t = 0, r = 20, b = 0, l = 0))) +
  theme(legend.text=element_text(size=11)) +
  theme(panel.background = element_rect(fill = '#FAFAFA')) +
  theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank()) +
  theme(axis.line = element_line(colour = "#000000", size = 0.3)) +
  theme(panel.border = element_blank()) +
  # Legend override: one shape per level (NA for header/spacer rows), four columns.
  guides(colour=guide_legend(override.aes=list(alpha=1, size=3, shape = c(NA, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, NA, NA, 4, 4, 4, 4, 4, NA, NA, 8, 8, 8, 8, 8, 8, 8, 8, NA, NA, 1, 1, 1, 1, 1, NA, NA, 10, 10, 10, 10, NA, NA, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 12, 12, 12, 12, 12, NA, NA, 14, 14, 14, NA, NA, 5, 5,
                                                                          5, 5, 5, 5, 5, NA, NA, 2, 2, 2, NA, NA, 20, 20, 20, 20, 20, 20, 20)), ncol=4)) +
  coord_cartesian(xlim=opt$plot_x_limits, ylim=opt$plot_y_limits) +
  theme(legend.background = element_rect(fill="#FAFAFA", colour = "#000000", size = 0.3)) +
  theme(axis.text.x = element_text(color="#000000", size=11),
        # angle=270, vjust=0.5, hjust=1
        axis.text.y = element_text(color="#000000", size=12)) +
  theme(axis.ticks.x = element_line(color="#000000", size=0.3), axis.ticks.y = element_line(color="#000000", size=0.3)) +
  theme(legend.position=c(.7425, 0.1775))
# Write the last plot to an EPS file for the article.
ggsave("PBGP--GoodSamples_WithWGSs_NoCrupestris_SNPCalling--Article--Ultra_D2-D3.eps", height=25, width=28, scale=0.65, dpi=1000)
|
831f733db684870e5a111339ec30d7bb4873f0df | 46cc65e160ccf5e0189f893127f0229807842f96 | /adhoc/map-test.R | fed2924dcbdf88688984363187210b08cd4f3c59 | [] | no_license | minghao2016/filearray | dfc3dfc97724ae1285e9cb4a7b28ee2ccc0194e7 | 9f49882c508b83c6b95f3e71513472bb88f824aa | refs/heads/main | 2023-08-04T03:36:40.130475 | 2021-09-20T10:07:32 | 2021-09-20T10:07:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,603 | r | map-test.R | require(filearray)
# Ad-hoc benchmark / consistency checks for filearray's fmap() and
# fmap_element_wise() against plain in-memory arithmetic.
# NOTE(review): depends on the non-base packages `filearray` and `dipsaus`.
dim <- c(100,100,10,40)
set.seed(1);
tmp <- seq_len(1e5)
# Two on-disk arrays with identical content: each 100x100x10 slab holds 1..1e5.
xs <- lapply(1:2, function(i){
file <- tempfile(); unlink(file, recursive = TRUE)
x <- filearray_create(file, dim, type = 'double')
# NOTE(review): this loop index `i` shadows the lapply argument `i`
# (harmless here because the outer `i` is otherwise unused).
for(i in 1:40){
x[,,,i] <- tmp
}
x
})
set.seed(2); file <- tempfile(); unlink(file, recursive = TRUE)
# Output array for the collapsed results.
# NOTE(review): y's dimensions (100,10,40) differ from xs' (100,100,10,40);
# the later `y[] <- xs[[1]][] + xs[[2]][]` relies on filearray's assignment
# semantics for mismatched lengths -- verify.
y <- filearray_create(file, c(100,10,40), type = 'double')
y$initialize_partition()
# Reference timing: built-in collapse (sum) of one input array.
system.time({
z <- xs[[1]]$collapse(keep = c(2,3,4), method = "sum")
})
# fmap: block-wise map over both arrays, collapsing each block and adding
# the extra argument `a` before writing into y.
system.time({
fmap(xs, function(x, a){
# a <<- c(a, Sys.time() - now)
z <- x[[1]] + x[[2]]
dim(z) <- c(100,100,10)
z <- dipsaus::collapse(z, keep = c(2,3))
as.list(z + a)
}, .y = y, .input_size = 100000, .output_size = 1000,
a = 2
)
})
# Compare fmap's output to twice the reference collapse (a constant offset
# from `a` is expected).
range(y[] - z*2)
# aa <- c()
# Timing of the direct in-memory sum for comparison.
system.time({
now <- Sys.time()
# aa <<- c(aa, Sys.time() - now)
y[] <- xs[[1]][] + xs[[2]][]
})
# filearray:::setThreads(1)
# Buffer-size / thread knobs (the second set_buffer_size call wins).
filearray:::set_buffer_size(2097152)
filearray:::set_buffer_size(8000000)
filearray:::get_buffer_size()
filearray_threads(8)
# Collector environment for per-block timings reported by `profile`.
env <- new.env(parent = emptyenv())
env$a <- matrix(NA_real_, nrow = 4, ncol = ceiling(length(y) / filearray:::get_buffer_size() * 8))
env$count = 1;
system.time({
now <- Sys.time()
fmap_element_wise(xs, function(input) {
input[[1]] + input[[2]]
}, y
, profile = function(){
env$a[[env$count]] <- Sys.time() - now
now <<- Sys.time()
env$count <- env$count + 1
}
)
})
# Summaries of the collected per-block timings.
b <- t(env$a)
colSums(b, na.rm = TRUE)
summary(b)
# Consistency check of the element-wise map against the direct sum (expected ~0).
range(y[] - xs[[1]][] - xs[[2]][])
|
41ecb8a761d07193b1873d4ad392c5760ea6ddeb | 025cd9c77cbefc7367df61ab8b45ff3259902815 | /R/display_results.R | 21be8d509acdfab6468cab683ad9a1ea8faa5395 | [] | no_license | yspreen/NbClust | 9b59eb928f4d8aaba9c24a67f387c2aa7496eec8 | e82043569b5cf104382125cb1bcd1515a36d49d5 | refs/heads/master | 2020-12-29T08:59:10.611521 | 2020-02-05T22:58:37 | 2020-02-05T22:58:37 | 238,546,165 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,881 | r | display_results.R | ######################################################################################################################
######################## Displaying results #########################################
######################################################################################################################
# Assemble and display the "best number of clusters" summary for NbClust-style
# cluster-validity indices.
#
# Arguments (as used in the body):
#   res          matrix of index values per candidate k (columns = indices)
#   nc           candidate cluster counts (not referenced directly below)
#   indice       integer code selecting which index (1-30) or 31/32 = all
#   resCritical  matrix of critical values (Duda / PseudoT2 / Beale / Gap)
#   min.nc, max.nc  range of candidate cluster counts
#   method       clustering-method code; 1-7 and 9 are hierarchical
#   hc           hclust object, cut with cutree() for the best partition
#   bestnc       chosen number of clusters for single-index calls
#   game         data matrix, used when partitioning via kmeans
#
# Returns a list with some of: All.index, All.CriticalValues, Best.nc,
# Best.partition, depending on `indice`.
#
# NOTE(review): the body reads many free variables (nc.KL, indice.KL, ...,
# nc.SDbw, indice.SDbw) that are not parameters; they must already exist in
# the environment where this function is defined. Also, `any()` wrapped
# around comparisons on the scalar `indice` is redundant throughout.
display_results = function(res, nc, indice, resCritical, min.nc, max.nc, method, hc, bestnc, game) {
# Single-index call: keep only that index's column (and, for Duda/PseudoT2/
# Beale/Gap, the matching critical-value column).
if (indice < 31)
{
res = res[,c(indice)]
if (indice == 14) { resCritical = resCritical[,1] }
if (indice == 15) { resCritical = resCritical[,2] }
if (indice == 16) { resCritical = resCritical[,3] }
if (indice == 20) { resCritical = resCritical[,4] }
}
# indice == 31: "all indices" variant without Gap/Gamma/Gplus/Tau columns.
if (indice == 31)
{
res = res[,c(1:19,21:22,26:30)]
resCritical = resCritical[,c(1:3)]
}
# Build the 2 x k summary matrix: row 1 = best k per index, row 2 = index value.
# The 30-column layout (incl. Gap, Gamma, Gplus, Tau) is used for indices
# 20/23/24/25 and the full run 32; otherwise the 26-column layout.
if (any(indice == 20) || (indice == 23) || (indice == 24) || (indice == 25) || (indice == 32))
{
results = c(nc.KL, indice.KL, nc.CH, indice.CH, nc.Hartigan, indice.Hartigan, nc.CCC, indice.CCC, nc.Scott, indice.Scott,
nc.Marriot, indice.Marriot, nc.TrCovW, indice.TrCovW, nc.TraceW, indice.TraceW, nc.Friedman,
indice.Friedman, nc.Rubin, indice.Rubin, nc.cindex, indice.cindex, nc.DB, indice.DB, nc.Silhouette,
indice.Silhouette, nc.Duda, indice.Duda, nc.Pseudo, indice.Pseudo, nc.Beale, indice.Beale, nc.Ratkowsky,
indice.Ratkowsky, nc.Ball, indice.Ball, nc.ptbiserial, indice.ptbiserial, nc.Gap, indice.Gap,
nc.Frey, indice.Frey, nc.McClain, indice.McClain, nc.Gamma, indice.Gamma, nc.Gplus, indice.Gplus,
nc.Tau, indice.Tau, nc.Dunn, indice.Dunn, nc.Hubert, indice.Hubert, nc.sdindex, indice.sdindex, nc.Dindex, indice.Dindex, nc.SDbw, indice.SDbw)
results1 = matrix(c(results),nrow=2,ncol=30)
resultats = matrix(c(results),nrow=2,ncol=30,dimnames = list(c("Number_clusters","Value_Index"),
c("KL","CH","Hartigan","CCC", "Scott", "Marriot", "TrCovW",
"TraceW", "Friedman", "Rubin", "Cindex", "DB", "Silhouette",
"Duda","PseudoT2", "Beale", "Ratkowsky", "Ball", "PtBiserial",
"Gap", "Frey", "McClain", "Gamma", "Gplus", "Tau", "Dunn",
"Hubert", "SDindex", "Dindex", "SDbw")))
}
else
{
results = c(nc.KL, indice.KL, nc.CH, indice.CH, nc.Hartigan, indice.Hartigan, nc.CCC, indice.CCC, nc.Scott, indice.Scott,
nc.Marriot, indice.Marriot, nc.TrCovW, indice.TrCovW, nc.TraceW, indice.TraceW, nc.Friedman, indice.Friedman,
nc.Rubin, indice.Rubin, nc.cindex, indice.cindex, nc.DB, indice.DB, nc.Silhouette, indice.Silhouette,
nc.Duda, indice.Duda, nc.Pseudo, indice.Pseudo, nc.Beale, indice.Beale, nc.Ratkowsky, indice.Ratkowsky,
nc.Ball, indice.Ball, nc.ptbiserial, indice.ptbiserial, nc.Frey, indice.Frey, nc.McClain, indice.McClain,
nc.Dunn, indice.Dunn, nc.Hubert, indice.Hubert, nc.sdindex, indice.sdindex, nc.Dindex, indice.Dindex, nc.SDbw, indice.SDbw
)
results1 = matrix(c(results),nrow=2,ncol=26)
resultats = matrix(c(results),nrow=2,ncol=26,dimnames = list(c("Number_clusters","Value_Index"),
c("KL","CH","Hartigan","CCC", "Scott", "Marriot", "TrCovW",
"TraceW", "Friedman", "Rubin", "Cindex", "DB", "Silhouette",
"Duda","PseudoT2", "Beale", "Ratkowsky", "Ball", "PtBiserial",
"Frey", "McClain", "Dunn", "Hubert", "SDindex", "Dindex", "SDbw")))
}
# For a single index, reduce the summary matrix to that index's column.
# Indices 21+ are shifted because Gap (and later Gamma/Gplus/Tau) columns are
# absent from the 26-column layout.
if (any(indice <= 20)||(indice == 23)||(indice == 24)||(indice == 25))
{
resultats = resultats[,c(indice)]
}
if (any(indice == 21)|| (indice == 22))
{
indice3 = indice-1
resultats = resultats[,c(indice3)]
}
if (any(indice == 26) || (indice == 27) || (indice == 28) || ( indice == 29)|| ( indice == 30))
{
indice4 = indice-4
resultats = resultats[,c(indice4)]
}
resultats = round(resultats, digits=4)
res = round(res, digits=4)
resCritical = round(resCritical, digits=4)
# if (numberObsAfter != numberObsBefore)
# {
# cat(paste(numberObsAfter,"observations were used out of", numberObsBefore ,"possible observations due to missing values."))
# }
# if (numberObsAfter == numberObsBefore)
# {
# cat(paste("All", numberObsAfter,"observations were used.", "\n", "\n"))
# }
######################## Summary results #####################################
# "All indices" runs: print how many indices voted for each k and pick the
# majority-rule winner j, then compute the corresponding best partition.
if(any(indice == 31) || (indice == 32))
{
cat("*******************************************************************", "\n")
cat("* Among all indices: ", "\n")
BestCluster = results1[1,]
c=0
for(i in min.nc:max.nc)
{
vect = which(BestCluster==i)
if(length(vect)>0)
cat("*",length(vect), "proposed", i,"as the best number of clusters", "\n")
if(c<length(vect))
{
j=i
c = length(vect)
}
}
cat("\n"," ***** Conclusion ***** ", "\n", "\n")
cat("* According to the majority rule, the best number of clusters is ",j , "\n", "\n", "\n")
cat("*******************************************************************", "\n")
########################## The Best partition ###################
# Hierarchical methods (codes 1-7, 9) cut the dendrogram; otherwise k-means.
if (any(method == 1) || (method == 2) || (method == 3) || (method == 4) ||
(method == 5) || (method == 6) || (method == 7)||(method == 9))
partition = cutree(hc, k=j)
else
{
set.seed(1)
partition = kmeans(game,j)$cluster
}
}
# Single-index runs (except Hubert=27 and Dindex=29, which have no "best k"):
# partition at the caller-supplied bestnc.
if (any(indice==1)||(indice==2)||(indice==3)||(indice==4)||(indice==5)||(indice==6)||(indice==7)
||(indice==8)||(indice==9)||(indice==10)||(indice==11)||(indice==12)||(indice==13)||(indice==14)
||(indice==15)||(indice==16)||(indice==17)||(indice==18)||(indice==19)||(indice==20)
||(indice==21)||(indice==22)||(indice==23)||(indice==24)||(indice==25)||(indice==26)
||(indice==28)||(indice==30))
{
if (any(method == 1) || (method == 2) || (method == 3) || (method == 4) ||
(method == 5) || (method == 6) || (method == 7) || (method == 9))
partition = cutree(hc, k=bestnc)
else
{
set.seed(1)
partition = kmeans(game,bestnc)$cluster
}
}
######################### Summary results ############################
# Shape of the returned list depends on which index was requested.
if ((indice == 14)|| (indice == 15)|| (indice == 16)|| (indice == 20)|| (indice == 31)|| (indice == 32))
{
results.final = list(All.index=res,All.CriticalValues=resCritical,Best.nc=resultats, Best.partition=partition)
}
if ((indice == 27)|| (indice == 29))
results.final = list(All.index=res)
if (any(indice==1)||(indice==2)||(indice==3)||(indice==4)||(indice==5)||(indice==6)||(indice==7)
||(indice==8)||(indice==9)||(indice==10)||(indice==11)||(indice==12)||(indice==13)
||(indice==17)||(indice==18)||(indice==19)||(indice==21)||(indice==22)||(indice==23)||(indice==24)
||(indice==25)||(indice==26)||(indice==28)||(indice==30))
results.final = list(All.index=res,Best.nc=resultats, Best.partition=partition)
return(results.final)
}
7ff6b177188920c2b330ac6165d5af72b23d0a89 | 8639c881573e6892d8d64f6d734a2d81d8d72727 | /estimating_beta.R | efe5648c448d5ba9c5408d136f36835ca0f463ce | [] | no_license | thies/rfirststeps | 7f5fca4111b378356885c81fd7a186cf570a65d2 | c0a52e5e9f994e44ac39cd245a160a978fd34212 | refs/heads/master | 2022-03-03T09:17:41.028200 | 2019-10-04T11:46:15 | 2019-10-04T11:46:15 | 111,908,898 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,911 | r | estimating_beta.R | # download data from Dropbox
# (I obtained return indices from Datastream, put them on Dropbox for exercise)
# Download monthly / weekly / quarterly price-index CSVs; as.is=TRUE keeps
# strings as characters.
monthly <- read.csv("https://www.dropbox.com/s/y632b0nyu8bhbts/returns%20monthly.csv?dl=1", as.is=TRUE)
weekly <- read.csv("https://www.dropbox.com/s/uvp2bmlzpuogxve/returns%20weekly.csv?dl=1", as.is=TRUE)
quarterly <- read.csv("https://www.dropbox.com/s/riwlfc3tmkvzzbe/returns%20quarterly.csv?dl=1", as.is=TRUE)
# have a first look at data
head(monthly)
# convert date from string to proper dates
# (the quarterly data's date column is left untouched here)
monthly$date <- as.Date(monthly$date)
weekly$date <- as.Date(weekly$date)
# sumstats
summary(monthly)
summary(weekly)
# Plot line graph
plot(monthly$date, monthly$sp500, type="l")
plot(weekly$date, weekly$sp500, type="l", col="red")
# calculate monthly returns
# (take difference of logs)
# We really should work with excess returns,
# but I will skip this step in this example.
mret_sp500 <- diff(ts(log(monthly$sp500)))
mret_msft <- diff(ts(log(monthly$msft)))
plot(mret_sp500, mret_msft, col="red")
# estimate linear model, monthly
# (market-model regression of MSFT returns on S&P 500 returns; the slope
# is the monthly beta)
monthly_reg_msft <- lm(mret_msft~mret_sp500)
# have a look at coefficients of that model
summary(monthly_reg_msft)
# calculate weekly returns
# (take difference of logs)
wret_sp500 <- diff(ts(log(weekly$sp500)))
wret_msft <- diff(ts(log(weekly$msft)))
# estimate linear model, weekly
weekly_reg_msft <- lm(wret_msft~wret_sp500)
# have a look at coefficients of that model
summary(weekly_reg_msft)
# calculate quarterly returns
# (take difference of logs)
qret_sp500 <- diff(ts(log(quarterly$sp500)))
qret_msft <- diff(ts(log(quarterly$msft)))
# estimate linear model, quarterly
quarterly_reg_msft <- lm(qret_msft~qret_sp500)
# have a look at coefficients of that model
summary(quarterly_reg_msft)
# way nicer tables can be seen with stargazer library
library(stargazer)
stargazer(quarterly_reg_msft, monthly_reg_msft, weekly_reg_msft, type="text")
|
d5fbabfc62a82d5a5c1b685fd75bca715c38ea06 | fda540791ba58168598b8320571356a565f9faf1 | /libs/1_add_vars_sample.R | fd600d5c9652ee4f44d69b1a57aba7b865699a66 | [] | no_license | monzalo14/conciliacion | 5c3e1272090d3575552ab9b58b5b514ab9cfe58f | 5e4670ec32026a85f5bedd0f01decee1cec01394 | refs/heads/master | 2021-01-12T08:58:27.217523 | 2017-05-04T07:16:47 | 2017-05-04T07:16:47 | 76,738,998 | 1 | 3 | null | 2017-02-15T18:40:55 | 2016-12-17T18:05:28 | R | UTF-8 | R | false | false | 3,879 | r | 1_add_vars_sample.R | library(dplyr)
# Reshaping / dummy-coding / inference packages used later in the script.
library(reshape2)
library(dummies)
library(lmtest)
library(gmodels)
library(sandwich)
# Case-level sample produced by an upstream cleaning step.
base <- readRDS("../clean_data/sample.RDS")
# Lookup: canonical company name -> vector of (regex) patterns that identify
# it in the raw defendant-name columns. The special key 'QUITAR' ("remove")
# marks names that limpia_razones() later sets to NA.
razones <- list(
'WALMART' = c('WALMART', 'WAL MART'),
'COMERCIAL MEXICANA' = c('COMER', 'COMERCIAL MEXICANA',
'SUMESA', 'FRESKO'),
'ELEKTRA' = c('ELMEX', 'ELEKTRA', 'TEZONTLE'),
'SANBORNS' = c('SANBORN', 'SANBORNS'),
'MANPOWER' = c('MAN POWER', 'MANPOWER'),
'WINGS' = 'WINGS',
'VIPS'= 'VIPS',
'SUBURBIA' ='SUBURBIA',
'PALACIO DE HIERRO' = 'PALACIO DE HIERRO',
'CHEDRAUI' = 'CHEDRAUI',
'ATENTO' = 'ATENTO',
'OXXO' = 'OXXO',
# Government bodies (to be removed)
'QUITAR' = c('IMSS',
'INFONAVIT',
'INSTITUTO MEXICANO DEL SEGURO SOCIAL',
'INSTITUTO NACIONAL DEL FONDO PARA LA VIVIENDA',
'SHCP', 'SECRETARIA DE HACIENDA',
'GORDILLO',
'SNTE', 'SINDICATO NACIONAL DE TRABAJADORES DE LA EDUCACION',
'GOBIERNO DEL', 'DELEGACION POLITICA',
'CONSAR',
# Natural persons (to be removed)
'QUIEN', 'RESULTE',
'AGUIRRE', 'KUTZ', 'ISAIAS', 'ESCARPITA')
)
# Funciones auxiliares para juntar razones sociales de empresas
# Replace every element of `variable` that matches the regular expression
# `expresion` with the canonical name `nombre`; all other elements pass
# through unchanged.
single_match <- function(variable, expresion, nombre) {
  hits <- grepl(expresion, variable)
  replace(variable, hits, nombre)
}
# Apply single_match() once per pattern in `elemento_lista`, folding the
# partially-cleaned vector forward; every pattern maps to the same `nombre`.
element_match <- function(elemento_lista, nombre, variable) {
  Reduce(
    function(acumulado, expresion) single_match(acumulado, expresion, nombre),
    elemento_lista,
    init = variable
  )
}
# Canonicalise a vector of raw company names using the `lista` lookup
# (canonical name -> patterns). Entries collapsed to the sentinel 'QUITAR'
# are set to NA; the result is always returned as character.
limpia_razones <- function(lista, variable) {
  nombres <- names(lista)
  for (idx in seq_along(lista)) {
    variable <- element_match(lista[[idx]], nombres[idx], variable)
  }
  variable <- replace(variable, !is.na(variable) & variable == "QUITAR", NA)
  as.character(variable)
}
# Clean the defendant-name columns (all 'nombre_d*' except nombre_despido)
# by collapsing known aliases via the `razones` lookup defined above.
# FIX: limpia_razones takes (lista, variable); passing it bare to mutate_at
# bound the column to `lista` and left `variable` missing, which errors at
# run time -- it must be wrapped so the lookup is supplied explicitly.
base <- base %>%
  mutate_at(vars(starts_with('nombre_d'), -nombre_despido),
            funs(limpia_razones(razones, .)))
# Loads objects from the RData file -- presumably including `top_dems`, the
# vector read by top_dem() below; verify the file's contents.
load('../data/top_demandados.RData')
# TRUE for each element of `x` found in the global `top_dems` vector
# (loaded from ../data/top_demandados.RData just above).
top_dem <- function(x) {
  is.element(x, top_dems)
}
# Flag each defendant-name column as top-defendant or not, then build a
# single indicator: top_demandado = 1 if ANY defendant is in top_dems.
base <- base %>%
mutate_at(vars(starts_with('nombre_d'), -nombre_despido), top_dem) %>%
mutate(top_demandado = select(., starts_with('nombre_d'), -nombre_despido) %>% rowSums())
base$top_demandado <- as.numeric(base$top_demandado>0)
# Dummy for tenure above 15 years.
# NOTE(review): `base_exp` is never created in this script (only `base` is);
# confirm where it comes from -- this looks like a possible bug.
base_exp$prima_antig <- as.numeric(as.character(base_exp$prima_antig))
base_exp$antig_15 <- ifelse(base_exp$c_antiguedad>15, 1, 0)
###########################################################################################
# Winsorize at the 99th percentile: values above the empirical 99th
# percentile (type-1 / inverse-ECDF quantile, NAs ignored) are capped at
# that cutoff. Length and NA positions of `x` are preserved.
# FIX: na.rm=T -> na.rm=TRUE (T is a reassignable binding, not a keyword);
# the NA guard in the subscript makes the NA pass-through explicit.
trunca99 <- function(x) {
  cuantil99 <- quantile(x, 0.99, na.rm = TRUE, type = 1)
  x[!is.na(x) & x > cuantil99] <- cuantil99
  x
}
# Clamp strictly negative values to zero; NAs pass through untouched
# (which() drops the NA positions from the replacement index).
quita_negativos <- function(x) {
  replace(x, which(x < 0), 0)
}
# Winsorize the monetary/tenure columns at the 99th percentile WITHIN each
# modo_termino group, drop the grouping via data.frame(), expand `junta`
# into dummy columns, then clamp remaining negative c_* values to zero.
# NOTE(review): mutate_each()/funs() are long-deprecated dplyr verbs;
# consider across() if this script is modernised.
df_exp <- group_by(base_exp, modo_termino) %>%
mutate_each(funs(trunca99), liq_total, liq_total_tope, starts_with("c_")) %>%
data.frame(.) %>%
dummy.data.frame(names=c("junta")) %>%
mutate_each(., funs(quita_negativos), starts_with("c_"))
# Columns that receive the +1-then-log transform below.
logs <- c("c_antiguedad", "c_indem")
# Shift values up by one; applied to the `logs` columns just below so the
# subsequent log() never sees a zero.
suma <- function(x) {
  x + 1
}
# Apply the +1 shift to the `logs` columns, then add their natural-log
# transforms as new ln_* columns.
df_exp2 <- mutate_each(df_exp, funs(suma), one_of(logs)) %>%
mutate(., ln_c_antiguedad = log(c_antiguedad),
ln_c_indem = log(c_indem))
# Modificación 25/01/2017: para utilizarlos en el modelo de laudo, se agrupan a un dígito los giros de empresa
# 3, 4, 5, 6, 7, 8
# df_exp2$giro_3 <- df_exp2$giro_empresa31 + df_exp2$giro_empresa32 + df_exp2$giro_empresa33
# df_exp2$giro_3[df_exp2$giro_3>0] <- 1
# df_exp2$giro_4 <- df_exp2$giro_empresa43 + df_exp2$giro_empresa46 + df_exp2$giro_empresa48 +df_exp2$giro_empresa49
# df_exp2$giro_4[df_exp2$giro_4>0] <- 1
# df_exp2$giro_5 <- df_exp2$giro_empresa51 + df_exp2$giro_empresa52 + df_exp2$giro_empresa53 +
# df_exp2$giro_empresa54 + df_exp2$giro_empresa55 + df_exp2$giro_empresa56
# df_exp2$giro_5[df_exp2$giro_5>0] <- 1
# df_exp2$giro_6 <- df_exp2$giro_empresa61 + df_exp2$giro_empresa62 + df_exp2$giro_empresa64
# df_exp2$giro_6[df_exp2$giro_6>0] <- 1
# df_exp2$giro_7 <- df_exp2$giro_empresa71 + df_exp2$giro_empresa72
# df_exp2$giro_7[df_exp2$giro_7>0] <- 1
# df_exp2$giro_8 <- df_exp2$giro_empresa81
# df_exp2$giro_8[df_exp2$giro_8>0] <- 1
# Persist the augmented sample for downstream modelling scripts.
saveRDS(df_exp2, "../clean_data/sample_added.RDS")
4a4691bad4b71c1247be116b0b7e8d7b518c1246 | 149933aff364f9ce3219bab2da4466af4ee729b7 | /new file.R | ad8c8334edde7f9c8797677c24aef60541c74ec5 | [] | no_license | molaksh/new | 71e3131fb0ba33294104aa0e02d4a4a91cfa4920 | a9d680fc0a27179e412c0b68e048d8a08066e7bc | refs/heads/master | 2021-07-08T08:08:02.071768 | 2017-10-04T21:12:02 | 2017-10-04T21:12:02 | 105,820,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12 | r | new file.R | 1+1
#change |
c392570794c8165f8eacf215173afb647a223c63 | 0329677920e29e68778c623b05bf4ca69e528c41 | /Part 1.2 - test/Interval Estimate/interval estimate.R | 598c6b456ce5eb60cbeaf49a54c7acff37430b7c | [] | no_license | celestialized/Machine-Learning | b2075b83139f66bc31c02c64cfe27dfbf19e4ab6 | df30af31f04d03d9796974daf82373436fb6460e | refs/heads/master | 2021-09-21T16:55:51.432572 | 2018-08-29T20:33:13 | 2018-08-29T20:33:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,078 | r | interval estimate.R | #--------------------------------------------
#Interval estimate
#--------------------------------------------
#Point Estimate of Population Mean
#---------------------------------------------
library(MASS)
# NOTE(review): `Salary_Data` is not created anywhere in this script --
# presumably it is read from a file beforehand; confirm before running.
Salary.survey = Salary_Data$Salary
# Sample mean as point estimate of the population mean (missing values dropped).
mean(Salary.survey , na.rm=TRUE) # skip missing values
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#----------------------------------------------------
#Interval Estimate of Population Mean with Known Variance
#----------------------------------------------------
library(MASS)
# Filter out missing values in survey$Height and keep the valid responses.
height.response = na.omit(survey$Height)
# Standard error of the mean, with the population sd assumed KNOWN.
n = length(height.response)
sigma = 9.48 # population standard deviation
sem = sigma/sqrt(n); sem # standard error of the mean
# At 95% confidence the critical value is the 97.5th normal percentile,
# i.e. z(alpha/2) = qnorm(.975); multiply by sem for the margin of error.
E = qnorm(.975)* sem; E # margin of error
# Confidence interval: sample mean +/- margin of error.
xbar = mean(height.response) # sample mean
xbar + c(-E, E)
#Alternative Solution
# NOTE: consider guarding with requireNamespace() instead of installing
# unconditionally inside a script.
install.packages("TeachingDemos")
library(TeachingDemos) # load TeachingDemos package
# FIX: the z-test must be run on the same data as the manual computation
# above (the student heights), not on Salary.survey.
z.test(height.response, sd=sigma) # One-sample z-test
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Interval Estimate of Population Mean with Unknown Variance
#------------------------------------------------
library(MASS)
# Filter out missing values in survey$Height.
height.response = na.omit(survey$Height)
# Sample standard deviation (population variance unknown -> use t).
n = length(height.response)
s = sd(height.response) # sample standard deviation
SE = s/sqrt(n); SE # standard error estimate
# At 95% confidence the critical value is t(alpha/2) = qt(.975, df = n-1).
E = qt(.975, df=n-1)*SE; E # margin of error
# Confidence interval: sample mean +/- margin of error.
xbar = mean(height.response) # sample mean
xbar + c(-E, E)
#Alternative Solution
#we can apply the t.test function in the built-in stats package.
# FIX: run the t-test on the height data analysed above, not on Salary.survey.
t.test(height.response) # One-sample t-test
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Sampling Size of Population Mean
#-------------------------------------------------------------
# Assume the population standard deviation sigma of the student height in
# survey is 9.48. Find the sample size needed to achieve a 1.2 cm margin
# of error at 95% confidence level.
zstar = qnorm(.975)
sigma = 9.48
E = 1.2
# n >= (z * sigma / E)^2; round the printed value up to the next integer.
zstar^2 * sigma^2/ E^2
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Point Estimate of Population Proportion
#-----------------------------------------------------------
# Filter out missing values in survey$Sex and keep the valid responses.
library(MASS) # load the MASS package
gender.response = na.omit(survey$Sex)
n = length(gender.response) # valid responses count
# Count the female students; the sample proportion pbar = k/n is the point
# estimate of the population proportion.
k = sum(gender.response == "Female")
# FIX: the trailing expression was the typo `pba` (object not found);
# it should print the freshly computed `pbar`.
pbar = k/n; pbar
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Interval Estimate of Population Proportion
#-----------------------------------------------------------
#Compute the margin of error and estimate interval for the female students proportion in survey at 95% confidence level.
# Filter out missing values in survey$Sex.
library(MASS) # load the MASS package
gender.response = na.omit(survey$Sex)
n = length(gender.response) # valid responses count
# Sample proportion of female students.
k = sum(gender.response == "Female")
# FIX: `pba` was a typo (object not found); print `pbar` instead.
pbar = k/n; pbar
# Normal-approximation standard error of the sample proportion.
SE = sqrt(pbar*(1-pbar)/n); SE # standard error
E = qnorm(.975)*SE; E # margin of error
#Combining it with the sample proportion, we obtain the confidence interval.
pbar + c(-E, E)
#Alternative Solution
# FIX (comment): prop.test() applies Yates' continuity correction by default
# (correct = TRUE); pass correct = FALSE to match the manual normal
# approximation above exactly.
prop.test(k, n) # 1-sample proportions test
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Sampling Size of Population Proportion
#--------------------------------------------------
# Conservative planning value p = 0.5 maximises p*(1-p).
zstar = qnorm(.975)
p = 0.5
E = 0.05
# n >= z^2 * p * (1-p) / E^2; round the printed value up to the next integer.
zstar^2 * p * (1-p) / E^2
|
c2b9fbdf55f4dd6f9d4bef80ffbf3087f4b899b2 | e85887c76341d45a3829fc552d6c53536f32b719 | /R/trialr_simulate.R | f2badbf13940b68bb8c1157bf3a8ad9fde720901 | [] | no_license | brockk/trialr | fa8fd43ca43dc79911677ba42c8e50d88a2fc03d | 15fd90d3a779a61454baedcd517e2ce8bb301f92 | refs/heads/master | 2023-03-16T21:41:49.580277 | 2023-03-11T07:36:58 | 2023-03-11T07:36:58 | 69,753,350 | 39 | 13 | null | 2023-09-10T19:11:18 | 2016-10-01T17:39:28 | TeX | UTF-8 | R | false | false | 4,724 | r | trialr_simulate.R |
#' Run a simulation study.
#'
#' This function is a fairly flexible way of running simulation studies in
#' trialr, and beyond. It essentially uses delegates to perform this pattern:
#' \preformatted{
#' for i in 1:N:
#'   data = get_data_func()
#'   fit = fit_model_func(data)
#'   if summarise_func is null:
#'     sims[i] = fit
#'   else
#'     sims[i] = summarise_func(data, fit)
#'   end
#' loop
#' return sims
#' }
#'
#' @param N integer, number of simulated iterations to run.
#' @param get_data_func Function that takes no parameters and returns a sampled
#' dataset to be analysed. I.e. the call signature is f().
#' @param fit_model_func Function that accepts the output of
#' \code{get_data_func} as the sole parameter and fits the model or performs the
#' analysis, returning an object of arbitrary type.
#' @param summarise_func Optional. If provided, this function should accept the
#' outputs of \code{get_data_func} and \code{fit_model_func} as parameters 1 & 2
#' and perform some post-fit processing or simplification. The result of this
#' call is the output from iteration i. If omitted, the fit object from
#' \code{fit_model_func} is simply used as the output from iteration i.
#' @param num_logs Number of log messages to receive about progress. NULL to
#' suppress logging. E.g. if N=100 and num_logs=10, you will get log messages
#' when i=10, 20, 30, etc.
#' @param num_saves Number of intermittent saves to attempt. NULL to
#' suppress saving. E.g. if N=100 and num_saves=10, the save_func delegate will
#' be called after iteration i=10, 20, 30, etc.
#' @param save_func Optional. Function that takes the interim list of simulated
#' objects as the sole parameter and saves them somehow. This, combined with
#' \code{num_saves}, allows periodic saving of in-progress results to avoid
#' complete data loss if the simulation study fails for some reason.
#'
#' @return \code{list} of length \code{N}. The items in the list are as returned
#' by \code{summarise_func} or \code{fit_model_func}.
#' @export
#'
#' @examples
#' get_data_func <- function() {
#'   group_sizes <- rbinom(n = 5, size = 50, prob = c(0.1, 0.3, 0.3, 0.2, 0.1))
#'   group_responses <- rbinom(n = 5, size = group_sizes,
#'                             prob = c(0.2, 0.5, 0.2, 0.2, 0.2))
#'   list(
#'     group_responses = group_responses, group_sizes = group_sizes,
#'     mu_mean = gtools::logit(0.1), mu_sd = 1, tau_alpha = 2, tau_beta = 20
#'   )
#' }
#' fit_model_func <- function(data) {
#'   data <- append(data, list(refresh = 0))
#'   do.call(stan_hierarchical_response_thall, args = data)
#' }
#' summarise_func <- function(data, fit) {
#'   # Probability that estimate response rate exceeds 30%
#'   unname(colMeans(as.data.frame(fit, 'prob_response') > 0.3))
#' }
#' \dontrun{
#' sims <- trialr_simulate(N = 20, get_data_func, fit_model_func, summarise_func)
#' # Posterior probabilities that the response rate in each cohort exceeds 30%:
#' do.call(rbind, sims)
#' # Cohorts are in columns; simulated iterations are in rows.
#' }
trialr_simulate <- function(N,
                            get_data_func,
                            fit_model_func,
                            summarise_func = NULL,
                            num_logs = 10,
                            num_saves = NULL,
                            save_func = NULL) {

  # Check integrity of inputs. The length check comes first and uses
  # short-circuiting ||, so a vector-valued N never reaches the scalar
  # comparisons (which would otherwise raise a condition-length error).
  if (length(N) != 1 || N != floor(N) || N <= 0)
    stop('N should be a single positive integer.')

  # Iterations at which to log progress / save interim results.
  log_at_i <- .checkpoint_iterations(N, num_logs)
  save_at_i <- .checkpoint_iterations(N, num_saves)

  # Preallocate the results list to its final length.
  sims <- vector("list", N)
  for (i in seq_len(N)) {
    # Log if needed
    if (i %in% log_at_i)
      print(paste0('Running iteration ', i, ' - ', Sys.time()))
    # Sample:
    data <- get_data_func()
    # Fit:
    fit <- fit_model_func(data)
    # Stash:
    if (is.null(summarise_func)) {
      sims[[i]] <- fit
    } else {
      sims[[i]] <- summarise_func(data, fit)
    }
    # Save if needed, and possible:
    if (i %in% save_at_i && !is.null(save_func))
      save_func(sims)
  }
  # Return
  sims
}

# Internal helper: the iterations (out of N) at which a periodic action
# (logging or saving) should fire. A NULL, vector-valued or non-positive
# num_actions yields N + 1, i.e. "never".
.checkpoint_iterations <- function(N, num_actions) {
  if (is.null(num_actions) || length(num_actions) != 1 || num_actions <= 0)
    return(N + 1)  # i.e. never
  if (num_actions >= N) {
    seq_len(N)
  } else {
    # : binds tighter than *, so this is (1:num_actions) * floor(N / num_actions)
    1:num_actions * floor(N / num_actions)
  }
}
|
5368219972ed59c358d72643bbaa97fc52386b4f | 5f0a682aac5e53706ba993be79a2fa1493b666f8 | /scripts_for_analysis/international_contribution.r | b449d1463df5725f83ecd3a8cbfc580e8964889f | [] | no_license | adcs-conference/adcs_adulthood | 876503d507f9884a637699a5b8f881bc9169e115 | c1f77ac96e817e6b717794b2a01c3855d945623d | refs/heads/master | 2021-09-19T05:57:32.550448 | 2018-07-24T03:42:01 | 2018-07-24T03:42:01 | 13,317,748 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,015 | r | international_contribution.r | setwd("/Users/bevan/phd/QISharedFolder/ADCS2013Impact/analysis")
# Author counts (Australian / NZ / International) per conference year.
countries <- read.table("international_contribution.txt", header = TRUE)

# # old portion
# pdf("../latex/figures/international_contributions_portion.pdf", width=4, height=2)
# par(mar=c(3,3.5,0,0))
# plot(countries$Year, countries$International, cex=0.8, type="o", xlim=c(1996, 2012), ylim=c(0,0.5), ylab="", xaxt="n", yaxt="n", xlab="")
# #title(ylab="Portion of non-Aus. Authors", cex.lab=0.7)
# mtext(side=2, "Portion of non-Aus. Authors", line=2.5, cex=0.7)
# axis(side=1, at=c(1996:2012), las=2, cex.axis=0.7)
# axis(side=2, at=c(0.0, 0.1,0.2,0.3,0.4,0.5), las=1, cex.axis=0.7, lab=c("0%", "10%","30%","30%","40%","50%"), las=TRUE)
# abline(h=mean(countries$International), col = "black", lty=2)
# dev.off()

# The stacked author-count chart was produced twice with identical code
# (only the page size differed), so the duplicated block is factored into
# one helper that writes a PDF at the given dimensions.
plot_contribution_stack <- function(path, width, height) {
  pdf(path, width = width, height = height)
  par(mar = c(9.15, 4, 0.5, 0))
  barplot(rbind(countries$Int, countries$NZ, countries$Oz), ylim = c(0, 71),
          beside = FALSE, legend = TRUE, las = 3,
          names = paste(countries$Location, " ", countries$Year), pch = 4,
          col = c("darkgray", "white", "lightgrey"), ylab = "Number of authors")
  # Reference lines: mean authors per category across all years
  abline(h = mean(countries$Oz))
  abline(h = mean(countries$Int), lty = 2)
  abline(h = mean(countries$NZ), lty = 3)
  #legend("topleft", legend=c("Aus.", "non-Aus.","Mean Aus.","Mean non-Aus."), horiz=F, border=c("black","black", NA, NA), fill=c("gray", "white", NA, NA), bty="n", cex=0.9, lty=c(NA,NA,1,2))
  legend("topleft", legend = c("Australian", "NZ", "International"),
         fill = c("lightgray", "white", "darkgrey"), bty = "n", cex = 0.85)
  legend("topright", legend = c("Mean Australian", "Mean NZ", "Mean International"),
         lty = c(1, 3, 2), bty = "n", cex = 0.85)
  dev.off()
}

plot_contribution_stack("../latex/figures/international_contributions_exNZ.pdf", 4, 5.83)
plot_contribution_stack("../latex/figures/international_contributions.pdf", 8, 6)

# Total authors per conference year (single-series version for slides)
pdf("../../../papers/adcs2013-impact/presentation/authors_by_year.pdf", width = 8, height = 6)
par(mar = c(9.15, 4, 0.5, 0))
barplot((countries$Int + countries$NZ + countries$Oz), ylim = c(0, 71),
        beside = FALSE, legend = TRUE, las = 3,
        names = paste(countries$Location, " ", countries$Year), pch = 4,
        col = c("white"), ylab = "Number of authors")
abline(h = mean(countries$Oz))
dev.off()

# Summary statistics quoted in the paper
mean(countries$Oz)
mean(countries$NZ + countries$Int)
sd(countries$NZ + countries$Int)
mean(countries$Int)
sd(countries$Int)
|
47c8fd31408cf7d8ba51c464025c21b4dd57124a | c91c755b8a7fa29e55afd9822e138ca2c91c4d1b | /DS_project.R | baf71d648c54b31c4554a345ea1e06ee2a6b378d | [] | no_license | datasciences1/aact | d6017be4b81d6864cac055cf9e1a8a075894b8eb | 8a5c80a20fe68dd6659c8c451e9d85c2874c46b9 | refs/heads/master | 2020-03-06T22:35:03.564257 | 2018-08-03T08:16:28 | 2018-08-03T08:16:28 | 127,106,922 | 0 | 1 | null | null | null | null | WINDOWS-1258 | R | false | false | 15,394 | r | DS_project.R | ########################################################
##
## Parse a website: scrape study details from clinicaltrials.gov
##
########################################################
library(rvest)
# Example study pages:
# https://clinicaltrials.gov/ct2/show?cond=cancer&rank=7
# https://clinicaltrials.gov/ct2/show?rank=1
#########################################################################################
# Retrieve the first page in order to get the number of studies
file <- "https://clinicaltrials.gov/ct2/show?rank=1"
page <- read_html(file)
# Number of pages to retrieve: the results-summary node contains text like
# "... of <N> for ...", so the study count is the substring between the
# literal words 'of' and 'for'.
code <- html_nodes(page,xpath = '//*[@class="results-summary"]')
n <- html_text(code, trim = TRUE)
start <- gregexpr(pattern ='of',n)
end <- gregexpr(pattern ='for',n)
nb <- as.numeric(substr(n,start[[1]][1]+3, end[[1]][1]-1))
# List of websites to scrap
# TODO: loop from 1 to nb and store the results in a table
# (original note, translated from French)
# Sponsor node (id="sponsor") of the current page
code <- html_nodes(page,xpath = '//*[@id="sponsor"]')
sponsor <- html_text(code, trim = TRUE)
# Presumably the study description paragraphs — confirm against the page HTML
code <- html_nodes(page,xpath = '//*[@class="body3 indent2"]')
desc <- html_text(code, trim = TRUE)
# Presumably the study-design table cells — confirm against the page HTML
code <- html_nodes(page,xpath = '//*[@headers="studyInfoColData"]')
design <- html_text(code, trim = TRUE)
########################################################
##
## Parse JSON file: OpenFDA drug adverse-event export
##
########################################################
#install.packages("rjson")
suppressWarnings(library("rjson"))
setwd("D:/4. learning/05. STAT/04. DS-Training/1. Project/")
json_file <- "drug-event-0008-of-0008.json"
# Read the whole file as one string and parse it into nested lists.
json_data <- fromJSON(paste(readLines(json_file), collapse=""))
## Number of event in the file
length(json_data$results)
## Number of drug per event
# for(i in 1:length(json_data$results)) print(json_data$results[[i]]$patient$drug)
# Pre-size one list slot per event for each field extracted below.
product <- rep( list(list()), length(json_data$results) )
reaction <- rep( list(list()), length(json_data$results) )
weight <- rep( list(list()), length(json_data$results) )
sex <- rep( list(list()), length(json_data$results) )
company <- rep( list(list()), length(json_data$results) )
#name <- list()
#subst <- list()
## Number of sponsor per drug per event
# For each event i: one reaction term per reaction entry (weight and sex are
# patient-level, so the same value is repeated for every reaction of the
# event), and one medicinal product name per drug entry.
# NOTE(review): 'company' is allocated above but never filled in this loop.
for(i in 1:length(json_data$results)){
  for (j in 1:length(json_data$results[[i]]$patient$reaction)){
    reaction[[i]][j] <- json_data$results[[i]]$patient$reaction[[j]]$reactionmeddrapt
    weight[[i]][j] <- json_data$results[[i]]$patient$patientweight
    sex[[i]][j] <- json_data$results[[i]]$patient$patientsex
  }
  for (k in 1:length(json_data$results[[i]]$patient$drug)) product[[i]][k] <- json_data$results[[i]]$patient$drug[[k]]$medicinalproduct
}
########################################################
##
## Kaggle AE: AEOLUS adverse-event reference tables
##
########################################################
setwd("E:/4. learning/05. STAT/04. DS-Training/1. Project/adverse-pharmaceuticals-events/aeolus_v1")
# NOTE(review): prefer library() here — require() only warns on failure.
require(data.table)
library(sqldf)
# Tab-separated reference tables from the AEOLUS dump
concept <- fread("concept.tsv")
voca <- fread("vocabulary.tsv")
voca
################################################################################################################################################################################################################################
################################################################################################################################################################################################################################
########################################################
##
## AACT clinical data base query
##
########################################################
#install.packages("RPostgreSQL")
suppressWarnings(library(RPostgreSQL))
drv <- dbDriver('PostgreSQL')
# Connect to the AACT PostgreSQL mirror.
# NOTE(review): credentials are hard-coded — presumably the public aact/aact
# demo account; move to environment variables if that assumption is wrong.
con <- dbConnect(drv, dbname="aact",host="aact-db.ctti-clinicaltrials.org", port=5432, user="aact", password="aact" )
# Sample queries: one study by nct_id, Sanofi-sponsored studies, total study
# count, and the outcome measurements of one study.
aact_sample <- dbGetQuery(con, "select * from studies where nct_id='NCT00920790'")
dbGetQuery(con, "select nct_id, study_type, official_title, overall_status, phase, source, has_dmc from studies where lower(source) like '%sanofi%'")
dbGetQuery(con, "select count(*) from studies")
dbGetQuery(con, "select nct_id, title, units, param_type, param_value, dispersion_type from outcome_measurements where nct_id='NCT00920790'")
#***************************************************************************************************************************************************
#***************************************************************************************************************************************************
#***************************************************************************************************************************************************
#***************************************************************************************************************************************************
# From txt file uploaded: load the AACT pipe-delimited export. Each table
# flagged Status == 'Y' in the summary workbook is read from "<name>.txt",
# keeping only the columns listed in its 'Columns' cell.
setwd('D:/4. learning/05. STAT/04. DS-Training/0. Project/database/20170811_pipe-delimited-export')
#setwd('E:/4. learning/05. STAT/04. DS-Training/0. Project/database/20170811_pipe-delimited-export')
suppressWarnings(library(data.table))
suppressWarnings(library(readxl))
file <- "0.summary.xlsx"
tbl <- read_excel(path = file, col_names = TRUE, na = "", n_max = Inf)
keep <- tbl[which(tbl$Status == 'Y'), ]
# stringsAsFactors = FALSE so the names stay plain character strings
aact <- data.frame(name = keep$Name, column = keep$Columns,
                   stringsAsFactors = FALSE)
for (i in seq_len(nrow(aact))) {
  # FIX: the original built quoted strings and ran them through
  # eval(parse(text = ...)) only to recover the same values; use the
  # strings directly instead.
  tbl_name <- aact$name[i]
  input <- paste0(tbl_name, ".txt")
  # "a, b, c" -> c("a", "b", "c")
  cols <- strsplit(gsub(" ", "", aact$column[i]), ",")[[1]]
  assign(tbl_name, fread(input, header = TRUE, select = cols))
}

#for(i in 1:length(list.files())){
#  assign(substr(list.files()[i],1,nchar(list.files()[i])-4), fread(list.files()[i]))
#}
########################################################
## SQLLite DB creation
########################################################
# creation and connection to the sqlite db
library(sqldf)
db <- dbConnect(SQLite(), dbname="aact.sqlite")
#dbSendQuery(conn = db, "CREATE TABLE baseline_counts(nct_id TEXT, count INTEGER)")
# Copy every in-memory AACT table (loaded above via fread) into the db.
# The 20 copy-pasted dbWriteTable calls are replaced by one loop.
aact_tables <- c("baseline_counts", "baseline_measurements", "calculated_values",
                 "conditions", "countries", "design_group_interventions",
                 "design_groups", "designs", "drop_withdrawals", "eligibilities",
                 "intervention_other_names", "interventions", "milestones",
                 "outcome_counts", "outcome_measurements", "outcomes",
                 "reported_events", "result_groups", "sponsors", "studies")
for (tbl_name in aact_tables) {
  # get() resolves the data.table bound to this name in the global env
  dbWriteTable(conn = db, name = tbl_name, value = get(tbl_name), row.names = FALSE)
}
## Display on a map
# install.packages("ggmap")
# install.packages("maptools")
# install.packages("maps")
suppressWarnings(library(ggmap))
suppressWarnings(library(maptools))
suppressWarnings(library(ggplot2))
suppressWarnings(library(maps))
suppressWarnings(library(sqldf))
# Regroup countries: collapse territories and aliases onto the region names
# used by maps::map_data('world'), so trial counts can be geolocated below.
# FIX: two copy-paste errors — 'Czechia'/'Czech Republic' were mapped to
# 'Congo', and the Micronesia/Nauru/Palau group to 'Macedonia'. The target
# names 'Czech Republic'/'Micronesia' match the maps world regions — confirm
# against map_data('world')$region for the installed maps version.
# NOTE(review): the '%Korea%' rule still collapses both Koreas onto
# 'North Korea' — confirm intent.
cntrs <- sqldf("select nct_id,
               case when name in ('American Samoa ','Guam','Northern Mariana Islands','United States','United States Minor Outlying Islands','Virgin Islands (U.S.)') then 'USA'
               when name in ('Aruba','Netherlands Antilles') then 'Netherland'
               when name in ('Bermuda','Cayman Islands','Gibraltar','Montserrat','United Kingdom') then 'UK'
               when name in ('Congo, The Democratic Republic of the','The Democratic Republic of the Congo') then 'Congo'
               when name in ('Czechia','Czech Republic') then 'Czech Republic'
               when name in ('Faroe Islands','Greenland') then 'Denmark'
               when name in ('French Polynesia','Guadeloupe','Martinique','New Caledonia','Réunion') then 'France'
               when name in ('Holy See (Vatican City State)') then 'Vatican'
               when name in ('Hong Kong','Macau','Taiwan') then 'China'
               when name like '%Korea%' then 'North Korea'
               when name in ('Former Yugoslavia and Macedonia','The Former Yugoslav Republic of') then 'Macedonia'
               when name in ('Federated States of Micronesia','Nauru','Palau') then 'Micronesia'
               when name in ('Palestinian Territories','Occupied and Palestinian Territory, Occupied') then 'Palestine'
               when name in ('Syrian Arab Republic') then 'Syria'
               when name in ('Russian Federation') then 'Russia'
               else name end as country
               from countries ")

mdat <- map_data('world')
# Trials per (regrouped) country
ct <- as.data.frame(table(cntrs$country))
colnames(ct) <- c("country", "value")
# Approximate one centroid per map region by averaging its polygon vertices
mpdt <- sqldf("select m.region, avg(m.lat) as lat, avg(m.long) as long from mdat as m group by region")
dat <- sqldf("select c.*, m.lat, m.long from ct as c left join mpdt as m on c.country=m.region ")

# World polygon layer with one bubble per country, sized by trial count
ggplot() +
  geom_polygon(dat=mdat, aes(long, lat, group=group), fill="blue") +
  geom_point(data=dat,
             aes(y=lat,x=long, map_id=country, size=value), col="red")
##############################################################################################
##############################################################################################
# NOTE(review): this base-graphics attempt looks broken — ct$Var1[1] is a
# factor level (a country name), not an x coordinate, and cex = ct$Freq[1]
# scales the symbol by the raw count. The ggplot version above is the one
# that works.
map("world", fill=TRUE, col="white", bg="lightblue", ylim=c(-120, 120), mar=c(0,0,0,0))
points(ct$Var1[1], cex = ct$Freq[1], pch = 20)
## start a model based on 5 variables (sponsor, nb on countries, in criteria, ex criteria, therapeutic area)
#derive in study ==> variable outcome as such
# NOTE(review): the codes listed here disagree with the SQL below, which uses
# 1 = completed and 3 = unknown — reconcile before relying on either.
# 3 - completed ["Completed", "Approved for marketing"]
# 2 - ongoing ["Recruiting", "Not yet recruiting", "Active, not recruiting","Enrolling by invitation","Suspended"]
# 1 - unknown ["Unknown status", "Available","No longer available","Temporarily not available"]
# 0 - not completed ["Terminated", "Withdrawn"]
suppressWarnings(library(sqldf))
# One row per study: outcome label (derived from overall_status) and a
# numeric encoding of the study type.
trial <- sqldf("select nct_id, study_type, overall_status, phase, number_of_arms, has_dmc,
               case when overall_status in ('Terminated', 'Withdrawn') then 0
               when overall_status in ('Approved for marketing','Completed') then 1
               when overall_status in ('Recruiting', 'Not yet recruiting', 'Active, not recruiting','Enrolling by invitation','Suspended') then 2
               when overall_status in ('Unknown status', 'Available','No longer available','Temporarily not available','') then 3 end as outcome,
               case when study_type in ('Expanded Access') then 1
               when study_type in ('Interventional') then 2
               when study_type in ('Observational', 'Observational [Patient Registry]') then 3
               when study_type in ('Unknown','') then 4 end as type
               from studies ")
# Predictor: number of countries per study
region <- sqldf("select nct_id, count(*) as nb_rg from cntrs group by nct_id")
trial <- sqldf("select t.*, r.nb_rg from trial as t left join region as r on t.nct_id = r.nct_id ")
# Predictor: number of reported adverse events per study
ae <- sqldf("select nct_id, count(*) as nb_ae from reported_events group by nct_id")
trial <- sqldf("select t.*, a.nb_ae from trial as t left join ae as a on t.nct_id = a.nct_id ")
# Keep complete cases and restrict to the binary outcome (0 = not completed,
# 1 = completed)
trial <- sqldf("select *
               from trial
               where outcome is not null and nb_rg is not null and nb_ae is not null and outcome in (0,1)")

# studies
# nct_id, study_type, overall_status, phase, number_of_arms, has_dmc
table(trial$outcome, useNA = "always")
table(trial$type, useNA = "always")

# Calculation nb of inclusion criteria and exclusion criteria
temp <- strsplit(eligibilities$criteria, split = 'Exclusion Criteria:')

set.seed(123)
library(caret)
# 80/20 train/test split (FALSE spelled out; F is a reassignable shorthand)
trainnew <- createDataPartition(y = trial$outcome, p = .8, list = FALSE)
rms_train <- trial[trainnew, ]  #80% training set
rms_test <- trial[-trainnew, ]  #20% test set
dim(rms_train); dim(rms_test)

## Logistic Regression
# FIX: glm() defaults to family = gaussian (ordinary linear regression); a
# logistic model for the 0/1 outcome needs family = binomial.
rms_fit <- glm(formula = outcome ~ type + nb_rg + nb_ae, data = rms_train,
               family = binomial)
summary(rms_fit)

## Random Forest
library(randomForest)
set.seed(123)
# FIX: the response must be a factor for a classification forest; a numeric
# 0/1 y makes randomForest fit a regression, which leaves err.rate NULL
# (breaking the legend below) and makes confusionMatrix() meaningless.
model.rf <- randomForest(y = as.factor(rms_train$outcome),
                         x = rms_train[, c("type", "nb_rg", "nb_ae")],
                         importance = TRUE, keep.forest = TRUE)
plot(model.rf)
legend("topright", colnames(model.rf$err.rate), col = 1:3, fill = 1:3)
varImpPlot(model.rf, scale = FALSE)
pred_cart <- predict(model.rf, rms_train)
# 'cm' rather than 'c', which would shadow base::c()
cm <- confusionMatrix(table(pred_cart, rms_train$outcome))
cm
acc <- round(cm$overall[[1]] * 100, 2)
# FIX: the last line predicted into 'test_set$Survived' — a leftover from a
# Titanic tutorial referencing an undefined object; the held-out data here
# is rms_test.
rms_test$pred <- predict(model.rf, rms_test)
|
dc7c56f0d45ed49da717c1451a3ccfdb21b1dea0 | 3c322eea1368ca18679f75255eae906a3b78b3f1 | /Analysis/multiplots.R | 6473cb79e177471c3fa1264d6b02f954de7924b8 | [
"BSD-2-Clause"
] | permissive | jjhartmann/pullreqs | 2b2cc9e3b4b4b6ee22ed87b90449558f4da0ca1d | 33b46b96b5d0f039b92d7c954266bc768c017b1d | refs/heads/master | 2021-05-07T21:33:32.926311 | 2017-12-21T02:06:51 | 2017-12-21T02:06:51 | 109,027,870 | 0 | 0 | null | 2017-10-31T17:08:56 | 2017-10-31T17:08:56 | null | UTF-8 | R | false | false | 2,812 | r | multiplots.R | library(ggplot2)
library(grid)
library(reshape)
library(ellipse)
source("variables.R")
# Plot a list of plots on one page, laid out using `cols` columns.
# `plots` is a list of grid-printable objects (e.g. ggplot objects); `title`
# is drawn in grey at the centre of a multi-plot page. Called for its side
# effect; returns NULL invisibly.
multiplot <- function(plots, cols = 1, title = "") {
  require(grid)

  numPlots <- length(plots)

  if (numPlots == 0) {
    print("WARN: No items to be plotted")
    # FIX: the original used a bare `return` (which just evaluates the
    # `return` function object and does NOT exit), so execution fell
    # through to the layout code and failed on plots[[1]].
    return(invisible(NULL))
  }

  print(sprintf("Plotting %d plots using %d columns", numPlots, cols))

  # Layout matrix: cell (r, c) holds the index of the plot drawn there.
  layout <- matrix(seq(1, cols * ceiling(numPlots / cols)),
                   ncol = cols, nrow = ceiling(numPlots / cols))

  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    grid.text(title, gp = gpar(fontsize = 12, col = "grey"))

    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))

      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Build a log-x histogram for column `var` of data frame `data`.
# `title` defaults to the column name and is used both for logging and as
# the plot title. Returns the ggplot object.
plot.histogram <- function(data, var, title = var)
{
  print(sprintf("Plotting histogram for column %s", title))

  # A zero-range column cannot be binned; nudge its first value so the
  # data still gets assigned to bins and plotted.
  if (diff(range(data[var])) == 0) {
    print(sprintf("Fixing range for column %s", title))
    data[var][1, ] <- 1
  }

  ggplot(data, aes_string(x = var)) +
    geom_histogram(colour = "black", fill = "white") +
    scale_x_log10() +
    ggtitle(title)
}
# Plot histograms for all files/variables combinations in the provided dataframes.
# Each entry in the dataframe is expected to have equal amount of columns.
# Specify columns to print as a vector of column indices (c(4:7, 8)).
# By default, all columns are being printed.
plot.hist.all_dataframes <- function(dfs , columns = NULL)
{
  # FIX: the documented default (columns = NULL -> all columns) previously
  # hit the is.vector() guard and stopped; handle NULL explicitly.
  if (is.null(columns)) {
    columns <- seq_along(colnames(dfs[[1]]))
  } else if (!is.vector(columns)) {
    stop("Inputs columns is not a vector")
  }

  cols <- colnames(dfs[[1]])[columns]
  print(sprintf("Plotting %s columns", length(cols)))

  # Create a plot per variable name for all dataframes
  lapply(cols, function(x){
    print(sprintf("Plotting column %s", x))
    # Only dataframes that actually contain this column
    items <- Filter(function(y){ x %in% colnames(y)}, dfs)
    multiplot(lapply(items, function(z){
      print(sprintf("Plotting project %s -> %s", z$project_name[[1]], x))
      plot.histogram(z, x, title = z$project_name[[1]])}),
      2, title = x)
  })
}
# Plot a multi-correlation plot for each dataframe.
# `columns` is accepted for interface symmetry with the other plot.* helpers
# but is currently unused.
plot.multicor.all_dataframes <- function(dataframes, columns = NULL)
{
  # FIX: the default was the typo `NUL` (an undefined object), which would
  # error if the argument were ever forced.
  for (i in seq_along(dataframes)) {
    # Columns 5:18 hold the numeric metrics passed to plot.multicor
    plot.multicor(dataframes[[i]][5:18], as.character(dataframes[[i]]$project_name[1]))
  }
}
|
12088dddbc556ad122dc7ccab969342d036451ff | 1428190ede591943b559046b978a02a475197d96 | /man/get_colormind_colors.Rd | 6e65c55a3182a7b37114af05439146741050fb4b | [
"MIT"
] | permissive | MattiasHenders/colormind | dadf38423a781a37a54e16e976474bfe6065edf7 | a4c96c1a65afd95d9990f67c9bdcfd54a7b634e8 | refs/heads/master | 2023-03-17T14:45:25.893637 | 2020-02-18T13:00:43 | 2020-02-18T13:00:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,096 | rd | get_colormind_colors.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{get_colormind_colors}
\alias{get_colormind_colors}
\title{Retrieve color palette from colormind, give suggestions}
\usage{
get_colormind_colors(
x = NULL,
model = "default",
user_agent = "colormind R package github.com/dmi3kno/colormind",
verbose = TRUE,
...
)
}
\arguments{
\item{x}{character vector of valid colors in hex or string (color names) format,
which should serve as starting values for generated palette. Can contain missing values.
Maximum length of vector is 5.}
\item{model}{one of color models returned by `get_colormind_models()`. Default value is "default".}
\item{user_agent}{Optional user agent string to be used when querying colormind.io}
\item{verbose}{optional argument for suppressing messages. Default is `TRUE`}
\item{...}{other parameters passed to `config` argument of `httr::POST`}
}
\value{
vector of colors in hex format
}
\description{
Retrieve color palette from colormind, give suggestions
}
\examples{
if(curl::has_internet())
get_colormind_colors()
}
|
f6f2385e0cceccc05e56cc5124ae798cdfe46830 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/analysisPipelines/examples/univarCatDistPlots.Rd.R | 97b5330b4a79b19847289e179f1befee0c7ad3be | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 208 | r | univarCatDistPlots.Rd.R | library(analysisPipelines)
### Name: univarCatDistPlots
### Title: Univariate Categoric Distribution
### Aliases: univarCatDistPlots
### ** Examples
univarCatDistPlots(data = iris, uniCol = "Species")
|
e123e7cc09760f665d1214689bd86c17fb114926 | f14e3a8823d00a12c25493ff12a7303c8f8fe305 | /man/plot.trim.totals.Rd | f10761dad392f94865b1fe483967869e73245f47 | [] | no_license | cran/rtrim | 6565c068369efec0e9867b5fe397a641eb859638 | 80239e3f7cbeb66b9540284eed9fa1bd946d4666 | refs/heads/master | 2020-06-17T16:12:35.734973 | 2020-04-21T11:20:02 | 2020-04-21T11:20:02 | 74,989,195 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,967 | rd | plot.trim.totals.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trim_post.R
\name{plot.trim.totals}
\alias{plot.trim.totals}
\title{Plot time-totals from trim output.}
\usage{
\method{plot}{trim.totals}(
x,
...,
names = NULL,
xlab = "auto",
ylab = "Time totals",
leg.pos = "topleft",
band = "se"
)
}
\arguments{
\item{x}{an object of class \code{trim.totals}, as resulting from e.g. a call to \code{totals}.}
\item{...}{optional additional \code{trim.totals} objects.}
\item{names}{optional character vector with names for the various series.}
\item{xlab}{x-axis label. The default value of "auto" will be changed into "Year" or "Time Point", whichever is more appropriate.}
\item{ylab}{y-axis label.}
\item{leg.pos}{legend position, similar as in \code{\link[graphics]{legend}}.}
\item{band}{Defines if the uncertainty band will be plotted using standard errors ("se") or confidence intervals ("ci").}
}
\description{
This function plots a time series of one or more \code{trim.totals} objects, i.e. the output of \code{totals}.
Both the time totals themselves and the associated standard errors will be plotted:
the former as a solid line with markers, the latter as a transparent band.
}
\details{
Additionally, the observed counts will be plotted (as a line) when this was asked for in the call to \code{totals}.
Multiple time-total data sets can be compared in a single plot
}
\examples{
# Simple example
data(skylark2)
z <- trim(count ~ site + year, data=skylark2, model=3)
plot(totals(z))
# Extended example
z1 <- trim(count ~ site + year + habitat, data=skylark2, model=3)
z2 <- trim(count ~ site + year, data=skylark2, model=3)
t1 <- totals(z1, obs=TRUE)
t2 <- totals(z2, obs=TRUE)
plot(t1, t2, names=c("with covariates", "without covariates"), main="Skylark", leg.pos="bottom")
}
\seealso{
Other graphical post-processing:
\code{\link{heatmap}()},
\code{\link{plot.trim.index}()}
}
\concept{graphical post-processing}
|
d461b26ac659d026d3bf044611e593b62cb36f1e | bc83df3b565b2d2d54b833f530f81f339f9af4c1 | /Cleaning data in R/script.r | 21da64b37d5dcf45a6603488570c0fd618f92c0e | [] | no_license | abhiru-wije/Datacamp---Data-anayltics-with-R | 8742fe358a56092c3dc35a0aef29d9b306e72164 | 7bbd55a68acfc4a889e69b14714103317820a581 | refs/heads/master | 2023-08-07T00:54:06.122488 | 2021-10-06T23:00:13 | 2021-10-06T23:00:13 | 401,614,700 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,084 | r | script.r | #1
# DataCamp "Cleaning Data in R" exercise script.
# Assumes bike_share_rides plus dplyr/assertive-style helpers are loaded.

# Exercise 1: inspect the raw data
# Glimpse at bike_share_rides
glimpse(bike_share_rides)

# Summary of user_birth_year
summary(bike_share_rides$user_birth_year)

# Exercise 2: treat birth year as a categorical variable
# Glimpse at bike_share_rides
glimpse(bike_share_rides)

# Summary of user_birth_year
summary(bike_share_rides$user_birth_year)

# Convert user_birth_year to factor: user_birth_year_fct
bike_share_rides <- bike_share_rides %>%
  mutate(user_birth_year_fct = as.factor(user_birth_year))

# Assert user_birth_year_fct is a factor
assert_is_factor(bike_share_rides$user_birth_year_fct)

# Summary of user_birth_year_fct
summary(bike_share_rides$user_birth_year_fct)

# Exercise 3: strip units from the duration strings and convert to numeric
bike_share_rides <- bike_share_rides %>%
  # Remove 'minutes' from duration: duration_trimmed
  mutate(duration_trimmed = str_remove(duration, "minutes"),
         # Convert duration_trimmed to numeric: duration_mins
         duration_mins = as.numeric(duration_trimmed))

# Glimpse at bike_share_rides
glimpse(bike_share_rides)

# Assert duration_mins is numeric
assert_is_numeric(bike_share_rides$duration_mins)

# Calculate mean duration
mean(bike_share_rides$duration_mins)

# Exercise 4 (not included in this excerpt)
|
bf95b8fb67c3cabba4091a0a89550e83cbb82c54 | 24470f3a17b5631d52db3debbfb72c7061e7a8a9 | /R/canberraDistanceW.details.R | 4a873cbd4fb113a93ce8e4e6b216fb5bce8c91d3 | [] | no_license | cran/LearnClust | 008141f99f7e0903c36a410ac0be6d806dff1144 | 10cd7f8a8a1e9d7321228380fe57692942fad9aa | refs/heads/master | 2023-01-20T17:05:52.307115 | 2020-11-29T21:50:02 | 2020-11-29T21:50:02 | 300,194,440 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,205 | r | canberraDistanceW.details.R | #' @title To calculate the Canberra distance applying weights .
#' @description To explain how to calculate the Canberra distance between clusters applying weights given.
#' @param cluster1 is a cluster.
#' @param cluster2 is a cluster.
#' @param weight is a numeric vector.
#' @details The function calculates the Canberra distance value from \code{cluster1} and \code{cluster2}, applying weights to the cluster's components.
#' @author Roberto Alcántara \email{roberto.alcantara@@edu.uah.es}
#' @author Juan José Cuadrado \email{jjcg@@uah.es}
#' @author Universidad de Alcalá de Henares
#' @return canberra distance applying weights value. Explanation.
#' @examples
#'
#' cluster1 <- matrix(c(1,2),ncol=2)
#' cluster2 <- matrix(c(1,3),ncol=2)
#'
#' weight1 <- c(0.4,0.6)
#' weight2 <- c(2,12)
#'
#' canberradistanceW.details(cluster1,cluster2,weight1)
#'
#' canberradistanceW.details(cluster1,cluster2,weight2)
#'
#' @export
canberradistanceW.details <- function(cluster1,cluster2,weight){
message("\n This function calculates the canberra distance applying some weight to each element in the clusters.\n")
message("\n It allows the algorithm to use some categories more importante than the others. \n")
res <- 0
if(is.null(weight)){
message("\n Due to there is not weight, the formula does not change. \n")
buffer <- 0
for (index in c(1:ncol(cluster1))) {
if((abs(cluster2[index]) + abs(cluster1[index])) == 0){
aux <- 0
} else {
aux <- (abs((cluster2[index] - cluster1[index]))/(abs(cluster2[index]) + abs(cluster1[index])))
}
buffer <- buffer + aux
}
res <- buffer
} else {
message("\n Due to there is weight, the formula has to multiply values by each weight. \n")
buffer <- 0
for (index in c(1:ncol(cluster1))) {
if((abs(cluster2[index]) + abs(cluster1[index])) == 0){
aux <- 0
} else {
aux <- (abs((cluster2[index] - cluster1[index]))/(abs(cluster2[index]) + abs(cluster1[index])))
}
buffer <- buffer + (weight[[index]] * aux)
}
res <- buffer
}
message("\n Canberra distance is ", res," \n")
res
}
|
7b710b8308ee056fa1278a309c78fc30d7cdfc02 | 1da1269745b6ce6806ffd7a15668fc27470cd921 | /R/tri_tri_submission_sic.R | eaafffbadbcc85a8d071dcf04c03a05618a58273 | [] | no_license | markwh/envirofacts | d0c3bb7495060fd00b825c1e72602479f8a92b72 | 815ba95808a37f552d9a7041be532817e4766b90 | refs/heads/master | 2021-01-10T07:14:32.874354 | 2019-03-27T02:28:15 | 2019-03-27T02:28:15 | 50,798,175 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 723 | r | tri_tri_submission_sic.R | #' Retrieve tri submission sic data from tri database
#'
#' @param TRI_FACILITY_ID e.g. '00602BXTRF111CO'. See Details.
#' @param DOC_CTRL_NUM e.g. '1388025052414'. See Details.
#' @param SIC_CODE e.g. 'NA'. See Details.
#' @param SIC_SEQUENCE_NUM e.g. '2'. See Details.
#' @param PRIMARY_IND e.g. '0'. See Details.
#' @export
tri_tri_submission_sic <- function(TRI_FACILITY_ID = NULL, DOC_CTRL_NUM = NULL, SIC_CODE = NULL,
                                   SIC_SEQUENCE_NUM = NULL, PRIMARY_IND = NULL) {
  # Bundle the column filters (NULL entries are simply unset) and delegate
  # the actual table query to envir_get().
  query_args <- list(
    TRI_FACILITY_ID = TRI_FACILITY_ID,
    DOC_CTRL_NUM = DOC_CTRL_NUM,
    SIC_CODE = SIC_CODE,
    SIC_SEQUENCE_NUM = SIC_SEQUENCE_NUM,
    PRIMARY_IND = PRIMARY_IND
  )
  envir_get("tri_submission_sic", query_args)
}
|
4db6fba92ce9f746108429d560039e59cc536000 | a57b7b949fa6dba0b7d6f56cbe45c0ad46481537 | /R/timestamp.R | 1399c84178a05c9748422e00800f0ee3800e20b5 | [
"MIT"
] | permissive | davebraze/FDButils | 12cf914b5bb04dd71f4ea60986650e4b608a083d | 7a3c5bb9cd5ef6dbe4dcd522eb1e1772f71bff46 | refs/heads/master | 2023-03-12T02:14:19.146630 | 2023-02-20T19:52:18 | 2023-02-20T19:52:18 | 57,219,505 | 1 | 2 | MIT | 2017-12-28T14:59:29 | 2016-04-27T14:23:14 | R | UTF-8 | R | false | false | 554 | r | timestamp.R | ##' @title Get a human readable timestamp as string.
##'
##' @description
##' Return a human readable timestamp as a string: 'YYYYMMDD-HHMMSS'.
##'
##' @details
##' Handy for building file names that sort chronologically. Fractional
##' seconds appear only when the user has set options(digits.secs).
##'
##' @return A string corresponding to a human readable timestamp: 'YYYYMMDD-HHMMSS'.
##' @author David Braze \email{davebraze@@gmail.com}
##' @export
##' @examples
##' timestamp()
timestamp <- function() {
  now <- Sys.time()
  strftime(now, format = "%Y%m%d-%H%M%OS")
}
2820f35573b4e9c22d476b2f71fe29588384beb1 | 7fbe3abebe3ced7f19cb2af802a6944ce592bd7f | /Problem set 3.R | d68d8518097bf5b9e7560997918efc7cf741c08c | [] | no_license | erikpark/S670 | 254f493c428bc8bc5d9119e76432efe88dd58ab5 | 9b3a24f24114a898408f643994dc8c2253b60194 | refs/heads/master | 2021-08-24T02:17:32.989845 | 2017-12-07T15:53:53 | 2017-12-07T15:53:53 | 113,470,852 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 691 | r | Problem set 3.R | problemset3 <- read.table("/media/removable/USB Drive/S670/s670-ps3-data.txt", head=TRUE)
library(ggplot2)
# Exploratory script: find a smoother for each response (y1, y2, y3) against
# x. "#Bingo" marks the fit the author settled on for each panel.
summary(problemset3)
head(problemset3)
# y1 on a reciprocal scale
plot1 <- ggplot(problemset3, aes(x = x, y = (y1)^-1)) + geom_point()
plot1
plot1 + geom_smooth()
plot1 + geom_smooth(method = "lm", formula = y ~ x + I(x^2))
plot1 + geom_smooth(method.args=list(degree=1))
#Bingo -- degree-1 loess chosen for 1/y1
# y2 on a log10 scale
plot2 <- ggplot(problemset3, aes(x = x, y = log10(y2))) + geom_point()
plot2
plot2 +geom_smooth(method = "lm")
plot2 + geom_smooth(method.args=list(degree = 1, family = "symmetric"))
#Bingo -- robust (symmetric) degree-1 loess chosen for log10(y2)
# y3 untransformed; a small span is needed to follow the local structure
plot3 <- ggplot(problemset3, aes(x = x, y = y3)) + geom_point()
plot3
plot3 + geom_smooth()
plot3 + geom_smooth(span=.1)
#Bingo -- span = 0.1 loess chosen for y3
e10732679f86b41a9b1fca4f28f4a69972f4e8ad | da7981b05be3dfc035edab35c536c2dfd8ec4801 | /cycle_stage_plots/app.R | 900008e80c68eba87ff0b0362828e8e9a8c8c83e | [] | no_license | jessicachung/mivf_shiny_apps | 2a916736acb265638a58ce883d775e045225b53b | baa6520edd27765e371a9f1d0509cbbc24814a68 | refs/heads/master | 2021-01-12T11:02:18.053616 | 2020-12-02T07:59:19 | 2020-12-02T07:59:19 | 72,795,938 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,974 | r | app.R | ############################################################
## Expression along cycle stage
############################################################
# Shiny app: plot expression of a given Illumina probe across cycle stages,
# with probe lookup by gene symbol.
library(shiny)
library(ggplot2)
library(dplyr)
library(stringr)
library(reshape2)
# Loads the expression matrix object `combat_exprs` (probes x samples).
load("data/combat_exprs.rda")
# Sample annotations; used below for its sample_id and cycle_stage columns.
phenotype_df <- read.table("data/combat_phenotype.tsv", header=TRUE, sep="\t",
                           stringsAsFactors=FALSE)
# Illumina probe annotation (IlluminaID, SymbolReannotated, ...).
probe_df <- read.table("data/illumina_v4_annotation.tsv", header=TRUE, sep="\t",
                       stringsAsFactors=FALSE)
# Probes of interest from Jane (one probe ID per line; do not reformat --
# the string is split on newlines below)
probes_raw <- "ILMN_1764096
ILMN_3194087
ILMN_3238259
ILMN_3272768
ILMN_1734552
ILMN_1652431
ILMN_1696183
ILMN_1743290
ILMN_1786015
ILMN_1807529
ILMN_2091454
ILMN_2169736
ILMN_2367126
ILMN_1740706
ILMN_2060719
ILMN_1784217
ILMN_1729033
ILMN_1782743"
# Make a named list of probes of interest for the drop down menu
probes_of_interest <- probes_raw %>% str_split("\n") %>% unlist
probe_list <- list()
for (p in probes_of_interest) {
  probe_list[[p]] <- p
}
# NOTE(review): the comment below says stages 8,9,10 are removed, but the
# filter() call is commented out, so ALL stages are kept -- confirm intent.
# Remove samples with cycle_stage 8,9,10
phenotype_df <- phenotype_df %>%
  # filter(cycle_stage %in% 1:7) %>%
  mutate(cycle_stage=factor(cycle_stage))
# Subset/order the expression columns to match the phenotype sample order.
combat_exprs <- combat_exprs[,phenotype_df[,"sample_id"]]
# TODO: Replace with real p-values later
probe_df <- probe_df %>% mutate(mean_pval=NA)
# Build a per-sample expression plot for one probe across cycle stages.
#
# exprs : expression matrix (probes x samples).
# pheno : data frame whose sample_id column matches the columns of `exprs`
#         in order (checked below) and which carries cycle_stage.
# probe : row name (Illumina probe ID) to plot.
# title : plot title.
#
# Returns a list: `plot` (the ggplot object) and `cycle_means` (per-stage
# mean expression).
expression_cycle <- function(exprs, pheno, probe, title="") {
  # Melt the single probe row to long form (one row per sample); melt()'s
  # Var2 column holds the sample (column) names.
  dat <- exprs[probe,,drop=FALSE] %>% melt %>%
    S4Vectors::rename(Var2="sample_id")
  # Guard: melted sample order must line up with the phenotype table.
  stopifnot(dat[,"sample_id"] == pheno[,"sample_id"])
  dat <- cbind(dat, cycle=pheno[,"cycle_stage"])
  dat_means <- dat %>% group_by(cycle) %>% summarise(mean=mean(value))
  # Individual samples, plus a large translucent red point and a numeric
  # label at each per-stage mean.
  g <- ggplot(dat, aes(x=cycle, y=value, color=cycle)) +
    geom_point() +
    geom_point(data=dat_means, aes(x=cycle, y=mean), col="red", size=6, alpha=0.5) +
    geom_text(data=dat_means, size=3,
              aes(x=cycle, y=mean, label=sprintf(" %0.2f", mean))) +
    labs(title=title)
  return(list(plot=g, cycle_means=dat_means))
}
############################################################
## UI
# UI definition. Two probe-selection modes (list vs free text) toggled by a
# radio button, plus a gene-name search box for probe lookup.
ui <- fluidPage(
  titlePanel("Expression across cycle stage"),
  sidebarLayout(
    sidebarPanel(
      # Choose input type (radioButtons or drop down menu?)
      radioButtons("input_type",
                   label = h4("Input type:"),
                   choices = list("Probe ID from list" = 1,
                                  "Enter probe ID" = 2),
                   selected = 1),
      # Toggle input type for drop down menu
      conditionalPanel(
        condition="input.input_type == 1",
        radioButtons("probe_select",
                     label = h4("Illumina probe ID"),
                     choices = probe_list,
                     selected = probe_list[[1]])
      ),
      # Toggle input type for probe text input
      conditionalPanel(
        condition="input.input_type == 2",
        textInput("probe_text",
                  label = h4("Illumina probe ID"),
                  value = "ILMN_1774828")
      ),
      # Submit button (fixed typo: was "Sumbit")
      actionButton("submit",
                   label="Submit")
    ),
    mainPanel(
      # Plot
      plotOutput("plot"),
      hr(),
      # Probe info table
      h3("Selected probe:"),
      tableOutput("probe_info")
    )
  ),
  hr(),
  # Given gene name, search for probes
  textInput("gene_search",
            label = h4("Enter a gene name to search for probes:"),
            value = "e.g. VEZT"),
  # Probe search table
  tableOutput("gene_search_table")
)
############################################################
## Server
# Server logic: holds the currently selected probe in a reactiveValues slot
# and drives the plot and the two annotation tables from it.
server <- function(input, output){
  # Set reactive values; start on the first probe in the curated list.
  rv <- reactiveValues(
    probe_name = probe_list[[1]]
  )
  # Update probe name when submit button is pressed. Note the event
  # expression also includes input$probe_select, so changing the radio
  # selection triggers an update without pressing Submit.
  observeEvent({input$submit; input$probe_select}, {
    if (input$input_type == "1") {
      rv$probe_name <- input$probe_select
    } else {
      rv$probe_name <- input$probe_text
    }
  })
  output$plot <- renderPlot({
    print(rv$probe_name)  # console trace of the probe being plotted
    expression_cycle(exprs=combat_exprs, pheno=phenotype_df, probe=rv$probe_name,
                     title=rv$probe_name)
  })
  # Probe info table for the currently selected probe
  output$probe_info <- renderTable({
    probe_df %>% filter(IlluminaID == rv$probe_name) %>%
      dplyr::select(IlluminaID, SymbolReannotated, GenomicLocation, ProbeQuality, mean_pval) %>%
      S4Vectors::rename(SymbolReannotated="Symbol", mean_pval="MeanDetectionPValue")
  })
  # Search table: all probes annotated with the typed gene symbol
  # (exact match, case sensitive)
  output$gene_search_table <- renderTable({
    probe_df %>% filter(SymbolReannotated == input$gene_search) %>%
      dplyr::select(IlluminaID, SymbolReannotated, GenomicLocation, ProbeQuality, mean_pval) %>%
      S4Vectors::rename(SymbolReannotated="Symbol", mean_pval="MeanDetectionPValue")
  })
}
############################################################
## Run
# Launch the app with the UI and server defined above.
shinyApp(ui=ui, server=server)
|
ebc879a04c1816cea1a5b0c0eed1d70a5eb36777 | 4d8cae963564011381645b95888b65f49ca8aa8a | /cachematrix.R | 4dcea0076c08800006a9114333b1791ad12b05a1 | [] | no_license | djquinn/ProgrammingAssignment2 | 036232fdad0382bcde07625035151b9183ad17aa | 6f8bae8d1ab9b08a01ce378fe901e8199bf1de67 | refs/heads/master | 2021-01-24T19:52:20.475475 | 2015-11-22T21:02:17 | 2015-11-22T21:02:17 | 46,639,225 | 0 | 0 | null | 2015-11-22T00:00:26 | 2015-11-22T00:00:24 | null | UTF-8 | R | false | false | 1,982 | r | cachematrix.R | ##David Quinn, Prog Assignment No 2
## 2 Functions:
## "makeCacheMatrix" - creates a "matrix like" object that stores an invertible matrix
## and, if subsequently called by cacheSolve, its inverse in global memory
## "cacheSolve" - returns the inverse of matrix portion of the "matrix-like" object stored
## with "makeCacheMatrix". If the inverse was previously computed and stored
## in memory it will pull the value of the inverse from memory. Else, it will compute the inverse
## and store it in global memory.
## Creates a list object that stores a matrix and, if inverted with cacheSolves,
## stores the inverse in global memory/cache as well
makeCacheMatrix <- function(x = matrix()) {
  # Matrix and inverse both live in this function's environment; the four
  # closures below share it, which is what makes the caching work.
  cached_inverse <- NULL
  # Replace the stored matrix and drop any stale inverse.
  setmat <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  # Return the stored matrix.
  getmat <- function() {
    x
  }
  # Record a freshly computed inverse (called by cacheSolve).
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  # Return the cached inverse, or NULL if none has been stored yet.
  getinv <- function() {
    cached_inverse
  }
  list(setmat=setmat,getmat=getmat,setinv=setinv,getinv=getinv)
}
## Function takes in a "matrix"-like object that is created by a previous call
# of "makeCacheMatrix" function. If the inverse of the matrix part of the "matrix"-like
# object has been calculated and stored in memory/cache, returns that cached value.
# If not, the function calculates and returns the inverse of the matrix and stores it in
# global cache/memory using the "makeCacheMatrix.setInv()" function
# Assumption: the input object "x" is a "matrix-like" object created by the "makeCacheMatrix"
# function "makeCacheMatrix"
# Assumption: The matrix passed to/created by "makeCacheMatrix" is invertible
cacheSolve <- function(x, ...) {
  # Reuse a previously stored inverse when one exists.
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix (extra args go to solve()) and
    # remember the result for subsequent calls.
    fresh <- solve(x$getmat(), ...)
    x$setinv(fresh)
    return(fresh)
  }
  message("Getting cached Inverse data")
  cached
}
|
f0995b10a32d8ae2da8b7046a3a091ceba7e88c2 | d1f7a1b1efe67ec0e01483262f80598479d45fc5 | /man/scSEGIndex.Rd | ffffdefe3628c9584653a84ec04ffa0dee7a0b2a | [] | no_license | gravitytrope/scMerge | 56411847bc937043cbb333dd2f7e9e4182621d73 | 8bbfe99711d4a7329d1b7edf6576dd261023c883 | refs/heads/master | 2020-04-12T00:45:44.224553 | 2018-11-23T07:06:40 | 2018-11-23T07:06:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 858 | rd | scSEGIndex.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scSEGIndex.R
\name{scSEGIndex}
\alias{scSEGIndex}
\title{scSEGIndex}
\usage{
scSEGIndex(exprsMat, cell_type = NULL, ncore = 1)
}
\arguments{
\item{exprsMat}{The log-transformed single-cell data (assumed to have no batch effect and to cover a wide range of cell types). An n by m matrix, where n is the number of genes and m is the number of cells}
\item{cell_type}{A vector indicating the cell type information for each cell in the gene expression matrix. If it is \code{NULL}, the function calculates the scSEG index without using F-statistics.}
\item{ncore}{Number of cores that are used in parallel}
}
\value{
A matrix of stably expressed features.
}
\description{
Calculate single-cell Stably Expressed Gene (scSEG) index
}
\author{
Shila Ghazanfar, Yingxin Lin, Pengyi Yang
}
|
31eb2ac2178047b0765363af494a8cc224fafe23 | 0eaa7d5e8b578e6db06bc5530b27449988f98380 | /silhouetteNClus.R | 6e8f2f4f1313909aed08f0aa3dbeed371fdef620 | [] | no_license | dcgamboap/GCC-measure | 2da54b29c85db75532ccd9b1479a77c361c73cc6 | cf1f7b647b9087f8f7f000c76d3cb5e57636b61f | refs/heads/master | 2020-04-17T07:44:17.755739 | 2019-04-30T06:14:36 | 2019-04-30T06:14:36 | 166,381,906 | 0 | 1 | null | 2019-02-28T12:40:39 | 2019-01-18T09:44:23 | R | ISO-8859-1 | R | false | false | 2,320 | r | silhouetteNClus.R | #.............................................................................
#
# silhouetteNClus.R
#
# Fecha de Creación : 19-04-2018
#
# Autor: Carolina Gamboa.
#
# Descripción: Esta función calcula el indice de silueta para determinadas
# cantidades de grupos, y determina la cantidad de grupos optima
# usando el estadístico de silueta.
#
# Entradas: Tres parámetros. K : Cantidad máxima de grupos a analizar
# dis : Matriz de distancias.
# method: Método Jerárquico "single", "average"
#
# Output: Dos parámetros. K : Cantidad de grupos
# list : valor del índice de silueta para cada k
#
#.............................................................................
#library
library(cluster)
# Compute the average silhouette width for k = 2..K hierarchical clusterings
# of `dis` and return the k with the largest width.
#
# K      : maximum number of clusters to evaluate.
# dis    : distance matrix ("dist" object, or anything as.dist() accepts).
# method : linkage, one of "complete", "average" or "single". Any other
#          value is not evaluated and the function returns 1, matching the
#          original behaviour.
silhouetteNClus <- function(K, dis, method){
  # `class(x) != "dist"` breaks for plain matrices in R >= 4.2 (class()
  # returns c("matrix", "array"), a length-2 condition); inherits() is the
  # robust test.
  if (!inherits(dis, "dist")) dis <- as.dist(dis)
  # Row for k = 1: silhouette is undefined for a single cluster, use 0.
  silIndex <- data.frame(k = 1, silIndex = 0)
  # The three original branches were identical except for the linkage name,
  # so one code path parameterised by `method` replaces them.
  if (method %in% c("complete", "average", "single")) {
    hclust.dist <- hclust(dis, method = method)
    Cl.hclust <- cutree(hclust.dist, 2:K)
    for (jj in 2:K) {
      coef <- silhouette(Cl.hclust[, jj - 1], dis)
      silIndex <- rbind(silIndex,
                        data.frame(k = jj, silIndex = mean(coef[, "sil_width"])))
    }
  }
  # Keep the original tie behaviour: which() may return several k values.
  maxPos <- which(silIndex[, "silIndex"] == max(silIndex[, "silIndex"]))
  cluster <- list(K = silIndex[maxPos, "k"], coef = silIndex)
  return(cluster$K)
}
|
a8f41c5794cdb943708b4df8bee56b07bbbaeaa8 | a9c7f3d8e5210ec7ad420abab976e641f195c3bd | /functions/alignSeqs.R | 6a603f20610e55fb6dc9bc90e5c8745c6c514181 | [] | no_license | kkeyvan/MeDIP-seq | e629bb1a1d0b1ebd0a8b49ba93c738d829ed844a | 5247fced92bd116ec76ea1d6824ca8c499e15057 | refs/heads/master | 2023-06-08T10:43:21.062145 | 2021-02-17T16:51:26 | 2021-02-17T16:51:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,417 | r | alignSeqs.R |
# This function takes a DNAStringSet formatted set of sequence reads, a corresponding set of
# cigar strings, and a vector of sequence start positions. It returns the aligned sequences
# and a number corresponding to the number of bases deleted from the reads that were not present
# in the reference. Since getGSeq() isn't working, this doesn't work well either.
alignSeqs <- function(seqs, cigars, startPos, sampleName = "", refStart = NULL, refEnd = NULL) {
minPos <- min(c(startPos, refStart), na.rm=T)
maxPos <- max(c(startPos, refEnd), na.rm=T)
seqs <- seqs[!is.na(cigars)]
startPos <- startPos[!is.na(cigars)]
cigars <- cigars[!is.na(cigars)]
startGap <- sapply(startPos - minPos, function(i) paste(rep("-", i), collapse = ""))
endGap <- sapply(maxPos - startPos, function(i) paste(rep("-", i), collapse = ""))
processed <- sapply(1:length(seqs), function(i) {
list(getGSeq(seq = seqs[i], cigar = cigars[i]))
})
processed <- processed[!sapply(processed, is.null)]
cSeqs <- sapply(processed, function(i) i[[1]])
nDel <- sapply(processed, function(i) i[[2]])
alignedSeqs <- DNAStringSet(paste(startGap, cSeqs, endGap, sep=""))
names(alignedSeqs) <- paste(sampleName, "_" , 1:length(alignedSeqs), sep="")
alignedSeqs <- subseq(alignedSeqs, start = refStart - minPos +1, width=refEnd-refStart) # trim seqs
return(list(alignedSeqs = alignedSeqs, nDel = sum(nDel)))
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.