blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20a56f06559b40e6a9d2571819ae62dc16185a5f | 79083a001c5cbeb6ed35dd9c5812a7a5dbd6e4d9 | /R/plot_posterior_prior.R | 0e1cf3660309a3cfe9c5c78be84e879dc7195c38 | [] | no_license | vsrikrish/IAMUQ | d0958841e36441225890312515803aefe96fd7d3 | a21e7c539c045e734f96e923bdb0277a28dcf90d | refs/heads/master | 2021-08-15T01:02:10.066475 | 2021-08-05T20:17:08 | 2021-08-05T20:17:08 | 167,974,974 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,324 | r | plot_posterior_prior.R | library(reshape2)
library(ggplot2)
library(IAMUQ)
source('R/calib_priors.R')
nsamp <- 1e5 # set desired number of samples
mcmc_out <- readRDS('output/mcmc_base-gwp-co2-pop.rds')
mcmc_length <- nrow(mcmc_out[[1]]$samples)
burnin <- 5e5
post <- do.call(rbind, lapply(mcmc_out[1:4], function(l) l$samples[(burnin+1):mcmc_length,]))
parnames <- colnames(post)
# obtain ensemble of posterior samples
idx <- sample(1:nrow(post), nsamp, replace=TRUE)
samps <- post[idx, ]
post_samps <- as.data.frame(samps)
# set up prior list
prior_df <- set_prior_params(parnames)
priors <- IAMUQ::create_prior_list(prior_df)
# sample from priors
pri_samps <- as.data.frame(do.call(cbind, lapply(priors,
function(p) do.call(match.fun(p[['rand.fun']]),
c(list(n=nsamp),
p[-which(names(p) %in% c('type', 'dens.fun', 'quant.fun', 'rand.fun'))]))
)))
# combine samples into a list for melting
all_samps <- list('Posterior'=post_samps, 'Prior'=pri_samps)
all_melt <- melt(all_samps)
colnames(all_melt)[3] <- 'Distribution'
colnames(all_melt)[1] <- 'Variable'
# plot
# set theme
th <- theme_bw(base_size=10) + theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
theme_set(th)
# create labeller for facet labels to convert variable code names to mathematical symbols
var_to_sym <- c('psi1' = expression(psi[1]),
'psi2' = expression(psi[2]),
'psi3' = expression(psi[3]),
'P0' = expression(P[0]) ,
'lambda' = expression(lambda),
's' = expression(s),
'delta' = expression(delta),
'alpha' = expression(alpha),
'As' = expression(A[s]),
'pi' = expression(pi),
'A0' = expression(A[0]),
'rho2' = expression(rho[2]),
'rho3'= expression(rho[3]),
'tau2' = expression(tau[2]),
'tau3' = expression(tau[3]),
'tau4' = expression(tau[4]),
'kappa' = expression(kappa),
'sigma_pop' = expression(sigma[1]) ,
'sigma_prod' = expression(sigma[2]) ,
'sigma_emis' = expression(sigma[3]),
'a_11' = expression(a[11]),
'a_22' = expression(a[22]),
'a_33' = expression(a[33]),
'a_21' = expression(a[21]),
'a_31' = expression(a[31]),
'a_12' = expression(a[12]),
'a_13' = expression(a[13]),
'a_23' = expression(a[23]),
'a_32' = expression(a[32]),
'eps_pop' = expression(epsilon[1]),
'eps_prod' = expression(epsilon[2]),
'eps_emis' = expression(epsilon[3])
)
levels(all_melt$Variable) <- var_to_sym[levels(all_melt$Variable)]
p <- ggplot(all_melt) + stat_density(aes(x=value, color=Distribution), geom='line', position='identity') + facet_wrap(vars(Variable), scales='free', labeller=label_parsed, ncol=4) + scale_color_brewer(palette='Dark2') + theme(legend.position='bottom') + scale_y_continuous('Density') + scale_x_continuous('Parameter Value')
pdf('figures/prior_post-dist.pdf', height=7, width=7)
print(p)
dev.off()
png('figures/prior_post-dist.png', height=7, width=7, res=300, units='in')
print(p)
dev.off()
|
05d85c3e0d5f70ca0d0ba3fe64b38f6bfb228286 | 169271965877a4e2fdc148de2e2d6cc73117b695 | /revisions/code/figures/fixed_effects/Fig6_drivers_fixed_vs_random_effects.R | 50bdccc6a7f635a2f56f709fc0be2960c9b20e92 | [] | no_license | cfree14/sst_productivity | 0eeacf540538aab9df0acf55b992590e290b7306 | cd1d12abe291da2748d7bc76e69b7a182651ba11 | refs/heads/master | 2021-10-21T15:10:32.142668 | 2019-03-04T18:05:27 | 2019-03-04T18:05:27 | 142,471,993 | 6 | 2 | null | null | null | null | UTF-8 | R | false | false | 6,084 | r | Fig6_drivers_fixed_vs_random_effects.R |
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(mblm) # For Thiel-Sen slope
library(plyr)
library(dplyr)
library(freeR)
library(reshape2)
library(quantreg)
library(RColorBrewer)
# Directories
datadir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/sst_productivity/revisions/output"
plotdir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/sst_productivity/revisions/figures"
# Read data
data <- read.csv(paste(datadir, "ramldb_v3.8_sp_ar1_pella0.55_cobe_re_fe_merged.csv", sep="/"), as.is=T)
data$sst_c_trend <- data$sst_c_trend*10
# Plot data
################################################################################
# Fit and plot quantile regression
# x <- data$ffmsy_avg; y <- betaT; xlim <- c(0,5)
# Fit conditional-quantile regressions of y on x and overlay the fitted
# lines on the current plot: the median (tau = 0.5) as a solid line and
# the 2.5% / 97.5% quantiles as dashed lines (a 95% envelope).
#
# Args:
#   x, y : numeric vectors of predictor and response values.
#   xlim : length-2 vector giving the x-range over which to draw the lines.
# Side effect: draws on the active graphics device.
fit_plot_qr <- function(x, y, xlim){
  taus <- c(0.5, 0.025, 0.975)  # median plus 95% envelope
  ltys <- c(1, 2, 2)            # solid centre line, dashed envelope
  for (i in seq_along(taus)) {
    b <- coef(rq(y ~ x, tau = taus[i]))
    curve(b[1] + b[2] * x, from = xlim[1], to = xlim[2], n = 50,
          add = TRUE, lwd = 1.2, lty = ltys[i])
  }
}
# Models
range(data$betaT_f)
range(data$betaT_r)
models <- c("fixed", "random")
ylims <- matrix(c(-5, 5,
-1, 1), ncol=2, byrow=T)
# Setup figure
figname <- "FE_Fig6_drivers_fixed_vs_random_effects.png"
png(paste(plotdir, figname, sep="/"), width=6.5, height=3.8, units="in", res=600)
par(mfrow=c(2,3), mar=c(2.5,2.5,0.5,0.5), mgp=c(2,0.7,0), oma=c(1,2,0,0))
# Loop and plot
for(i in 1:length(models)){
# Reset par
# par(mgp=c(2,0.7,0))
# Get data
model <- models[i]
if(model=="random"){
betaT <- data$betaT_r
colors <- revalue(data$betaT_inf_r, c("positive"="blue", "negative"="red", "none"="grey60"))
xlabs <- c(expression("F/F"["MSY"]*" mean"), "Maximum age (yr)", "SST trend (°C/yr)")
title <- "Random effects"
}else{
betaT <- data$betaT_f
colors <- revalue(data$betaT_inf_f, c("positive"="blue", "negative"="red", "none"="grey60"))
xlabs <- rep("", 3)
title <- "Fixed effects"
}
# Overfishing
#####################################################
# Plot BetaT ~ overfishing
plot(x=data$ffmsy_avg, y=betaT, bty="n", las=1, col=colors, cex.axis=0.8,
xlim=c(0,5), ylim=ylims[i,], xlab=xlabs[1], ylab="", xpd=NA)
lines(x=c(0, 5), y=c(0,0), lty=3)
mtext(title, side=3, adj=0.1, line=-1.3, cex=0.7, font=2)
fit_plot_qr(x=data$ffmsy_avg, y=betaT, xlim=c(0,5))
# if(i==1){mtext("A", side=3, adj=0.95, line=-2, cex=0.8, font=2)}
# Maximum age
#####################################################
# Plot BetaT ~ maximum age
plot(x=data$tmax_yr, y=betaT, bty="n", las=1, col=colors, cex.axis=0.8,
xlim=c(0,100), ylim=ylims[i,], xlab=xlabs[2], ylab="", xpd=NA)
lines(x=c(0, 100), y=c(0,0), lty=3)
fit_plot_qr(x=data$tmax_yr, y=betaT, xlim=c(0,100))
# Plot thermal niche
#####################################################
# Species
spp <- c("Gadus morhua", "Clupea harengus")
cols <- brewer.pal(4, "Set1")[3:4]
# Setup empty
plot(1:10, 1:10, type="n", bty="n", las=1,
cex.axis=0.8, xpd=NA,
xlim=c(0, 20), ylim=ylims[i,],
xlab=c("","Mean temperature (°C)")[i], ylab="")
lines(x=c(0, 20), y=c(0,0), lty=3)
if(i==2){
legend("topright", bty="n", col=cols, pch=16, lty=1, cex=0.7,
legend=c("Atlantic cod (n=12)", "Atlantic herring (n=10)"))
}
# Loop through species
for(j in 1:length(spp)){
# Subset data
sci_name <- spp[j]
sdata <- subset(data, species==sci_name)
# Add points
betaT_cols <- c("betaT_f", "betaT_r")
points(sdata$sst_c_avg, sdata[,betaT_cols[i]], pch=16, col=cols[j], cex=1.2)
# Fit and plot Thiel-Sen slope
if(i==1){
tsfit <- mblm(betaT_f ~ sst_c_avg, sdata, repeated=F)
}else{
tsfit <- mblm(betaT_r ~ sst_c_avg, sdata, repeated=F)
}
# pvalue <- roundf(summary(tsfit)$coefficients[2,4],2)
# lty <- ifelse(pvalue<0.1, 1, 2)
curve(coef(tsfit)[1] + coef(tsfit)[2]*x, from=0, to=20, n=100, add=T, lty=1, col=cols[j], lwd=1.1)
}
# Plot overfishing*warming interaction
#####################################################
# # Add fixed colors
# summary(data$betaT_f)
# betaT_breaks <- c(-16,-8,-4,-3,-2,-1,0,1,2,3,4,8,16)
# data$betaT_bin_f <- cut(data$betaT_f, breaks=betaT_breaks)
# colors <- colorpal(brewer.pal(11, "RdBu"), nlevels(data$betaT_bin_f))
# colors_tr <- rgb(t(col2rgb(colors))/255, alpha=0.7)
# data$betaT_bin_color_f <- colors[data$betaT_bin_f]
#
# # Add random colors
# summary(data$betaT_r)
# betaT_breaks <- seq(-0.75,0.75,0.25)
# data$betaT_bin_r <- cut(data$betaT_r, breaks=betaT_breaks)
# colors <- colorpal(brewer.pal(11, "RdBu"), nlevels(data$betaT_bin_r))
# colors_tr <- rgb(t(col2rgb(colors))/255, alpha=0.7)
# data$betaT_bin_color_r <- colors[data$betaT_bin_r]
#
# # Assign colors
# if(i==1){
# colors <- data$betaT_bin_color_f
# }else{
# colors <- data$betaT_bin_color_r
# }
#
# # F/FMSY*warming interaction
# ##########################################
#
# # Plot F/FMSY * SST trend interaction
# plot(ffmsy_avg ~ sst_c_trend, data, bty="n", las=1,
# cex.axis=1.1, cex.lab=1.1,
# xlim=c(-0.2, 0.8), ylim=c(0,5),
# xlab="Temperature trend (°C/10yr)", ylab=expression("Mean F/F"["MSY"]),
# pch=21, cex=1.2, bg=colors, xpd=NA)
# lines(x=c(-0.2, 0.8), y=c(1, 1), lty=3, lwd=1.2)
# lines(x=c(0,0), y=c(0,5), lty=3, lwd=1.2)
#
# # Add sample size
# n1 <- nrow(filter(data, !is.na(ffmsy_avg) & !is.na(sst_c_trend)))
# text(labels=paste0("n=", n1), x=-0.2+(0.8--0.2)*1.05, y=0, pos=2, cex=1, xpd=NA)
}
# Axis labels
mtext(expression("SST influence (θ"["i"]*")"), outer=T, side=2, adj=0.5, line=0, cex=0.8)
# Off
dev.off()
graphics.off()
|
bf6bf070d323684e34091bcd7dd9c715c1f292c6 | e08feba647b37a30417755c52c8cb7971d92036d | /man/str.combine.Rd | 5a2690797e4f7415ca41e127d14d1d7e88d88aad | [] | no_license | skranz/YamlObjects | 17e984850a7c0acba003916503c211d3e3c9014d | 295eb4fa8db053df10e29362535f629e006cc309 | refs/heads/master | 2021-01-10T18:58:09.827847 | 2015-10-17T03:45:59 | 2015-10-17T03:45:59 | 26,432,913 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 309 | rd | str.combine.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{str.combine}
\alias{str.combine}
\title{Like paste0 but returns an empty vector if some string is empty}
\usage{
\method{str}{combine}(..., sep = "", collapse = NULL)
}
\description{
Like paste0 but returns an empty vector if some string is empty
}
|
df4e9a10739c8f1d270044815b2ca23e9b6e0301 | 9775c80596a7d0c57aee2cea47885cf886e36745 | /inst/shiny/ui.R | d2c8b9b4cc050a621887ce63c98029a0dfc6593d | [] | no_license | OJWatson/scraper | 20ec12f84809c11a919b60e8f1b8aa3fe339bba7 | c8e8b00aff76c26e4fe8ef9d84a09052798d85c6 | refs/heads/master | 2020-04-13T17:09:11.527436 | 2016-12-12T09:28:05 | 2016-12-12T09:28:05 | 61,437,777 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 509 | r | ui.R | library(shiny)
library(shinyjs)
library(scraper)
tagList(
useShinyjs(),
tags$head(
tags$script(src = "shorten.js"),
tags$link(href = "style.css", rel = "stylesheet")
),
div(id = "loading-content", h1("Loading...")),
navbarPage(
title = 'Mendeley DataTable',
tabPanel('Imperial', DT::dataTableOutput('ex1'),
tags$head(tags$script(src=c("shorten.js")))),
tabPanel('Cambridge', DT::dataTableOutput('ex2')),
tabPanel('All', DT::dataTableOutput('ex3'))
)
)
|
2f7e754559c5ba7a50d4410268c11491f7944e20 | 84b6750d3884251925ee54c3a0bd9f4f094cae0a | /man/plot-methods.Rd | a15a458c677a22a15b712b6ce61ebb3225d993cc | [] | no_license | Nth-iteration-labs/ofmlr | c82c94549fa2d38cfd6ec99a36beeccaa6853e08 | ab51ccb94cff96226d44263a80e0c2c7fc9d7490 | refs/heads/master | 2022-12-16T20:32:32.384286 | 2020-09-23T06:28:44 | 2020-09-23T06:28:44 | 74,559,669 | 4 | 1 | null | 2020-09-23T06:28:45 | 2016-11-23T09:09:59 | R | UTF-8 | R | false | true | 2,185 | rd | plot-methods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1-onlineLogMixture.R, R/2-multiMixture.R
\docType{methods}
\name{plot,online_log_mixture,missing-method}
\alias{ANY-method}
\alias{plot,}
\alias{plot,multi_online_log_mixture,missing-method}
\alias{plot,online_log_mixture,missing-method}
\title{Plot method for the online_log_mixture class}
\usage{
\S4method{plot}{online_log_mixture,missing}(x, y, params = FALSE,
omit = 100, param.y = c(-5, 5), ...)
\S4method{plot}{multi_online_log_mixture,missing}(x, y, params = FALSE,
omit = 100, param.y = c(-5, 5), ...)
}
\arguments{
\item{x}{The online_log_mixture object}
\item{y}{NULL}
\item{params}{Boolean, if TRUE the trace of the parameter values will also be printed}
\item{omit}{Number of observations to omit from the log likelihood and l2 Norm traces}
\item{param.y}{a vector with the min and max values of the plot of the beta parameters}
\item{x}{An object of type multi_online_log_mixture}
\item{y}{NULL}
\item{params}{Boolean, if TRUE the trace of the parameter values will also be printed}
\item{omit}{Number of observations to omit from the log likelihood and l2 Norm traces}
\item{param.y}{a vector with the min and max values of the plot of the beta parameters}
}
\description{
Plot an object of type online_log_mixture.
This will only produce a plot when \code{trace!=FALSE}
The plots will be of the log-likelihood of the model over
the number of observations and the average change in L2
norm of the model parameters. Also, when \code{params=TRUE}
plots of the parameter estimates over time will also be produced.
Will create a plot of each of the models stored in
the model comparison class that you can browse one by
one.
}
\examples{
M2 <- online_log_mixture(3,3, trace=1)
for(i in 1:10000){
X <- runif(3,-2,2)
y <- rbinom(1, 1, inv_logit(c(0,-2,2)\%*\%X))
M2 <- add_observation(M2, y, X, 0)
}
plot(M2, params=TRUE)
M1 <- online_log_mixture(2,1, trace=1)
models <- multi_online_log_mixture(M1)
models <- add_model(models, online_log_mixture(2,2, trace=1))
for(i in c(1:100)){
models <- add_observation(models, rbinom(1,1,.5), rnorm(2,0,1))
}
plot(models, params=TRUE, omit=0)
}
|
d8e68439196f72ac4f5394a9c9dc726fcd5c8983 | d354983f75228b3aa82eea518f42de95e3fa32f7 | /functions/lapply.R | c2721c1ac889b7008a96b3bb23b9988b98af2567 | [] | no_license | ReneNyffenegger/about-r | f1f1d1f6c52f0446207978e78436ccbd91b89d20 | ae511ae632e1f8827cab91d4e36c1a9349fda5ab | refs/heads/master | 2022-01-14T10:19:45.230836 | 2021-12-27T19:50:37 | 2021-12-27T19:50:37 | 22,269,629 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 261 | r | lapply.R | func <- function(x) {
  # Piecewise-linear triangle wave: f(0)=1, f(0.25)=0, f(0.5)=1, f(0.75)=0,
  # rising again toward 1 at x=1 (last branch is open-ended for x >= 0.75).
  # Rewritten with nested ifelse() so the same piecewise rule also applies
  # element-wise to numeric vectors; scalar behavior is unchanged.
  ifelse(x < 0.25,  1 - 4 * x,
  ifelse(x < 0.50, -1 + 4 * x,
  ifelse(x < 0.75,  3 - 4 * x,
                   -3 + 4 * x)))
}
x <- 0:20/20
y <- lapply(x, func)
X11()
plot(x, y)
lines(x,y, col='red')
locator(1)
|
139b266303a34747563843982964aa16daa89d33 | 24fcc7a9446871f5affbc82d3ae1ed20d6a7c8aa | /man/peak_season_offset.Rd | e0fd6f6fb18ab7074dacad2597c868208acfa908 | [
"MIT"
] | permissive | mrc-ide/malariasimulation | 3188657f6ff9da4ea35646189d0bd75d6e35aa52 | 397a7b7efe90958dd01f97110a1d16c71d041f33 | refs/heads/master | 2023-08-23T11:29:10.050424 | 2023-07-03T15:58:32 | 2023-07-03T15:58:32 | 233,609,741 | 10 | 10 | NOASSERTION | 2023-08-17T15:48:41 | 2020-01-13T14:06:17 | R | UTF-8 | R | false | true | 405 | rd | peak_season_offset.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mosquito_biology.R
\name{peak_season_offset}
\alias{peak_season_offset}
\title{Calculate the yearly offset (in timesteps) for the peak mosquito
season}
\usage{
peak_season_offset(parameters)
}
\arguments{
\item{parameters}{to work from}
}
\description{
Calculate the yearly offset (in timesteps) for the peak mosquito
season
}
|
21ce1416abf270850f6e040768de40eed01cec5f | 0e00ed988bbb9c9ba18c0cfb2784699bad141e51 | /R/get_images.R | 59e5a95224c59d08827b38178144639ae9adb0d1 | [] | no_license | databrew/traveldash | 58e3148e48ac32bd460d68fd8bae750772b4adaf | 563558f83cc602dba6c3808acb07db28871e9faa | refs/heads/master | 2021-05-14T06:14:33.273018 | 2018-11-20T09:22:12 | 2018-11-20T09:22:12 | 116,237,230 | 2 | 3 | null | 2018-04-09T13:05:12 | 2018-01-04T08:52:16 | PLpgSQL | UTF-8 | R | false | false | 631 | r | get_images.R | library(openxlsx)
library(RPostgreSQL)
library(yaml)
library(pool)
library(lubridate)
library(magick)
# Fetch all people records that carry image bytes and decode them into
# magick image objects.
#
# Args:
#   pool: a pool::dbPool handle to the Postgres database.
# Returns: data.frame with person_id, short_name, image_data, plus list
#   columns `binaries` (raw vectors) and `person_image` (magick images).
get_images <- function(pool){
  conn <- poolCheckout(pool)
  # Guarantee the connection goes back to the pool even if the query or
  # decoding below fails (the original leaked the connection on error).
  on.exit(poolReturn(conn), add = TRUE)
  start_time <- Sys.time()
  # Only rows that actually contain image bytes are of interest.
  images <- dbGetQuery(conn,paste0("select person_id,short_name,image_data from pd_wbgtravel.people where image_data is not null;"))
  # Bytea columns come back escaped; decode to raw, then parse with magick.
  images[["binaries"]] <- lapply(images$image_data,postgresqlUnescapeBytea)
  images[["person_image"]] <- lapply(images$binaries,image_read)
  end_time <- Sys.time()
  print(paste0("get_images(): Database upload/download time: ", end_time - start_time))
  return(images)
}
|
ab25ce2fbb3864382400a3c660df9af1db58e776 | e3b939c09405131064b8c4195fca87cb5a8f023b | /experiments/code/theory_brier_score.R | b15774d42367b3afc458a80e30c4ae48df9d7449 | [] | no_license | PhilippPro/ntree_randomForest | 2da543e893610aefd6debe5dfd91b3b8723a3247 | 5053fcb50a98a27e58082e5a774ca308a6cc2a81 | refs/heads/master | 2020-06-14T23:19:02.123493 | 2017-05-10T08:43:35 | 2017-05-10T08:43:35 | 75,399,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,522 | r | theory_brier_score.R | # R-Code for the Brier-Score
# Vergleiche Brier Score für randomForest nach 50 und nach 1000 Bäumen
library(mlr)
library(OpenML)
lrn = list(makeLearner("classif.randomForest", id = "50_rf", par.vals = list(ntree = 50), predict.type = "prob"),
makeLearner("classif.randomForest", id = "1000_rf", par.vals = list(ntree = 1000), predict.type = "prob"))
rdesc = makeResampleDesc(method = "RepCV", predict = "test", reps = 100, folds = 5)
configureMlr(on.learner.error = "warn", show.learner.output = FALSE)
dir = "/home/probst/Paper/Ntree_RandomForest/experiments"
load(paste(dir,"/results/clas.RData", sep = ""))
tasks = rbind(clas_small)
OMLDATASETS = tasks$data.id[!(tasks$task.id %in% c(1054, 1071, 1065))] # Cannot guess task.type from data! for these 3
bmr = list()
for(i in 1:length(OMLDATASETS)) {
print(i)
oml.dset = getOMLDataSet(OMLDATASETS[i])
task = convertOMLDataSetToMlr(oml.dset)
bmr[[i]] = benchmark(lrn, task, resamplings = rdesc, measures = list(acc, ber, multiclass.brier, logloss, multiclass.au1u),
keep.pred = FALSE, models = FALSE, show.info = FALSE)
print(bmr[[i]])
}
i = 29 # passt
i = 54
i = 147
i = 182
# logloss
i = 126
i = 129
# AUC
i = 14
i = 19
i = 62 # AUC -> AUC scheint schlechter werden zu können!
i = 95
#
bmr[[1]]$results
leer = logical(length(bmr))
for(i in 1: length(bmr))
leer[i] = getBMRAggrPerformances(bmr[[i]])[[1]][[2]][4] < getBMRAggrPerformances(bmr[[i]])[[1]][[1]][4]
which(!leer)
save(bmr, file = paste0(dir,"/results/bmr_brier_score.RData"))
|
8305c0bdc3c944d386c07a4c775df9663699bf9c | 4697292c821029c438d7f3675f3d5e69867d3c15 | /intrinsic-rank/code/elbows.R | 1e982c0b2bb3984688fde1e7c63b3f35de48cb17 | [] | no_license | varun-projects/PhD-Thesis---Cross-Validation-for-Unsupervised-Learning | f2f1f89305def2e4dde9a9d710649c958f6ae264 | 7e3fe78b8826d231d849c3c0785e58949447416b | refs/heads/master | 2022-04-17T18:14:55.817601 | 2010-01-29T17:31:00 | 2010-01-29T17:31:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,085 | r | elbows.R |
source( "../../common/code/spiked-data" )
plot_scree <- function( d.est, frob2, spec2, elbow=NA ) {
  # Three-panel scree diagnostic: squared singular values ("f1"), squared
  # residual Frobenius norms ("f2"), and squared residual spectral norms
  # ("f3"), with vertical reference lines at the visual elbow and at the
  # minimizers of the two residual criteria.
  #
  # Args:
  #   d.est: estimated singular values.
  #   frob2: residual squared Frobenius norms for ranks 0..kmax.
  #   spec2: residual squared spectral norms for ranks 0..kmax.
  #   elbow: rank at which a scree "elbow" is marked (NA for no marker).
  # Returns: a ggplot object.
  np <- length( d.est )
  kmax.f <- length( frob2 ) - 1
  kmax.2 <- length( spec2 ) - 1
  # Minimizing ranks; the "- 1" converts 1-based positions to ranks 0..kmax.
  frob2.min <- which.min( frob2 ) - 1
  spec2.min <- which.min( spec2 ) - 1
  # Rescale both residual curves so their minimum equals 1.
  frob2 <- frob2 / min( frob2 )
  spec2 <- spec2 / min( spec2 )
  p <-
  # NOTE(review): factor("f1", "f2", "f3") passes "f2" as `levels` and "f3"
  # as `labels` -- presumably factor(c("f1", "f2", "f3")) was meant.  The
  # seed frame only anchors the aesthetics (each layer carries its own
  # data), so this may have gone unnoticed; confirm before changing.
  ( ggplot( data.frame( x=c(0:np), y=0, type=factor("f1", "f2", "f3") ),
            aes( x, y, colour=type ) )
   # One facet row per diagnostic, each with its own y scale.
   + facet_grid( type ~ ., scales="free_y")
   # Vertical markers (repeated in every facet): elbow = dashed,
   # Frobenius minimizer = dotdash, spectral minimizer = twodash.
   + geom_vline( aes( xintercept=xint, colour=I("f1") ), linetype="dashed",
                 data=data.frame( xint=elbow,
                                  type=factor( c("f1", "f2", "f3") ) ) )
   + geom_vline( aes( xintercept=xint, colour=I("f2") ), linetype="dotdash",
                 data=data.frame( xint=frob2.min,
                                  type=factor( c("f1", "f2", "f3") ) ) )
   + geom_vline( aes( xintercept=xint, colour=I("f3") ), linetype="twodash",
                 data=data.frame( xint=spec2.min,
                                  type=factor( c("f1", "f2", "f3") ) ) )
   # Panel f1: fraction of total squared singular value mass, ranks 1..np.
   + layer( data=data.frame( x=1:np, y=(d.est^2 / sum( d.est^2 ) ), type="f1" ),
            geom=c("point") )
   # Panels f2/f3: rescaled residual norms for ranks 0..kmax.
   + layer( data=data.frame( x=0:kmax.f, y=frob2, type="f2" ),
            geom="point" )
   + layer( data=data.frame( x=0:kmax.2, y=spec2, type="f3" ),
            geom="point" )
   + theme_bw()
   + xlab( "Rank" )
   # The padded strings line the single y label up with the three facets.
   + ylab( paste( " Resid. Spec. Sq. ",
                  "Resid. Frob. Sq. ",
                  "Singular Value Sq.")
          )
   # NOTE(review): opts()/theme_blank() are pre-0.9.2 ggplot2 API; this
   # function requires a legacy ggplot2 version to run.
   + opts( strip.background=theme_blank(),
           strip.text.y=theme_blank(),
           legend.position="none" )
   )
  p
}
scree_sim <- function( spike, n, p, ... ) {
  # Simulate an n x p spiked-signal matrix, compute the squared spectral
  # and Frobenius norms of the residual after removing the k leading
  # estimated singular components for k = 0..min(n, p), and hand the
  # results to plot_scree().  Extra arguments (e.g. elbow=) pass through.
  np <- min( n, p )
  sim <- spiked.data( spike, n, p,
                      left="uniform",
                      right="uniform",
                      noise="white" )
  resid <- sim$signal
  # Positions 1..np+1 hold ranks 0..np.
  spec2 <- rep( NA, np+1 )
  frob2 <- rep( NA, np+1 )
  # Rank-0 residual: the signal matrix itself.
  if( np > 0 ) {
    spec2[1] <- svd( resid, nu=0, nv=0 )$d[1]^2
    frob2[1] <- sum( resid^2 )
  }
  # Peel off one estimated rank-1 component at a time and record the
  # norms of what remains.
  for( i in seq_len( np ) ) {
    u <- sim$u.est[,i,drop=FALSE]
    v <- sim$v.est[,i,drop=FALSE]
    d <- sim$d.est[i]
    resid <- resid - (d * u) %*% t(v)
    spec2[i+1] <- svd( resid, nu=0, nv=0 )$d[1]^2
    frob2[i+1] <- sum( resid^2 )
  }
  # NOTE(review): the two self-assignments below are no-ops, apparently
  # left over from an earlier rescaling step.
  frob2 <- frob2
  spec2 <- spec2
  plot_scree( sim$d.est, frob2, spec2, ... )
}
n <- 100
p <- n
spike.right <- seq(5, 0.25, length=20)
spike.left <- c(20, 15, 10, spike.right)
elbow.right <- 13
elbow.left <- 4
set.seed( 0, "Mersenne-Twister" )
pdf( "../plots/scree-elbow-right.pdf", width=3, heigh=5.75 )
print( scree_sim( spike.right, n, p, elbow=elbow.right ) )
dev.off()
set.seed( 2, "Mersenne-Twister" )
pdf( "../plots/scree-elbow-left.pdf", width=3, heigh=5.75 )
print( scree_sim( spike.left, n, p, elbow=elbow.left ) )
dev.off()
|
1f52dcee96b97c478903b0f7a26dbd720a34ef7a | f1871c39a0ec5d66a1e002bee8433e19de369da7 | /R/utils-data.R | 1f2ae5d072164872c1ccc3a1b54d85938a998bf9 | [
"MIT"
] | permissive | turgut090/torch | 00adc4c63ed10240cd943693379b6cfe6ee3f0a1 | 0b2e04a0d82cd3649aa0ab8de48bdd42c9cff7bf | refs/heads/master | 2023-03-30T13:40:19.939616 | 2021-04-02T13:16:57 | 2021-04-02T13:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,284 | r | utils-data.R | Dataset <- R6::R6Class(
  # Abstract base generator for map-style datasets.  Concrete datasets are
  # created through dataset() below and must override .getitem() (and
  # usually .length()).
  classname = "dataset",
  # Left unlocked so dataset() can attach arbitrary user-supplied members.
  lock_objects = FALSE,
  public = list(
    # Abstract method: fetch the sample for `index`; subclasses implement it.
    .getitem = function(index) {
      not_implemented_error()
    }
  )
)
# Test whether an object is a map-style dataset, i.e. whether "dataset"
# appears anywhere in its class chain.
is_map_dataset <- function(x) {
  "dataset" %in% class(x)
}
# Walk up the inheritance chain of an R6 generator until a public
# `initialize` method is found, and return that method.
get_init <- function(x) {
  gen <- x
  while (is.null(gen$public_methods$initialize)) {
    # Not defined here; look at the parent generator.
    gen <- gen$get_inherit()
  }
  gen$public_methods$initialize
}
#' Helper function to create an R6 class that inherits from the abstract `Dataset` class
#'
#' All datasets that represent a map from keys to data samples should subclass this
#' class. All subclasses should overwrite the `.getitem()` method, which supports
#' fetching a data sample for a given key. Subclasses could also optionally
#' overwrite `.length()`, which is expected to return the size of the dataset
#' (e.g. number of samples) used by many sampler implementations
#' and the default options of [dataloader()].
#'
#' @section Get a batch of observations
#'
#' By default datasets are iterated by returning each observation/item individually.
#' Sometimes it's possible to have an optimized implementation to take a batch
#' of observations (eg, subsetting a tensor by multiple indexes at once is faster than
#' subsetting once for each index), in this case you can implement a `.getbatch` method
#' that will be used instead of `.getitem` when getting a batch of observations within
#' the dataloader.
#'
#' @note
#' [dataloader()] by default constructs a index
#' sampler that yields integral indices. To make it work with a map-style
#' dataset with non-integral indices/keys, a custom sampler must be provided.
#'
#' @param name a name for the dataset. It it's also used as the class
#' for it.
#' @param inherit you can optionally inherit from a dataset when creating a
#' new dataset.
#' @param ... public methods for the dataset class
#' @param parent_env An environment to use as the parent of newly-created
#' objects.
#' @inheritParams nn_module
#'
#' @export
dataset <- function(name = NULL, inherit = Dataset, ...,
                    private = NULL, active = NULL,
                    parent_env = parent.frame()) {
  # The methods supplied through `...` become the public members of the
  # generated R6 class (e.g. initialize, .getitem, .getbatch, .length).
  args <- list(...)
  # `inherit` may itself be a constructor returned by an earlier dataset()
  # call; unwrap the R6 generator stored in its "Dataset" attribute.
  if (!is.null(attr(inherit, "Dataset")))
    inherit <- attr(inherit, "Dataset")
  # R6 resolves the `inherit` symbol in parent_env at class-creation time,
  # so expose the resolved generator in a fresh child environment.
  e <- new.env(parent = parent_env)
  e$inherit <- inherit
  d <- R6::R6Class(
    classname = name,
    lock_objects = FALSE,
    inherit = inherit,
    public = args,
    private = private,
    active = active,
    parent_env = e
  )
  # Pull the (possibly inherited) initialize method so the wrapper below
  # can mirror its formal arguments.
  init <- get_init(d)
  # same signature as the init method, but calls with dataset$new.
  f <- rlang::new_function(
    args = rlang::fn_fmls(init),
    body = rlang::expr({
      d$new(!!!rlang::fn_fmls_syms(init))
    })
  )
  # Carry the generator along as an attribute so later dataset() calls can
  # inherit from this constructor directly.
  attr(f, "Dataset") <- d
  f
}
#' @export
# Subset a dataset by index.  A multi-element index is routed to the
# optional .getbatch method when the dataset provides one; everything
# else goes through .getitem.
`[.dataset` <- function(x, y) {
  batched <- length(y) > 1 && !is.null(x$.getbatch)
  if (batched) {
    x$.getbatch(y)
  } else {
    x$.getitem(y)
  }
}
#' @export
# Number of observations in a dataset, as reported by its .length method.
length.dataset <- function(x) {
  x[[".length"]]()
}
#' Dataset wrapping tensors.
#'
#' Each sample will be retrieved by indexing tensors along the first dimension.
#'
#' @param ... tensors that have the same size of the first dimension.
#'
#' @export
tensor_dataset <- dataset(
  name = "tensor_dataset",
  # Capture the tensors and validate that they can be indexed in lockstep.
  initialize = function(...) {
    tensors <- rlang::list2(...)
    # Size of each tensor along the first (observation) dimension.
    lens <- sapply(tensors, function(x) x$shape[1])
    # Bug fix: the original test `!length(unique(lens))` was TRUE only for
    # an empty tensor list, so mismatched first-dimension sizes were
    # silently accepted; they must raise here instead.
    if (length(unique(lens)) > 1)
      value_error("all tensors must have the same size in the first dimension.")
    self$tensors <- tensors
  },
  # Fetch one (or several) observations by slicing every tensor on dim 1.
  .getitem = function(index) {
    if (is.list(index)) {
      index <- unlist(index)
    }
    lapply(self$tensors, function(x) {
      x[index, ..]
    })
  },
  # Batched access reuses .getitem, which already accepts vector indices.
  .getbatch = function(index) {
    self$.getitem(index)
  },
  # Number of observations = first-dimension size of the first tensor.
  .length = function() {
    self$tensors[[1]]$shape[1]
  }
)
#' Dataset Subset
#'
#' Subset of a dataset at specified indices.
#'
#' @param dataset (Dataset): The whole Dataset
#' @param indices (sequence): Indices in the whole set selected for subset
#'
#' @export
dataset_subset <- dataset(
  # Keep a reference to the parent dataset plus the selected indices.
  initialize = function(dataset, indices) {
    self$dataset <- dataset
    self$indices <- indices
  },
  # Translate a position in the subset into a position in the parent.
  .getitem = function(idx) {
    self$dataset[self$indices[idx]]
  },
  # The subset is exactly as long as the index vector.
  .length = function() {
    length(self$indices)
  }
)
|
7f7f0ab09f08648d848148d29c18fc4d5182e8dd | 2724f6e874c9500b0df726c2d6903fe486b8cc84 | /man/get_irefindex_list.Rd | 88e94510053080cce0b414e85e0a9498b489dc03 | [
"MIT"
] | permissive | lagelab/Genoppi | 62006e9f123a1857580e5ebd88885c945483f6e3 | f78e7b3e523650e7ae211fc93b79dff3e28dd03f | refs/heads/master | 2023-08-31T09:51:59.007937 | 2022-10-12T14:17:17 | 2022-10-12T14:17:17 | 83,465,982 | 23 | 10 | null | null | null | null | UTF-8 | R | false | true | 803 | rd | get_irefindex_list.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_irefindex_list.R
\name{get_irefindex_list}
\alias{get_irefindex_list}
\title{Retrieve irefindex for a given bait}
\usage{
get_irefindex_list(bait, n = 1)
}
\arguments{
\item{bait}{string. name of bait protein}
\item{n}{numeric. Minimum number of publications that this interaction has been described in.}
}
\value{
data.frame containing gene and significant columns for all non-bait IRefIndex genes
(significant=T for IRefIndex interactors of bait). NULL if bait not found in IRefIndex.
}
\description{
Use irefindex_table data to get IRefIndex interactors and non-interactors of bait.
See \code{?irefindex_table} for more details about the data set.
}
\examples{
\dontrun{
df1 <- get_irefindex_list('BCL2',n = 1)
}
}
|
172db70323cc325ff2c1485ceafcf42c23ee781c | 0c20139f2d1a741a3105fce10a38a883263054cb | /gradmcmc-1.0/models/wildflower_nc/run_model.R | 8bb5c34ea103eb274a0432ca051619314f17e3e5 | [
"MIT"
] | permissive | caimiao0714/HMC_ECS | 53b8e4cfdda6f444dd5e274453abb7fd7f9beeac | fa67e1ff9589d8c496ab5ad4c2af07517294b288 | refs/heads/master | 2020-06-30T07:35:32.221038 | 2019-12-12T06:29:45 | 2019-12-12T06:29:45 | 200,453,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,673 | r | run_model.R | ## Sourcing this file will run everything for this model, given the MCMC
## arguments are in the global workspace.
setwd(paste0('models/',m))
## Load empirical data and inits
data <- readRDS('data.RDS')
params.jags <-
c("yearInterceptSD", "plantInterceptSD", "plantSlopeSD", "intercept",
"slope", "yearInterceptEffect_raw", "plantSlopeEffect_raw", "plantInterceptEffect_raw")
inits <-
list(list(
yearInterceptSD = 1,
plantInterceptSD = 1,
plantSlopeSD = 1,
intercept = rep(0,data$Nstage), slope = 0,
yearInterceptEffect_raw= rep(0, data$Nyear),
plantInterceptEffect_raw= rep(0, data$Nplant),
plantSlopeEffect_raw= rep(0, data$Nplant)))
## stan.fit <- stan(file='wildflower_nc.stan', data=data, init=inits,seed=11,
## pars=params.jags, iter=1000, chains=1)
## shinystan::launch_shinystan(stan.fit)
## jags.fit <- jags(data=data, inits=inits, parameters.to.save=params.jags,
## model.file='wildflower_nc.jags', n.chains=1, n.iter=2000)
## jags.sims <- shinystan::as.shinystan(jags.sims)
## shinystan::launch_shinystan(jags.sims)
## Get independent samples from each model to make sure they are coded the
## same
if(verify)
verify.models(model=m, params.jags=params.jags, inits=inits, data=data,
Nout=Nout.ind, Nthin=Nthin.ind)
sims.ind <- readRDS(file='sims.ind.RDS')
sims.ind <- sims.ind[sample(x=1:NROW(sims.ind), size=length(seeds)),]
inits <- lapply(1:length(seeds), function(i)
list(
yearInterceptSD=sims.ind$yearInterceptSD[i],
plantInterceptSD=sims.ind$plantInterceptSD[i],
plantSlopeSD=sims.ind$plantSlopeSD[i],
intercept = as.numeric(sims.ind[i, grep('intercept', names(sims.ind))]),
yearInterceptEffect =
as.numeric(sims.ind[i, grep('yearInterceptEffect', names(sims.ind))]),
plantInterceptEffect =
as.numeric(sims.ind[i, grep('plantInterceptEffect', names(sims.ind))]),
plantSlopeEffect =
as.numeric(sims.ind[i, grep('plantSlopeEffect', names(sims.ind))]),
slope=sims.ind$slope[i])
)
## Fit empirical data with no thinning for efficiency tests
fit.empirical(model=m, params.jag=params.jags, inits=inits, data=data,
lambda=lambda.vec, delta=delta, metric=metric, seeds=seeds,
Nout=Nout)
## library(coda)
## library(shinystan)
## stan.fit <- readRDS(file='fits/stan_nuts_diag_e_0.8_10_.RDS')
## stan.fit <- as.shinystan(mcmc.list(as.mcmc(readRDS(file='sims.ind.RDS'))))
## shinystan::launch_shinystan(stan.fit)
message(paste('Finished with model:', m))
setwd('../..')
|
24e3975d282e677768d8b2c659b110db5340078a | 1da1269745b6ce6806ffd7a15668fc27470cd921 | /man/rcrainfo_rcr_ca_authority.Rd | ebe2317082d84debdd5711d75f7d0965ad869720 | [] | no_license | markwh/envirofacts | d0c3bb7495060fd00b825c1e72602479f8a92b72 | 815ba95808a37f552d9a7041be532817e4766b90 | refs/heads/master | 2021-01-10T07:14:32.874354 | 2019-03-27T02:28:15 | 2019-03-27T02:28:15 | 50,798,175 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,341 | rd | rcrainfo_rcr_ca_authority.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcrainfo_rcr_ca_authority.R
\name{rcrainfo_rcr_ca_authority}
\alias{rcrainfo_rcr_ca_authority}
\title{Retrieve rcr ca authority data from rcrainfo database}
\usage{
rcrainfo_rcr_ca_authority(HANDLER_ID = NULL, EFFECTIVE_DATE = NULL,
RESPONSIBLE_AGENCY = NULL, ISSUE_DATE = NULL, REVOKE_DATE = NULL,
REPOSITORY = NULL, ACTIVITY_LOCATION = NULL, OWNER = NULL,
AUTHORITY_TYPE = NULL, PERSON_OWNER = NULL, PERSON_ID = NULL,
LEAD_PROGRAM = NULL, SUB_ORGANIZATION_OWNER = NULL,
SUB_ORGANIZATION = NULL)
}
\arguments{
\item{HANDLER_ID}{e.g. 'AK0000374959'. See Details.}
\item{EFFECTIVE_DATE}{e.g. '19-OCT-00'. See Details.}
\item{RESPONSIBLE_AGENCY}{e.g. 'E'. See Details.}
\item{ISSUE_DATE}{e.g. 'NA'. See Details.}
\item{REVOKE_DATE}{e.g. 'NA'. See Details.}
\item{REPOSITORY}{e.g. 'NA'. See Details.}
\item{ACTIVITY_LOCATION}{e.g. 'AK'. See Details.}
\item{OWNER}{e.g. 'HQ'. See Details.}
\item{AUTHORITY_TYPE}{e.g. 'X'. See Details.}
\item{PERSON_OWNER}{e.g. 'NA'. See Details.}
\item{PERSON_ID}{e.g. 'NA'. See Details.}
\item{LEAD_PROGRAM}{e.g. 'NA'. See Details.}
\item{SUB_ORGANIZATION_OWNER}{e.g. 'NA'. See Details.}
\item{SUB_ORGANIZATION}{e.g. 'NA'. See Details.}
}
\description{
Retrieve rcr ca authority data from rcrainfo database
}
|
54e7869e76c179dd98b451d7dc57b6987121dfdb | 79bf7255ac2d8dd2f23345cc0ba2fbffffb77f74 | /Estrutura condicional.R | f07c094f55e0d38c362e56adf2b9835a8f96adf9 | [] | no_license | pburil/ETL_eletiva | 17abae1aba58ea027e46493bc79c98a2e7e31fec | 1260cebf8f1ace139c05b9f2b6426181eda05f05 | refs/heads/main | 2023-05-15T05:14:30.347723 | 2021-06-14T15:35:49 | 2021-06-14T15:35:49 | 357,028,647 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 200 | r | Estrutura condicional.R |
#Estrutura condicional
# Goals scored by Team A (Sport)
Sport <- 1
# Goals scored by Team B (Santa Cruz)
SantaCruz <- 3
# Announce the result when Santa Cruz outscores Sport
if (SantaCruz > Sport) {
  print("Santa Cruz maior de Pernambuco")
}
|
3976cbf7646af88aa0140d5388795956b5baa5c7 | ae30c1c5c87f2055b5917555086992ea7bb08bb1 | /clustering.R | 39020bc52e8b888243fc3970d05395e8827fd94b | [] | no_license | CaoCharles/Multivariate-Analysis- | 48bbfdd9eadd76fb2e92e89fc01fb71e0b2809a7 | a74501e5e420cd4c5d19f1f8243538839aa8f79f | refs/heads/master | 2020-03-18T11:13:41.701713 | 2018-06-04T09:34:19 | 2018-06-04T09:34:19 | 134,658,294 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,104 | r | clustering.R | olive <- read.table("olive.txt",h=T)
# Keep only the fatty-acid measurement columns (3:10); the leading columns
# (which include Region, used for comparison below) are excluded from
# clustering.
newolive <- olive[,3:10]
# Hierarchical clustering: agnes (agglomerative nesting) on standardized
# variables with "single" linkage.
library(cluster)
# daisy() builds the dissimilarity matrix; stand=T standardizes each column.
# NOTE(review): prefer TRUE/FALSE over T/F throughout this script (T and F
# are ordinary variables and can be reassigned).
x <- daisy(newolive, stand=T)
agn<-agnes(x,metric="euclidean",method="single")
# Interactive display of both the dendrogram and the banner plot:
plot(agn,ask=T)
# Or request only the dendrogram:
plot(agn,which.plots=2)
# Partitioning Method -----------------------------------------------------
# (1) K-means clustering with 3 centers and iter.max = 20.
km <- kmeans(newolive,3,20)
# Show the clustering result on the 2-D plane of PC1 vs PC2:
pca.newolive <- princomp(scale(newolive,scale=TRUE,center=TRUE),cor=FALSE)
pcs.newolive <- predict(pca.newolive)
plot(pcs.newolive[,1:2], type="n")
text(pcs.newolive,as.character(km$cluster),col=km$cluster,cex=0.6)
# For comparison, the same projection labelled by the known Region values:
plot(pcs.newolive[,1:2],type="n",xlab='1st PC',ylab='2nd PC')
text(pcs.newolive[,1:2],as.character(olive$Region),col=olive$Region,cex=0.6)
# From these two plots, the original regions (shown in the PCA plot) somehow
# disagree with the K-means clustering, especially on the overlap of
# region 1 with region 2, and of region 1 with region 3.
# (2) pam: partitioning around medoids, k = 3, on a dissimilarity matrix.
pa <- pam(daisy(newolive,stand = T), 3, diss = T)
plot(pa, ask = T)
# The clustering result (which takes a few seconds) is projected on a 2-D PC
# or MDS space. The SC (Silhouette Coefficient) comes out around 0.3, which
# indicates a weak clustering structure. Check whether the clustering
# recovers the original "Region" groups:
pa$clustering
# Compare this result with PCA as well:
plot(pcs.newolive[,1:2], type="n")
text(pcs.newolive,as.character(pa$clustering),col=pa$clustering,cex=0.6)
# Self-Organizing Maps (SOM) ----------------------------------------------
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider installing once, outside the script.
install.packages('som')
library(som)
n.newolive<-normalize(newolive, byrow=F) # Standardize variables (by column)
install.packages('kohonen')
library(kohonen)
# Run SOM with 20x20 = 400 grids (neurons); default number of iterations = 100:
olive.som <- som(n.newolive,grid = somgrid(20, 20, "hexagonal"))
# First mark the "Region" labels on the resulting SOM:
plot(olive.som,type="mapping",labels=olive[,1])
# Another display to show clustering (neighbour distances):
plot(olive.som, type="dist.neighbours", main = "SOM neighbour distances")
# Cut the codebook-vector dendrogram into 5 clusters and draw the cluster
# boundaries on the map. (Translated from Chinese: "split into five clusters
# and show them".)
som.hc <- cutree(hclust(dist(olive.som$codes[[1]])), 5)
add.cluster.boundaries(olive.som,som.hc)
# Observe the detailed cluster assignment for each object.
# (Translated from Chinese: "which cluster each circle is assigned to".)
cutree(hclust(dist(olive.som$codes[[1]])), 5)
# Build a new SOM with a different number of iterations, e.g. set rlen to the
# total number of observations (572):
olive.som<-som(n.newolive, grid = somgrid(20, 20, "hexagonal"), rlen=572)
plot(olive.som,type="mapping",labels=olive[,1])
plot(olive.som, type="dist.neighbours", main = "SOM neighbour distances")
som.hc <- cutree(hclust(dist(olive.som$codes[[1]])), 3)
add.cluster.boundaries(olive.som,som.hc)
e66ad9177f1d0fad3cae571a73eb5b5c9d0898d7 | 29585dff702209dd446c0ab52ceea046c58e384e | /CPHshape/R/find.hazard.R | f39141e418994eb4d263cfe83f90b8e5ad886841 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 538 | r | find.hazard.R | find.hazard <- function(t, h.val, h.ranges, type, mode){
if(type=="increasing"){
h <- find.h.up(t, h.val, h.ranges)
}
if(type=="decreasing"){
h <- find.h.down(t, h.val, h.ranges)
}
if(type=="unimodal"){
if(t<mode){
h <- find.h.up(t, h.val, h.ranges)
}
if(t>mode){
h <- find.h.down(t, h.val, h.ranges)
}
if(t==mode){
h <- Inf
}}
if(type=="ushaped"){
if(t<mode){
h <- find.h.down(t, h.val, h.ranges)
}
if(t>mode){
h <- find.h.up(t, h.val, h.ranges)
}
if(t==mode){
h <- 0
}}
return(h)
}
|
bd3520c5a0f585729e862e7a94065fbcb0d82a1f | 43c2f40d81d771c66b8043c6798e5af6b0f6feee | /FY2017/Scripts/tbl_6.8_DOD.BA.By.Title.R | 076288b24363a7cac9af67956ed963e52bc4747e | [] | no_license | ctmann/DOD-Green-Book | 218cc006581cb6c9e7e70f349eabee51061bbb6c | f4ce1281c4f73df29d39fd04386444d158caa37e | refs/heads/master | 2021-08-30T01:49:14.457647 | 2021-08-12T18:48:54 | 2021-08-12T18:48:54 | 71,018,063 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,297 | r | tbl_6.8_DOD.BA.By.Title.R | #' Original DoD Comptroller zip file downloaded here:
#' http://comptroller.defense.gov/BudgetMaterials.aspx
#' To view as PDF:
#' http://comptroller.defense.gov/Portals/45/Documents/defbudget/fy2017/FY17_Green_Book.pdf
#'
#' Table 6.8: DOD Budget Authority (BA) by Title.
#' Downloads the FY2017 Green Book archive, extracts the spreadsheet, reshapes
#' the current-dollar subset into long (tidy) form, and writes a dated CSV.
#'
# Libraries ---------------------------------------------------------------
library(tidyr)
library(dplyr)
library(readxl)
library(stringr)
library(readr)
# Import Data ------------------------------------------------------------
# Temporary scaffolding for the downloaded archive.
my.temporary.zipped.file <- tempfile()
my.temporary.zipped.folder <- tempdir()
# Declare source data origin.
url <- "http://comptroller.defense.gov/Portals/45/Documents/defbudget/fy2017/FY_2017_Green_Book.zip"
spreadsheet.name <- "FY17 PB Green Book Chap 6/FY17 6-8_DoD BA by Title.xlsx"
# Download source data to the temp location. The original called download(),
# which belongs to the unloaded 'downloader' package; download.file() is base
# R, and mode = "wb" keeps the zip binary-safe on Windows.
download.file(url = url, destfile = my.temporary.zipped.file, mode = "wb")
unzip(my.temporary.zipped.file, exdir = my.temporary.zipped.folder)
# Full path of the extracted spreadsheet.
filename <- sprintf('%s/%s', my.temporary.zipped.folder, spreadsheet.name)
# Reshape -----------------------------------------------------------------
#excel_sheets(filename)
df.raw <- read_excel(filename, skip = 4)
# Flatten -----------------------------------------------------------------
# Keep the current-dollar subset (rows 2-10), drop the two spacer columns.
df <- df.raw[2:10, -2:-3]
# Flatten: one row per (title, fiscal year).
df.flat <- gather(df, Fiscal.Year, Amount, -1)
# Fixing ------------------------------------------------------------------
# Reported amounts are in millions of dollars.
df.flat$Amount <- df.flat$Amount * 1e6
# Strip digits and trailing dots from the title text.
df.flat$`Public Law Title` <- str_trim(gsub("[0-9.]+", "", df.flat$`Public Law Title`))
# Split off the 'FY' prefix from the Fiscal.Year column, keep the numeric year.
df.flat <- separate(df.flat, Fiscal.Year, c('trash', 'FY'), convert = TRUE)
df.flat <- df.flat[, -2]
df.flat$Deflator.Type <- "Current.Dollars"
df.flat$Source <- "Table 6.8 DOD BA By Title"
# Export ------------------------------------------------------------------
# Output filename, stamped with the processing time.
mylocation <- "../Data/Processed"
myfilename <- "tbl.6.8_DOD.BA.By.Title"
mydate <- paste('Updated', format(Sys.time(), format = "_%Y-%m-%d_%H%M"), sep = "")
my.file <- sprintf("%s/%s_%s.csv", mylocation, myfilename, mydate)
write_csv(df.flat, my.file)
|
26d1dd01fe050cd12137e09fd1b44e5d1bb2edf2 | 29585dff702209dd446c0ab52ceea046c58e384e | /DDIwR/R/getMetadata.R | e1a9b1f7304658eaa8ca5e7119be7351e39b94ef | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,625 | r | getMetadata.R | require(XML)
# Extract variable metadata (variable labels and category/value labels) from
# one or more DDI codebook XML files.
#
# Arguments:
#   xmlpath  - path handed to treatPath(..., type = "XML"), a package helper
#              not visible in this file; presumably a DDI XML file or a
#              directory of them -- TODO confirm against treatPath().
#   OS       - passed to getEnter() (another package helper) to pick the
#              end-of-line sequence written into any generated setup script.
#   saveFile - if TRUE, the cat() output below is redirected with sink() into
#              a "<filename>.R" setup script, one per XML file.
#   ...      - only "fromsetupfile" is recognised; when TRUE the per-file
#              progress messages are suppressed.
#
# Returns the metadata list (with components $varlab and $vallab) invisibly,
# but only when exactly one XML file was processed; for multiple files the
# function is called for its side effects only.
getMetadata <- function(xmlpath, OS = "windows", saveFile=FALSE, ...) {
    # TODO: detect DDI version or ask the version through a dedicated argument
    other.args <- list(...)
    enter <- getEnter(OS=OS)
    fromsetupfile <- FALSE
    if ("fromsetupfile" %in% names(other.args)) {
        fromsetupfile <- other.args$fromsetupfile
    }
    tp <- treatPath(xmlpath, type="XML")
    currdir <- getwd()
    # NOTE(review): the working directory is always changed (the saveFile
    # guard below is commented out) and only restored by setwd(currdir) at
    # the very end; an error in between leaves the caller in tp$completePath.
    # if (saveFile) {
        setwd(tp$completePath)
    # }
    singlefile <- length(tp$files) == 1
    if (!fromsetupfile) {
        cat("Processing:\n")
    }
    for (ff in seq(length(tp$files))) {
        if (!fromsetupfile) {
            cat(tp$files[ff], "\n")
        }
        if (saveFile) {
            # Redirect everything cat() prints below into "<filename>.R".
            sink(paste(tp$filenames[ff], "R", sep="."))
        }
        # Walk codeBook -> dataDscr -> var elements of the DDI document.
        dd <- xmlTreeParse(tp$files[ff])$doc$children$codeBook
        #### !!! ####
        # NEVER use getNodeSet() it's toooooo slooooow!!!
        # use instead xmlElementsByTagName()
        dd <- xmlElementsByTagName(dd, "dataDscr")[[1]]
        dd <- xmlElementsByTagName(dd, "var")
        xmlVarNames <- as.vector(sapply(dd, xmlGetAttr, "name"))
        # return(drop(xmlVarNames))
        metadata <- list()
        metadata$varlab <- list()
        metadata$vallab <- list()
        if (saveFile) {
            cat("metadata <- list()", enter)
            cat("metadata$varlab <- list()", enter)
            cat("metadata$vallab <- list()", enter, enter)
        }
        for (i in seq(length(dd))) {
            # Variable label: the first <labl> child. Double quotes and
            # backslashes are normalised so the value can be embedded safely
            # inside the generated R code.
            # metadata$varlab[[xmlVarNames[i]]] <- xmlValue(getNodeSet(dd[[i]], "//labl[@level='variable']")[[1]])
            varlab <- xmlValue(xmlElementsByTagName(dd[[i]], "labl")[[1]])
            varlab <- gsub("\"", "'", varlab)
            varlab <- gsub("\\\\", "/", varlab)
            metadata$varlab[[xmlVarNames[i]]] <- varlab
            if (saveFile) {
                cat(paste("metadata$varlab$", xmlVarNames[i], " <- \"", varlab, "\"", enter, sep=""))
            }
            # Value labels come from the variable's <catgry> children.
            #vallabs <- unlist(lapply(getNodeSet(dd[[i]], "//labl[@level='category']"), xmlValue))
            vallabs <- xmlElementsByTagName(dd[[i]], "catgry")
            if (length(vallabs) > 0) {
                # metadata$vallab[[xmlVarNames[i]]] <- unlist(lapply(getNodeSet(dd[[i]], "//catValu"), xmlValue))
                values <- as.vector(unlist(lapply(lapply(vallabs, xmlElementsByTagName, "catValu"), function(x) {
                    return(xmlValue(x[[1]][[1]]))
                })))
                values <- gsub("\"", "'", values)
                values <- gsub("\\\\", "/", values)
                # Keep only the categories that actually carry a <labl>.
                labl <- as.vector(lapply(vallabs, xmlElementsByTagName, "labl"))
                havelbls <- unlist(lapply(labl, function(x) length(x) > 0))
                values <- values[havelbls]
                labl <- labl[havelbls]
                if (length(values) > 0) {
                    metadata$vallab[[xmlVarNames[i]]] <- values
                    # If every value parses as a number, keep the numeric
                    # form; the sentinel string flags any coercion warning.
                    testNum <- tryCatch(as.numeric(values),
                             warning = function(x) {
                        return("...string...!!!")
                    })
                    if (all(testNum != "...string...!!!")) {
                        metadata$vallab[[xmlVarNames[i]]] <- testNum
                        if (saveFile) {
                            cat(paste("metadata$vallab$", xmlVarNames[i], " <- c(",
                                paste(testNum, collapse=", "), ")", enter, sep=""))
                        }
                        justlbls <- as.vector(unlist(lapply(labl, function(x) {
                            return(xmlValue(x[[1]][[1]]))
                        })))
                        justlbls <- gsub("\"", "'", justlbls)
                        justlbls <- gsub("\\\\", "/", justlbls)
                        names(metadata$vallab[[xmlVarNames[i]]]) <- justlbls
                        if (saveFile) {
                            cat(paste("names(metadata$vallab$", xmlVarNames[i], ") <- c(\"",
                                paste(justlbls, collapse="\", \""), "\")", enter, sep=""))
                        }
                    }
                    else {
                        # Non-numeric values: write them out as a plain
                        # character vector taken from <catValu> directly.
                        justlbls <- as.vector(unlist(lapply(lapply(vallabs, xmlElementsByTagName, "catValu"), function(x) {
                            return(xmlValue(x[[1]][[1]]))
                        })))
                        justlbls <- gsub("\"", "'", justlbls)
                        justlbls <- gsub("\\\\", "/", justlbls)
                        if (saveFile) {
                            cat(paste("metadata$vallab$", xmlVarNames[i], " <- c(\"",
                                paste(justlbls, collapse="\", \""), "\")", enter, sep=""))
                        }
                    }
                }
            }
            cat(enter)
        }
        if (saveFile) {
            # Close the sink opened for this file's setup script.
            sink()
        }
    }
    setwd(currdir)
    if (singlefile) {
        return(invisible(metadata))
    }
}
|
a36ed6e97eee4a954319660cd8b33e6ad2c330ca | ce68a85c4a6c5d474a6a574c612df3a8eb6685f7 | /src/book/R with application to financial quantitive analysis/CH-11/CH-11-02.R | 2704d04acba75f62da507bcece6133196d479e3b | [] | no_license | xenron/sandbox-da-r | c325b63114a1bf17d8849f076bfba22b6bdb34a3 | c217fdddc26ed523b3860e2000afc699afac55a2 | refs/heads/master | 2020-04-06T06:58:17.049181 | 2016-08-24T06:16:32 | 2016-08-24T06:16:32 | 60,466,314 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,577 | r | CH-11-02.R | ########################################################
# Description:
# 1.for Book 'R with applications to financial quantitive analysis'
# 2.Chapter: CH-11-02
# 3.Section: 11.2
# 4.Purpose: herd behavior through quantile regression
# 5.Author: Liu Xi, polished by Qifa Xu
# 6.Date: Apr 03, 2014.
# 7.Revised: Aug 31, 2014.
########################################################
# Contents:
# 1. read data from EXCEL file
# 2. set check function
# 3. calculate and show CSSD and CSAD
# 4. source HerdBehavior_MR.R for mean regression
# 5. save results
#############################################################
# 0. Initializing
# (1) set path
setwd('F:/programe/book/R with application to financial quantitive analysis/CH-11')
rm(list=ls())
# (2) load packages
library('RODBC') # for reading EXCEL file
library(KernSmooth) # for kernel smooth
library(quantreg) # for quantile regression
library(splines) # for spline functions
library(qrnn) # for quantile regression neural network
library(fGarch) # for GARCH model
library(caret) # for classification and regression training
library(fBasics) # for markets and basic statistics
source('Sub-11.R') # our own functions
# 1. load data from last example
load('HerdBeh.RData')
# 2. source HerdBehavior_QR.R for quantile regression
source("HerdBehavior_QR.R")
HerdBehavior_QR(Data, CS=CSSD, Result1, Result2)
HerdBehavior_QR(Data, CS=CSAD, Result1, Result2)
|
847015aade5d9f6dc68e9ef8aa616d34a681c4af | f80d13fa51bf10e3c4af2fd45423647600ad5117 | /man/stageRunner__run.Rd | 114d24263210bb42fc015c1b3cfb138e83b0479a | [
"MIT"
] | permissive | tonglu/stagerunner | f3e33930f6f8aa8fbc59fd89195403539bd758f8 | 18af11683334d6cb046ca63f1ba274b9c9adb52e | refs/heads/master | 2021-03-12T22:48:01.676590 | 2014-02-18T05:21:02 | 2014-02-18T05:21:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,526 | rd | stageRunner__run.Rd | \name{stageRunner__run}
\alias{stageRunner__run}
\title{Run the stages in a stageRunner object.}
\usage{
stageRunner__run(stage_key = NULL, normalized = FALSE)
}
\arguments{
\item{stage_key}{an indexing parameter. Many forms are
accepted, but the easiest is the name of the stage. For
example, if we have \code{stageRunner$new(context,
list(stage_one = some_fn, stage_two = some_other_fn))}
then using \code{run('stage_one')} will execute
\code{some_fn}. Additional indexing forms are logical
(which stages to execute), numeric (which stages to
execute by indices), negative (all but the given stages),
character (as above), and nested forms of these. The
latter refers to instances of the following:
\code{stageRunner$new(context, list(stage_one =
stageRunner$new(context, substage_one = some_fn,
substage_two = other_fn), stage_two = another_fn))}.
Here, the following all execute only substage_two:
\code{run(list(list(FALSE, TRUE), FALSE))},
\code{run(list(list(1, 2)))},
\code{run('stage_one/substage_two')},
\code{run('one/two')}, \code{run(list(list('one',
'two')))}, \code{run(list(list('one', 2)))} Notice that
regular expressions are allowed for characters. The
default is \code{NULL}, which runs the whole sequences of
stages.}
\item{normalized}{logical. A convenience recursion
performance helper. If \code{TRUE}, stageRunner will
assume the \code{stage_key} argument is a nested list of
logicals.}
}
\description{
Run the stages in a stageRunner object.
}
|
4af7f76afba0646be07ae9a56b25bb0f84075f95 | b37e06b480d4f6bebddd98c1eb28eeba191ad641 | /scripts/r/counts_lowplex.R | 3f6247458702955a1897bef5b58269b59a205158 | [] | no_license | hrk2109/barcoding | b33f0efa721e26437a9149990c592fe7a7300707 | 72a5cd20fe72dd1eb846de9c60b93a408f117f47 | refs/heads/master | 2020-03-27T03:53:29.481291 | 2018-08-07T09:40:19 | 2018-08-07T09:40:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,064 | r | counts_lowplex.R | library(ggplot2)
library(Biostrings)
library(dplyr)
library(tidyr)
library(viridis)
library(data.table)
# Read demultiplexing count files, attach human-readable barcode names, and
# write two barplot PNGs: "counts_group.png" (faceted per barcode pair) and
# "counts_nogroup.png" (single flat panel).
#
# counts:      character vector of *.demux.counts file paths; each file is
#              expected to have three columns (leading barcode index,
#              combined barcode index, count) -- TODO confirm format.
# barcodePath: FASTA file whose sequence names map the 0-based barcode
#              indices to display names.
# grouped:     kept for interface compatibility; currently unused.
plot.counts = function(counts, barcodePath, grouped = FALSE) {
  # Load every counts file and tag each row with its source file.
  # (Built with lapply + one rbind instead of growing a data.frame in a loop;
  # the original also kept a counter `i` that was never read.)
  ds = do.call(rbind, lapply(counts, function(l) {
    cbind(as.data.frame(fread(l, stringsAsFactors = FALSE)), l)
  }))
  colnames(ds) = c("IdxFirst", "IdxCombined", "Counts", "Run")
  # Map the 0-based barcode indices to the names in the FASTA file.
  barcodes = readDNAStringSet(barcodePath)
  bc_names = names(barcodes)
  ds$NameLeading = bc_names[ds$IdxFirst + 1]
  ds$NameTrailing = bc_names[ds$IdxCombined + 1]
  ds$BarcodePair = paste(bc_names[ds$IdxFirst + 1], bc_names[ds$IdxCombined + 1], sep = "--")
  # Faceted barplot: one panel per barcode pair, runs dodged side by side.
  g = ggplot(data = ds, aes(x = BarcodePair, y = Counts, fill = Run)) +
    facet_wrap(~BarcodePair, scales = "free_x") +
    geom_bar(stat = "identity", position = position_dodge(width = 1)) +
    geom_text(aes(label = Counts, y = mean(range(Counts))), color = "black",
              position = position_dodge(1), size = 3.5, angle = 90) +
    scale_color_brewer(palette = "Set1") +
    theme(legend.position = "top", legend.direction = "vertical") +
    coord_cartesian(ylim = c(0, max(ds$Counts) * 1.1))
  ggsave("counts_group.png", g, width = 36, height = 24, dpi = 100, units = "cm")
  # Flat barplot: all barcode pairs on one axis.
  g = ggplot(data = ds, aes(x = BarcodePair, y = Counts, fill = Run)) +
    geom_bar(stat = "identity", position = position_dodge(width = 1)) +
    geom_text(aes(label = Counts), vjust = .4, hjust = -.1, color = "black",
              position = position_dodge(0.9), size = 3.5, angle = 90) +
    scale_color_brewer(palette = "Set1") +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 90, hjust = 0)) +
    theme(legend.position = "top", legend.direction = "vertical") +
    coord_cartesian(ylim = c(0, max(ds$Counts) * 1.1))
  ggsave("counts_nogroup.png", g, width = 36, height = 24, dpi = 100, units = "cm")
}
plot.counts(c("m54007_170701_183412.subreadset.demux.counts",
"m54007_170702_064558.subreadset.demux.counts",
"m54200_170625_190247.subreadset.demux.counts",
"m54200_170626_051342.subreadset.demux.counts"),
"Sequel_RSII_16_barcodes_v1.fasta")
|
d4350374a4af58bbbbf7642ab2ade0cace79439e | b53b817f7025ba2f479fb0e2fd56a49f3ff42c60 | /prcc.R | fe8d60c1d4e2b0ba034b69cbbafc13660defcc09 | [] | no_license | NPSDC/Cross_Cancer | 1ebc51d6dedae9822a2de91c6b8e979e9b0fed18 | 7cbffc262a6a9fa26d6996e727347280e32af565 | refs/heads/master | 2021-09-06T01:39:18.641322 | 2018-02-01T10:43:27 | 2018-02-01T10:43:27 | 118,959,710 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,819 | r | prcc.R | library(DESeq2)
library(pamr)
library(biomaRt)
mart=useMart("ensembl")
ensembl=useDataset("hsapiens_gene_ensembl",mart=mart)
load('~/Honours/Stage-Prediction-of-Cancer/papillary/environment/dds.RData')
load('~/Honours/Stage-Prediction-of-Cancer/ccrcc/environment/kirc_data.RData')
met.genes.df <- read.csv('metabolic_genes.csv')
met.genes <- as.character(met.genes.df$GENE.ID.1)
genes.entrez = getBM(attributes = c('ensembl_gene_id', 'entrezgene'), filters = 'ensembl_gene_id', values = g, mart = ensembl)
rownames(dds) <- remove.dots(rownames(dds))
length(intersect(rownames(prcc.data), met.genes)) == length(met.genes)
length(intersect(rownames(data), met.genes)) == length(met.genes)
met.df.prcc <- assay(dds)[met.genes,]
# Strip the version suffix (everything from the first dot onward) from
# Ensembl gene ids, e.g. "ENSG00000141510.11" -> "ENSG00000141510".
# Ids without a dot are returned unchanged; "" stays "" (the old sapply +
# strsplit version returned NA for empty strings).
#
# ens.ids.all: character vector of (possibly versioned) Ensembl ids.
# Returns a character vector of the same length, named by the original ids
# to mirror the names the original sapply() implementation produced.
#
# NOTE(review): this script calls remove.dots() a few statements before this
# definition; when run top-to-bottom the definition should be moved up.
remove.dots <- function(ens.ids.all)
{
  stripped <- sub("\\..*$", "", ens.ids.all)
  setNames(stripped, ens.ids.all)
}
prcc.matched.data <- prcc.data[,match(colnames(dds), colnames(prcc.data))]
prcc.matched.data <- prcc.matched.data[-which(rowSums(assay(prcc.matched.data)) < 10),]
prcc.matched.data.met <- prcc.matched.data[intersect(met.genes, rownames(prcc.matched.data)),]
dds.obj <- DESeqDataSetFromMatrix(assay(prcc.matched.data.met),
colData = colData(prcc.matched.data.met), design = ~shortLetterCode)
dds.obj.ent <- DESeqDataSetFromMatrix(assay(prcc.matched.data),
colData = colData(prcc.matched.data), design = ~shortLetterCode)
dds.obj <- DESeq(dds.obj, parallel = T)
dds.obj.ent <- DESeq(dds.obj.ent, parallel = T)
res <- results(dds.obj, contrast = c('shortLetterCode', 'TP', 'NT'), parallel = T)
res.ent <- results(dds.obj.ent, contrast = c('shortLetterCode', 'TP', 'NT'), parallel = T)
summary(res)
summary(res.ent)
g <- get.genes(res.prcc[[1]], 2, 0.05, 0.05)
g.ent <- intersect(get.genes(res.prcc[[2]], 2, 0.05, 0.05), met.genes)
prcc.pat <- get.matched.ind(colData(prcc.data))
ccrcc.pat <- get.matched.ind(colData(data))
res.prcc <- get.deseq2.proc(prcc.data, prcc.pat, met.genes)
res.ccrcc <- get.deseq2.proc(data, ccrcc.pat, met.genes)
res.sam.prcc <- get.sam.res(prcc.data, prcc.pat, met.genes)
res.sam.ccrcc <- get.sam.res(data, ccrcc.pat, met.genes)
g.sam.prcc <- sapply(get.sam.genes(res.sam.prcc, list(2,3,4,5)), function(g) intersect(g, met.genes))
g.sam.ccrcc <- sapply(get.sam.genes(res.sam.ccrcc, list(2,3,4,5)), function(g) intersect(g, met.genes))
g.sam.chcc <- sapply(get.sam.genes(res.sam.ch, list(2,3,4,5)), function(g) intersect(g, met.genes))
g.deseq.prcc <- lapply(c(2,3,4,5), function(x){intersect(get.deseq2.genes(res.prcc[[2]], x, 0.05, 0.05), met.genes)})
names(g.deseq.prcc) <- c('2 fold', '3 fold', '4 fold', '5 fold')
g.deseq.ccrcc <- lapply(c(2,3,4,5), function(x){intersect(get.deseq2.genes(res.ccrcc[[2]], x, 0.05, 0.05), met.genes)})
names(g.deseq.ccrcc) <- c('2 fold', '3 fold', '4 fold', '5 fold')
g.deseq.chr <- lapply(c(2,3,4,5), function(x){intersect(get.deseq2.genes(res.deseq.ch[[2]], x, 0.05, 0.05), met.genes)})
names(g.deseq.chr) <- c('2 fold', '3 fold', '4 fold', '5 fold')
g.deseq.prcc.met <- lapply(c(2,3,4,5), function(x){get.deseq2.genes(res.prcc[[1]], x, 0.05, 0.05)})
names(g.deseq.prcc.met) <- c('2 fold', '3 fold', '4 fold', '5 fold')
library(pheatmap)
ann.col.df <- data.frame(type=colData(prcc.data)$shortLetterCode[unlist(prcc.pat)],
row.names = colnames(prcc.data)[unlist(prcc.pat)])
pheatmap(assay(prcc.data)[g[1:10], unlist(prcc.pat)], cluster_rows = F,
cluster_cols = T, annotation_col = ann.col.df, show_colnames = F )
|
345c1f3c9dd6d715210344b54ff78ba8a6d90639 | d1c93472455db0f0c6c5a9353888b75c5fc209dd | /Apostila 01 - Capitulo 06.R | 559cd0427987eca51a6117976cb718c946b23371 | [] | no_license | mariruggeri/IGTI_Bootcamp | 2b7a9356962cfae9d8332baf18ebcca0d8aa150e | 7896d488cb0d900899c560c52700fb8e612735f8 | refs/heads/master | 2023-05-14T14:42:40.092479 | 2021-06-02T21:03:33 | 2021-06-02T21:03:33 | 371,125,746 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,423 | r | Apostila 01 - Capitulo 06.R | ###########################################################
####### Regressao Logística #######
## AED - Capitulo 06 - Prof. Máiron Chaves ####
###########################################################
rm(list = ls()) #Limpa memória do R
#install.packages('pROC')
#Instala e carrega biblioteca para gerar a curva ROC
library(pROC)
library(dplyr)
dados <- readr::read_csv("data/df_04_a_06.csv", na = c("", "-", "NA"))
# Converte variavel resposta para factor
dados$Classe <- factor(dados$Classe, levels = c('Ruim','Boa'))
# Pequena analisa exploratoria
dados %>% group_by(Classe) %>%
summarise_all("mean")
# Ajusta regressao logistica
# Comando glm
fit <- glm(Classe ~ Prova_Logica +
Redacao +
Auto_Avaliacao,
data = dados,
family = binomial)
# Visualiza resumo do modelo ajustado
summary(fit)
# Aplica exponenciacao nos coeficientes para interpretar
exp(fit$coefficients)
# Curva ROC
prob = predict(fit,
newdata = dados,
type = "response")
prob
View(data.frame(dados,prob))
roc = roc(dados$Classe ~ prob,
plot = TRUE,
print.auc = TRUE)
# Obtem a predicao/probabilidade para cada observacao
Probabilidade <- predict(fit,
newdata= dados,
type = 'response')
# Se a probabilidade for maior que 50% classifica como 'Boa'
Classe_Predita <- ifelse(Probabilidade > 0.5,"Boa","Ruim")
#Visualiza data frame com as predicoes
View(data.frame(dados,Probabilidade,Classe_Predita))
# Gera matriz de confusao
matriz_confusao <- table(Classe_Predita = Classe_Predita,
Classe_Original = relevel(dados$Classe,ref = 'Boa'))
# Armazena valores da matriz de confusao
verdadeiro_positivo <- matriz_confusao[1,1];verdadeiro_positivo
verdadeiro_negativo <- matriz_confusao[2,2];verdadeiro_negativo
falso_negativo <- matriz_confusao[2,1];falso_negativo
falso_positivo <- matriz_confusao[1,2];falso_positivo
# Calcula acuracia
# diag = diagonal
acuracia <- sum(diag(matriz_confusao))/ sum(matriz_confusao);acuracia
# Calcula Sensitividade
sensitividade <- verdadeiro_positivo /(verdadeiro_positivo + falso_negativo);sensitividade
#Cacula Especificidade
especificidade <- verdadeiro_negativo / (verdadeiro_negativo + falso_positivo);especificidade
# Analise de Sensitividade e Especificidade
# Organizar as probabilidades criadas na linha 56
limiares <- sort(Probabilidade)
# Sweep every candidate cutoff (limiares) and record accuracy, sensitivity
# and specificity. The result vectors are preallocated instead of being grown
# with c()/[i] assignment (which is quadratic).
n_limiares <- length(limiares)
acuracia <- numeric(n_limiares)
sensitividade <- numeric(n_limiares)
especificidade <- numeric(n_limiares)
for (i in seq_along(limiares)) {
  limiar_atual <- limiares[i]
  # Classify with the current cutoff. Fixing the factor levels guarantees a
  # full 2x2 confusion matrix even at extreme cutoffs where only one class is
  # predicted (the original 1-row table made confusao[2,1] fail).
  Classe_Predita <- factor(ifelse(Probabilidade > limiar_atual, 'Boa', 'Ruim'),
                           levels = c('Boa', 'Ruim'))
  # Confusion matrix with 'Boa' first in both dimensions.
  confusao <- table(Classe_Predita = Classe_Predita,
                    Classe_Original = relevel(dados$Classe, ref = 'Boa'))
  vp <- confusao[1, 1]  # true positives
  fn <- confusao[2, 1]  # false negatives
  vn <- confusao[2, 2]  # true negatives
  fp <- confusao[1, 2]  # false positives
  acuracia[i] <- sum(diag(confusao)) / sum(confusao)  # accuracy
  sensitividade[i] <- vp / (vp + fn)                  # sensitivity
  especificidade[i] <- vn / (vn + fp)                 # specificity
}
plot(y = sensitividade[1:698] ,
x = limiares[1:698],
type="l",
col="red",
ylab = 'Sensitividade e Especificidade',
xlab= 'Pontos de Corte')
grid()
lines(y = especificidade[1:698],
x = limiares[1:698],
type = 'l',col="blue" )
legend("bottomleft",
c("sensibilidade","especificidade"),
col=c("red","blue"),
lty=c(1,1),
bty="n",
cex=1,
lwd=1)
abline(v=0.225)
# Obtem novamente as probabilidades para classificar baseado no ponto de corte 22,5%
Probabilidade <- predict(fit, newdata= dados,type = 'response')
Classe_Predita <- ifelse(Probabilidade > 0.225,"Boa","Ruim")
View(data.frame(dados,Probabilidade,Classe_Predita))
# Visualiza matriz de confusao final
confusao <- table(Classe_Predita = Classe_Predita,
Classe_Original = relevel(dados$Classe,ref = 'Boa'))
# Armazena valores da matriz de confusao
vp <- confusao[1,1];vp
fn <- confusao[2,1];fn
vn <- confusao[2,2];vn
fp <- confusao[1,2];fp
# Calcula acuracia
acuracia <- sum(diag(confusao))/ sum(confusao);acuracia
# Calcula Sensitividade
sensitividade <- vp /(vp+fn)
# Cacula Especificidade
especificidade <- vn / (vn + fp)
# Biblioteca caret fornece acuracidade, sensitividade, especificidade |
5fb86d006c94f76c47f8b2fbae8d090c6fa10c19 | 5ff77f4b5eecf33f4fda9028e0875d9048d7684b | /R/eurusds.R | 7a66d63bc3ae1e556b0cc7a94e9bffda2737a38d | [] | no_license | szigony/xchangeR | 4ebb42260efe1ca009bca61b269fd72ac6629602 | b3003accdbbc448a42d0a92fdcf9528872a13037 | refs/heads/master | 2020-05-27T06:36:28.723627 | 2019-06-06T05:29:43 | 2019-06-06T05:29:43 | 188,524,286 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,646 | r | eurusds.R | #' USD/EUR exchange rates
#'
#' Looks up the daily USD/EUR exchange rate via an API call for the specified date range.
#'
#' @export
#'
#' @importFrom httr GET content
#' @import dplyr
#' @importFrom data.table data.table as.data.table
#'
#' @param date_from The start of the range.
#' @param date_to The end of the range.
#' @param last_x_days Return the exchange rates for the last X days compared to \code{date_to}.
#'
#' @return Returns the daily exchange rates.
#'
#' @format Returns a \code{data.table}.
#' \itemize{
#' \item \code{date}: The day for which the exchange rate was valid.
#' \item \code{exchange_rate}: Daily exchange rate.
#' }
#'
#' @examples
#' # Specific date range
#' eurusds("2019-05-01", "2019-05-24")
#'
#' # Last 45 days from a specific date
#' eurusds(date_to = "2019-05-01", last_x_days = 45)
#'
#' # Last 45 days from today
#' eurusds(last_x_days = 45)
#'
#' @seealso \code{\link{eurusd}}
eurusds <- function(date_from, date_to, last_x_days = NULL) {
  # Resolve the requested date range (roxygen contract documented above).
  if (!is.null(last_x_days)) {
    # Relative mode: last_x_days back from date_to (defaulting to today).
    if (missing(date_to)) {
      date_to <- format(Sys.Date(), "%Y-%m-%d")
    }
    date_from <- format(as.Date(date_to, "%Y-%m-%d") - last_x_days, "%Y-%m-%d")
  } else if (missing(date_from) || missing(date_to)) {
    # Fail fast with a clear message instead of erroring deep inside the API
    # call (the original's no-op `date_from <- date_from` only surfaced a
    # bare "argument missing" error).
    stop("Supply both `date_from` and `date_to`, or set `last_x_days`.",
         call. = FALSE)
  }
  # Query the daily USD -> EUR rates for the resolved range.
  exchange_rates <- content(
    GET(
      "https://api.exchangeratesapi.io/history",
      query = list(
        base = "USD",
        symbols = "EUR",
        start_at = date_from,
        end_at = date_to
      )
    )
  )$rates
  # The API returns a list keyed by date in no guaranteed order; tabulate and
  # sort chronologically. (Local renamed from `eurusds`, which shadowed the
  # function name.)
  rates <- data.table(
    date = as.Date(names(exchange_rates)),
    exchange_rate = as.numeric(unlist(exchange_rates))
  ) %>%
    arrange(date)
  as.data.table(rates)
}
|
6b2ef79a57ddc2c4553190dce8669aba8a7ccc03 | 884e1fa9b78a77b81c1bc963d839667ac276fab8 | /Exploratory_Data_Analysis/week_4_assignment/plot2.R | 32ec34c8532a89344621164a028c89dbe761e263 | [] | no_license | guy6400/Data-Science-Specialization-John-Hopkins-Coursera | d97ec07c603aeb38dcd7ccc959139d8b4524e9e9 | 53002aece812c225b9803d728445a1266cea2fe4 | refs/heads/master | 2022-12-03T05:04:11.198251 | 2020-08-03T13:02:57 | 2020-08-03T13:02:57 | 283,740,970 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,156 | r | plot2.R |
#####libraries
library(dplyr)
library(ggthemes)
library(ggplot2)
#####download files####
setwd("G:\\Scripting\\R\\John Hopkins Data Science\\Exploratory Data Analysis\\week 4 assignment")
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
path<- getwd()
download.file(url,file.path(path, "dataFiles.zip"))
unzip(zipfile ="dataFiles.zip")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
####question 2 :Have total emissions from PM2.5 decreased in the Baltimore City, Maryland from 1999 to 2008?
#Use the base plotting system to make a plot answering this question.
###create separate df for Baltimore City, Maryland
Balt_df <-filter(NEI,fips == "24510" )
# create df grouped by year
df_agg <- Balt_df %>%
group_by(as.factor(year))
#summerize according to the sum of emmisions
df_summary <- summarize(df_agg,emissions_per_year =sum(Emissions))
png("plot2.png")
barplot(df_summary$emissions_per_year,main = "Baltimore City, Maryland, Overall Emissions over Time",ylab = "Total Emissions"
,names = df_summary$`as.factor(year)`
,col = "red")
dev.off() |
68735bec8371547e0a07370de4d3d40874159664 | 3fc5b02bdc700c9b1d4fee77d41f50c28eb79cfa | /munge/02-B_split_training_in_column_types.R | 5265b855bfdf26a4e5e147226526ca6346fe6bdb | [] | no_license | Ruud-Janssen/hp2 | 34de5021c81ebc3ca807b41729b5bd307602c82c | 938eea7c64bfc5f8437e87d50696a669824a9108 | refs/heads/master | 2021-09-09T09:28:42.554222 | 2018-03-14T20:17:17 | 2018-03-14T20:17:17 | 119,682,696 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,864 | r | 02-B_split_training_in_column_types.R | namesTrain <- names(House.Prices.Kaggle.preprocessed)
#indicator columns
# A column counts as an indicator when it has exactly two distinct values
# and, for integer columns, those two values sum to 1 (i.e. a 0/1 flag).
# The `&` is applied to two length-1 logicals here, so it acts like `&&`.
#train_ind <- House.Prices.Kaggle.preprocessed[, sapply(House.Prices.Kaggle.preprocessed, function(vec) length(unique(vec)) == 2 & ifelse(class(vec) == "integer", sum(unique(vec)), 0) == 1)]
namesTrain_ind <- colnames(House.Prices.Kaggle.preprocessed[, sapply(House.Prices.Kaggle.preprocessed, function(vec) length(unique(vec)) == 2 & ifelse(class(vec) == "integer", sum(unique(vec)), 0) == 1)])
# Column-name vectors grouped by storage type (namesTrain is defined above).
namesTrain_int <- namesTrain[sapply(House.Prices.Kaggle.preprocessed, is.integer)]
# Factor-encoded columns follow a ".n" suffix naming convention.
namesTrain_fac <- namesTrain[endsWith(namesTrain, ".n")]
# NOTE: is.numeric() is TRUE for integer columns too, hence the setdiff()
# cleanup at the bottom of this script.
namesTrain_num <- namesTrain[sapply(House.Prices.Kaggle.preprocessed, is.numeric)]
namesTrain_char <- namesTrain[sapply(House.Prices.Kaggle.preprocessed, is.character)]
#not waterproof year...
# Heuristic: an integer column whose mean of unique values lies in
# 1900-2100 is treated as a year/date column (between() is dplyr's).
#train_year <- House.Prices.Kaggle.preprocessed[, sapply(House.Prices.Kaggle.preprocessed, function(vec) ifelse(class(vec) == "integer", between(mean(unique(vec)), 1900, 2100), F) == T)]
namesTrain_date <- colnames(House.Prices.Kaggle.preprocessed[, sapply(House.Prices.Kaggle.preprocessed, function(vec) ifelse(class(vec) == "integer", between(mean(unique(vec)), 1900, 2100), F) == T)])
namesTrain_date <- append(namesTrain_date, c("YrMoSold", "MoSold")) #"YrMoSoldCount"
namesTrain_id <- c("Id")
#now remove some overlap
# Make the groups mutually exclusive. Order matters below: namesTrain_int
# is reduced first, and that reduced version is then removed from
# namesTrain_num, so do not reorder these statements.
namesTrain_num <- setdiff(namesTrain_num, namesTrain_id)
namesTrain_int <- setdiff(namesTrain_int, namesTrain_id)
namesTrain_num <- setdiff(namesTrain_num, namesTrain_date)
namesTrain_int <- setdiff(namesTrain_int, namesTrain_date)
namesTrain_int <- setdiff(namesTrain_int, namesTrain_fac)
namesTrain_num <- setdiff(namesTrain_num, namesTrain_fac)
namesTrain_int <- setdiff(namesTrain_int, namesTrain_ind)
namesTrain_num <- setdiff(namesTrain_num, namesTrain_int)
namesTrain_num <- setdiff(namesTrain_num, namesTrain_ind)
|
979909ab1170e4bc54ef8e4c6b3953a70943d9d4 | 09257aaa16f79444f0f56abf1e94f0aade6b69bd | /man/merge.Rd | 1d4615508d4d4612f8c4ee0c72fd3d67d39957c2 | [] | no_license | aaronpeikert/semtree | 1023f61be1e63258cc3d45388c5c69861f54e176 | bd92e8522a62026b9ac5050c0b8f5a7ff58e9cb7 | refs/heads/master | 2020-06-12T09:07:51.598586 | 2019-06-25T09:41:24 | 2019-06-25T09:41:24 | 194,253,753 | 1 | 0 | null | 2019-06-28T10:17:18 | 2019-06-28T10:17:18 | null | UTF-8 | R | false | false | 672 | rd | merge.Rd | \name{merge}
\alias{merge.semforest}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Merge two SEM forests
}
\description{
This overrides generic base::merge() to merge two forests into one.
}
\usage{
\method{merge}{semforest}(x, y, ...)
}
\arguments{
\item{x}{A SEM Forest}
\item{y}{A second SEM Forest}
\item{\ldots}{Extra arguments. Currently unused.}
}
\references{
Brandmaier, A.M., Oertzen, T. v., McArdle, J.J., & Lindenberger, U. (2013). Structural equation model trees. \emph{Psychological Methods}, 18(1), 71-86.
}
\author{
Andreas M. Brandmaier, John J. Prindle
}
\seealso{
\code{\link{semtree}}
} |
8c805ed2758c528b0b1db952a57ee52343381bad | 81688ef29b9e2780f94c60cb423e2cc2a365a49c | /src/helpers/date_helper.R | b448c2313fe055e32f51f0160195a354d889dd5d | [] | no_license | misharigot/socialcharging | 66eee84f7ee36ba903bdbc78e94eaa138ee42031 | 8411797650d976519f1429058d5cd097d08a2ef3 | refs/heads/master | 2021-03-19T18:48:32.097759 | 2018-03-19T22:23:37 | 2018-03-19T22:23:37 | 105,770,840 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,582 | r | date_helper.R | # This class has several helper functions regarding date, containing custom functions that lubridate doesn't provide.
library(lubridate)
# ISO-8601 week number ("01".."53") of the given date. Weeks start on
# Monday (%V), unlike lubridate's week(), which is why this helper exists.
getWeekNumber <- function(date) {
  parsed <- as.Date(date)
  format(parsed, "%V")
}
# Weekday as a character digit "1" (Monday) .. "7" (Sunday), i.e. the ISO
# convention (%u) rather than lubridate's Sunday-first counting.
getDay <- function(date) {
  parsed <- as.Date(date)
  format(parsed, "%u")
}
# Return the date, one week out, shifted to the requested weekday.
# Argument convention: wday = 1 (Monday) ... 7 (Sunday), per the warning
# text below.
# NOTE(review): lubridate::wday(today) uses its own (Sunday-first by
# default) indexing while the argument is documented Monday-first — confirm
# the returned date actually lands on the intended weekday.
nextWeekday <- function(wday) {
  # Scalar range check; && is the correct short-circuiting operator here
  # (the original used the element-wise & on length-1 values).
  if (wday > 0 && wday < 8) {
    today <- date(now())
    nextWeekDay <- today + 7
    # ceiling_date() on a plain Date appears to be a no-op; kept unchanged
    # for behaviour parity.
    ceiling_date(nextWeekDay, unit = "day") + wday - wday(today)
  } else {
    warning("Please give a number between 1 (monday) and 7 (sunday)")
  }
}
# Returns the start date time of a session, based on the starting hour
# (0-23) and the day (1-7): the date comes from nextWeekday(day), the
# clock time from startingHour.
toNextWeekStartDate <- function(startingHour, day) {
  date <- nextWeekday(day)
  # Local renamed from `hms` so it no longer shadows lubridate::hms().
  # NOTE(review): the two branches yield different types (an hms/period
  # object vs a plain "00:00:00" string); ymd_hms() is assumed to parse the
  # pasted string either way, as in the original — confirm.
  if (startingHour != 0) {
    timeOfDay <- hms(paste0(startingHour, ":00:00"))
  } else {
    timeOfDay <- "00:00:00"
  }
  dateString <- paste(date, timeOfDay, sep = "-")
  ymd_hms(dateString)
}
# Returns the end date time of a session: the start time plus `elapsed`
# decimal hours (whole hours plus the remaining minutes).
toNextWeekEndDate <- function(startingHour, day, elapsed) {
  startDate <- toNextWeekStartDate(startingHour, day)
  # Fix: the original assigned this to a dead local (`endDate`), which made
  # the function return the value invisibly; return it directly instead.
  startDate + hours(floor(elapsed)) + minutes(getMinutes(elapsed))
}
# Minutes contained in the fractional part of a decimal hour value,
# e.g. 1.1 -> 6 minutes. Whole-hour inputs yield 0.
getMinutes <- function(hours) {
  if (!isDecimal(hours)) {
    return(0)
  }
  fraction <- hours - floor(hours)
  floor(fraction * 60)
}
# TRUE when `number` has a non-zero fractional part (NA propagates to NA).
isDecimal <- function(number) {
  !(number %% 1 == 0)
}
# Parse `datetime` using the supplied strptime format and keep only the
# clock time, rendered as "HH:MM:SS".
stripDate <- function(datetime, dateTimeFormat){
  parsed <- strptime(datetime, format = dateTimeFormat)
  format(parsed, "%H:%M:%S")
}
# Convert decimal hours to an "H:M:00" string, e.g. 2.5 -> "2:30:00".
# (Minutes are not zero-padded; that matches the original behaviour.)
toHourAndMinutes <- function(decimal){
  wholeHours <- floor(decimal)
  minutes <- round((decimal - wholeHours) * 60)
  paste(wholeHours, minutes, "00", sep = ":")
}
# Returns the week of the month (1-5): days 1-7 -> 1, 8-14 -> 2, ...,
# 29-31 -> 5. (The original comment said "day of the month", which is not
# what is computed.) day() here is lubridate's day-of-month accessor.
getWeekOfMonth <- function(start_date){
  weekOfMonth <- ceiling(day(start_date) / 7)
  return (weekOfMonth)
}
|
62fe21e1d4b3529f7ea92c7dad74ae28d94efb1e | 44bc24cd738a47bcc2fa0f1272ea231ebdbdb98a | /ui.R | c6fc3f211e3aaa2bbebead087560a8c7014c94c3 | [] | no_license | SFWongBigData/Developing_Data_Products | abf3a1db089a257d8feb083a78bb36aeb6a87e9f | 4febf8c9451dad30ccd2527425395149c0676b3f | refs/heads/master | 2021-01-10T15:32:42.375174 | 2016-01-30T12:32:25 | 2016-01-30T12:32:25 | 50,723,928 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,172 | r | ui.R | library(shiny)
# Shiny UI: sidebar layout for exploring an ozone data set (the variable
# names match datasets::airquality — confirm against the server script) and
# for choosing predictors of a linear regression on Ozone.
shinyUI(pageWithSidebar(
  # Application title shown in the header bar.
  headerPanel("Predict Ozone"),
  sidebarPanel(
    # --- Histogram controls -----------------------------------------
    h3("Choose one variable for plot"),
    # NOTE(review): "histagram" below is a typo in user-facing copy; left
    # unchanged here because it is a runtime string, not a comment.
    helpText("Note: select the factor you want to check,",
             "the factor will be shown in the form of histagram."),
    # input$plot: which variable the server should histogram.
    radioButtons("plot","Histogram",
                 c("Ozone"="Ozone",
                   "Solar radiation"="Solar.R",
                   "Wind"="Wind",
                   "Temperature"="Temp"
                 )),
    hr(),
    # --- Regression controls ----------------------------------------
    h3("Predict the Ozone"),
    helpText("Note: select the factor you want to include in the linear",
             "regression model, the results of modeling are shown in the right."),
    # input$x: independent variables for the linear model (all on by default).
    checkboxGroupInput("x","Independent variable",
                       c("Solar radiation"="Solar.R",
                         "Wind"="Wind",
                         "Temperature"="Temp"
                       ),
                       selected=c("Solar.R","Wind","Temp")
    )
  ),
  mainPanel(
    # Outputs rendered by the server: the histogram and the model printout.
    plotOutput("histplot"),
    verbatimTextOutput("prediction")
  )
))
93c19313846895696a9250a2d5b2e28af3a519c6 | b5fd00ca0de81c9f532f3aaf2ebfba48dd141e8f | /scripts/tema1/08-remove-duplicates.R | 5bcd51dd549cea893a17e11597c9d3da2f90be5d | [] | no_license | ledvir26/Machine-Learning-with-R | 0e0da68d33f1709bc0d49347c24fdd3986ff01be | fb69d70e76db6be2c4383852806b470dfba9f31a | refs/heads/master | 2023-07-09T21:29:39.283830 | 2021-08-25T01:46:58 | 2021-08-25T01:46:58 | 385,417,827 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 317 | r | 08-remove-duplicates.R | family_salary = c(40000, 60000, 50000, 80000, 60000, 70000, 60000)
# Toy data set for demonstrating duplicate-row removal
# (family_salary is defined on the preceding line).
family_size <- c(4, 3, 2, 2, 3, 4, 3)
family_car <- c("Lujo", "Compacto", "Utilitario", "Lujo",
                "Compacto", "Compacto", "Compacto")
family <- data.frame(family_salary, family_size, family_car)
# Keep only distinct rows, then show which original rows were repeats.
family_unique <- unique(family)
duplicated(family)
c2551bebcd35b17f6b2e36de5500d5b561f7efda | df1a5a132abe3ecb3c023514114e8c9b1646dc8d | /man/begin.Rd | 13f4909f9282a8deb3f230d564c922475d07f8d5 | [] | no_license | ari-harmonic/RecordLinkage | 3c22055ce4b0bfa464cb57ba9957ca0835dbebce | fcde6f83eb97b438b56f81979014c20c5067781e | refs/heads/master | 2020-04-25T06:29:40.578522 | 2019-02-27T21:38:54 | 2019-02-27T21:38:54 | 172,583,121 | 0 | 0 | null | 2019-02-27T21:38:54 | 2019-02-25T20:54:10 | C | UTF-8 | R | false | true | 354 | rd | begin.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.r
\name{begin}
\alias{begin}
\title{Begin generation of data pairs}
\usage{
begin(x, ...)
}
\description{
An SQL statement representing the generation of data pairs, including
the configuration of blocking fields, phonetics etc., is constructed and
sent to SQLite.
}
|
3021b3879706cca066661933f6853c976cca68f1 | f7d5fcc27ed9cddf4b9f6d42c86f6e1e203a62bf | /utils.R | 34bf14290cb05ebf5e330c6213a58b7509910532 | [] | no_license | 1965aafc/tutorial-spatial-modelling-population-welfare-SCL-SPH | bb4dde6ee396fe7a8f567d492e3ab1a2d93be178 | 71c483267f0b0559ea46e43074e6ef15a0c02ef5 | refs/heads/master | 2023-05-10T23:41:49.855094 | 2021-06-03T00:20:33 | 2021-06-03T00:20:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,626 | r | utils.R | #!/usr/bin/env Rscript
# #####################
# ##### Load
# #####################
options(scipen = 10000)
# Project layout: all data is read from and written under ~/data/.
root_dir <- "~/"
project_dir <- "data/"
dir_data <- paste0(root_dir, project_dir)
#####################
##### Install
#####################
##### Cran
# CRAN dependencies. "plotly" was listed twice in the original vector; the
# duplicate is dropped (attaching an already-attached package is a no-op).
pk <- c("devtools", "stringr", "dplyr",
        "car", "plotly", "leaflet",
        "RJSONIO", "kableExtra", "furrr", "leaflet.extras",
        "qgraph", "tictoc", "energy",
        "parallel", "WGCNA",
        # "Pigengene",
        "plm", "MatrixModels",
        "Hmisc", "gpclib", "rgeos", "rgdal", "velox")
# Install anything missing, then attach every package.
missing_pk <- pk[!(pk %in% installed.packages()[, "Package"])]
if (length(missing_pk) > 0) install.packages(missing_pk)
# vapply() guarantees a logical vector; the original lapply + Reduce(sum)
# computed the same "did everything load" check less directly.
res <- vapply(pk, require, logical(1), character.only = TRUE)
if (!all(res)) stop("Some packages could not be loaded.")
# GitHub build of velox is installed on top of the CRAN one — presumably to
# get the development version (see the source-install notes below); this
# needs devtools, which was attached just above.
install_github("hunzikp/velox")
#####################
##### Source Install before loading
##### TODO(rsanchezavalos) install in dockerfile
####################
##### Source & dev
# GIT - devtools
# --------
#install.packages("de vtools")
#install.packages("plm") # https://cran.r-project.org/web/packages/plm/vignettes/plmPackage.html
#####################################################
# INLA
# --------
#TODO(rsanchezavalos) # freeze INLA version
# install.packages("INLA", repos=c(getOption("repos"),
# INLA="https://inla.r-inla-download.org/R/stable"),
# dep=TRUE)
# R > 3.5.0
#INLA:::inla.dynload.workaround()
#This function is replaced by: inla.binary.install() in new R - use ->
#* Install file [https://inla.r-inla-download.org/Linux-builds/./CentOS Linux-6 (Core)/Version_21.02.23/64bit.tgz]
# inla.binary.install()
#library("INLA")
#####################################################
# Install BiocManager - biocLite
# --------
# R < 3.5.0
#source("https://bioconductor.org/biocLite.R")
# R > 3.5.0
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# update.packages()
# source("https://bioconductor.org/biocLite.R")
# biocLite()
# Bioconductor version 3.8 (BiocManager 1.30.4), R 3.5.1 (2018-07-02)
# BiocManager::install(c("biocLite"))
#####################################################
# R > 4.0.0
# install.packages("INLA",repos=c(getOption("repos"),INLA="https://inla.r-inla-download.org/R/stable"), dep=TRUE)
#
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# BiocManager::install(c("graph", "Rgraphviz"), dep=TRUE)
# BiocManager::install(c("Pigengene"))
# Attach the Bioconductor graph stack (installation commands for these
# packages are in the commented BiocManager notes above).
library("graph")
library("Rgraphviz")
library("Pigengene")
|
a141cbb76892157fbaf9271ff76fbc52153c638c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/FatTailsR/examples/estimkiener11.Rd.R | 6674768378f2b2f4442e17539ea7675c672629ec | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 769 | r | estimkiener11.Rd.R | library(FatTailsR)
### Name: estimkiener11
### Title: Estimation Functions with 5, 7 or 11 Quantiles
### Aliases: estimkiener11 estimkiener7 estimkiener5
### ** Examples
require(timeSeries)  # NOTE(review): require() only warns if loading fails
## Choose j in 1:16. Choose ord in 1:12 (7 is default)
j <- 5
ord <- 5
# DS: data sets returned by getDSdata(), indexed 1..16 per the comment above.
DS <- getDSdata()
# Eleven estimation probabilities and the matching empirical quantiles
# (type = 6 interpolation), the inputs expected by estimkiener11().
p11 <- elevenprobs(DS[[j]])
x11 <- quantile(DS[[j]], probs = p11, na.rm = TRUE, names = TRUE, type = 6)
round(estimkiener11(x11, p11, ord), 3)
## Compare the results obtained with the 12 different values of ord on stock j
# sapply iterates ord = 1..12; x11 and p11 are passed through unchanged.
compare <- function(ord, x11, p11) {estimkiener11(x11, p11, ord)}
coefk <- t(sapply(1:12, compare, x11, p11))
rownames(coefk) <- 1:12
mcoefk <- apply(coefk, 2, mean) # the mean of the 12 results above
roundcoefk(rbind(coefk, mcoefk), 13)
|
256d35a3e20d533a5a24e33b4b3204ee317bcbc3 | ae0f7c49555c63f93389c0c559f2bb193d1d1a38 | /run_analysis.R | 8903cdcf2d4347d758a0f5c030dae5cd7c8d055e | [] | no_license | luj1985/getdata-008 | b598593ed5bbcdf6bbb4162d37b2ca88e0abe471 | f68e8123dddc34381e8fb62767782db00057d48b | refs/heads/master | 2016-09-08T02:40:03.818313 | 2014-10-26T08:08:10 | 2014-10-26T08:08:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,587 | r | run_analysis.R | library(dplyr)
# Read one split ("train" or "test") of the activity data set and return a
# single data frame: subject id, activity code, then the feature columns
# named after data/features.txt.
readData <- function(type) {
  # Helper: path to a per-split file such as ./data/train/subject_train.txt
  # (replaces four copies of paste(..., sep = "") with one paste0 builder).
  split_file <- function(prefix) {
    paste0("./data/", type, "/", prefix, "_", type, ".txt")
  }
  subjects <- read.table(split_file("subject"))
  measures <- read.table(split_file("X"))
  labels <- read.table(split_file("Y"))
  # Feature names are shared between the two splits.
  features <- read.table("./data/features.txt")
  names(subjects) <- "subject"
  names(labels) <- "activity"
  names(measures) <- features[, 2]
  cbind(subjects, labels, measures)
}
# Build the tidy data set: merge train/test, keep mean()/std() features,
# label activities, clean variable names, then average by activity+subject.
# NOTE(review): tbl_df(), summarise_each() and funs() are deprecated in
# modern dplyr; kept as-is for compatibility with the version in use here.
run_analysis <- function() {
  # 1. Merges the training and the test sets to create one data set.
  train <- readData("train")
  test <- readData("test")
  raw <- rbind(train, test)
  # 2. Extracts only the measurements on the mean and standard deviation for each measurement.
  # (the $-anchored pattern keeps names ending in "mean()"/"std()" only,
  # so e.g. names ending in "meanFreq()" do not match)
  n1 <- names(raw)
  meanAndStd <- n1[grep("(mean|std)\\(\\)$", n1)]
  data <- raw[, c("subject", "activity", unlist(meanAndStd))]
  # 3. Uses descriptive activity names to name the activities in the data set
  labels <- read.table("./data/activity_labels.txt")
  data$activity = factor(data$activity, levels=labels[,1], labels=labels[,2])
  data$subject = as.factor(data$subject)
  # 4. Appropriately labels the data set with descriptive variable names.
  # (drop "()" and turn "-" into "." in every column name)
  n2 <- names(data)
  n2 <- gsub("\\(\\)", "", n2)
  n2 <- gsub("-", ".", n2)
  names(data) <- n2
  # 5. From the data set in step 4, creates a second, independent tidy data set
  # with the average of each variable for each activity and each subject.
  tidy_df <- tbl_df(data)
  tidy_df %>%
    group_by(activity, subject) %>%
    summarise_each(funs(mean))
}
df52f1b66016db84a89d0aca6f88e6ab1b9e2b86 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/deldir/examples/plot.deldir.Rd.R | 28dad7e6decf2fbe732b06b9f745d81265de11f1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 600 | r | plot.deldir.Rd.R | library(deldir)
### Name: plot.deldir
### Title: Plot objects produced by deldir
### Aliases: plot.deldir
### Keywords: hplot
### ** Examples
## Not run:
##D try <- deldir(x,y,list(ndx=2,ndy=2),c(0,10,0,10))
##D plot(try)
##D #
##D deldir(x,y,list(ndx=4,ndy=4),plot=TRUE,add=TRUE,wl='te',
##D col=c(1,1,2,3,4),num=TRUE)
##D # Plots the tesselation, but does not save the results.
##D try <- deldir(x,y,list(ndx=2,ndy=2),c(0,10,0,10),plot=TRUE,wl='tr',
##D wp='n')
##D # Plots the triangulation, but not the points, and saves the
##D # returned structure.
## End(Not run)
|
90ef249de1f0d63373b8973d9441755c636d535c | b7bf62deb228ee72d8ff96205a230575cbdfb51c | /man/amean_byelt_jack.Rd | 8b37012edd38b628ace6bdcb6d17238645a4994a | [] | no_license | BenitoJaillard/combinAna | f48c855ea4b8b2dd22eb58652fa6b87b49c56a4a | 114df1efc319db55c72f3b9bb23095a4345dabbb | refs/heads/master | 2020-04-24T03:05:11.212153 | 2019-03-26T09:57:19 | 2019-03-26T09:57:19 | 171,659,371 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,525 | rd | amean_byelt_jack.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predicting.int.R
\name{amean_byelt_jack}
\alias{amean_byelt_jack}
\title{Arithmetic mean (amean) by motif (bymot) by jackknife (jack)
over several experiments (xpr)}
\usage{
amean_byelt_jack(fctMot, mOccurMot, jack)
}
\arguments{
\item{fctMot}{a vector of numeric values of elements belonging to
a same motif.}
\item{mOccurMot}{a matrix of occurrence (occurrence of elements).
Its first dimension equals to \code{length(fctMot)}. Its second
dimension equals to the number of elements.}
\item{jack}{a vector of two elements. The first one \code{jack[1]}
specifies the size of subset, the second one \code{jack[2]} specifies
the number of subsets.}
}
\value{
Returns a vector of length \code{length(fctMot)}, whose values are
computed as the arithmetic mean of all vector elements.
}
\description{
Take a numeric vector and return the predicted vector
computed as the arithmetic mean of all elements belonging to the same motif.
}
\details{
Prediction is computed using arithmetic mean \code{amean}
by motif \code{bymot} in a whole (WITHOUT taking into account species
contribution). The elements belonging to a same motif are divided
into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
is computed by excluding \code{jack[1]} elements, among them the element
to be predicted. If the total number of elements belonging to the motif
is lower than \code{jack[1]*jack[2]}, prediction is computed by
Leave-One-Out (LOO).
}
\keyword{internal}
|
4b68d748a0105e635c3bcf53932de4c3adce4934 | 8a483632aada1fea716ed7ddab9ef42b113c413e | /code/scenarios/80_10/evaluations_80_10.R | 1d1823f20eda8c1e713d6639f8da02effba3315f | [] | no_license | ben-williams/parallel_diverge | ea54ca6caee59d321412e088ae57f920850d4464 | 9a0fd91a8e2418bbb0b1f0e7f37ca9b8c66acd7c | refs/heads/master | 2020-07-06T12:56:27.404297 | 2018-08-06T18:44:12 | 2018-08-06T18:44:12 | 66,984,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,282 | r | evaluations_80_10.R | source('code/functions/helper.R')
source('code/functions/cleanup.R')

# Scenario outputs: one CSV per management alternative; every stochastic
# file is trimmed to simulation runs 1-40.
cq_fed <- read_csv('output/80_10/community_quota_fed_only_80_10.csv') %>%
  filter(sim < 41)
cq_oa <- read_csv('output/80_10/community_quota_open_access_80_10.csv') %>%
  filter(sim < 41)
sq <- read.csv("output/80_10/status_quo_80_10.csv")
state_ecs <- read.csv("output/80_10/state_equal_catch_share_80_10.csv") %>%
  filter(sim < 41)
state_llp <- read.csv("output/80_10/state_llp_small_vessel_80_10.csv") %>%
  filter(sim < 41)
state_superx <- read.csv("output/80_10/state_super_exclusive_80_10.csv") %>%
  filter(sim < 41)
psc <- read.csv("output/80_10/fed_psc_80_10.csv") %>%
  filter(sim < 41)
coop <- read.csv("output/80_10/fed_coop_80_10.csv") %>%
  filter(sim < 41)
coop.oa <- read.csv("output/80_10/all_coop_80_10.csv") %>%
  filter(sim < 41)
ifq_fed <- read.csv("output/80_10/fed_ifq_80_10.csv") %>%
  filter(sim < 41)

# Economic assumptions: price per lb, its per-metric-ton equivalent
# (2204.62 lb per tonne), and fuel price.
EXV <- 0.15
TEXV <- round(EXV * 2204.62)
FUEL <- 0.70

# Net revenue per scenario via f.rev() (defined in the sourced helper
# scripts); the status-quo run is tagged with its scenario code.
asq <- f.rev(sq, EXV, FUEL) %>%
  mutate(group = 'SQ')
state_ecs <- f.rev(state_ecs, EXV, FUEL)
cq_fed <- f.rev(cq_fed, EXV, FUEL)
cq_oa <- f.rev(cq_oa, EXV, FUEL)
state_llp <- f.rev(state_llp, EXV, FUEL)
state_superx <- f.rev(state_superx, EXV, FUEL)
psc <- f.rev(psc, EXV, FUEL)
coop <- f.rev(coop, EXV, FUEL)
coop.oa <- f.rev(coop.oa, EXV, FUEL)
ifq_fed <- f.rev(ifq_fed, EXV, FUEL)
head(ifq_fed)
# Policy combinations: pair each federal-side result with a state-side
# alternative and tag the combination with its scenario code.
a1c <- bind_rows(ifq_fed, state_superx) %>% mutate(group = '1C')
a1d <- bind_rows(ifq_fed, state_ecs) %>% mutate(group = '1D')
a2a <- cq_oa %>% mutate(group = '2A')
a2b <- bind_rows(cq_fed, state_llp) %>% mutate(group = '2B')
a2c <- bind_rows(cq_fed, state_superx) %>% mutate(group = '2C')
a3a <- coop.oa %>% mutate(group = '3A')
a3b <- bind_rows(coop, state_llp) %>% mutate(group = '3B')
a3c <- bind_rows(coop, state_superx) %>% mutate(group = '3C')
a4c <- bind_rows(psc, state_superx) %>% mutate(group = '4C')
a4d <- bind_rows(psc, state_ecs) %>% mutate(group = '4D')

# Scenario display order, used for the factor levels in the plots below.
grps <- c('SQ', '1C', '1D', '2A', '2B', '2C', '3A', '3B', '3C', '4C', '4D')
# Combine every scenario, then summarise per sim/port/season/group: the CV
# of net revenue and total revenue in millions.
# BUG FIX: the original piped the combined rows into `asq` (a data frame,
# not a function), which errors at run time and would have kept all but the
# status-quo scenario out of `dat`; the stray pipeline step is removed.
bind_rows(asq, a1c, a1d, a2a, a2b, a2c, a3a, a3b, a3c, a4c, a4d) %>%
  group_by(sim, d, season, group) %>%
  summarise(cv = sd(n_rev) / mean(n_rev), rev = sum(n_rev/1000000)) %>%
  mutate(Port = factor(d)) %>%
  mutate(group = factor(group, levels = grps)) -> dat
# Rev Figs
# All six panels below repeated an identical ~15-line dark theme; it is now
# defined once. legend.position defaults to the upper-right value used by
# five of the panels and is overridden per plot where the original differed.
panel_theme <- theme_dark() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        line = element_line(colour = "white", size = 0.5, linetype = 1,
                            lineend = "butt"),
        rect = element_rect(fill = "white",
                            colour = "white", size = 0.5, linetype = 1),
        text = element_text(face = "plain", colour = "white", size = 20,
                            angle = 0, lineheight = 0.9, hjust = 0, vjust = 0),
        plot.background = element_rect(colour = 'black', fill = 'gray50'),
        strip.background = element_rect(fill = "grey50", colour = "white"),
        panel.border = element_rect(fill = NA, colour = "white"),
        legend.background = element_blank(),
        legend.key = element_blank(),
        legend.position = c(.85, .8))

# Revenue by model; grey rectangles mask the models excluded from the
# "bounding" comparison.
ggplot(dat, aes(group, rev, fill = Port)) + geom_boxplot(color = 'black') +
  panel_theme +
  scale_fill_brewer(palette = "PuBu") +
  geom_rect(aes(xmin = 3.5, xmax = 6.5, ymin = 0, ymax = 11), fill = 'gray50') +
  geom_rect(aes(xmin = 7.5, xmax = 9.5, ymin = 0, ymax = 11.5), fill = 'gray50') +
  ylab("Revenue") + xlab("Model") + ggtitle("Bounding scenarios")

# Revenue, "likely" subset of models.
ggplot(dat, aes(group, rev, fill = Port)) + geom_boxplot(color = 'black') +
  panel_theme +
  scale_fill_brewer(palette = "PuBu") +
  geom_rect(aes(xmin = 1.5, xmax = 3.5, ymin = 0, ymax = 11.5), fill = 'gray50') +
  geom_rect(aes(xmin = 9.5, xmax = 11.5, ymin = 0, ymax = 11), fill = 'gray50') +
  ylab("Revenue") + xlab("Model") + ggtitle("Likely scenarios")

# Revenue, all models.
ggplot(dat, aes(group, rev, fill = Port)) + geom_boxplot(color = 'black') +
  panel_theme +
  scale_fill_brewer(palette = "PuBu") +
  ylab("Revenue") + xlab("Model") + ggtitle("All scenarios")

# CV Figs
# Coefficient of variation of revenue, same three comparisons as above.
ggplot(dat, aes(group, cv, fill = Port)) + geom_boxplot(color = 'black') +
  panel_theme +
  scale_fill_brewer(palette = "PuBu") +
  geom_rect(aes(xmin = 3.5, xmax = 6.5, ymin = 0, ymax = 3), fill = 'gray50') +
  geom_rect(aes(xmin = 7.5, xmax = 9.5, ymin = 0, ymax = 3), fill = 'gray50') +
  ylab("CV") + xlab("Model") + ggtitle("Bounding scenarios")

ggplot(dat, aes(group, cv, fill = Port)) + geom_boxplot(color = 'black') +
  panel_theme +
  # This panel placed its legend mid-plot in the original; override the
  # shared default.
  theme(legend.position = c(.45, .8)) +
  scale_fill_brewer(palette = "PuBu") +
  geom_rect(aes(xmin = 1.5, xmax = 3.5, ymin = 0, ymax = 3), fill = 'gray50') +
  geom_rect(aes(xmin = 9.5, xmax = 11.5, ymin = 0, ymax = 4), fill = 'gray50') +
  ylab("CV") + xlab("Model") + ggtitle("Likely scenarios")

# BUG FIX: this CV panel was labelled ylab("Revenue") — a copy-paste slip
# from the revenue panels above; it plots `cv`, so the axis now says "CV".
ggplot(dat, aes(group, cv, fill = Port)) + geom_boxplot(color = 'black') +
  panel_theme +
  scale_fill_brewer(palette = "PuBu") +
  ylab("CV") + xlab("Model") + ggtitle("All scenarios")
|
7f8c937f00f131f523439903cffe40f436f8dff1 | 210210c8dccad3aad7cfcdeff10f3d814ea3a14f | /install.R | c572ed9f5d998eccabc84f2bcaad15a74ddfee4f | [] | no_license | bas-1994/2D3D | f6596d24a9a7d02f7bfdc9348ca7065ff4d89f71 | d1739ea52c1b5a2612cc590e0acae3222edc9a01 | refs/heads/master | 2023-05-09T02:32:39.336175 | 2021-05-28T10:17:07 | 2021-05-28T10:17:07 | 371,623,939 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 146 | r | install.R | install.packages("readxl")
# Install the remaining analysis dependencies in a single call (readxl is
# installed on the line above); one install.packages() call lets R resolve
# shared dependencies once instead of once per package.
install.packages(c("tidyverse", "tidyselect", "tableone", "olsrr"))
c3f91d8a70fcb87261eb81db4f01a9007e02ef7b | 3835d6896cc2193cb9253efb295f5dbf39944274 | /cachematrix.R | 5597a7f768aebfb83e34d7cb899d42d3511df216 | [] | no_license | nchandola/ProgrammingAssignment2 | 233c6350692b1f86290d0cee3c53bfc9f3457e38 | 467dd4d0e18bcdcfc3e660f63cec7876ecd9bfa4 | refs/heads/master | 2021-01-18T02:38:03.224247 | 2015-09-27T18:21:02 | 2015-09-27T18:21:02 | 43,175,882 | 0 | 0 | null | 2015-09-25T21:15:07 | 2015-09-25T21:15:07 | null | UTF-8 | R | false | false | 2,569 | r | cachematrix.R | ## Below are a pair of functions that cache the inverse of a matrix. Matrix
## inversion is a costly computation and there may be benefits to cache the
## inverse of a matrix rather than compute it repeatedly. This benefit may be
## realised if the contents of the matrix are not changing.
## makeCacheMatrix Function: This function creates a special "matrix" that can
## cache its inverse. For this assignment, assume that matrix supplied is
## always invertible.
# Wrap a matrix in a list of closures that can also hold its inverse.
# Returned interface: set()/get() for the matrix itself, and
# setsolve()/getsolve() for the cached inverse. Storing a new matrix via
# set() invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until setsolve() stores a value.
  inv <- NULL
  # Replace the stored matrix and drop the stale cache.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store / retrieve the cached inverse.
  setsolve <- function(solve) inv <<- solve
  getsolve <- function() inv
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## cacheSolve Function: This function calculates the inverse matrix created
## using above function. It checks to see if the inverse matrix has already
## been calculated. If so, it gets the inverse matrix from the cache and skips
## the computation. Otherwise, it calculates the inverse matrix of the data
## and set the value of the inverse matrix in the cache via the setsolve
## function.
# Return the inverse of the special "matrix" `x` made by makeCacheMatrix().
# A previously cached inverse is reused when present; otherwise the inverse
# is computed with solve(), stored back into `x`, and returned.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
## TESTING INSTRUCTIONS:
## 1. Create a new invertible matrix, e.g.
## m <- matrix(c(1,-1/4,1/4,1),2,2)
## 2. Apply following line to test code
## m1 < makeCacheMatrix(m)
## 3. Test the cacheSolve function to get desired result
## cacheSolve(m1) |
069285f2143382461a9e2cff8b868a436e4b1ce9 | b6492c0a4318047608a36c8df0e59a8830b7a2fa | /161230LoadMLR.R | 88a7b479a2bb94c5d7bf2c4186b01fd713f96d03 | [] | no_license | scfurl/170110 | 4b66a03e61844c27beaedc1b41572d2de3092dd5 | a6403116a508a6d55df78a76d548747fb13a1e2b | refs/heads/master | 2021-04-29T11:01:34.018879 | 2017-01-03T14:16:29 | 2017-01-03T16:43:38 | 77,856,331 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,697 | r | 161230LoadMLR.R |
# -------------------------------------
# specify paths and load functions
# -------------------------------------
DATA_DIR <- paste(ROOT_DIR, "/Analysis/MLR2/WS", sep="") # SPECIFY HERE
PROG_DIR <- paste(ROOT_DIR, "/Analysis/MLR2/prog", sep="") # SPECIFY HERE
RES_DIR <- paste(ROOT_DIR, "/Analysis/MLR2/res/", key, sep="") # SPECIFY HERE
source(file.path(PROG_DIR,'SFfunc.R'))
dir.create(file.path(RES_DIR), showWarnings=F)
source(file.path(PROG_DIR,'SFplotting.R'))
source(file.path(PROG_DIR,'support_functions.R'))
source(file.path(PROG_DIR,'color.R'))
colorfile<-file.path(PROG_DIR, "CBSafe15.csv")
colors<-readColorFile(colorfile)
colors<-as.character(colors)
#knitr::opts_knit$set(root.dir = ROOT_DIR)
library(colorout)
library(Matrix)
library(monocle)
library(stringr)
library(slam)
library(pheatmap)
library(matrixStats)
#library(plyr)
library(dplyr)
library(reshape2)
library(piano)
library(DDRTree)
library(gridExtra)
library(XLConnect)
library(tsne)
library(Rtsne)
library(e1071)
library(RColorBrewer)
#library(densityClust)
library(devtools)
load_all(file.path(ROOT_DIR, "monocle-dev"))
load_all(file.path(ROOT_DIR, "densityClust"))
#load_all(file.path(ROOT_DIR, "fstree"))
# Set global ggplot2 properties for making print-scaled PDF panels
SFtheme<-theme_bw(base_size=14) +
theme(panel.background = element_rect(fill = "transparent",colour = NA), # or theme_blank()
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
plot.background = element_rect(fill = "transparent",colour = NA))
theme_set(SFtheme)
set.seed(0)
print("done")
mix<-readRDS(file.path(DATA_DIR, "CDS2_Final.RDS"))
genes2<-read.table(file.path(DATA_DIR, "genes2.tsv"))
# Filter super high mRNA cells, which are probably not singletons:
mRNA_thresh <- 10000
removedhigh<-mix[,which(pData(mix)$Total_mRNAs>mRNA_thresh)]
mix <- mix[,pData(mix)$Total_mRNAs < 10000]
# Remove low mRNA cells: (There are none)
keep <- detectGenes(mix, min_expr = 0.1)
which(!rownames(pData(mix)) %in% rownames(pData(keep)))
mix<-keep
rm(keep)
cthSVM <- newCellTypeHierarchy()
cthSVM <- addCellType(cthSVM, "CD3s", classify_func=function(x) {x["CD3D",] > 0})
cthSVM <- addCellType(cthSVM, "CD4s", classify_func=function(x) {x["CD4",] > 0}, parent_cell_type_name = "CD3s")
cthSVM <- addCellType(cthSVM, "CD8s", classify_func=function(x) {x["CD8A",] > 0 | x["CD8B",] > 0 }, parent_cell_type_name = "CD3s")
cthSVM <- addCellType(cthSVM, "Bcells", classify_func=function(x)
{x["MS4A1",] > 0})
cthSVM <- addCellType(cthSVM, "Monos", classify_func=function(x)
{x["CD14",] > 0 })
cthSVM <- addCellType(cthSVM, "NKs", classify_func=function(x)
{x["KLRD1",] > 0 |
x["NCAM1",] > 0})
|
0ff0c79360d1ff1ff2a79ef49a637907a4f9956e | c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab | /man/gtkColorSelectionSetChangePaletteWithScreenHook.Rd | ba10d20a5b3b2bc7a82b5bbe64388a028219d662 | [] | no_license | cran/RGtk2.10 | 3eb71086e637163c34e372c7c742922b079209e3 | 75aacd92d4b2db7d0942a3a6bc62105163b35c5e | refs/heads/master | 2021-01-22T23:26:26.975959 | 2007-05-05T00:00:00 | 2007-05-05T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 879 | rd | gtkColorSelectionSetChangePaletteWithScreenHook.Rd | \alias{gtkColorSelectionSetChangePaletteWithScreenHook}
\name{gtkColorSelectionSetChangePaletteWithScreenHook}
\title{gtkColorSelectionSetChangePaletteWithScreenHook}
\description{Installs a global function to be called whenever the user tries to
modify the palette in a color selection. This function should save
the new palette contents, and update the GtkSettings property
"gtk-color-palette" so all GtkColorSelection widgets will be modified.}
\usage{gtkColorSelectionSetChangePaletteWithScreenHook(func)}
\arguments{\item{\code{func}}{[\code{\link{GtkColorSelectionChangePaletteWithScreenFunc}}] a function to call when the custom palette needs saving.}}
\details{ Since 2.2}
\value{[\code{\link{GtkColorSelectionChangePaletteWithScreenFunc}}] the previous change palette hook (that was replaced).}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
a349a3c3b9e2eb0336ed05bcdc1ab4af957f3400 | f2f1bdd7d8792dfcf378bb4d6e60fc82c11b5d92 | /scripts/week_5_class_code.R | 7c0c7c21cad01e49987990acd74055e1b45dfa5c | [] | no_license | gge-ucd/r-davis-in-class-caparisek | 897adea352b37c4b5ca98d34e09e914d6019b789 | 69f6e4452950613d75c0e6665a4e67d3fa19b71b | refs/heads/master | 2021-06-30T10:02:16.660017 | 2020-10-15T18:32:37 | 2020-10-15T18:32:37 | 167,059,209 | 0 | 1 | null | 2019-02-19T18:38:46 | 2019-01-22T20:04:25 | R | UTF-8 | R | false | false | 9,272 | r | week_5_class_code.R | # Week 3: February 05, 2019 - R-DAVIS Class Code - Week 5
# GUESS WHAT I FOUND:
# https://twitter.com/mathematicalm3l/status/1090720774464421889/video/1
# https://github.com/melissanjohnson/pupR
# Step 1: install package "devtools" -- for helping develop your own package.
# Step 2: run the line: devtools::install_github("melissanjohnson/pupR")
devtools::install_github("melissanjohnson/pupR")
# check console to answer, option 1-7, just pick 7 ("dontupdate")
# note: if console has a "1:" instead of ">" it's waiting for this answer! Click next to "1:" and click "esc", or answer "7"
library(pupR)
pupR()
# Ok, now let's get to class
#############################################################################
# last week: bracketing and subsetting using base-R
# good to know, but in big datasets or complicated things, cumbersome. so use tidyverse.
# Today: " Tidyverse World: dplyr, ggplot, etc."
# https://gge-ucd.github.io/R-DAVIS/lectures.html
# https://gge-ucd.github.io/R-DAVIS/lesson_dplyr_ecology.html
# class livecode: https://www.dl.dropboxusercontent.com/s/knklng647ndc3sb/Week_5_LiveCode.R?dl=0
#############################################################################################
# install.packages("tidyverse") #dont need to install each time, but do need to call it.
library(tidyverse) #scary colored text, checks and some red x's. it's saying watch out these are named the name thing, you might have a problem later, but I'll download it anyway - it says.
library(dplyr)
# download.file(url="https://ndownloader.figshare.com/files/2292169",destfile = "data/portal_data_joined.csv") ## if you don't have it
#read.csv is base R
#read_csv is tidyverse
surveys<- read_csv("data/portal_data_joined.csv")
#if doesn't work: surveys<- readr::read_csv("data/portal_data_joined.csv")
str(surveys) #notice the top says it's a "tibble dataframe" (tbl_df). Columns are characters. Or you can click on it in your environment and view in table format. TBL is a fancy dataframe. A tidyverse construct. prints nicer, fancier looking, not really different. fresh coat of paint
# [ tbl_df -- table dataframe -- tibbledataframe (said really fast)]
############################################################################# dplyr functions
#select is used for selecting columns in a dataframe
select(surveys, plot_id, species_id, weight) #plot_id, species_id, and weight
#filter is used for selection rows
filter(surveys, year == 1995) #prints table with only the years from 1995
surveys2 <- filter(surveys,weight<5) #new df that filters weights that are less than 5
surveys_sml <-select(surveys2, species_id, sex, weight) #creates new df with those 3 columns of the df that has weight <5
#but this can get tedious too! so use Pipe!
############################################################################# #Pipes %>%
#Pipes %>% Shortcut -- PC: cntrl+shift+M // MAC: cmd+shift+M
# %>% aka "then/and then" do this
#mcgritter package is for piping without tidyverse, but you could just load tidyverse each time too
# .,
surveys %>% #tells following commands that what is left of the pipe to go into it
filter(weight<5) %>% #close the pipe
select(species_id, sex, weight)
############################################################################# CHALLENGE START
#Challenge! Using pipes, subset the surveys data to include individuals collected before 1995 and retain only the columns year, sex, and weight.
surveys %>%
filter(year<1995) %>%
select(year,sex,weight) #could you select before you filter? in this case, yes. but sometimes if you switch order you might unselect a column you wanted. just keep track of that
############################################################################# CHALLENGE END
############################################################################# MUTATE
View(surveys) #ok, checked the table's appearance.
#mutate is used to create new columns
surveys_kg <- surveys %>%
mutate(weight_kg = weight/1000) %>% #perform an operation on a column to create new column (kg one)
mutate(weight_kg2 = weight_kg * 2) # another new column / if you called it the same name, it would replace it
#! is a negating operator "is not NA"
#notice we have all these NAs in our data...
surveys %>%
filter(!is.na(weight)) %>% #use is.NA to ask if it is an NA or not. filters the NAs out
mutate(weight_kg = weight/1000) %>%
summary
#use "complete cases" to filter out ALL of the NAs
############################################################################# CHALLENGE START
#Challenge! Create a new data frame from the surveys data that meets the following criteria: contains only the species_id column and a new column called hindfoot_half containing values that are half the hindfoot_length values. In this hindfoot_half column, there are no NAs and all values are less than 30.
#Hint: think about how the commands should be ordered to produce this data frame!
#filter is the largest operation, then mutate, then select
surveymchallenge <- surveys %>%
filter(!is.na(surveymchallenge) %>%
filter(surveymchallenge<30)) %>%
select(species_id, hindfoot_half) %>%
mutate(hindfoot_half = hindfoot_length/2) %>%
summary()
#ughh out of orderrrrrrr
#ANSWER
surveys_hindfoot_half <- surveys %>%
filter(!is.na(hindfoot_length)) %>%
mutate(hindfoot_half = hindfoot_length / 2) %>%
filter(hindfoot_half < 30) %>%
select(species_id, hindfoot_half)
############################################################################# CHALLENGE END
############################################################################# GROUP-BY
#group_by is good for split-apply-combine
surveys %>%
group_by(sex) %>%
summarize(mean_weight = mean(weight, na.rm=TRUE)) %>% View
#remove NAs in the weight column for means and get mean weight for male/female/NA
#look, add view here! notice it's not named anything or saved in the environment
#SO FAR IN TIDYVERSE:
#filter, select, mutate, group_by, summarize
##mutate added new columns to existing dataframe, mutated it
##summarize spits out entirely NEW tbl
surveys %>%
filter(is.na(sex)) %>%
View #way to look at all the NAs in the data frame
surveys %>% #tells us where the NAs are in species
group_by(species) %>%
filter(is.na(sex)) %>%
tally() # NEW
# you can use group_by with multiple columns
surveys %>%
filter(!is.na(weight)) %>% #get's rid of the NaN's
group_by(sex,species_id) %>%
summarize(mean_weight = mean(weight,na.rm=TRUE)) %>%
View #NaN's gone
#why did the NaNs happen? because maybe you said males of this sp divided by N -- but there were no males of that species, only females. so it divided by zero (can't do that) and it gave you something but it's wrong. Maybe that's why it happens.
surveys %>% #now I want to know the min_weight too
filter(!is.na(weight)) %>%
group_by(sex,species_id) %>%
summarize(mean_weight = mean(weight), min_weight=min(weight)) %>%
View
########################################################################################## TALLY FUNCTION
surveys %>%
group_by(sex) %>%
tally( ) %>% View
#tally is for diving into guts of dataframe
# assign to something? and
# tally will give you a tbl
?tally #counting
#tally () same as group_by(something) %>% summarize (new_column=n)
################################################################################### GATHERING AND SPREADING
# dataframe that has mean weight of each species of each plot
#spoiler alert, use gathering and spreading
#SPREADING: takes long format dataframe & spreads it to wide (lot of rows, a few columns)
#Spread
surveys_gw <- surveys %>%
filter(!is.na(weight)) %>%
group_by(genus, plot_id) %>%
summarize(mean_weight = mean(weight))
surveys_spread <- surveys_gw %>%
spread(key=genus,value=mean_weight) #if your computer forgets the package use "tidyr::" before "spread"
# in our lesson, see figure under "reshaping with gather and spread"
# https://gge-ucd.github.io/R-DAVIS/lesson_dplyr_ecology.html#reshaping_with_gather_and_spread
surveys_gw %>%
spread(genus,mean_weight,fill=0) #func knows the first thing is KEY and 2nd is VALUE
#fill=0 filled the NAs with zero
#GATHERING: for WIDE format (many columns, few rows) >TO> long format
#you'll do this a lot less
#find this more useful to get from FIELD DATASHEET to something that's more useable
#takes a few more things
#see fig 2 in the link above
surveys_gather <- surveys_spread %>%
gather(key = genus, value = mean_weight, -plot_id) #use all columns but plot_id to fill genus
View(surveys_gather)
## BTW -- the package was made by HadleyWickam, from NewZealand. When the package first same out, functions were like "summarise" not "summarize" and people made fun that those in UK or USA couldn't use func on words they were used to. You had to write it with S! New version, you can use either, they do the same thing.
|
ab42786fd54cead896870fdd93b7252966a7c897 | 552e7dfdb045f5d775b6c6b36e42bff881ede4d1 | /cachematrix.R | 4bdcfa5baf27797e41b06b6a563442665c2907be | [] | no_license | maulik438/ProgrammingAssignment2 | 4f84eb3cb2fe2d4fc95545acedca81fb4ec27b01 | 8526449a45647eeb2e5a5b928b35748f95ad2456 | refs/heads/master | 2020-09-27T06:31:28.192071 | 2019-12-12T15:42:48 | 2019-12-12T15:42:48 | 226,453,102 | 0 | 0 | null | 2019-12-07T04:03:33 | 2019-12-07T04:03:32 | null | UTF-8 | R | false | false | 932 | r | cachematrix.R | ## Function to store inverse of matrix in cache
## makeCacheMatrix: wrap a matrix together with a cache slot for its inverse.
## Returns a list of four closures (set/get/getInv/setInv) that share this
## call's environment, so an inverse computed once can be reused later.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL  # memoised inverse; NULL until setInv() stores one
  list(
    # swap in a new matrix and drop any stale cached inverse
    set = function(y) {
      x <<- y
      cached_inv <<- NULL
    },
    # return the wrapped matrix
    get = function() x,
    # return the cached inverse, or NULL if not yet computed
    getInv = function() cached_inv,
    # store a freshly computed inverse in the cache
    setInv = function(inv) cached_inv <<- inv
  )
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix(), computing it at most once.
##
## Args:
##   x   - a cache object as returned by makeCacheMatrix()
##   ... - further arguments forwarded to solve() (e.g. tol)
##
## Returns the inverse matrix, either from the cache (with a message)
## or freshly computed via solve() and then stored back in the cache.
cacheSolve <- function(x, ...) {
  m <- x$getInv()
  # If inverse matrix is stored, use from cache
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  # Else, get data, compute inverse matrix and store to cache
  data <- x$get()
  m <- solve(data, ...)  # fix: forward '...' to solve(), as the signature advertises
  x$setInv(m)
  m
}
|
bfe026e00554ae21922b75ea3e1fe81cea6e2780 | c9fe6fb7f69d6ccc0d281b0e9f8730b6789eb214 | /project_code.R | 59a5a2fff0459c6fbe5ee149e5c57a33db4cbc01 | [] | no_license | Akahdeep-balu/San-Francisco-Crime-Analysis | 99cfd5bc28111493cedb5ebb909c02534a6cfe82 | 17bc66691f679efd9d70bcd76cb026da253ad092 | refs/heads/main | 2023-06-19T05:46:10.053833 | 2021-07-07T03:41:04 | 2021-07-07T03:41:04 | 383,664,328 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,572 | r | project_code.R | library(funModeling)
# San Francisco crime analysis: data prep + kNN / random forest / rpart models.
# NOTE(review): interactive analysis script; expects 'train.csv' (SF crime
# incidents) and 'uszipsv1.4-2.csv' in the working directory.
library(tidyverse)
library(Hmisc)
library(geohashTools)
library(randomForest)
library(class)
library(data.table)
library(kknn)
library(rpart)
library(e1071)
#install.packages("e1071")
library(caret)
require(caTools)
sfc.data <-read.csv(file = 'train.csv')
zip.data <- read.csv(file = 'uszipsv1.4-2.csv')
# Get a glimpse of Data to understad
glimpse(sfc.data)
#Get the metrics about data types, zeros, infinite numbers, and missing values
df_status(sfc.data)
# Analysing the data
describe(sfc.data)
#classifying dates to seasons
# Month-number lookup vectors used to bucket each incident date into a season.
fall <- c("09","10","11")
summer <- c("06", "07", "08")
spring <- c("03", "04", "05")
winter <- c("12", "01", "02")
# NOTE(review): apply() over rows coerces the data frame to character, so x is
# one row as a character vector; strsplit(x, "-")[[1]][2] pulls the month from
# the FIRST field only -- assumes Dates ("YYYY-MM-DD ...") is column 1.
sfc.data$season <- apply(sfc.data, 1, FUN = function(x) if(strsplit( x, "-" )[[1]][2] %in% fall) {
  "fall"
} else if (strsplit( x, "-" )[[1]][2] %in% summer ) {
  "summer"
} else if (strsplit( x, "-" )[[1]][2] %in% spring ) {
  "spring"
} else if (strsplit( x, "-" )[[1]][2] %in% winter ) {
  "winter"
})
head(sfc.data)
#removing outliers
boxplot(sfc.data$Y)
# Drop rows with latitude exactly 90 (the outlier visible in the boxplot above).
sfc.data <- sfc.data[!(sfc.data$Y==90),]
boxplot(sfc.data$Y)
#classifying lat,long to geohash
# Encode each incident location as a 6-character geohash.
# NOTE(review): positional indexing -- assumes column 9 is Y (lat) and
# column 8 is X (lng); confirm against the column order of train.csv.
get_geohash <- function(a){
  return(gh_encode(a[9],a[8] , precision = 6L))
}
sfc.data$geohash <- apply(sfc.data, 1, get_geohash)
head(sfc.data)
#classifying crimes
# Lookup vectors mapping the raw SFPD 'Category' labels into broader classes.
Theft <- c("LARCENY/THEFT","VEHICLE THEFT","BURGLARY")
Sexual_offences <- c("SEX OFFENSES FORCIBLE", "SEX OFFENSES NON FORCIBLE", "PORNOGRAPHY/OBSCENE MAT","PROSTITUTION")
Public_order <- c("DRUNKENNESS", "SUSPICIOUS OCC", "BRIBERY","DRIVING UNDER THE INFLUENCE","RECOVERED VEHICLE",
                  "BAD CHECKS","LOITERING","DISORDERLY CONDUCT","LIQUOR LAWS","TRESPASS","WEAPON LAWS")
Assault <- c("ROBBERY", "KIDNAPPING", "ASSAULT")
Drug_offences <- c("DRUG/NARCOTIC")
Property_crime <- c("TREA", "EMBEZZLEMENT", "STOLEN PROPERTY","VANDALISM","ARSON")
Whitecollor_crime <- c("FRAUD", "FORGERY/COUNTERFEITING", "SECONDARY CODES")
Victimless_crime <- c("GAMBLING", "RUNAWAY" )
Suicide <- c("SUICIDE", "FAMILY OFFENSES", "MISSING PERSON","EXTORTION")
Other <- c("WARRANTS", "OTHER OFFENSES","NON-CRIMINAL" )
# Overwrite Category with the coarse class for each row.
# NOTE(review): (x)[2] indexes the row positionally -- assumes Category is
# column 2 of sfc.data; confirm against the raw file layout.
sfc.data$Category <- apply(sfc.data, 1, FUN = function(x) if((x)[2] %in% Theft) {
  "Theft"
} else if ((x)[2] %in% Sexual_offences ) {
  "Sexual offences"
} else if ((x)[2] %in% Public_order ) {
  "Public order"
} else if ((x)[2] %in% Assault ) {
  "Assault"
} else if((x)[2] %in% Drug_offences ) {
  "Drug offences"
} else if ((x)[2] %in% Property_crime ) {
  "Property crime"
} else if((x)[2] %in% Whitecollor_crime ) {
  "White-collor crime"
} else if((x)[2] %in% Victimless_crime ) {
  "Victimless-crime"
} else if ((x)[2] %in% Suicide ) {
  "Suicide"
} else if ((x)[2] %in% Other ) {
  "Other"
})
head(sfc.data)
sapply(sfc.data, function(x) sum(is.na(x))) # reverifying missing values
# Selecting only the zipcodes for the city San Francisco
zip.data <- zip.data[(zip.data$city=='San Francisco'),]
# NOTE(review): positional indexing again -- assumes x[2] = lat, x[3] = lng;
# confirm the column order of uszipsv1.4-2.csv.
zip.data$geohash <- apply(zip.data, 1, FUN = function(x) gh_encode(x[2],x[3] , precision = 6L))
zip.data = zip.data[c("zip", "lat", "lng", "geohash", "population")]
# Replacing missing values with the mean
zip.data$population[is.na(zip.data$population)] <- round(mean(zip.data$population, na.rm = TRUE))
sapply(zip.data, function(x) sum(is.na(x))) # checking missing values
# innerjoin two data frames by geohash
# Attach zip/population info to each incident; incidents whose geohash has no
# match among the SF zip geohashes are dropped by the inner join.
innerjoin <- inner_join(sfc.data,zip.data,by = "geohash")
head(innerjoin)
df_status(innerjoin)
# Keep one row per (Dates, X, Y, Descript) -- presumably to undo join fan-out
# when one geohash matches several zip rows.
sanfc.data = innerjoin[!duplicated(innerjoin[c('Dates', 'X', 'Y', 'Descript')]), ]
# Hour of day pulled from the fixed-width "YYYY-MM-DD HH:MM:SS" timestamp.
sanfc.data$hour <- as.numeric(substr(sanfc.data$Dates,12,13))
#Converting to factor variables
sapply(sanfc.data, class)
sanfc.data = sanfc.data[c("Dates", "Category", "Descript", "DayOfWeek", "PdDistrict","Resolution",
                          "Address","season","geohash","zip","lat","lng","population","hour")]
sanfc.data <- transform(sanfc.data,
                        Category=as.factor(Category))
sanfc.data <- transform(sanfc.data,
                        season=as.factor(season))
sanfc.data <- transform(sanfc.data,
                        geohash=as.factor(geohash))
sapply(sanfc.data, function(x) sum(is.na(x))) # checking missing values
# To view the final data set
summary(sanfc.data)
#analyzing numerical variables
plot_num(sanfc.data)
#analyzing categorical variables
freq(sanfc.data)
# 80:20 data
head(sanfc.data)
# Train/test split.
# NOTE(review): caTools::sample.split() is documented to take the outcome
# vector (e.g. sanfc.data$Category), not the whole data frame -- verify intent.
sample = sample.split(sanfc.data,SplitRatio = 0.80)
sanfc.train <- subset(sanfc.data, sample == TRUE)
head(sanfc.train)
sanfc.test <- subset(sanfc.data, sample == FALSE)
head(sanfc.test)
zip.data = zip.data[c("zip", "lat", "lng", "geohash", "population")]
# Scale dependent variables in 'train'.
x_train_scaled = scale(sanfc.train$lat)
y_train_scaled = scale(sanfc.train$lng)
hour_train_scaled = scale(sanfc.train$hour)
# Scale dependent variables in 'test' using mean and standard deviation derived from scaling variables
#in 'train'.
x_test_scaled = (sanfc.test$lat - attr(x_train_scaled, 'scaled:center')) / attr(x_train_scaled, 'scaled:scale')
y_test_scaled = (sanfc.test$lng - attr(y_train_scaled, 'scaled:center')) / attr(y_train_scaled, 'scaled:scale')
hour_test_scaled = (sanfc.test$hour - attr(hour_train_scaled, 'scaled:center')) / attr(hour_train_scaled, 'scaled:scale')
# Numeric encodings of the categorical predictors.
# NOTE(review): these are computed on the FULL sanfc.data, so their length is
# nrow(sanfc.data), not nrow(sanfc.train) -- mixing them with the train-only
# scaled columns in data.table() below will recycle or error; verify.
# Also, as.numeric() yields level codes only for factors (only Category,
# season and geohash were converted above) -- confirm DayOfWeek/PdDistrict types.
days_num = as.numeric(sanfc.data$DayOfWeek)
print(days_num)
pd_num = as.numeric(sanfc.data$PdDistrict)
print(pd_num)
season_num = as.numeric(sanfc.data$season)
print(season_num)
geohash_num = as.numeric(sanfc.data$geohash)
print(geohash_num)
# Create 'train_model' and 'test_model' which only include variables used in the model.
train_model = data.table(category_predict = sanfc.train$Category,
                         x_scaled = x_train_scaled,
                         y_scaled = y_train_scaled,
                         hour_scaled = hour_train_scaled,
                         population = sanfc.train$population,
                         days_num = days_num,
                         pd_num = pd_num,
                         season_num = season_num,
                         geohash_num = geohash_num)
setnames(train_model,
         names(train_model),
         c('category_predict', 'x_scaled', 'y_scaled', 'hour_scaled', 'population', 'days_num','pd_num', 'season_num','geohash_num'))
test_model = data.table(x_scaled = x_test_scaled,
                        y_scaled = y_test_scaled,
                        hour_scaled = hour_test_scaled)
#####
# CREATE MODEL AND PREDICTIONS.
# Set seed to ensure reproducibility.
set.seed(1)
# Define model.
# NOTE(review): the second assignment overwrites the first, so only
# x/y/hour actually enter the fitted model.
model = category_predict ~ x_scaled + y_scaled + hour_scaled + population + days_num + pd_num + season_num + geohash_num
model = category_predict ~ x_scaled + y_scaled + hour_scaled
# Create model and generate predictions for training set.
# Variable scaling is done in this command.
knn_train = kknn(formula = model,
                 train = train_model,
                 test = train_model,
                 scale = T)
# Create model and generate predictions for test set.
knn_test = kknn(formula = model,
                train = train_model,
                test = test_model,
                scale = T)
train_pred = data.table(knn_train$fitted.values)
test_pred = data.table(knn_test$prob)
# View testing accuracy.
# NOTE(review): this compares train-set predictions with train-set labels,
# so the "Testing Accuracy" label is misleading (it is training accuracy).
print('Testing Accuracy')
print(table(train_model$category_predict == train_pred$V1))
print(prop.table(table(train_model$category_predict == train_pred$V1)))
# Conduct cross validation.
cv = cv.kknn(model,
             data = train_model,
             kcv = 2,
             scale = T)
# View cross validation accuracy.
cv = data.table(cv[[1]])
print('Cross Validation Accuracy')
print(table(cv$y == cv$yhat))
print(prop.table(table(cv$y == cv$yhat)))
# Random Forest
# NOTE(review): the formula spells out sanfc.test$... columns even though
# data = sanfc.test is supplied, and the forest is fit on the *test* split --
# confirm this is intentional.
sanfc.rf <- randomForest( sanfc.test$Category ~ sanfc.test$DayOfWeek + sanfc.test$PdDistrict +
                            sanfc.test$hour ,data = sanfc.test,ntree = 25)
sanfc.rf
#decision tree
sanfc.dt <- train(Category ~ DayOfWeek + PdDistrict + hour, data = sanfc.train , method = "rpart")
# NOTE(review): predict() for caret 'train' objects takes 'newdata'; a
# 'data =' argument is ignored, so sanfc.dt1 holds predictions on the
# training data regardless of this argument.
sanfc.dt1 <- predict(sanfc.dt, data = sanfc.train)
table(sanfc.dt1,sanfc.train$Category)
mean(sanfc.dt1 == sanfc.train$Category)
#Cross Validation
sanfc.dtcv <- predict(sanfc.dt, newdata = sanfc.test)
table(sanfc.dtcv,sanfc.test$Category)
mean(sanfc.dtcv == sanfc.test$Category)
|
7bd86ab4f612784d492319075227e700bd1c6d2b | 6464efbccd76256c3fb97fa4e50efb5d480b7c8c | /cran/paws.end.user.computing/man/appstream_create_image_builder.Rd | b9905b3318ac468d67ebf7e57b6b04008e4b8d33 | [
"Apache-2.0"
] | permissive | johnnytommy/paws | 019b410ad8d4218199eb7349eb1844864bd45119 | a371a5f2207b534cf60735e693c809bd33ce3ccf | refs/heads/master | 2020-09-14T23:09:23.848860 | 2020-04-06T21:49:17 | 2020-04-06T21:49:17 | 223,286,996 | 1 | 0 | NOASSERTION | 2019-11-22T00:29:10 | 2019-11-21T23:56:19 | null | UTF-8 | R | false | true | 4,587 | rd | appstream_create_image_builder.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appstream_operations.R
\name{appstream_create_image_builder}
\alias{appstream_create_image_builder}
\title{Creates an image builder}
\usage{
appstream_create_image_builder(Name, ImageName, ImageArn, InstanceType,
Description, DisplayName, VpcConfig, IamRoleArn,
EnableDefaultInternetAccess, DomainJoinInfo, AppstreamAgentVersion,
Tags, AccessEndpoints)
}
\arguments{
\item{Name}{[required] A unique name for the image builder.}
\item{ImageName}{The name of the image used to create the image builder.}
\item{ImageArn}{The ARN of the public, private, or shared image to use.}
\item{InstanceType}{[required] The instance type to use when launching the image builder. The following
instance types are available:
\itemize{
\item stream.standard.medium
\item stream.standard.large
\item stream.compute.large
\item stream.compute.xlarge
\item stream.compute.2xlarge
\item stream.compute.4xlarge
\item stream.compute.8xlarge
\item stream.memory.large
\item stream.memory.xlarge
\item stream.memory.2xlarge
\item stream.memory.4xlarge
\item stream.memory.8xlarge
\item stream.graphics-design.large
\item stream.graphics-design.xlarge
\item stream.graphics-design.2xlarge
\item stream.graphics-design.4xlarge
\item stream.graphics-desktop.2xlarge
\item stream.graphics-pro.4xlarge
\item stream.graphics-pro.8xlarge
\item stream.graphics-pro.16xlarge
}}
\item{Description}{The description to display.}
\item{DisplayName}{The image builder name to display.}
\item{VpcConfig}{The VPC configuration for the image builder. You can specify only one
subnet.}
\item{IamRoleArn}{The Amazon Resource Name (ARN) of the IAM role to apply to the image
builder. To assume a role, the image builder calls the AWS Security
Token Service (STS) \code{AssumeRole} API operation and passes the ARN of the
role to use. The operation creates a new session with temporary
credentials. AppStream 2.0 retrieves the temporary credentials and
creates the \strong{AppStream\\_Machine\\_Role} credential profile on the
instance.
For more information, see \href{https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html}{Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances}
in the \emph{Amazon AppStream 2.0 Administration Guide}.}
\item{EnableDefaultInternetAccess}{Enables or disables default internet access for the image builder.}
\item{DomainJoinInfo}{The name of the directory and organizational unit (OU) to use to join
the image builder to a Microsoft Active Directory domain.}
\item{AppstreamAgentVersion}{The version of the AppStream 2.0 agent to use for this image builder. To
use the latest version of the AppStream 2.0 agent, specify [LATEST].}
\item{Tags}{The tags to associate with the image builder. A tag is a key-value pair,
and the value is optional. For example, Environment=Test. If you do not
specify a value, Environment=.
Generally allowed characters are: letters, numbers, and spaces
representable in UTF-8, and the following special characters:
\\_ . : / = + \\ - @
If you do not specify a value, the value is set to an empty string.
For more information about tags, see \href{https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html}{Tagging Your Resources}
in the \emph{Amazon AppStream 2.0 Administration Guide}.}
\item{AccessEndpoints}{The list of interface VPC endpoint (interface endpoint) objects.
Administrators can connect to the image builder only through the
specified endpoints.}
}
\description{
Creates an image builder. An image builder is a virtual machine that is
used to create an image.
}
\details{
The initial state of the builder is \code{PENDING}. When it is ready, the
state is \code{RUNNING}.
}
\section{Request syntax}{
\preformatted{svc$create_image_builder(
Name = "string",
ImageName = "string",
ImageArn = "string",
InstanceType = "string",
Description = "string",
DisplayName = "string",
VpcConfig = list(
SubnetIds = list(
"string"
),
SecurityGroupIds = list(
"string"
)
),
IamRoleArn = "string",
EnableDefaultInternetAccess = TRUE|FALSE,
DomainJoinInfo = list(
DirectoryName = "string",
OrganizationalUnitDistinguishedName = "string"
),
AppstreamAgentVersion = "string",
Tags = list(
"string"
),
AccessEndpoints = list(
list(
EndpointType = "STREAMING",
VpceId = "string"
)
)
)
}
}
\keyword{internal}
|
cbf2d243744331672c8106828bc2a708750142b5 | 065a6a5b55a67334fb18460519ac171d7631d789 | /R_scripts/DataPrep.R | 0f998bdbe5e7a4a559a5a7cb3e42a5115d9f059e | [] | no_license | Shrubner/restistutorial | c1add3cca3eb9c631651e6f7ae8df6543c5e5b4b | 243a5c8c22027d596c45745cdd529e77c219b5e7 | refs/heads/main | 2023-03-07T11:29:55.306357 | 2021-03-04T23:17:01 | 2021-03-04T23:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,135 | r | DataPrep.R | library(raster)
library(sp)
library(sf)
library(rgeos)
library(rgdal)
library(tidyverse)
hm.us <- raster("/Users/mattwilliamson/Analyses/ConservationResistance/Data/OriginalData/hm_fsum3_270/")
gap.status.WY <- st_read("~/Google Drive/My Drive/Data/Original Data/PAD_US2_1_GDB/PADUS_21_CombFeeDes.shp") %>%
filter(State_Nm=="WY" & GAP_Sts == "1" & GIS_Acres >50000) %>%
st_make_valid()
st_crs(gap.status.WY) <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs"
gap.status.WY <- gap.status.WY %>% st_transform(., st_crs(hm.us))
st_write(gap.status.WY, "Data/wy_gap1.shp", append=FALSE)
hm.crop <- crop(hm.us, gap.status.WY)
writeRaster(hm.crop, "Data/human_mod.tif")
elev <- getData('alt', country = "USA")
elev <- crop(elev[[1]], extent(-120, -102, 38, 50))
elev.p <- projectRaster(elev, crs = "+proj=aea +lat_0=37.5 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs", res= 270)
elev.crop <- crop(elev.p, hm.crop)
elev.p <- projectRaster(elev.crop, hm.crop)
writeRaster(elev.p, "Data/elevation_agg.tif", overwrite = TRUE)
|
8a534565c3ea7384bce53f2162090c8f9e2e8ccf | f61064bb7d0013f111123206b230482514141d9e | /R/sis_loglikelihood_complete.R | 5bc6a7eb6bbd9ee08fbac4879838a158f11e8383 | [] | no_license | nianqiaoju/agents | 6e6cd331d36f0603b9442994e08797effae43fcc | bcdab14b85122a7a0d63838bf38f77666ce882d1 | refs/heads/main | 2023-08-17T05:10:49.800553 | 2021-02-18T23:01:47 | 2021-02-18T23:01:47 | 332,890,396 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,051 | r | sis_loglikelihood_complete.R | #' @title Complete Likelihood for SIS model
#' @description computes the complete-data log-likelihood p(x, y) for the SIS
#'   model, i.e. when both the agent states and the observed counts are known
#' @param y a vector of observations (counts), one per time step
#' @param agent_states binary matrix of agent states, of size N (agents) by
#'   length(y) (time steps); column t + 1 holds the states at time t
#' @param model_config a list containing parameters (rho, alpha0, ...) and network structure
#' @return the complete-data log-likelihood (a scalar)
#' @export
sis_loglikelihood_complete <- function(y, agent_states, model_config){
  num_observations <- length(y)
  ## check if observations and hidden states are compatible
  ## NOTE(review): these checks only warn (they do not stop), and N is a free
  ## variable resolved from the calling/global environment -- presumably the
  ## number of agents; consider taking it from model_config instead.
  if(num_observations != dim(agent_states)[2]) warning('incorrect length of observations');
  if (N != dim(agent_states)[1]) warning('incorrect number of agents');
  ## observation densities: y[t] ~ Binomial(colSums(agent_states)[t], rho),
  ## where colSums counts the active (state 1) agents at each time step
  llik <- sum(dbinom(x = y, size = colSums(agent_states), prob = model_config$rho, log = TRUE ));
  ## transition densities
  ## t = 0: initial states are Bernoulli(alpha0)
  llik <- llik + logdbern_sum_cpp(model_config$alpha0, agent_states[,1]);
  for (t in 1 : (num_observations - 1)){
    ## transition probabilities from the states at time t - 1
    ## (column (t - 1) + 1; the "- 1 + 1" spells out the 0-based time index)
    a <- sis_get_alpha(agent_states[, t - 1 + 1], model_config);
    ## ...score the states at time t (column t + 1)
    llik <- llik + logdbern_sum_cpp(a, agent_states[,t + 1]);
  }
  return(llik)
}
8580886fcb0d215a2e0f4b82964dcb1e06e7f027 | d92752ea41593a0f8965cc10a94a4fe358df9bd0 | /man/multip.Rd | aa73e86896e07c7392ea9299ef3ce9659ad67b3e | [] | no_license | fhernanb/beergame | 13c20c32dcc1718d810dec9eeea3d020bcad21cf | dd7ee8118b25cbed23ca81d831c35abc89d517e4 | refs/heads/master | 2020-12-21T18:15:54.770598 | 2020-01-30T19:48:45 | 2020-01-30T19:48:45 | 236,519,557 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 464 | rd | multip.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multip.R
\name{multip}
\alias{multip}
\title{Function to multiply two values}
\usage{
multip(x, y)
}
\arguments{
\item{x}{this is the first value we want to multiply.}
\item{y}{this is the second value we want to multiply.}
}
\description{
This function multiplies two values
}
\examples{
# First example
multip(5, 10)
# Second example
a <- 5
b <- 15
res <- multip(x=a, y=b)
res
}
|
fa481a1795005f0927916ca77a7cde0a981976af | 6c1ad9221998bf18a654be33f79b9f5dec8ee0c8 | /R/publish.R | 61397983705e78e5c62eb56d052a3554ac857470 | [
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | jzwart/vizlab | 20624542e80589957c5a20619d23f48fe5d90531 | 43c6502dea6c8a7041d87ea0fb3eea126615410d | refs/heads/master | 2021-07-15T05:01:07.756308 | 2017-10-20T21:40:46 | 2017-10-20T21:40:46 | 106,702,748 | 0 | 0 | null | 2017-10-12T14:20:59 | 2017-10-12T14:20:58 | null | UTF-8 | R | false | false | 14,554 | r | publish.R | #' Peform the publish step to ready the viz for hosting
#'
#' Generic for the publish step: determines the type of \code{viz} and
#' dispatches to the matching method to produce the files that are served
#' up as the final viz
#'
#' @param viz a vizlab object, a list coercible to one, or a character id
#' @export
publish <- function(viz) UseMethod("publish")
#' publish the viz identified by a character id
#' @rdname publish
#' @export
publish.character <- function(viz) {
  # resolve the id to a viz object, coerce it to a publisher, and re-dispatch
  publish(as.publisher(as.viz(viz)))
}
#' publish a list representing a viz
#' @rdname publish
#' @export
publish.list <- function(viz) {
  # coerce the raw list to a viz object, then to a publisher, and re-dispatch
  publish(as.publisher(as.viz(viz)))
}
#' publish a page
#' @rdname publish
#' @export
publish.page <- function(viz) {
  # a page must declare both a template and a context
  checkRequired(viz, c("template", "context"))
  tmpl <- template(viz[['template']])

  # collect dependencies declared by the page and by its template
  deps <- gatherDependencyList(c(viz[['depends']], tmpl[['depends']]))

  # merge template context with page context, then overlay the site-wide
  # "info" block onto the merged context
  ctx <- replaceOrAppend(tmpl[['context']], viz[['context']])
  ctx[['info']] <- replaceOrAppend(getBlocks("info", keep.block = FALSE)[[1]], ctx[['info']])

  # flatten the dependency list so ids in the context can be looked up,
  # then substitute the expanded dependencies into the context
  deps <- c(deps, recursive = TRUE)
  ctx <- buildContext(ctx, deps)

  # render into the export location and report the path relative to the site
  out_file <- export(viz)
  render(tmpl, ctx, out_file)
  return(relativePath(out_file))
}
#' publish a section
#'
#' @details Sections are published individually but are returned invisibly
#' as text to be used directly.
#'
#' @importFrom whisker whisker.render
#' @rdname publish
#' @export
publish.section <- function(viz) {
  required <- c("template")
  checkRequired(viz, required)
  template <- template(viz[['template']])
  # TODO Watch out for cyclic depends
  dependencies <- gatherDependencyList(c(viz[['depends']], template[['depends']]))
  context <- replaceOrAppend(template[['context']], viz[['context']])
  context[['info']] <- replaceOrAppend(getBlocks("info", keep.block=F)[[1]], context[['info']])
  # flatten dependencies before lookups
  dependencies <- c(dependencies, recursive = TRUE)
  context <- buildContext(context, dependencies)
  # render the section HTML and keep it on the viz for downstream use
  viz[['output']] <- render(template, context)
  if (!is.null(viz[['analytics']])) {
    viz <- analytics(viz)
  }
  # Optionally also write a standalone embeddable page for this section;
  # resource URLs are rewritten relative to the embed subdirectory.
  if (!is.null(viz[['embed']]) && isTRUE(viz[['embed']])) {
    file <- export(viz)
    setupFoldersForFile(file)
    embedTmpl <- template("embed")
    context[['embed']] <- viz[['output']]
    # point css/js/images references one directory up for the embed page
    context[['resources']] <- lapply(context[['resources']], gsub, pattern = '(href="|src=")(css|js|images)',
                                     replacement = '\\1../\\2')
    render(embedTmpl, data = context, file = file)
    # viz[['output']] <- wrapEmbed(viz[['output']])
    # wrap or add embed links to page
  }
  return(viz[['output']])
}
#' publish a resource
#'
#' @details This copies static resources to the target directory, and invisibly
#' will return the preferred usage.
#'
#' The job of minification or css precompiling could also be added here, but
#' currently this is not handled.
#'
#' Also, templating the resources that make sense would be useful
#'
#' @rdname publish
#' @export
publish.resource <- function(viz) {
  # figure out resource type and hand to resource handler
  # going to start out with simple images
  destFile <- export(viz)
  if (!is.null(destFile)) {
    dir.create(dirname(destFile), recursive = TRUE, showWarnings = FALSE)
    srcFile <- viz[['location']]
    # resources packaged with vizlab itself are resolved inside the package
    if (!is.null(viz[['packaging']]) && viz[['packaging']] == "vizlab") {
      srcFile <- system.file(srcFile, package = "vizlab")
    }
    file.copy(srcFile, destFile, overwrite = TRUE)
    viz[['relpath']] <- relativePath(destFile)
  } else {
    # no export target: mark relpath NA so downstream publishers emit nothing
    viz[['relpath']] <- NA
  }
  return(viz)
}
#' Image publishing
#'
#' Copies the image into place (via the resource method) and returns an
#' HTML \code{<img>} tag for it, or NULL if there is nothing to publish.
#'
#' @rdname publish
#' @export
publish.img <- function(viz) {
  required <- c("alttext", "relpath", "title")
  # dispatch to publish.resource first so relpath gets populated
  viz <- NextMethod()
  checkRequired(viz, required)
  html <- NULL
  if (!is.na(viz[['relpath']])) {
    alt.text <- viz[['alttext']]
    relative.path <- viz[['relpath']]
    title.text <- viz[['title']]
    # plain if/else instead of scalar ifelse(): ifelse() is meant for
    # vectors and evaluates both branches
    img.class <- if (is.null(viz[['class']])) {
      ""
    } else {
      paste0(" class=\"", paste0(viz[['class']], collapse = " "), "\"")
    }
    # "?_c=" adds a cache-busting token so redeployed images are re-fetched
    html <- sprintf('<img src="%s?_c=%s" alt="%s" title="%s"%s />', relative.path, uniqueness(),
                    alt.text, title.text, img.class)
  }
  return(html)
}
#' Favicon resource
#'
#' @rdname publish
#' @export
publish.ico <- function(viz) {
  # let the resource method copy the file and fill in relpath
  viz <- NextMethod()
  checkRequired(viz, c("relpath"))
  if (is.na(viz[['relpath']])) {
    return(NULL)
  }
  # emit the favicon link tag with a cache-busting query string
  paste0('<link rel="icon" type="image/ico" href="', viz[['relpath']],
         '?_c=', uniqueness(), '"/>')
}
#' Add a font to the page
#'
#' @rdname publish
#' @importFrom utils URLencode
#' @export
publish.googlefont <- function(viz) {
  checkRequired(viz, c("family", "weight"))
  # multiple families are pipe-separated, weights comma-separated,
  # per the Google Fonts CSS API query format
  family_spec <- paste(URLencode(viz[["family"]]), collapse = "|")
  weight_spec <- paste(viz[["weight"]], collapse = ",")
  paste0('<link href="//fonts.googleapis.com/css?family=', family_spec,
         ':', weight_spec, '" rel="stylesheet" type="text/css">')
}
#' javascript publishing
#' TODO allow for cdn js
#'
#' @rdname publish
#' @export
publish.js <- function(viz) {
  # publish.resource copies the file and sets relpath
  viz <- NextMethod()
  checkRequired(viz, c("relpath", "mimetype"))
  if (is.na(viz[['relpath']])) {
    return(NULL)
  }
  # script tag with a cache-busting query parameter
  paste0('<script src="', viz[['relpath']], '?_c=', uniqueness(),
         '" type="text/javascript"></script>')
}
#' css publishing
#'
#' @rdname publish
#' @export
publish.css <- function(viz) {
  # publish.resource copies the stylesheet and sets relpath
  viz <- NextMethod()
  checkRequired(viz, c("relpath", "mimetype"))
  if (is.na(viz[['relpath']])) {
    return(NULL)
  }
  # stylesheet link tag with a cache-busting query parameter
  paste0('<link href="', viz[['relpath']], '?_c=', uniqueness(),
         '" rel="stylesheet" type="text/css" />')
}
#' svg publishing, may return NULL
#'
#' from here on out will use svg-inject to get svg to dom
#'
#' also, svg will support landscape or portrait for mobile support
#'
#' @rdname publish
#' @export
publish.svg <- function(viz) {
  required <- c("relpath", "title", "alttext")
  # copy the svg into place first (publish.resource) so relpath is set
  viz <- NextMethod()
  checkRequired(viz, required)
  # Map the optional orientation field onto vizlab's responsive CSS
  # classes; anything other than "landscape"/"portrait" (including
  # unset) gets both classes, matching the previous if/else chain.
  orientation <- switch(
    if (is.null(viz[['orientation']])) "" else viz[['orientation']],
    landscape = "vizlab-landscape",
    portrait = "vizlab-portrait",
    "vizlab-landscape vizlab-portrait"
  )
  if (!is.null(viz[['inline']])) {
    warning("inline option is deprecated, all SVGs now use svg-inject")
  }
  output <- NULL
  if (!is.na(viz[['relpath']])) {
    output <- sprintf('<img class="%s" src="%s" title="%s" alt="%s" />',
                      orientation, viz[['relpath']], viz[['title']], viz[['alttext']])
  }
  return(output)
}
#' Footer publishing
#'
#' Fills any missing name/url/thumbnail fields of the footer's "vizzies"
#' entries from getVizInfo(), then renders the footer template.
#'
#' @importFrom utils download.file
#' @rdname publish
#' @export
publish.footer <- function(viz) {
  #should also check blogs? Or one or the other?
  checkRequired(viz, required = "vizzies")
  template <- template(viz[['template']])
  dependencies <- gatherDependencyList(c(viz[['depends']], template[['depends']]))
  context <- replaceOrAppend(template[['context']], viz[['context']])
  # flatten dependencies before lookups
  dependencies <- c(dependencies, recursive = TRUE)
  context <- buildContext(context, dependencies)
  #add info from viz.yaml to context to inject into template
  vizzies <- viz$vizzies
  # seq_along() instead of 1:length(): an empty vizzies list no longer
  # iterates over c(1, 0)
  for (v in seq_along(vizzies)) {
    info <- getVizInfo(repo=vizzies[[v]]$repo, org=vizzies[[v]]$org)
    if (is.null(vizzies[[v]]$name)){ # don't replace it if it is already set
      vizzies[[v]]$name <- info$context$name
    }
    if (is.null(vizzies[[v]]$url)){
      vizzies[[v]]$url <- info$context$path
    }
    if (is.null(vizzies[[v]]$thumbLoc)){
      vizzies[[v]]$thumbLoc <- info$context$thumbnail
    }
  }
  context[['blogsInFooter']] <- viz$blogsInFooter
  context[['blogs']] <- viz$blogs
  context[['vizzies']] <- vizzies
  viz[['output']] <- render(template, data = context)
  if (!is.null(viz[['analytics']])) {
    viz <- analytics(viz)
  }
  return(viz[['output']])
}
#' Social links publishing
#'
#' Renders the social-links template; link URLs may come from the template
#' context or from a "social-links" dependency, whose entries are
#' normalized to the *Link names the template expects.
#'
#' @importFrom utils download.file
#' @rdname publish
#' @export
publish.social <- function(viz) {
  template <- template(viz[['template']])
  context <- replaceOrAppend(template[['context']], viz[['context']])
  if("depends" %in% names(viz)){
    if("social-links" %in% viz[["depends"]]){
      links <- readDepends(viz)[["social-links"]]
      # accept either the bare ("facebook") or suffixed ("facebookLink")
      # key in the dependency; normalize and copy into the context
      if(any(c("facebook","facebookLink") %in% names(links))){
        names(links)[names(links) == "facebookLink"] <- "facebook"
        context[["facebookLink"]] <- links[["facebook"]]
      }
      if(any(c("twitter","twitterLink") %in% names(links))){
        names(links)[names(links) == "twitterLink"] <- "twitter"
        context[["twitterLink"]] <- links[["twitter"]]
      }
      if(any(c("github","githubLink") %in% names(links))){
        names(links)[names(links) == "githubLink"] <- "github"
        context[["githubLink"]] <- links[["github"]]
      }
      if(any(c("embed","embedLink") %in% names(links))){
        names(links)[names(links) == "embedLink"] <- "embed"
        context[["embedLink"]] <- links[["embed"]]
      }
      # social-links has been consumed; drop it before dependency expansion
      viz[['depends']] <- viz[['depends']][viz[['depends']] != "social-links"]
      template[["depends"]] <- template[["depends"]][names(template[["depends"]]) != "social-links"]
    }
  }
  dependencies <- gatherDependencyList(c(viz[['depends']], template[['depends']]))
  # flatten dependencies before lookups
  dependencies <- c(dependencies, recursive = TRUE)
  context <- buildContext(context, dependencies)
  # template flag: whether an embed link should be shown
  context[["mainEmbed"]] <- "embedLink" %in% names(context)
  viz[['output']] <- render(template, data = context)
  if (!is.null(viz[['analytics']])) {
    viz <- analytics(viz)
  }
  return(viz[['output']])
}
#' Header publishing
#' @rdname publish
#' @export
publish.header <- function(viz) {
  # a header is rendered exactly like any other section
  publish.section(viz)
}
#' publish landing page
#'
#' Builds a landing page for an organization: collects info for every
#' repo in the org, sorts it newest-first, then constructs a synthetic
#' "page" viz and publishes it.
#'
#' @rdname publish
#' @export
publish.landing <- function(viz){
  repos <- getRepoNames(viz[['org']])
  viz_info <- lapply(repos, getVizInfo, org=viz[['org']])
  names(viz_info) <- repos
  # rm null (repos for which no viz info could be retrieved)
  viz_info <- viz_info[!sapply(viz_info, is.null)]
  # sort reverse chronological
  viz_info <- viz_info[order(sapply(viz_info, '[[', 'publish-date'), decreasing=TRUE)]
  # build a synthetic page viz that depends on every collected viz
  pageviz <- viz
  names(pageviz$depends) <- pageviz$depends
  pageviz$depends <- as.list(pageviz$depends)
  pageviz$depends <- append(pageviz$depends, viz_info)
  pageviz$context <- list(sections = c("owiNav", "header", names(viz_info)), #names of section ids
                          resources = c("lib-vizlab-favicon", "landingCSS", "owiCSS", "jquery", "appJS"),
                          header = "usgsHeader",
                          footer = "usgsFooter",
                          info = list(`analytics-id` = "UA-78530187-11"))
  pageviz$publisher <- "page"
  pageviz <- as.viz(pageviz)
  pageviz <- as.publisher(pageviz) #maybe/maybe not
  publish(pageviz)
}
#' publish template
#'
#' Templates are consumed by pages and sections rather than producing
#' output of their own, so publishing one is intentionally a no-op
#' (returns NULL).
#'
#' @rdname publish
#' @export
publish.template <- function(viz) {
  # nothing for now
}
#' check dimensions and size, publish thumbnail
#'
#' Validates the thumbnail against the target platform's size limits,
#' publishes it as a resource, and annotates the viz with its public url
#' and pixel dimensions.
#'
#' @rdname publish
#' @export
publish.thumbnail <- function(viz){
  checkRequired(viz, required = c("for", "location"))
  #compliance
  #dimensions in pixels, file sizes in bytes!
  if(tolower(viz[['for']]) == "facebook") {
    maxSize <- 8388608
    checkHeight <- 820
    checkWidth <- 1560
  } else if(tolower(viz[['for']]) == "twitter") {
    maxSize <- 1048576
    checkHeight <- 300
    checkWidth <- 560
  } else { #landing
    maxSize <- 1048576
    checkHeight <- 400
    checkWidth <- 400
  }
  dims <- checkThumbCompliance(file = viz[['location']], maxSize = maxSize,
                               checkHeight = checkHeight, checkWidth = checkWidth)
  #send to other publishers if all ok
  viz <- NextMethod()
  viz[['url']] <- pastePaths(getVizURL(), viz[['relpath']])#need to add slash between?
  viz[['width']] <- dims[['width']]
  viz[['height']] <- dims[['height']]
  # Bug fix: the function previously ended on an assignment and therefore
  # returned only the height value; return the annotated viz instead,
  # consistent with publish.resource().
  return(viz)
}
#' helper to check thumbnail compliance
#' @importFrom imager load.image width height
#' @param file char Name of thumbnail file
#' @param maxSize numeric Max size in bytes
#' @param checkHeight numeric Height in pixels to enforce
#' @param checkWidth numeric Width in pixels to enforce
#' @return named numeric vector with elements \code{width} and \code{height}
checkThumbCompliance <- function(file, maxSize, checkHeight, checkWidth) {
  # Bug fix: file.info() returns a one-row data.frame, so the previous
  # `fileSize > maxSize` compared every column (mode, mtime, ...) against
  # maxSize; extract the size in bytes explicitly.
  fileSize <- file.info(file)$size
  im <- imager::load.image(file)
  width <- imager::width(im)
  height <- imager::height(im)
  if (fileSize > maxSize || width != checkWidth || height != checkHeight) {
    stop(paste("Thumbnail", file, "does not meet site requirements"))
  }
  return(c(width = width, height = height))
}
#' coerce to a publisher
#' @param viz object describing publisher
#' @param ... not used, just for consistency
#' @return viz with publisher-specific classes prepended to its class vector
#' @export
as.publisher <- function(viz, ...) {
  # default to a resource; use a plain if/else with `%in% names()` instead
  # of scalar ifelse() + exists() on a list
  publisher <- if ("publisher" %in% names(viz)) viz[['publisher']] else "resource"
  class(viz) <- c("publisher", class(viz))
  if (publisher %in% c("resource", "thumbnail")) {
    viz <- as.resource(viz)
  } else if (publisher == "template") {
    viz <- as.template(viz)
  } else {
    # custom publisher: just prepend its class for S3 dispatch
    class(viz) <- c(publisher, class(viz))
  }
  return(viz)
}
#' coerce to resource
#'
#' Determines the resource subclass from the viz's mimetype and prepends
#' the appropriate classes; resolves package-internal file locations.
#'
#' @param viz vizlab object
#' @param ... not used, following convention
#' @importFrom utils packageName
#' @export
as.resource <- function(viz, ...) {
  required <- c("mimetype", "location")
  checkRequired(viz, required)
  mimetype <- viz[['mimetype']]
  resource <- lookupMimetype(mimetype)
  # if the file isn't on disk, fall back to a copy bundled in this package
  if (!file.exists(viz[['location']])) {
    internal <- system.file(viz[['location']], package = packageName())
    if (file.exists(internal)) {
      viz[['location']] <- internal
    }
  }
  # unknown mimetype: warn and treat the resource as generic data
  if(length(resource) == 0){
    warning(mimetype, " will be treated as data: ", viz[['id']])
    resource <- "data"
  }
  if ("publisher" %in% names(viz) && viz[['publisher']] == "thumbnail") {
    class(viz) <- c("thumbnail","resource", class(viz))
  } else {
    class(viz) <- c(resource, "resource",class(viz))
  }
  return(viz)
}
|
61b79e24426cce7bc14ea20931f2f6f26bf7389f | 357d90c25253b342454d423c24c5ae8df4b7ec92 | /Block4/B4_SEM_mroz.R | fd2e6edeb940bdfb93229a6c4cacbd6597af63e1 | [] | no_license | formanektomas/4EK608_4EK416 | 3bd6740f7beba7f1bb188f0dec57e0be8b041f1a | 5d6dde192123ab3f040773ad033aed756e54b9ec | refs/heads/master | 2023-04-27T14:48:28.817712 | 2023-04-16T14:44:25 | 2023-04-16T14:44:25 | 149,155,768 | 3 | 7 | null | 2020-12-23T08:17:24 | 2018-09-17T16:33:54 | R | UTF-8 | R | false | false | 5,443 | r | B4_SEM_mroz.R | #### SEMs - specification, identification and estimation ####
#
# Teaching script: simultaneous-equations model (SEM) for labor supply.
# Estimates a two-equation hours/wage system by OLS, 2SLS and 3SLS,
# checks identification via reduced forms, and evaluates instruments
# with ivreg() diagnostics.
#
#
### Example 16.3 & 16.5: Based on Wooldridge: Introductory econometrics,
#
# Labor supply of married, working women
#
#
# Data
rm(list=ls())
mroz <- read.csv('mroz.csv')
# We limit our data to working women only (inlf == 1: in labor force)
mroz <- mroz[mroz$inlf == 1, ]
#
# Model data:
#
# hours - hours worked, 1975
# wage - hourly wage
# educ - years of schooling
# age - woman's age in years
# kidslt6 - number of kids < 6 years old
# nwifeinc - family income with "wage" variable excluded
# exper - actual labor market experience
# expersq - exper^2
#
# Basic data plots
# Scatterplot matrix of the data used in our model
plot(mroz[, c(2,3,4,5,6,7,19,20,22)])
#
#
library(systemfit) # install.packages("systemfit")
#
# Specify a system of equations and instruments:
eqHours <- hours ~ log(wage) + educ + age + kidslt6 + nwifeinc
eqWage <- log(wage) ~ hours + educ + exper + expersq
instr <- ~ educ + age + kidslt6 + nwifeinc + exper + expersq
# If no labels are provided, equations are named automatically
Wage.model <- list(Hours = eqHours, Wages = eqWage)
#
#
# We start by estimating the model using OLS (interdependencies ignored)
fitOls <- systemfit(Wage.model, data = mroz)
summary(fitOls)
round(coef(summary(fitOls)), digits = 4)
#
#
#
#
# Before we try 2SLS estimation of the SEM, we want to make sure that both equations
# are identified:
#
# Step 1:
# Estimate the reduced forms for both dependent variables:
# Reduced form for hours:
red.hours <- lm(hours ~ educ + age + kidslt6 + nwifeinc + exper + expersq, data=mroz)
# Reduced form for log(wage)
red.wage <- lm(log(wage) ~ educ + age + kidslt6 + nwifeinc + exper + expersq, data=mroz)
#
# Step 2: Verify identification of equation 1 (eqHours):
summary(red.wage)
# eqHours is identified if either exper or expersq coefficients are not zero
summary(red.hours)
#
## Assignment 1
## What is the identification condition for equation eqWage?
## Is the equation eqWage identified?
#
#
#
# Next, we estimate the model using 2SLS method
fit2sls <- systemfit(Wage.model, method = "2SLS", inst = instr, data = mroz)
summary(fit2sls)
round(coef(summary(fit2sls)), digits = 4)
#
#
# We can also estimate the model using the 3SLS method
fit3sls <- systemfit(Wage.model, method = "3SLS", inst = instr, data = mroz)
summary(fit3sls)
round(coef(summary(fit3sls)), digits = 4)
#
# The estimated models may be compared using BIC
BIC(fitOls)
BIC(fit2sls)
BIC(fit3sls)
#
#
#
#
# To assess the quality of instruments and endogeneity of regressors, we need to
# use the ivreg command (2SLS method) from the {AER} package:
library('AER') # install.packages('AER')
#
# equation eqHours
eqHours.iv <- ivreg(hours ~ log(wage) + educ + age + kidslt6 + nwifeinc
                    | educ + age + kidslt6 + nwifeinc + exper + expersq,
                    data = mroz)
summary(eqHours.iv, vcov = sandwich, diagnostics = T)
#
#
## Assignment 2
## Comment on the results of Weak instruments test,
## Wu-Hausman test and Sargan test
#
#
## Assignment 3
## By analogy to lines 98 - 101, evaluate the instruments for equation eqWage.
#
#
#
#
#
#
#
#
#
#
### Computer exercise C16.2: Based on Wooldridge: Introductory econometrics,
#
# Labor supply of married, working women
#
#
# (i) We re-estimate the SEM with log(hours) used instead of "hours"
#
# Specify a system of equations and instruments:
eqHours2 <- log(hours) ~ log(wage) + educ + age + kidslt6 + nwifeinc
eqWage2 <- log(wage) ~ log(hours) + educ + exper + expersq
instr2 <- ~ educ + age + kidslt6 + nwifeinc + exper + expersq
# If no labels are provided, equations are named automatically
Wage.model2 <- list(Hours = eqHours2, Wages = eqWage2)
#
fit2s2s.c16.2 <- systemfit(Wage.model2, method = "2SLS", inst = instr2, data = mroz)
#
summary(fit2s2s.c16.2)
round(coef(summary(fit2s2s.c16.2)), digits = 4)
#
#
# (ii) We allow educ to be endogenous because of omitted ability.
# We use motheduc and fatheduc as IVs for educ.
#
eqHours3 <- log(hours) ~ log(wage) + educ + age + kidslt6 + nwifeinc
eqWage3 <- log(wage) ~ log(hours) + educ + exper + expersq
instr3 <- ~ age + kidslt6 + nwifeinc + exper + expersq + motheduc + fatheduc
#
# Note that we go beyond the standard SEM definition and identification paradigm,
# as motheduc and fatheduc are not present in the set of SEM regressors...
#
Wage.model3 <- list(Hours = eqHours3, Wages = eqWage3)
#
# NOTE(review): Wage.model3 is defined above but Wage.model2 is fitted here;
# the two are formula-identical, so results match, but Wage.model3 was
# presumably intended - confirm.
fit2s2s.c16.2.ii <- systemfit(Wage.model2, method = "2SLS", inst = instr3, data = mroz)
#
summary(fit2s2s.c16.2.ii)
round(coef(summary(fit2s2s.c16.2.ii)), digits = 4)
#
#
# (iii) We use the ivreg command (2SLS method) from the {AER} package
# to test the IVs-setup introduced in (ii):
#
eqHours.iv3 <- ivreg(log(hours) ~ log(wage) + educ + age + kidslt6 + nwifeinc
                     | age + kidslt6 + nwifeinc + exper + expersq + motheduc + fatheduc,
                     data = mroz)
summary(eqHours.iv3, vcov = sandwich, diagnostics = T)
#
eqWages.iv3 <- ivreg(log(wage) ~ log(hours) + educ + exper + expersq
                     | age + kidslt6 + nwifeinc + exper + expersq + motheduc + fatheduc,
                     data = mroz)
summary(eqWages.iv3, vcov = sandwich, diagnostics = T)
#
#
#
#
#
06958f7cbac92dde04d0545963629d024af23616 | 89bfd2ed18de2845834ff4754f3be23bf45352cf | /Fig.2_Correlation_analysis/TMEimmune32.immunePlot.R | b7a2d538fa019ff1347623e420dc0bcc4a5f5c30 | [] | no_license | luyuitng/ICB_projects | 060162beeed03d5aeecf286f53f015483bb6076f | 2966dee54358759f3692a57df0dabb4a842cbc9e | refs/heads/main | 2023-04-22T06:53:22.576300 | 2021-05-08T15:47:34 | 2021-05-08T15:47:34 | 365,549,511 | 1 | 0 | null | null | null | null | GB18030 | R | false | false | 15,646 | r | TMEimmune32.immunePlot.R | #install.packages("corrplot")
library(corrplot) # correlation-plot package
setwd("E:\\Lenvatinib\\corroplot") # working directory with the input matrices

# Read a tab-separated correlation matrix (first column = row names).
read_corr_matrix <- function(txt_file) {
  as.matrix(read.table(txt_file, sep = "\t", header = TRUE,
                       row.names = 1, check.names = FALSE))
}

# ---- Pan-cancer plot: pies in the upper triangle, numbers below ----
immune <- read_corr_matrix("Pancancer.txt")
pdf(file = "PanCancer.pdf", width = 8, height = 8)
corrplot(immune,
         order = "original",
         type = "upper",
         tl.pos = "lt",    # labels on left and top
         method = "pie",
         tl.col = "black",
         bg = "white",
         tl.cex = 1.1,
         title = "PanCancer", mar = c(0, 0, 1, 0),
         col = colorRampPalette(c("#00305d", "white", "#9c1915"))(50))
# overlay the numeric coefficients on the lower triangle
corrplot(immune,
         add = TRUE,
         type = "lower",
         method = "number",
         order = "original",
         col = "black",
         diag = FALSE,
         tl.pos = "n",
         cl.pos = "n",
         number.cex = 0.7)
dev.off()

# ---- Per-cancer plots ----
# One lower-triangle pie plot per cancer type; the original script
# repeated these identical corrplot() settings 21 times, so they are
# factored into a single helper.
plot_cancer_corr <- function(cancer, out_file = paste0(cancer, ".pdf")) {
  mat <- read_corr_matrix(paste0(cancer, ".txt"))
  pdf(file = out_file, width = 8, height = 8)
  corrplot(mat,
           order = "original",
           type = "lower",
           method = "pie",
           tl.col = "black",
           tl.cex = 1.1,
           bg = "white",
           number.cex = 0.9,
           addCoef.col = "gray",   # overlay coefficients in gray
           title = cancer, mar = c(0, 0, 1, 0),
           col = colorRampPalette(c("blue", "white", "red"))(50))
  dev.off()
}

# BRCA keeps its historical output file name "1BRCA.pdf".
plot_cancer_corr("BRCA", out_file = "1BRCA.pdf")
for (cancer in c("BLCA", "ESCA", "GBM", "HNSC", "KIRC", "KIRP", "LIHC",
                 "LUAD", "LUSC", "OV", "PAAD", "PRAD", "READ", "SKCM",
                 "STAD", "THCA", "UCEC", "CHOL", "CESC", "COAD")) {
  plot_cancer_corr(cancer)
}
|
514d81399f9b8f9ba00344dc1f7b3a5bf3a92628 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Surrogate/examples/AA.MultS.Rd.R | 20f55bee78ee02458de6b913e53a9c0153bce055 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 879 | r | AA.MultS.Rd.R | library(Surrogate)
### Name: AA.MultS
### Title: Compute the multiple-surrogate adjusted association
### Aliases: AA.MultS
### Keywords: Adjusted Association Causal-Inference framework
###   Counterfactuals Single-trial setting Sensitivity ICA Multiple
###   surrogates

### ** Examples

# Example workflow for AA.MultS (Surrogate package): estimate the
# multiple-surrogate adjusted association from the ARMD trial data.
data(ARMD.MultS)
# Regress the true endpoint T on treatment Z, and each surrogate S1..Sk on Z;
# the residuals of these regressions are used to build the
# treatment-adjusted covariance matrix.
Res_T <- residuals(lm(Diff52~Treat, data=ARMD.MultS))
Res_S1 <- residuals(lm(Diff4~Treat, data=ARMD.MultS))
Res_S2 <- residuals(lm(Diff12~Treat, data=ARMD.MultS))
Res_S3 <- residuals(lm(Diff24~Treat, data=ARMD.MultS))
Residuals <- cbind(Res_T, Res_S1, Res_S2, Res_S3)
# Covariance matrix of the residuals, Sigma_gamma
Sigma_gamma <- cov(Residuals)
# Conduct the analysis (N = sample size, Alpha = significance level)
Result <- AA.MultS(Sigma_gamma = Sigma_gamma, N = 188, Alpha = .05)
# Explore results
summary(Result)
|
7a518fa7b7526e8029fcdf8da4b0eec4b06dd1d1 | 42bfe23f85ead532e6b437c247879123c342878d | /man/plot_structure.Rd | 2636fc9a7bb55dc662f0173bcdf5f41840c5ad8a | [] | no_license | ldutoit/snpR | b858e82e6a94bc3e7d52aef2cd1ae3f83dc93563 | a4a896d0a822f1a1d3c177601ba2620825b0eabf | refs/heads/master | 2023-03-16T22:03:28.929189 | 2021-02-11T01:51:48 | 2021-02-11T01:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 8,359 | rd | plot_structure.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.R
\name{plot_structure}
\alias{plot_structure}
\title{Create STRUCTURE-like cluster plots}
\usage{
plot_structure(
x,
facet = NULL,
facet.order = NULL,
k = 2,
method = "snmf",
reps = 1,
iterations = 1000,
I = NULL,
alpha = 10,
qsort = "last",
qsort_K = "last",
clumpp = T,
clumpp_path = "/usr/bin/CLUMPP.exe",
clumpp.opt = "greedy",
ID = NULL,
viridis.option = "viridis",
alt.palette = NULL,
t.sizes = c(12, 12, 12),
...
)
}
\arguments{
\item{x}{snpRdata object, list of Q matrices (sorted by K in the first level
and run in the second), or a character string designating a pattern
matching Q matrix files in the current working directories.}
\item{facet}{character, default NULL. If provided, individuals will not be
noted on the x axis. Instead, the levels of the facet will be noted. Only a
single, simple, sample specific facet may be provided. Individuals must be
sorted by this facet in x. If Q matrices are provided (either directly or
via file path), this should instead be a vector of group identities for
each individual (populations, etc.).}
\item{facet.order}{character, default NULL. Optional order in which the
levels of the provided facet should appear on the plot, left to right.}
\item{k}{numeric, default 2. The maximum of k (number of clusters) for which
to run the clustering/assignment algorithm. The values 2:k will be run.}
\item{method}{character, default "snmf". The clustering/assignment method to
run. Options: \itemize{\item{snmf: } sNMF (sparse Non-Negative Matrix
Factorization). \item{snapclust: } Maximum-likelihood genetic clustering.}
See \code{\link[LEA]{main_sNMF}} or
\code{\link[adegenet]{snapclust.choose.k}} for details, respectively.}
\item{reps}{numeric, default 1. The number of independent clustering
repetitions to run.}
\item{iterations}{numeric or Inf, default 1000. For snapclust, the maximum
number of iterations to run.}
\item{I}{numeric or NULL, default NULL. For snmf, how many SNPs should be
used to initialize the search? Initializing with a subset of the total SNPs
can radically speed up computation time for large datasets.}
\item{alpha}{numeric, default 10. For sNMF, determines the regularization
parameter. For small datasets, this can have a large effect, and should
probably be larger than the default. See documentation for
\code{\link[LEA]{main_sNMF}}.}
\item{qsort}{character, numeric, or FALSE, default "last". Determines if
individuals should be sorted (possibly within facet levels) by cluster
assignment proportion. If not FALSE, determines which cluster to use for
sorting (1:k). If "last" or "first" sorts by those clusters.}
\item{qsort_K}{numeric or character, default "last". If qsorting is
performed, determines the reference k value by which individuals are
sorted. If "first" or "last", sorts by k = 2 or k = k, respectively.}
\item{clumpp}{logical, default T. Specifies if CLUMPP should be run to
collapse results across multiple reps. If FALSE, will use only the first
rep for plotting.}
\item{clumpp_path}{character, default "/usr/bin/CLUMPP.exe". Path to the
clumpp executable, required if clumpp = T.}
\item{clumpp.opt}{character, default "greedy". Designates the CLUMPP method
to use. Options: \itemize{ \item{fullsearch: } Search all possible
configurations. Slow. \item{greedy: } The standard approach. Slow for large
datasets at high k values. \item{large.k.greedy: } A fast but less accurate
approach. } See CLUMPP documentation for details.}
\item{ID}{character or NULL, default NULL. Designates a column in the sample
metadata containing sample IDs.}
\item{viridis.option}{character, default "viridis". Viridis color scale
option. See \code{\link[ggplot2]{scale_gradient}} for details.}
\item{alt.palette}{character or NULL, default NULL. Optional palette of colors
to use instead of the viridis palette.}
\item{t.sizes}{numeric, default c(12, 12, 12). Text sizes, given as
c(strip.title, axis, axis.ticks).}
\item{...}{additional arguments passed to either \code{\link[LEA]{main_sNMF}}
or \code{\link[adegenet]{snapclust.choose.k}}.}
}
\value{
A list containing: \itemize{\item{plot: } A ggplot object.
\item{data: } A nested list of the raw Q matrices, organized by K and then
by run. \item{plot_data: } The raw data used in constructing the ggplot.
\item{K_plot: } A data.frame containing the value suggested for use in K
selection vs K value for the selected method.}
}
\description{
Creates ggplot-based stacked barcharts of assignment probabilities (Q) into
an arbitrary 'k' number of clusters like those produced by the program
STRUCTURE. Runs for each value of k between 2 and the given number.
}
\details{
Individual cluster assignment probabilities can be calculated using several
different methods: \itemize{\item{snmf: } sNMF (sparse Non-Negative Matrix
Factorization). \item{snapclust: } Maximum-likelihood genetic clustering.}
These methods are not re-implemented in R, instead, this function calls the
\code{\link[LEA]{main_sNMF}} and \code{\link[adegenet]{snapclust.choose.k}}
functions instead. Please cite the references noted in those functions if
using this function. For snapclust, the "ward" method is used to initialize
clusters if one rep is requested, otherwise the clusters are started randomly
each rep. Other methods can be used by providing pop.ini as an additional
argument as long as only one rep is requested.
Multiple different runs can be conducted using the 'reps' argument, and the
results can be combined for plotting across all of these reps using the
clumpp option. This option calls the CLUMPP software package in order to
combine the proportion of population membership across multiple runs via
\code{\link[pophelper]{clumppExport}}. Again, please cite both CLUMPP and
pophelper if using this option.
Since CLUMPP is run independently for each value of K, cluster identities
often "flip" between K values. For example individuals that are grouped into
cluster 1 and K = 3 may be grouped into cluster 2 at K = 4. To adjust this,
cluster IDs are iteratively adjusted across K values by flipping IDs such
that the Euclidean distances between clusters at K and K - 1 are minimized.
This tends to produce consistent cluster IDs across multiple runs of K.
Individuals can be sorted by membership proportion into different
clusters within populations using the qsort option.
Since the clustering and CLUMPP processes can be time consuming and outside
tools (such as NGSadmix or fastSTRUCTURE) may be prefered, a nested list of Q
matrices, sorted by K and then rep or a character string giving a pattern
matching saved Q matrix files in the current working directory may be provided
directly instead of a snpRdata object. Note that the output for this
function, if run on a snpRdata object, will return a properly formatted list
of Q files (named 'data') in addition to the plot and plot data. This allows
for the plot to be quickly re-constructed using different sorting parameters
or facets. In these cases, the facet argument should instead be a vector of
group identifications per individuals.
Note that several files will be created in the working directory when using
this function that are not automatically cleaned after use.
}
\examples{
# basic sNMF
plot_structure(stickSNPs, "pop")
}
\references{
Frichot E, Mathieu F, Trouillon T, Bouchard G, Francois O.
(2014). Fast and Efficient Estimation of Individual Ancestry Coefficients.
\emph{Genetics}, 194(4): 973–983.
Frichot, Eric, and Olivier François (2015). LEA: an R package for
landscape and ecological association studies. \emph{Methods in Ecology and
Evolution}, 6(8): 925-929.
Beugin, M. P., Gayet, T., Pontier, D., Devillard, S., & Jombart,
T. (2018). A fast likelihood solution to the genetic clustering problem.
\emph{Methods in ecology and evolution}, 9(4), 1006-1016.
Francis, R. M. (2017). pophelper: an R package and web app to
analyse and visualize population structure. \emph{Molecular ecology
resources}, 17(1), 27-32.
Jakobsson, M., & Rosenberg, N. A. (2007). CLUMPP: a cluster
matching and permutation program for dealing with label switching and
multimodality in analysis of population structure. \emph{Bioinformatics},
23(14), 1801-1806.
}
\author{
William Hemstrom
}
|
12366208e598ef51f621900b103eced051018dc0 | 98b478b16df3b97d7e7d9b02d94c6f93975ed22d | /sectiopn 3 Matrix operation.R | 6654854d4d65f9a4f7446347aa1543afa53af262 | [] | no_license | nyaundid/R-For-Data-Science-With-Real- | 8ee549322eadc556e839aa7d35c46ec6e45b4957 | 2ffd3e011df69ec8c88e2ae17d8aa90399ad75cb | refs/heads/main | 2023-03-30T18:31:53.914565 | 2021-04-12T03:07:11 | 2021-04-12T03:07:11 | 357,039,462 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 212 | r | sectiopn 3 Matrix operation.R |
Games
rownames(Games)
colnames(Games)
Games["LeBronJames", "2012"]
FieldGoals
#round 1 number after decimal average score per game
round (FieldGoals / Games, 1)
round(MinutesPlayed / Games)
|
054d0a1b767b094c65280043ff3ea56e3edbb43c | ad521cc18a8b05b494ccd0d654a1c50c329c40e2 | /man/uaparser.Rd | 125bc54162485a03d9520381bd7e93c856f704f6 | [
"MIT"
] | permissive | artemklevtsov/uap-r | e79af62da84cd61bbb6d3814e1cd39ba0b6d15ea | b90070e9b68f2820f4e2659d2ae1d842058f78f4 | refs/heads/master | 2020-11-27T08:47:37.183200 | 2017-07-07T02:05:09 | 2017-07-07T02:05:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 290 | rd | uaparser.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/uaparser.R
\docType{package}
\name{uaparser}
\alias{uaparser}
\alias{uaparser-package}
\title{Parse user agents in R}
\description{
This package provides a standardised user agent parser for use in R.
}
|
53d8151056bd070bfd8067f035851db3c21633ab | f69cdfa49008779593b0da4c4b4252f165a73d46 | /R/helpfunctions.R | 58a4ae23f16d135672767fb2e1a685d00051c039 | [] | no_license | MaartenCruyff/mse | dce4e054a27ee0e9c31e95f3bfe19d3444936f8f | 7b3e06ca87767c81c61ff74ae46c9c8d62dfa62c | refs/heads/master | 2021-12-14T21:04:05.253405 | 2021-11-30T13:53:21 | 2021-11-30T13:53:21 | 186,817,076 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,304 | r | helpfunctions.R |
bootx <- function(b0, f, N, f0, d, obs) {
  # One parametric-bootstrap replicate: resample the cell counts of `d`
  # from a multinomial with expected frequencies `f0` (total size `N`),
  # refit the Poisson log-linear model `f` on the observed cells
  # (obs == 1), starting the IRLS at the coefficients `b0`, and return
  # the fitted frequencies predicted for every cell of `d`.
  d$Freq <- rmultinom(n = 1, size = N, prob = f0 / N)
  observed_cells <- subset(d, obs == 1)
  refit <- glm(f, poisson, data = observed_cells, start = b0)
  predict(refit, newdata = d, type = "response")
}
ci_95 <- function(x) {
  # Empirical 95% interval: the 2.5th and 97.5th sample percentiles of
  # `x`, returned as a named vector c(min95, max95).
  setNames(quantile(x, probs = c(0.025, 0.975)), c("min95", "max95"))
}
# Ribbon plot of Nhat over Year from a summary table.
#
# temp: matrix/data.frame whose FIRST column is Year and which contains
#       columns Nhat, min95, max95 (as produced by generate_tables).
# ttl:  plot title.
# gr:   optional grouping factor, one entry per row of `temp`; when
#       supplied, one line/ribbon per group is drawn with a legend.
#
# Returns the ggplot object (not printed).  Assumes ggplot2 is attached
# by the caller for aes()/geom_*()/theme_* -- only ggplot() itself is
# namespace-qualified here.
gen_plot <- function(temp, ttl , gr = NULL){
  temp <- as.data.frame(temp)
  # NOTE(review): limx is computed but never used below -- candidate for removal.
  limx <- c(min(temp[, 1]), max(temp[, 1]))
  labx <- unique(temp[, 1])
  # Ungrouped variant: a single line with a single 95% ribbon.
  if (is.null(gr)){
    p <- ggplot2::ggplot(temp, aes(x = .data$Year, y = .data$Nhat))+
      geom_line() +
      ylab("Nhat") +
      geom_ribbon(aes(x = .data$Year, ymin = .data$min95, ymax = .data$max95),
                  linetype = 0, alpha = .2) +
      ggtitle(ttl) +
      ylim(0, NA) +
      scale_x_continuous(name = "year", breaks = labx, labels = labx) +
      theme_light() +
      theme(legend.title = element_blank())
  }
  # Grouped variant: one coloured line/ribbon per level of `gr`.
  if (!is.null(gr)){
    p <- ggplot2::ggplot(temp, aes(x = .data$Year, y = .data$Nhat, group = gr, col = gr))+
      geom_line() +
      ylab("Nhat") +
      geom_ribbon(aes(x = .data$Year, ymin = .data$min95, ymax = .data$max95, fill = gr),
                  linetype = 0, alpha = .2) +
      ggtitle(ttl) +
      ylim(0, NA) +
      scale_x_continuous(name = "year", breaks = labx, labels = labx) +
      theme_light() +
      theme(legend.title=element_blank())
  }
  p
}
# Build population-size estimate tables (and, when `year` is given,
# plots) from a fitted model and its bootstrap replicates.
#
# d:           data.frame of cells: list-indicator columns, covariate
#              columns (factors), and a Freq count column.
# lists:       column indices of the list-indicator columns in `d`.
# m:           fitted frequency per cell -- presumably from the point
#              estimate model; confirm against the caller.
# year:        name of the year covariate column, or NULL.
# boot_fitted: matrix of bootstrap fitted frequencies, cells x replicates.
#
# Returns a list of estimate tables when year is NULL, otherwise
# list(tables = ..., plots = ...).
#
# NOTE(review): the branches below share state -- `covs` and `lev` are
# created in one branch and reused in later ones, so the branch
# conditions must continue to imply each other; be careful when editing.
generate_tables <- function(d, lists, m, year, boot_fitted){
  est <- list()
  # Overall table: observed count, point estimate, bootstrap 95% CI.
  est[[1]] <- round(cbind(nobs = sum(d$Freq),
                          Nhat = sum(m),
                          t(ci_95(colSums(boot_fitted)))))
  names(est)[[1]] <- "x"
  rownames(est[[1]]) <- "all"
  # Per-year table and plot.
  if (!is.null(year)){
    plots <- list()
    yearnr <- which(colnames(d) == year)
    temp <- matrix(0, length(levels(d[, yearnr])), 5,
                   dimnames = list(levels(d[, yearnr]),
                                   c("Year", "nobs", "Nhat", "min95", "max95")))
    for (j in 1:length(levels(d[, yearnr]))){
      nrs <- which(d[, yearnr] == levels(d[, yearnr])[j])
      temp[j, ] <- round(cbind(Year = as.numeric(levels(d[, yearnr])[j]),
                               nobs = sum(d$Freq[nrs]),
                               Nhat = sum(m[nrs]),
                               t(ci_95(colSums(boot_fitted[nrs, ])))))
    }
    est[[length(est) + 1]] <- temp
    plots[[1]] <- gen_plot(temp = temp, ttl = names(est)[length(est)])
    names(est)[[length(est)]] <- names(plots)[[1]] <- colnames(d)[yearnr]
  }
  # One-way tables per covariate (no year present).
  if (ncol(d) > length(lists) + 1 & is.null(year)){ #no year and only one covariate
    covs <- d[, -lists]
    lev <- lapply(covs[, -ncol(covs), drop = F], levels)
    for (i in 1:length(lev)){
      temp <- matrix(0, length(lev[[i]]), 4,
                     dimnames = list(lev[[i]],
                                     c("nobs", "Nhat", "min95", "max95")))
      for (j in 1:length(lev[[i]])){
        nrs <- which(covs[, i] == lev[[i]][j])
        temp[j, ] <- round(cbind(nobs = sum(covs$Freq[nrs]),
                                 Nhat = sum(m[nrs]),
                                 t(ci_95(colSums(boot_fitted[nrs, ])))))
      }
      est[[length(est) + 1]] <- temp
      names(est)[[length(est)]] <- names(lev)[i]
    }
  }
  # Two-way (covariate x covariate) tables, no year; relies on `covs`
  # and `lev` assigned in the previous branch.
  if (ncol(d) > length(lists) + 2 & is.null(year)){ #no year and at least two covariates
    for (i in 1:(length(lev) - 1)){
      for (j in (i+1):length(lev)){
        # NOTE(review): `z` below is computed but never used -- looks like
        # dead code left from an earlier implementation; confirm before removal.
        z <- xtabs(boot_fitted ~ ., covs[, c(i, j)])
        temp <- NULL
        tempnames <- NULL
        for (k in 1:length(lev[[i]])){
          for (l in 1:length(lev[[j]])){
            nrs <- which(covs[, i] == lev[[i]][k] & covs[, j] == lev[[j]][l])
            vtemp <- round(cbind(nobs = sum(covs$Freq[nrs]),
                                 Nhat = sum(m[nrs]),
                                 t(ci_95(colSums(boot_fitted[nrs, ])))))
            temp <- rbind(temp, vtemp)
            tempnames <- c(tempnames, paste(lev[[i]][k], lev[[j]][l], sep = ":"))
          }
        }
        rownames(temp) <- tempnames
        est[[length(est) + 1]] <- temp
        names(est)[[length(est)]] <- paste(names(lev)[i], names(lev)[j], sep = "x")
      }
    }
  }
  # Covariate-by-year tables and grouped plots.
  if (ncol(d) > length(lists) + 2 & !is.null(year)){ #year and at leat one covariate
    covs <- d[, -lists]
    lev <- lapply(covs[, -ncol(covs), drop = F], levels)
    yearnr <- which(colnames(covs) == year)
    covnr <- (1:ncol(covs))[-c(yearnr, ncol(covs))]
    # NOTE(review): this preallocated `temp` is immediately overwritten
    # with NULL inside the loop; the allocation appears to be vestigial.
    temp <- matrix(0, length(lev[[yearnr]]), 5,
                   dimnames = list(lev[[yearnr]],
                                   c("Year", "nobs", "Nhat", "min95", "max95")))
    for (i in covnr){
      temp <- NULL
      tempnames <- NULL
      for (j in 1:length(lev[[i]])){
        for (k in 1:length(lev[[yearnr]])){
          nrs <- which(covs[, i] == lev[[i]][j] & covs[, yearnr] == lev[[yearnr]][k])
          vtemp <- round(cbind(Year = as.numeric(lev[[yearnr]][k]),
                               nobs = sum(covs$Freq[nrs]),
                               Nhat = sum(m[nrs]),
                               t(ci_95(colSums(boot_fitted[nrs, ])))))
          temp <- rbind(temp, vtemp)
          tempnames <- c(tempnames, paste(lev[[i]][j]))
        }
      }
      rownames(temp) <- tempnames
      est[[length(est) + 1]] <- temp
      names(est)[[length(est)]] <- names(lev)[i]
      plots[[length(plots) + 1]] <- gen_plot(temp = temp, ttl = names(est)[length(est)], gr = as.factor(rownames(temp)))
      names(plots)[[length(plots)]] <- names(lev)[i]
    }
  }
  # Covariate-by-covariate-by-year tables and grouped plots.
  if (ncol(d) > length(lists) + 3 & !is.null(year)){ #year and at leat two covariates
    yearnr <- which(colnames(covs) == year)
    covnr <- (1:ncol(covs))[-c(yearnr, ncol(covs))]
    for (i in covnr[-length(covnr)]){
      for (j in (i + 1):length(covnr)){
        temp <- NULL
        tempnames <- NULL
        for (k in 1:length(lev[[i]])){
          for (l in 1:length(lev[[j]])){
            for (y in 1:length(lev[[yearnr]])){
              nrs <- which(covs[, i] == lev[[i]][k] & covs[, j] == lev[[j]][l] & covs[, yearnr] == lev[[yearnr]][y])
              vtemp <- round(cbind(Year = as.numeric(lev[[yearnr]][y]),
                                   nobs = sum(covs$Freq[nrs]),
                                   Nhat = sum(m[nrs]),
                                   t(ci_95(colSums(boot_fitted[nrs, ])))))
              temp <- rbind(temp, vtemp)
              tempnames <- c(tempnames, paste(lev[[i]][k], lev[[j]][l], sep = ":"))
            }
          }
        }
        rownames(temp) <- tempnames
        est[[length(est) + 1]] <- temp
        names(est)[[length(est)]] <- paste(names(lev)[i], names(lev)[j], sep = "x")
        plots[[length(plots) + 1]] <- gen_plot(temp = temp, ttl = names(est)[length(est)], gr = as.factor(rownames(temp)))
        names(plots)[[length(plots)]] <- paste(names(lev)[i], names(lev)[j], sep = "x")
      }
    }
  }
  # Plots only exist when a year column was supplied.
  if (is.null(year)){
    return(est)
  }else{
    return(list(tables = est,
                plots = plots))
  }
}
|
9194c31a99825c00b079bde69fcd9fd6a3bb12f3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/opera/tests/test-oracle.R | 7a4aa9ce35895351dac60f7b8398667e41b52140 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,769 | r | test-oracle.R | # Unit tests of opera package using testhat package
context("Testing oracle function")

# Shared fixture: n observations where expert 1 always predicts 0 and
# expert 2 always predicts 1; the target is 0.4 except for the final
# observation.  `awake` alternates expert 1 on/off so the
# sleeping-expert code paths can be exercised.
n <- 50
X <- cbind(rep(0, n), rep(1, n))
Y <- rep(0.4, n)
X[n, ] <- c(1, 1)
Y[n] <- 1
awake <- cbind(rep(c(0, 1), n/2), 1)

# Test of loss functions: loss() must return one row per observation.
test_that("loss functions return correct values", {
  expect_that(dim(loss(X, Y, loss.type = "square"))[1], equals(n))
})

# Test of oracle functions: expert 1 (constant 0) has the smaller square
# loss against Y, so the best-expert oracle must select it.
test_that("Best expert oracle is ok", {
  m <- oracle(Y = Y, experts = X, model = "expert")
  expect_that(m$coefficients[1], equals(1))
  expect_that(m$loss, equals(mean((X[, 1] - Y)^2)))
  expect_that(sum(m$prediction), equals(sum(X[, 1])))
  expect_that(m$rmse, equals(sqrt(mean((X[, 1]- Y)^2))))
  # Unsupported/unused arguments must fail loudly, not silently.
  expect_error(oracle(Y = Y, experts = X, model = "expert", awake = awake), "Sleeping or missing values not allowed")
  expect_warning(oracle(Y = Y, experts = X, model = "expert", lambda = 3), "Unused lambda parameter")
  expect_warning(oracle(Y = Y, experts = X, model = "expert", niter = 3), "Unused niter parameter")
})
# Y is 0.4 almost everywhere, so the best convex mix of the constant
# experts 0 and 1 puts weight 0.6 on expert 2's complement -- i.e.
# coefficients c(0.6, 0.4) -- and reproduces Y essentially exactly.
test_that("Best convex oracle is ok", {
  m <- oracle(Y = Y, experts = X, model = "convex")
  expect_equal(m$coefficients[1], 0.6)
  expect_equal(sum(m$coefficients), 1)
  expect_equal(m$loss, 0)
  expect_true(sum(abs(m$prediction - Y)) < 1e-10)
  expect_equal(m$rmse, 0)
  # Non-square losses are solved numerically, hence looser tolerances
  # and expected convergence warnings.
  expect_warning(m <- oracle(Y = Y, experts = X, model = "convex", loss.type = "percentage"))
  expect_true(abs(m$coefficients[1] - 0.6) < 1e-04)
  expect_true(m$loss < 1e-04)
  expect_true(sum(abs(m$prediction - Y)) < 1e-04)
  expect_warning(m <- oracle(Y = Y, experts = X, model = "convex", loss.type = "absolute", awake = awake))
  expect_true(abs(m$coefficients[1] - 0.6) < 0.1)
  # The reported loss must agree with opera's internal lossConv helper.
  l <- getAnywhere(lossConv)$objs[[1]]
  expect_equal(mean(loss(m$prediction, Y, "absolute")), l(m$coefficients, Y, X,
    awake, "absolute"))
  expect_equal(m$loss, mean(loss(m$prediction, Y, "absolute")))
})

# The best linear combination coincides with the convex one here.
test_that("Best linear oracle is ok", {
  m <- oracle(Y = Y, experts = X, model = "linear")
  expect_equal(m$coefficients[1], 0.6)
  expect_equal(sum(m$coefficients), 1)
  expect_equal(m$loss, 0)
  expect_true(sum(abs(m$prediction - Y)) < 1e-10)
  expect_equal(m$rmse, 0)
  expect_error(oracle(Y = Y, experts = X, model = "linear", awake = awake), "Sleeping or missing values not allowed")
  expect_warning(m <- oracle(Y = Y, experts = X, model = "linear", loss.type = "percentage"))
  expect_equal(m$loss, mean(loss(m$prediction, Y, loss.type = "percentage")))
})
# Pinball-loss oracles: the experts are the empirical quantiles of Y, so
# for pinball loss at tau = quantiles[i] the i-th expert is optimal.
test_that("Quantile oracles are ok", {
  set.seed(1)
  # test of quantile oracles
  quantiles <- seq(0.1, 0.9, by = 0.1)
  K <- length(quantiles)
  Y <- rnorm(n, mean = 0, sd = 1)
  X <- t(matrix(rep(quantile(Y, probs = quantiles), n), nrow = K))
  i <- sample(1:K, 1)
  l <- list(name = "pinball", tau = quantiles[i])
  # best expert oracle: must pick exactly the tau-quantile expert
  m.best_expert <- oracle(Y = Y, experts = X, model = "expert", loss.type = l)
  expect_equal(which(m.best_expert$coefficients == 1), i)
  expect_equal(m.best_expert$loss, mean(loss(m.best_expert$prediction, Y, loss.type = l)))
  # best convex oracle: mixing only the extreme experts (1 and K) should
  # still land close to the optimal quantile
  expect_warning(m <- oracle(Y = Y, experts = X[, c(1, K)], model = "convex", loss.type = l))
  expect_lt(abs(sum(X[1, c(1, K)] * m$coefficients) - X[1, i]), 0.1)
  expect_equal(m$loss, mean(loss(m$prediction, Y, loss.type = l)))
  expect_warning(oracle(Y = Y, experts = X[, c(1, K)], model = "convex", loss.type = l))
  # best linear oracle (with singular matrix)
  expect_warning(m <- oracle(Y = Y, experts = X[, c(1, K)], model = "linear", loss.type = l, niter = 10))
  expect_lt(abs(sum(X[1, c(1, K)] * m$coefficients) - X[1, i]), 0.1)
  expect_equal(m$loss, mean(loss(m$prediction, Y, loss.type = l)))
  expect_warning(oracle(Y = Y, experts = X[, c(1, K)], model = "linear", loss.type = l))
  # best linear oracle (with direct computation using rq); perturbing the
  # last row/observation makes the expert matrix non-singular
  X[n, ] <- 1
  Y[n] <- 1
  m <- oracle(Y = Y, experts = X[, c(1, K)], model = "linear", loss.type = l)
  expect_lt(abs(sum(X[1, c(1, K)] * m$coefficients) - X[1, i]), 0.1)
  expect_equal(m$loss, mean(loss(m$prediction, Y, loss.type = l)))
})

# The shifting oracle with no shifts can do no worse than the single
# best expert, and the result must carry the oracle S3 classes.
test_that("Best shifting oracle is ok", {
  m <- oracle(Y = Y, experts = X, model = "shifting", loss.type = "square")
  expect_equal(m$loss[1], min(mean(loss(X[, 1], Y)), mean(loss(X[, 2], Y))))
  expect_equal(class(m), "oracle")
  expect_equal(class(summary(m)), "summary.oracle")
})
# test multi-dimensional data: running the oracle on flattened
# d-dimensional series must give the same experts/predictions as running
# it on the equivalent block (seriesToBlock) representation.
test_that("Dimension d>1 is ok",{
  # load some basic data to perform tests
  n <- 10
  d <- 3
  for (model in c("expert", "convex", "linear")) {
    l <- sample(c("square", "pinball", "percentage", "absolute"), 1)
    # Small helper building the expert forecasts from the canonical basis
    # (the 2d signed unit vectors), repeated for each of the n rounds.
    base_predictions = function(d,n) {
      # NOTE(review): `decimals` is computed but never used below.
      decimals <- c(0:(2^d-1))
      m <- cbind(diag(d),-diag(d))
      return(t(matrix(rep(t(m),n),nrow = 2*d)))
    }
    X <- base_predictions(d,n) # X is the canonical basis
    theta.star <- sign(rnorm(d)) * runif(d) # point to be predicted
    theta.star <- runif(1) * theta.star / sum(abs(theta.star)) # the target point is in the L1 unit ball
    if (l == "percentage") {
      X <- abs(X)
      theta.star <- abs(theta.star)
    }
    Y <- rep(theta.star, n)
    m <- oracle(Y = Y,experts = X, model = model, loss.type = l)
    # Reshape the flat-series fit into block form for comparison/printing.
    m$d <- d
    m$prediction <- seriesToBlock(m$prediction,d)
    m$Y <- seriesToBlock(m$Y,d)
    m$residuals <- seriesToBlock(m$residuals,d)
    m$experts <- seriesToBlock(m$experts,d)
    summary(m)
    plot(m)
    # Refit directly on block data; both routes must agree.
    X <- seriesToBlock(X, d = d)
    Y <- seriesToBlock(Y, d = d)
    m1 <- oracle(Y = Y, experts= X, model = model, loss.type = l)
    expect_equal(m$experts,m1$experts)
    expect_true(mean(abs(m$prediction - m1$prediction)) < mean(abs(Y))/10)
  }
})
|
b8ab5f60355a76ecae4f13500e4b190dbca3519e | da3f26feeca261602f9ee43e0d16a8a43c684ba3 | /metric_classificators/parze_window.R | 2cdc1c554216845c9a57a22fa67b6d0631a58f21 | [] | no_license | Goncharoff/SMPR | 706bff4f7285cc24c9c17cab6727115eb90ebcbe | 6c21529c155180583fdabbf31e63d3d040a0489b | refs/heads/master | 2021-09-05T00:03:10.136082 | 2018-01-22T23:12:23 | 2018-01-22T23:12:23 | 112,397,943 | 2 | 9 | null | null | null | null | UTF-8 | R | false | false | 5,649 | r | parze_window.R | v <- iris[, 3:4]
p <- c(3, 1)
avg <- function(x)
{
sum(x) / length(x)
}
colors <-
c("setosa" = "red",
"versicolor" = "green",
"virginica" = "blue")
ax <- avg(iris[iris$Species == "setosa", 3])
ay <- avg(iris[iris$Species == "setosa", 4])
bx <- avg(iris[iris$Species == "versicolor", 3])
by <- avg(iris[iris$Species == "versicolor", 4])
cx <- avg(iris[iris$Species == "virginica", 3])
cy <- avg(iris[iris$Species == "virginica", 4])
plot(iris[, 3:4],
pch = 21,
bg = colors[iris$Species],
col = colors[iris$Species])
points(ax, ay, pch = 20, col = "black")
points(bx, by, pch = 20, col = "black")
points(cx, cy, pch = 20, col = "black")
points(p, pch = 20, col = "yellow", lwd = 9)
dist <- function(u, v)
{
sqrt(sum((u - v) ^ 2))
}
a <- dist(c(ax, ay), p)
b <- dist(c(bx, by), p)
c <- dist(c(cx, cy), p)
min(c(a, b, c))
euclideanDistance <- function(u, v) {
sqrt(sum((u - v) ^ 2))
}
sortObjectsByDist <- function(xl, z, metricFunction = euclideanDistance)
{
l <- dim(xl)[1]
n <- dim(xl)[2] - 1
distances <- matrix(NA, l, 2)
for (i in 1:l) {
distances[i,] <- c(i, metricFunction(xl[i, 1:n], z))
}
orderedXl <- xl[order(distances[, 2]),]
return (orderedXl<-cbind( orderedXl, evcld = sort(distances[,2],decreasing =FALSE)))
}
kNN <- function(xl, z, k) {
orderedXl <- sortObjectsByDist(xl, z, euclideanDistance)
n <- dim(orderedXl)[2] - 1
classes <-orderedXl[1:k, n + 1]
counts <- table(classes)
class <- names(which.max(counts))
return (class)
}
plot(
iris[, 3:4],
pch = 21,
bg = colors[iris$Species],
col = colors[iris$Species],
asp = 1
)
class <-kNN(xl, z, k = 6)
points(z[1], z[2], pch = 22, bg = colors[class], col = colors[class], asp = 1, lwd = 5)
########################## OKNA #####################################
#прямоугольное окно
u_func <- function(rast, h) {
if(abs(rast/h) <= 1){
return (0.5)
} else {
return(0)
}
}
#ядро епачникова
func_epanechnikov <-function(rast, h){
if(abs(rast/h) <= 1){
return(3/4 * (1 - (rast/h)^2))
} else {
return(0)
}
}
#квадратное ядро
func_kvadrat <-function(rast, h){
if(abs(rast/h) <= 1){
return(15/16 * (1 - (rast/h)^2)^2)
} else {
return(0)
}
}
#ядро треугольлника
func_treyg <-function(rast, h){
if(abs(rast/h) <= 1){
return(1-abs(rast/h))
} else {
return(0)
}
}
#ядро гауссовское
funk_gaus <- function(rast, h){
if(abs(rast/h) <= 1){
return ( (2*pi)^(-1/2) * exp(-1/2 * (rast/h)^2 ) )
} else {
return(0)
}
}
#LOO classifaer
# Leave-one-out cross-validation of a classifier over the window-width
# grid h in seq(0.5, 5, by = 0.1), on the iris data (petal features in
# columns 3:4, labels in column 5).
#
# classificator(training_set, query_point, h, kernel) must return the
# predicted class label; `fanc` is the kernel passed through to it.
# Returns the vector of LOO error rates, one per h value.
#
# Fixes vs the original: the result vector is preallocated to the true
# grid length (seq(1, 45) was one element short for the 46-point grid
# and relied on silent growth), the hard-coded 150s are derived from
# nrow(iris), and the debug print() of the loop counter is removed.
LOO <- function(classificator, fanc){
  h_grid <- seq(0.5, 5, by = 0.1)
  n_obs <- nrow(iris)
  errors <- numeric(length(h_grid))
  for (j in seq_along(h_grid)) {
    misclassified <- 0
    for (i in seq_len(n_obs)) {
      # Hold out observation i; classify it from the remaining rows.
      x_el <- c(iris[i, 3], iris[i, 4])
      x_sample <- iris[-i, 3:5]
      predicted <- classificator(x_sample, x_el, h_grid[j], fanc)
      if (iris[i, 5] != predicted) {
        misclassified <- misclassified + 1
      }
    }
    errors[j] <- misclassified / n_obs
  }
  errors
}
parzen_window <- function(xl, z, h, fanc) {
orderedXl <- sortObjectsByDist(xl, z, euclideanDistance)
n <- dim(orderedXl)[2]-1
classes <-orderedXl[1:150, n]
m = c("setosa" = 0, "versicolor" = 0, "virginica" = 0)
for (i in seq(1:149)){
#print(m)
m[[classes[i]]] <- m[[classes[i]]] + fanc(orderedXl[i,4], h)
}
class <- names(which.max(m))
return (class)
}
#LOO results for ordinary parzen window
LOO_pramoygolnik = LOO(parzen_window, u_func)
LOO_epachnikov = LOO(parzen_window, func_epanechnikov)
LOO_kavdrat = LOO(parzen_window, func_kvadrat)
LOO_gaus = LOO(parzen_window, funk_gaus)
LOO_treug = LOO(parzen_window, func_treyg)
#part of drawing plots for ordinary parzen window classficators
h_vect = seq(0.5,5,by=0.1)
xl <- c(seq(0.5, 5, 0.1))
tochka_epachnikova = which( LOO_epachnikov == min(LOO_pramoygolnik) )
tochka_kvarticheskoe = which( LOO_kavdrat == min(LOO_kavdrat) )
tochka_treygolnoe = which( LOO_treug == min(LOO_treug) )
tochka_gauss = which( LOO_gaus == min(LOO_gaus) )
tochka_prjamoygolnoe = which(LOO_pramoygolnik == min(LOO_pramoygolnik) )
par(mfrow=c(3,2))
plot(h_vect,LOO_pramoygolnik, type = "l", xaxt="n", xlab = "h value", ylab = "Error value", main = "????????????? ????")
axis(1, at = seq(0.5, 5, by = 0.1), las=1)
points(h_vect[tochka_prjamoygolnoe], LOO_pramoygolnik[tochka_prjamoygolnoe], col="red", pch = 19)
plot(h_vect,LOO_epachnikov, type = "l", xaxt="n", xlab = "h value", ylab = "Error value", main = "???? ????????????")
axis(1, at = seq(0.5, 5, by = 0.1), las=1)
points(h_vect[tochka_epachnikova], LOO_epachnikov[tochka_epachnikova], col="red", pch = 19)
plot(h_vect,LOO_kavdrat, type = "l", xaxt="n", xlab = "h value", ylab = "Error value", main = "???? ???????????")
axis(1, at = seq(0.5, 5, by = 0.1), las=1)
points(h_vect[tochka_kvarticheskoe], LOO_kavdrat[tochka_kvarticheskoe], col="red", pch = 19)
plot(h_vect,LOO_gaus, type = "l", xaxt="n", xlab = "h value", ylab = "Error value", main = "???? ??????")
axis(1, at = seq(0.5, 5, by = 0.1), las=1)
points(h_vect[tochka_gauss], LOO_gaus[tochka_gauss], col="red", pch = 19)
plot(h_vect,LOO_treug, type = "l", xaxt="n", xlab = "h value", ylab = "Error value", main = "???? ???????????")
axis(1, at = seq(0.5, 5, by = 0.1), las=1)
points(h_vect[tochka_treygolnoe], LOO_treug[tochka_treygolnoe], col="red", pch = 19)
|
371c171ba4cf6322454b713fb479d635ccea7e25 | 90ce5381487899129e90911cb01a3e83dd9aad87 | /R/InOutBags.R | 31cc0abf36e02b7bbc687b7c77137f1a088350e1 | [] | no_license | cran/rfVarImpOOB | 82274d6af3743d660b6178209e0136f3eb9ba15b | 60b5b1366bc22750cf609eb6c60f78ecb2b5172a | refs/heads/master | 2022-07-08T15:10:24.184676 | 2022-07-01T13:40:02 | 2022-07-01T13:40:02 | 236,881,840 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,419 | r | InOutBags.R | InOutBags <- structure(function#separates data into inbag and outbag
### convenience function to mitigate risk of improperly disentangling train/test
### NOTE: the original row names (too dangerous for repeated rows) are not kept but instead recorded in a separate column
(
RF, ##<< object returned by call to randomForest() or ranger()
data, ##<< data which was used to train the RF. NOTE: assumes setting of inbag=TRUE while training
k, ##<< tree number
inclRowNames = TRUE, ##<< create extra column of original row names
NullRowNames=TRUE, ##<< if TRUE set row names to NULL
verbose = 0 ##<< level of verbosity
){
n=nrow(data)
if ("randomForest" %in% class(RF)){
inRows = rep(rownames(RF$inbag),time=RF$inbag[,k])
outRows = names((RF$inbag[RF$inbag[,k]==0,k]))
} else if ("ranger" %in% class(RF)) {
inRows = rep(rownames(data),time=RF$inbag.counts[[k]])
outRows = rownames(data)[RF$inbag.counts[[k]]==0]
}
inbag = data[inRows,]
inbag$origRows=inRows
outbag = data[outRows,]
outbag$origRows=outRows
if (NullRowNames) {
rownames(inbag) = rownames(outbag) = NULL
} else {
rownames(inbag) = 1:nrow(inbag)
rownames(outbag) = 1:nrow(outbag)
}
return(list(inbag=inbag,outbag=outbag))
### inbag and outbag subsets of the original data
}, ex = function(){
rfTit = rfTitanic(nRows = 200,nodesize=10, ntree = 5)
k=1
tmp <- InOutBags(rfTit$RF, rfTit$data, k)
}) |
068151508f963413d6c5b555c7ea944ffaf31bc3 | d3ec31ef9c490eb1360429f9e4c74d82d877b556 | /man/send.down.Rd | fb7bc7bd1d649cb1a90a931fa1dc6f40d6f26174 | [] | no_license | kdoub5ha/rcITR | 44df37807d25fecc7b3c66eb54466b237d69bdfd | 32c3a9a1fd7ebe9d433e44a1848a9f5db06bc2be | refs/heads/master | 2021-05-24T15:08:28.672887 | 2020-08-04T15:23:04 | 2020-08-04T15:23:04 | 253,620,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 759 | rd | send.down.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/send.down.R
\name{send.down}
\alias{send.down}
\title{Sends testing data down a tree to obtain terminal node assignments}
\usage{
send.down(dat.new, tre, char.var = 1000, ctgs = NULL)
}
\arguments{
\item{dat.new}{data to be run down the tree. Required input.}
\item{tre}{tree object from grow.ITR(). Required input.}
\item{char.var}{internal variable.}
\item{ctgs}{categorical variables, entered as columns in `dat.new`}
}
\value{
\item{data}{input data with extra column of node assignments}
\item{tree}{input tree with extra column for number of observations in each node}
}
\description{
Sends dat.new down tree 'tre' to obtain node assignment.
}
|
f770713ea6dd670b7f5eb38863d20718d4990078 | 6d817eaea17e676429c2f70eaabde7303ef383ad | /archive/temp_vector_var.R | 455d5b434016ca020333a9f3a99aa393f5d7d3be | [] | no_license | itamarfaran/correlation_glm | f5e84ecd4817c09a478586bae9ba43fc1fe95b3d | 0ba0424d2ef0f2975b80a48c7b84cf992e15659f | refs/heads/master | 2023-08-14T23:37:46.494876 | 2021-10-16T11:24:51 | 2021-10-16T11:24:51 | 151,227,146 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,316 | r | temp_vector_var.R | source("main_work/Code/01_generalFunctions.R")
source("main_work/Code/02_simulationFunctions.R")
source("main_work/Code/03_estimationFunctions2.R")
real.cov2 <- function(i, j, k, l, MATR) {
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
p <- 10
MATR <- build_parameters(p, 0.5, c(0,1))$Corr.mat
# MATR <- matrix(1:9, ncol = 3)
# MATR <- MATR + t(MATR) + diag(3)*9
vector_var_matrix_calc_COR <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i)))
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p))
pelet <- matrix(0, nrow = m, ncol = m)
for(i1 in 1:m){
for(j1 in i1:m){
i <- order_vecti[i1]
j <- order_vectj[i1]
k <- order_vecti[j1]
l <- order_vectj[j1]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
pelet[i1,j1] <-
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
}
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
cppFunction(
'NumericMatrix corcalc_c(NumericMatrix MATR, int p, int m, NumericVector order_vecti, NumericVector order_vectj) {
NumericMatrix pelet(m, m);
for (int i1 = 0; i1 < m; i1++) {
for (int j1 = 0; j1 < m; j1++) {
int i = order_vecti[i1];
int j = order_vectj[i1];
int k = order_vecti[j1];
int l = order_vectj[j1];
int MATRij = MATR(i,j);
int MATRkl = MATR(k,l);
int MATRik = MATR(i,k);
int MATRil = MATR(i,l);
int MATRjk = MATR(j,k);
int MATRjl = MATR(j,l);
pelet(i1,j1) =
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk);
}
}
return pelet;
}')
corcalc_R <- function(MATR, p, m, order_vecti, order_vectj){
pelet <- matrix(0, nrow = m, ncol = m)
for(i1 in 1:m){
for(j1 in i1:m){
i <- order_vecti[i1]
j <- order_vectj[i1]
k <- order_vecti[j1]
l <- order_vectj[j1]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
pelet[i1,j1] <-
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
}
return(pelet)
}
vector_var_matrix_calc_COR_CR <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i)))
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p))
pelet <- corcalc_R(MATR, p, m, order_vecti, order_vectj)
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
vector_var_matrix_calc_COR_C <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- nrow(MATR)
m <- p*(p-1)/2
order_vecti <- unlist(lapply(1:(p - 1), function(i) rep(i, p - i))) - 1
order_vectj <- unlist(lapply(1:(p - 1), function(i) (i + 1):p)) - 1
pelet <- corcalc_c(MATR, p, m, order_vecti, order_vectj)
pelet <- pelet + t(pelet) - diag(diag(pelet))
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
vector_var_matrix_calc_COR_par <- function(MATR, nonpositive = c("Stop", "Force", "Ignore"),
reg_par = 0){
if(length(nonpositive) > 1) nonpositive <- nonpositive[1]
if(!is.positive.definite(MATR)){
if(nonpositive == "Force") {MATR <- force_positive_definiteness(MATR)$Matrix
} else if(nonpositive != "Ignore") stop("MATR not positive definite") }
p <- dim(MATR)[1]
m <- p*(p-1)/2
tocomp <- unlist(sapply(1:m, function(i) (i - 1)*m + i:m))
real.cov2 <- function(q, MATR, p, m, cumsum) {
t1 <- ceiling(q/m)
t2 <- q %% m
t2 <- m*(t2 == 0) + t2*(t2 != 0)
i <- sum(cumsum < t1)
j <- i + t1 - cumsum[i]
k <- sum(cumsum < t2)
l <- k + t2 - cumsum[k]
MATRij <- MATR[i,j]
MATRkl <- MATR[k,l]
MATRik <- MATR[i,k]
MATRil <- MATR[i,l]
MATRjk <- MATR[j,k]
MATRjl <- MATR[j,l]
(MATRij*MATRkl/2) * (MATRik^2 + MATRil^2 + MATRjk^2 + MATRjl^2) -
MATRij*(MATRik*MATRil + MATRjk*MATRjl) -
MATRkl*(MATRik*MATRjk + MATRil*MATRjl) +
(MATRik*MATRjl + MATRil*MATRjk)
}
cumsum <- c(0, cumsum((p - 1):1))
pelet <- mclapply(tocomp, real.cov2, MATR = MATR,
p = p, m = m, cumsum = cumsum,
mc.cores = ifelse(.Platform$OS.type == "windows", 1, ncores))
pelet <- vector2triangle(unlist(pelet), diag = T)
if((reg_par < 0) | (reg_par > 1)) warning("Regularization Parameter not between 0,1")
if(reg_par != 0) pelet <- (1 - reg_par)*pelet + reg_par*diag(diag(pelet))
return(pelet)
}
# profvis({
tt1 <- Sys.time()
pelet1 <- vector_var_matrix_calc_COR(MATR)
tt1 <- Sys.time() - tt1
tt2 <- Sys.time()
pelet2 <- vector_var_matrix_calc_COR_C(MATR)
tt2 <- Sys.time() - tt2
tt3 <- Sys.time()
#pelet3 <- vector_var_matrix_calc_COR_par(MATR)
tt3 <- Sys.time() - tt3
# })
identical(round(pelet1, 2), round(pelet2 ,2))
#identical(round(pelet2, 2), round(pelet3 ,2))
tt1
tt2
tt3
#tt4
# rm(pelet1, pelet2, pelet3, pelet4)
gc()
|
7ce41c9285c516e283e53f4f33775958f3a5fc84 | 4e9751cc804f4d84b93d72bc57d51f9c112c15c4 | /ui.R | b01a7dd6cd2fc6d2792a732984a16a8c404a7d07 | [] | no_license | RobertoSH/DataProducts | 81934b6b8c22fdfee92e7c7d4343f0c3712d78ca | ecdb17f77c8a0c9e60257b713b88045e97ba90b9 | refs/heads/master | 2016-09-06T09:28:31.798227 | 2014-08-24T19:34:47 | 2014-08-24T19:34:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 934 | r | ui.R | shinyUI(fluidPage(
sidebarPanel(
# Copy the chunk below to make a group of checkboxes
checkboxGroupInput("Variables", label = h3("Noise in Variables"),
choices = list("Flavor" = "Flavor",
"Apperance" = "Apperance",
"Smell" = "Smell")),
sliderInput('Range',label = h3("Uniform Range"),min = -4,max = 4,value = c(-1,1),step = 1),
sliderInput('Flavor',label = h3("Flavor"),min = 1,max = 7,value = c(1),step = 1),
sliderInput('Appearance',label = h3("Appearance"),min = 1,max = 7,value = c(1),step = 1),
sliderInput('Smell',label = h3("Smell"),min = 1,max = 7,value = c(1),step = 1)
# hr(),
# fluidRow(column(3, verbatimTextOutput("Variables"))) ,
# actionButton("goButton", "Execute")
),
mainPanel(
plotOutput("TreePlot"),
tableOutput("Prediction"),
tableOutput("Importance")
)
)) |
d128fb5fb11b1fb42313010305d2269d5baa598a | c8494552202bb07e46b3c328e9c503db92fda70e | /man/confint.MxModel.Rd | 062c2f225616cf30e4d401c6b8a9ebff76a77faf | [] | no_license | hmaes/umx | fcc85dc40774552d0f664036404e12bbdd75cc05 | 09b3c0efd4131248e30e67925b7650278ca78ff9 | refs/heads/master | 2021-01-24T20:26:15.667956 | 2014-09-05T09:02:47 | 2014-09-05T09:02:47 | 23,714,644 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,899 | rd | confint.MxModel.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{confint.MxModel}
\alias{confint.MxModel}
\alias{umxConfint}
\title{confint.MxModel}
\usage{
\method{confint}{MxModel}(object, parm = list("existing", c("vector", "of",
"names"), "default = add all"), level = 0.95, run = FALSE,
showErrorcodes = FALSE, ...)
}
\arguments{
\item{object}{An \code{\link{mxModel}}, possibly already containing \code{\link{mxCI}}s that have been \code{\link{mxRun}} with intervals = TRUE))}
\item{parm}{A specification of which parameters are to be given confidence intervals. Can be "existing", "all", or a vector of names.}
\item{level}{The confidence level required (default = .95)}
\item{run}{Whether to run the model (defaults to FALSE)}
\item{showErrorcodes}{(default = FALSE)}
\item{...}{Additional argument(s) for methods.}
}
\value{
- \code{\link{mxModel}}
}
\description{
Implements confidence interval function for OpenMx models.
Note: Currently requested CIs are added to existing CIs, and all are run,
even if they alrady exist in the output. This might change in the future.
}
\details{
Unlike \code{\link{confint}}, if parm is missing, all CIs requested will be added to the model,
but (because these can take time to run) by default only CIs already computed will be reported.
CIs will be run only if run is TRUE, allowing this function to be used to add
CIs without automatically having to run them.
If parm is empty, and run = FALSE, a message will alert you to add run = TRUE.
Even a few CIs can take too long to make running the default.
}
\examples{
require(OpenMx)
data(demoOneFactor)
latents = c("G")
manifests = names(demoOneFactor)
m1 <- mxModel("One Factor", type = "RAM",
manifestVars = manifests, latentVars = latents,
mxPath(from = latents, to = manifests),
mxPath(from = manifests, arrows = 2),
mxPath(from = latents, arrows = 2, free = FALSE, values = 1.0),
mxData(cov(demoOneFactor), type = "cov", numObs = 500)
)
m1 = umxRun(m1, setLabels = TRUE, setValues = TRUE)
m2 = confint(m1) # default: CIs added, but user prompted to set run = TRUE
m2 = confint(m2, run = TRUE) # CIs run and reported
m1 = confint(m1, parm = "G_to_x1", run = TRUE) # Add CIs for asymmetric paths in RAM model, report them, save m1 with this CI added
m1 = confint(m1, parm = "A", run = TRUE) # Add CIs for asymmetric paths in RAM model, report them, save m1 with mxCIs added
confint(m1, parm = "existing") # request existing CIs (none added yet...)
}
\references{
- \url{http://www.github.com/tbates/umx}
}
\seealso{
- \code{\link[stats]{confint}}, \code{\link{mxCI}}, \code{\link{mxRun}}
Other umx reporting: \code{\link{RMSEA.MxModel}};
\code{\link{logLik.MxModel}}; \code{\link{plot.MxModel}},
\code{\link{umxPlot}}; \code{\link{umxCI_boot}};
\code{\link{umxCI}}; \code{\link{umxCompare}};
\code{\link{umxDescriptives}};
\code{\link{umxFitIndices}}; \code{\link{umxSummary}}
}
|
7378be403ba19fa1842a0c42478fb08fa57e21f8 | 41eba080f03f3beb477bd81b49a5e6024343b36d | /moisture transfer in a box.R | a6cc14b65ae4a4d21cef579fd7963dfdcc0d84dc | [] | no_license | marketresearchru/moisture_transfer | baedc772159d9f5505a26892f97e316b75d3a9ca | f5e00c82abf5dcf7a062e671d57f34360080e981 | refs/heads/master | 2021-01-25T04:09:33.586309 | 2017-06-05T12:54:44 | 2017-06-05T12:54:44 | 93,404,019 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,666 | r | moisture transfer in a box.R | # This program calculates moisture transfer over vertical or horisontal line
# Transfer is based on wind and humidity on 19 levels in the atmosphere stack
# we consider rectangle outside of the border which is researched
# in case of this particular grid, we have only odd degrees, therfore
# actual border is the even degree between outer definded below and inner
# (2 degrees smaller in each direction)
# LONGITUDE 1 & 2 - X degree to the east
# LATITUDE 1 & 2 - Y degree to the north
LONGITUDE1 = 34 # east
LONGITUDE2 = 60
LATITUDE1 = 46 # north
LATITUDE2 = 60
# the only library required is NETCDF opener
library("ncdf4")
# source data folder
setwd("E:/ncep_ncar20C")
Levels <- 19 # count of levels in a column
# Take first NC file to create header data
ncfile <- nc_open("special_humidity/shum.1901.nc")
nc.lat <- ncvar_get(ncfile, "lat")
Lat1 <- which(abs(nc.lat-LATITUDE1)<0.5)-1 # shift 1 step top (inside)
Lat2 <- which(abs(nc.lat-LATITUDE2)<0.5) # Lat2 < Lat1 because data in NETCDF starts from 90 and descends
nc.lon <- ncvar_get(ncfile, "lon")
Lon1 <- which(abs(nc.lon-LONGITUDE1)<0.5)
Lon2 <- which(abs(nc.lon-LONGITUDE2)<0.5)-1 # shift 1 step to left (inside)
nc.time <- ncvar_get(ncfile, "time")
nc.levels <- ncvar_get(ncfile, "level") # 19 levels from 1000 with step of 50
nc_close(ncfile)
# month lengths (each day has 4 measures)
# this function below takes in account visokosny year, however, they are not in the file
month.intervals <- function(year, tm.len){
month.sroks <- array(dim=tm.len)
mn2 <- 4*31
month.sroks[1:mn2] <- 1
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + ifelse((year - floor(year/4)*4) == 0, 29*4, 28*4)
month.sroks[mn1:mn2] <- 2
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 31*4
month.sroks[mn1:mn2] <- 3
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 30*4
month.sroks[mn1:mn2] <- 4 # April
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 31*4
month.sroks[mn1:mn2] <- 5
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 30*4
month.sroks[mn1:mn2] <- 6
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 31*4
month.sroks[mn1:mn2] <- 7
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 31*4
month.sroks[mn1:mn2] <- 8
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 30*4
month.sroks[mn1:mn2] <- 9
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 31*4
month.sroks[mn1:mn2] <- 10
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 30*4
month.sroks[mn1:mn2] <- 11
mn1 <- mn2 + 1
mn2 <- mn1 - 1 + 31*4
month.sroks[mn1:mn2] <- 12
return(month.sroks)
}
# Transfer through horisontal border for particular srok
srok.h.transfer <- function(h, w){
trans <- 0
steps <- dim(h)[1]
for(l in 1:steps){ #for each l on horizontal border
m.left <- h[l,1,]
m.right <- h[l,2,]
wind.left <- w[l,1,]
# replace wind with 0 if it blows out of border (leave only those wich really transfers moisture)
wind.left[wind.left>0] <- 0
wind.right <- w[l,2,]
wind.right[wind.right<0] <- 0
#humidity volume at 1000h level
level <- 1
trans <- trans + wind.left[level] * 25 * ((m.left[level]+m.left[level+1])/2 + m.left[level])
trans <- trans + wind.right[level] * 25 * ((m.right[level]+m.right[level+1])/2+m.right[level])
#volume of humidity on all other levels
for(level in 2:18){
trans <- trans + wind.left[level] * 25 * (3*m.left[level]+(m.left[level+1] + m.left[level-1])/2)
trans <- trans + wind.right[level] * 25 * (3*m.right[level]+(m.right[level+1] + m.right[level-1])/2)
}
#volume at 100 level
level <- 19
trans <- trans + wind.left[level] * 50 * ((m.left[level]+m.left[level-1])/2+m.left[level])/2
trans <- trans + wind.right[level] * 50 * ((m.right[level]+m.right[level-1])/2+m.right[level])/2
}
trans
}
# Transfer through vertical border
srok.v.transfer <- function(h, w){
trans <- 0
# cat(dim(h))
# cat(dim(w))
steps <- dim(h)[2]
for(l in 1:steps){ #for each l on vertical border
m.left <- h[1,l,] #left
m.right <- h[2,l,]
wind.left <- w[1,l,]
# replace wind with 0 if it blows out of border (leave only those wich really transfers moisture)
wind.left[wind.left<0] <- 0
wind.right <- w[2,l,]
wind.right[wind.right>0] <- 0
#humidity volume at 1000h level
level <- 1
trans <- trans + wind.left[level] * 25 * ((m.left[level]+m.left[level+1])/2 + m.left[level])
trans <- trans + wind.right[level] * 25 * ((m.right[level]+m.right[level+1])/2+m.right[level])
#volume of humidity on all other levels
for(level in 2:18){
trans <- trans + wind.left[level] * 25 * (3*m.left[level]+(m.left[level+1] + m.left[level-1])/2)
trans <- trans + wind.right[level] * 25 * (3*m.right[level]+(m.right[level+1] + m.right[level-1])/2)
}
#volume at 100 level
level <- 19
trans <- trans + wind.left[level] * 50 * ((m.left[level]+m.left[level-1])/2+m.left[level])/2
trans <- trans + wind.right[level] * 50 * ((m.right[level]+m.right[level-1])/2+m.right[level])/2
}
trans
}
DimLat <- Lat1-Lat2
DimLon <- Lon2-Lon1
# Output file
fileconnector <- file(description = "C:/Users/alexe/Documents/Climat/Wind-and-humidity/monthly box transfer.txt", open="wt")
write(c("YEAR", 1:12), file=fileconnector, append=TRUE, ncolumns=13, sep = "\t")
year <- 1901
for(year in 1901:2012) {
cat("year", year)
tm.len <- 4 * ifelse((year - floor(year/4)*4) == 0, 366, 365)
month.sroks <- month.intervals(year, tm.len)
# Humidity data
ncfileh <- nc_open(paste0("special_humidity/shum.", year, ".nc"))
ncfileu <- nc_open(paste0("uwnd/uwnd.", year, ".nc"))
ncfilev <- nc_open(paste0("vwnd/vwnd.", year, ".nc"))
#print(ncfile)
# read full year, 4 blocks
# block L - left border of a box + 1 vertical to the right (east)
# humidity
hL <- ncvar_get(ncfileh, "shum", start=c(Lon1, Lat2+1, 1, 1), count=c(2, DimLat, 19, tm.len) )
# u-wind component, m/s
wind.uL <- ncvar_get(ncfileu, "uwnd", start=c(Lon1, Lat2+1, 1, 1), count=c(2, DimLat, 19, tm.len ) )
# block R - right border of a box + 1 vertical to the right
# humidity
hR <- ncvar_get(ncfileh, "shum", start=c(Lon2, Lat2+1, 1, 1), count=c(2, DimLat, 19, tm.len ) )
# u-wind component, m/s
wind.uR <- ncvar_get(ncfileu, "uwnd", start=c(Lon2, Lat2+1, 1, 1), count=c(2, DimLat, 19, tm.len) )
# block T - top border of a box + 1 horisontal to the bottom (south)
# humidity
hT <- ncvar_get(ncfileh, "shum", start=c(Lon1+1, Lat2, 1, 1), count=c(DimLon, 2, 19, tm.len) )
# v-wind component, m/s
wind.vT <- ncvar_get(ncfilev, "vwnd", start=c(Lon1+1, Lat2, 1, 1), count=c(DimLon, 2, 19, tm.len) )
# block B - bottom border of a box + 1 horisontal to the bottom (south)
# humidity
hB <- ncvar_get(ncfileh, "shum", start=c(Lon1+1, Lat1, 1, 1), count=c(DimLon, 2, 19, tm.len) )
# v-wind component, m/s
wind.vB <- ncvar_get(ncfilev, "vwnd", start=c(Lon1+1, Lat1, 1, 1), count=c(DimLon, 2, 19, tm.len) )
nc_close(ncfileh)
nc_close(ncfileu)
nc_close(ncfilev)
transfer <- array(dim=12)
cat(" Data taken\nprocessing...")
for(month in 1:12) {
cat(month.abb[month], " ")
transfer[month] <- 0
for(time in which(month.sroks==month, arr.ind = TRUE)){ #for each moment of time
transfer[month] <- transfer[month] + srok.v.transfer(hL[,,,time],wind.uL[,,,time])
transfer[month] <- transfer[month] + srok.v.transfer(hR[,,,time],wind.uR[,,,time])
transfer[month] <- transfer[month] + srok.h.transfer(hT[,,,time],wind.vT[,,,time])
transfer[month] <- transfer[month] + srok.h.transfer(hB[,,,time],wind.vB[,,,time])
} # end of loop by all sroks within month
} # end of loop by month
write(c(year, transfer), file=fileconnector, append=TRUE, ncolumns=13, sep = "\t")
cat("year done\n")
} # end of loop by year
write(paste("Outer rectangle: LONGITUDE = [", LONGITUDE1, ",", LONGITUDE2, "]\tLATITUDE = [", LATITUDE1, ",", LATITUDE2, "]"), file=fileconnector, append=TRUE)
close(fileconnector)
|
a536545def4547a9eea67d864c8d7b612d174e7a | e4ed8a79d7a1c2eeed9f5c3af6a367fda87aba3b | /server.R | cf6f439fbf4aca6429ace2f9c2f5bba2b83dce63 | [] | no_license | nateao/clt_illustration | def5de99f6b45b965c0156b538801d30800bd808 | 0be59d7ade65b9cf35624d010c84d6bc5e0bf4d1 | refs/heads/master | 2016-09-05T21:00:05.936398 | 2015-02-19T05:49:14 | 2015-02-19T05:49:14 | 31,002,222 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 306 | r | server.R | library(shiny)
shinyServer(function(input, output) {
output$distPlot <- renderPlot({
lambda <- 0.2
set.seed(10413)
sim <- replicate(n = 5000, mean(rexp(n = input$integer, rate = lambda)))
hist(sim, prob = TRUE, main = "Distribution of Sample Means", xlab = "Sample Mean")
})
}) |
30c83e51af854b386a65b2dfa03c436c301f0971 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mvoutlier/examples/mvoutlier.CoDa.Rd.R | fb2de3e701d20443da185725b48a7db6fe886eec | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 280 | r | mvoutlier.CoDa.Rd.R | library(mvoutlier)
### Name: mvoutlier.CoDa
### Title: Interpreting multivatiate outliers of CoDa
### Aliases: mvoutlier.CoDa
### Keywords: multivariate robust
### ** Examples
data(humus)
d <- humus[,c("As","Cd","Co","Cu","Mg","Pb","Zn")]
res <- mvoutlier.CoDa(d)
str(res)
|
98538874e81010e690c3521f158646b4091233f9 | 28300f000bc0fc86fe6559a210b48e6101334ff0 | /R/Rpackage/Testing/functions-autotest-testing.R | c92dc217f5d8c4281b51f966d3bec10638760f36 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | naturalengland/Seabird_PVA_Tool | 64eb4f8e83d1503ced331ea6db1a23541ff8febc | 945a6fdc3e34feff4733a5e90d67418f9a86aeaa | refs/heads/master | 2022-11-15T11:59:52.500835 | 2022-11-04T17:48:29 | 2022-11-04T17:48:29 | 184,424,468 | 9 | 1 | NOASSERTION | 2022-11-04T16:03:00 | 2019-05-01T13:59:28 | R | UTF-8 | R | false | false | 22,576 | r | functions-autotest-testing.R | ## ###############################################################################################
## Created 12 September 2019, last modified 29 September 2019
##
## 21 November 2019: a) including "CompareStructure", from stackoverflow.com/questions/32399843
## b) fixed "autocheck.consistency.nepva" to only run in situations where first set of inputs are valid
##
## [1] autocheck.validity.nepva
## [2] nepva.siminputs.valid
## [2] testrun.nepva
## [3] runcheck.nepva
## [4] check.outputs
## [5] check.simplescenarios
## [5] check.validation
## [5] check.sensitivity.local
## [5] check.sensitivity.global
## ###############################################################################################
## ##################################################################################################
## BLOCK 1. Functions to autocheck the validity or otherwise of a large batch of inputs
## Created 29 September 2019, last modified 29 September 2019
## ###############################################################################################
autocheck.validity.nepva <- function(nsms, test.invalid = FALSE){
out <- NULL
runtypes <- c("simplescenarios", "validation", "sensitivity.local", "sensitivity.global")
nrt <- length(runtypes)
for(i in 1:nrt){
for(j in 1:(nsms[i])){
print.noquote(paste(i," ",j," ",date()))
## ######################################
## Generate a random seed
seed.meta <- round(runif(1,0.5,100000+0.5))
## ######################################
## Generate a set of valid inputs
validinputs <- nepva.siminputs.valid(runtype = runtypes[i], seed.meta = seed.meta)
## ######################################
## Check whether the tool runs successfully for these valid inputs
tmp <- testrun.nepva(inputs = validinputs, runtype = runtypes[i])
tmp$runtype <- runtypes[i]
tmp$seed.meta <- seed.meta
tmp$ought.valid <- TRUE
tmp$ought.errtype <- 0
if(test.invalid){
## ######################################
## Perturb valid inputs in order to generate multiple set of invalid inputs
invalidinputs <- nepva.siminputs.invalid(validinputs)
ninv <- length(invalidinputs)
## ######################################
## Check whether tool crashes, and produces appropriate error messages, for each set of
## invalid inputs
for(k in 1:ninv){
new <- testrun.nepva(inputs = invalidinputs[[k]], runtype = runtypes[i])
new$runtype <- runtypes[i]
new$seed.meta <- seed.meta
new$ought.valid <- FALSE
new$ought.errtype <- k
tmp <- rbind(tmp, new)
}
## ######################################
}
tmp <- tmp[,c(4:7,1:3)]
out <- rbind(out, tmp)
}
}
out$errmess <- factor(out$errmess) ## moved Version 4.8
levels(out$errmess) <- gsub(",",";",levels(out$errmess)) ## moved Version 4.8
out
}
## ###############################################################################################
## Functions to check validity of outputs from a single run of the NE PVA tool:
testrun.nepva <- function(inputs, runtype){
obj <- ftry(fn = nepva.batchmode, inputs = inputs, runtype = runtype)
runcheck.nepva(obj, inputs = inputs, runtype = runtype)
}
runcheck.nepva <- function(obj, inputs, runtype){
tmp <- get.errmess(obj)
if(is.na(tmp)){
check <- check.outputs(obj, inputs = inputs, runtype = runtype)
}
else{
check <- FALSE
}
data.frame(check.noerrors = is.na(tmp), check.validoutput = check, errmess = tmp)
}
## ###############################################################################################
check.outputs <- function(obj, inputs, runtype){
if(runtype == "simplescenarios"){
check <- check.simplescenarios(obj, inputs = inputs)
}
if(runtype == "validation"){
check <- check.validation(obj, inputs = inputs)
}
if(runtype == "sensitivity.local"){
check <- check.sensitivity.local(obj, inputs = inputs)
}
if(runtype == "sensitivity.global"){
check <- check.sensitivity.global(obj, inputs = inputs)
}
check
}
## ###############################################################################################
check.simplescenarios <- function(out, inputs){
## #########################
lims.popsize <- c(0, 1e+20)
## #########################
if(inputs$output.raw){
ys <- min(inputs$inipop.years):inputs$output.year.end
check1 <- all(out$raw$years == (ys))
check2 <- all(dim(out$raw$nbyage) == c(inputs$nscen + 1, inputs$npop, length(ys), inputs$sim.n, inputs$afb + 1))
check3 <- (all(! is.na(out$raw$nbyage))) & (min(out$raw$nbyage, na.rm=TRUE) >= lims.popsize[1]) & (max(out$raw$nbyage, na.rm=TRUE) <= lims.popsize[2])
check4 <- check.metricstab(tab = out$tab, inputs = inputs)
check <- check1 & check2 & check3 & check4
}
else{
check <- check.metricstab(tab = out, inputs = inputs)
}
## #########################
check
}
## ###############################################################################################
check.validation <- function(out, inputs){
ny <- inputs$output.year.end - min(inputs$inipop.years) + 1
check <- (ny == nrow(out)) & check.metricsvals(metricstab = out, globalsens = FALSE)
check
}
## ###############################################################################################
## Validate the output of a "sensitivity.local" run.
##
## Checks: number of rows, presence of every varied parameter name among
## the non-"standard" levels of 'parname', range of the percentage-change
## columns, the sensitivity input values, and the generic metrics values.
check.sensitivity.local <- function(out, inputs){
  ## #########################
  lims.ppcc <- c(-100, 1000)  # allowed range for the "pcchange" columns
  ## #########################
  pnames <- c("demobase.prod.mean", "demobase.survadult.mean", "impact.prod.mean", "impact.survadult.mean", "inipop.vals")
  mma <- which(colnames(out) == "parname")
  mmb <- match(paste("pcchange", pnames, sep="."), colnames(out))
  mmc <- match(pnames, colnames(out))
  check1 <- (nrow(out) == 1 + inputs$sens.npvlocal*10)
  check2 <- all(! is.na(match(pnames, levels(out$parname)[levels(out$parname) != "standard"])))
  ## Bug fix: the closing parenthesis was misplaced, so the upper bound was
  ## "max(out[,mmb] <= lims.ppcc[2])" -- the max of a logical vector, TRUE
  ## whenever *any* value was in range -- instead of bounding the maximum
  ## value itself.
  check3 <- (min(out[,mmb]) >= lims.ppcc[1]) & (max(out[,mmb]) <= lims.ppcc[2])
  check4 <- check.sensinputs(out, mbs = inputs$mbs)
  check5 <- check.metricsvals(out[,-c(mma, mmb, mmc)], globalsens = FALSE)
  check1 & check2 & check3 & check4 & check5
}
## ###############################################################################################
## Validate the output of a "sensitivity.global" run: table and
## decomposition sizes, sensitivity input values, metrics values (on the
## metric columns only) and the global decomposition table.
check.sensitivity.global <- function(out, inputs){
  varied.pars <- c("demobase.prod.mean", "demobase.survadult.mean",
                   "impact.prod.mean", "impact.survadult.mean", "inipop.vals")
  ## metric columns = every column that is not one of the varied parameters
  metric.cols <- is.na(match(colnames(out$tab), varied.pars))
  ok.tabrows <- (nrow(out$tab) == inputs$sens.npvglobal)
  ok.decrows <- (nrow(out$decomposition) == length(inputs$sens.pcr))
  ok.inputs <- check.sensinputs(out$tab, mbs = inputs$mbs)
  ok.metrics <- check.metricsvals(out$tab[, metric.cols], globalsens = TRUE)
  ok.decomp <- check.globaldecomp(out$decomposition)
  ok.tabrows & ok.decrows & ok.inputs & ok.metrics & ok.decomp
}
## ###############################################################################################
## Check that a metrics table has the expected number of rows --
## (scenarios) x (age classes) x (years) -- and valid metric values.
## Age classes only multiply the row count when output is age-separated.
check.metricstab <- function(tab, inputs){
  n.scen <- inputs$nscen + 1
  n.age <- (inputs$afb + 1)^(inputs$output.agetype == "age.separated")
  n.year <- inputs$output.year.end - inputs$output.year.start + 1
  rows.ok <- (nrow(tab) == n.scen * n.age * n.year)
  rows.ok & check.metricsvals(metricstab = tab, globalsens = FALSE)
}
## ###############################################################################################
## Check the column names and value ranges of a metrics table.
##
## When 'globalsens' is TRUE the table contains only the population size
## and growth rate columns; otherwise the full set of output columns is
## expected, and the growth/impact metric columns must be NA exactly on
## rows without a recorded impact year.
check.metricsvals <- function(metricstab, globalsens = FALSE){
  ## ############################################
  lims.b <- c(0, 1e+20)     # population size columns
  lims.c <- c(-100, 1e+06)  # growth rate / impact metric columns
  lims.e <- c(0, 100)       # metrics m3-m6
  ## ############################################
  qs <- c(1, 2.5, 5, 10, 20, 25, 33, 66, 75, 80, 90, 95, 97.5, 99)
  cna <- c("Year", "Age", "Scenario", "Baseyear", "Currently.Impacted", "Impact.year")
  cnb <- paste("popsize", c("mean", "sd", "median", paste0("q", qs, "%")), sep=".")
  cnc <- c(paste(rep(c("pgr", "agr"), 1, each = 5),
                 rep(c("median", "mean", "sd", "cilo", "cihi"), 2), sep="."))
  cnd <- c(paste(rep(c("ppc", "m1", "m2"), 1, each = 5),
                 rep(c("median", "mean", "sd", "cilo", "cihi"), 3), sep="."))
  cne <- paste0("m", 3:6)
  if(globalsens){
    check <- length(colnames(metricstab)) == length(c(cnb,cnc))
    if(check){
      check <- all(colnames(metricstab) == c(cnb, cnc))
      check <- check & all(metricstab[,cnb] >= lims.b[1]) & all(metricstab[,cnb] <= lims.b[2])
      check <- check & all(min(metricstab[,cnc], na.rm=TRUE) >= lims.c[1]) & all(max(metricstab[,cnc], na.rm=TRUE) <= lims.c[2])
    }
  }
  else{
    ## rows with a recorded impact year carry the growth/impact metrics
    active <- (! is.na(metricstab$Impact.year))
    cn <- c(cna, cnb, cnc, cnd, cne)
    vc1 <- metricstab[! active, c(cnc, cnd, cne)]
    vc2 <- metricstab[active, c(cnc, cnd, cne)]
    check1 <- all(colnames(metricstab) == cn)
    ## metrics must be missing on inactive rows, present on active rows,
    ## and population sizes must always be present
    check2 <- all(is.na(vc1)) & all(! is.na(vc2)) & all(! is.na(metricstab[,cnb]))
    ## use the declared limits instead of repeating the magic numbers
    check3 <- all(metricstab[,cnb] >= lims.b[1]) & all(metricstab[,cnb] <= lims.b[2])
    if(all(is.na(metricstab[,c(cnc, cnd)]))){
      check4 <- TRUE
    }
    else{
      check4 <- (min(metricstab[,c(cnc, cnd)], na.rm=TRUE) >= lims.c[1]) & (max(metricstab[,c(cnc, cnd)], na.rm=TRUE) <= lims.c[2])
    }
    if(all(is.na(metricstab[,cne]))){
      check5 <- TRUE
    }
    else{
      ## Bug fix: the bounds were parenthesised inside min()/max(), i.e.
      ## "min(metricstab[,cne] >= lims.e[1], na.rm=TRUE)", which takes the
      ## min/max of a *logical* vector instead of comparing the extreme
      ## metric values against the limits (so out-of-range values passed).
      check5 <- (min(metricstab[,cne], na.rm=TRUE) >= lims.e[1]) & (max(metricstab[,cne], na.rm=TRUE) <= lims.e[2])
    }
    check <- check1 & check2 & check3 & check4 & check5
  }
  ## ############################################
  check
}
## ###############################################################################################
## Check that the sensitivity-varied input parameters all lie strictly
## within their permitted ranges.
##
## @param out Data frame containing one column per varied parameter.
## @param mbs Upper bound on baseline productivity (e.g. maximum brood size).
## @return TRUE if every parameter column is strictly inside its bounds.
check.sensinputs <- function(out, mbs){
  pnames <- c("demobase.prod.mean", "demobase.survadult.mean", "impact.prod.mean", "impact.survadult.mean", "inipop.vals")
  ## ############################################
  pvmin <- c(0, 0, -0.5, -0.5, 1)       # strict lower bounds
  pvmax <- c(mbs, 1, 0.5, 0.5, 1e+06)   # strict upper bounds
  ## ############################################
  tmp <- out[, pnames]
  ## seq_along() is safe for zero-length vectors, unlike 1:length(x);
  ## vapply() guarantees a logical result per column
  ok <- vapply(seq_along(pnames),
               function(k) (min(tmp[, k]) > pvmin[k]) && (max(tmp[, k]) < pvmax[k]),
               logical(1))
  all(ok)
}
## ###############################################################################################
## Check the dimensions and value range of the global sensitivity
## decomposition table: one row per varied parameter, with first-order
## (FOI) and total-effect (TEI) columns for each population size and
## growth rate metric.
check.globaldecomp <- function(tmp){
  ## ############################################
  lims.gd <- c(-1e+20, 1e+20)
  ## ############################################
  pnames <- c("demobase.prod.mean", "demobase.survadult.mean", "impact.prod.mean", "impact.survadult.mean", "inipop.vals")
  qs <- c(1, 2.5, 5, 10, 20, 25, 33, 66, 75, 80, 90, 95, 97.5, 99)
  popsize.cols <- paste("popsize", c("mean", "sd", "median", paste0("q", qs, "%")), sep=".")
  growth.cols <- paste(rep(c("pgr", "agr"), 1, each = 5),
                       rep(c("median", "mean", "sd", "cilo", "cihi"), 2), sep=".")
  growth.cols <- growth.cols[1:8] ## !!!!! FUDGE !!!!!!!!
  base.cols <- c(popsize.cols, growth.cols)
  all.cols <- paste(rep(c("FOI", "TEI"), each = length(base.cols)),
                    rep(base.cols, 2), sep=".")
  dims.ok <- all(dim(tmp) == c(length(pnames), length(all.cols)))
  range.ok <- all((tmp >= lims.gd[1]) & (tmp <= lims.gd[2]))
  dims.ok & range.ok
}
## ###############################################################################################
## BLOCK 2. Functions to compare outputs from multiple runs of the NE PVA tool, to
## assess internal consistency
##
## Version 4.8: added "full" argument
## ###############################################################################################
## Run repeated internal-consistency checks of the NE PVA tool for the
## "simplescenarios" run type. For each of 'nsm' replicates: draw a random
## meta-seed, simulate a consistent family of input sets, and -- if the
## first (full) input set yields valid output -- compare the output of
## every input combination in 'allinputs$mstruc' against the first one via
## testcomp.nepva(). Returns a data frame with one row per
## (replicate, combination).
##
## @param nsm Number of replicates with valid output to accumulate.
## @param sim.n Number of simulations per run (forwarded to the input simulator).
## @param full Forwarded to nepva.siminputs.consistent() (Version 4.8).
autocheck.consistency.nepva <- function(nsm, sim.n, full=TRUE){
  ## NOTE: this part is only for "simplescenarios"
  ## Created 29 September 2019, last modified 29 September 2019
  ## 17 November 2019: added "sim.n" argument
  ## NOTE(review): "simplescenario" (no trailing "s") differs from the
  ## runtype string passed to testrun.nepva below; it is only stored in the
  ## output table, but confirm the spelling is intended.
  runtype <- "simplescenario"
  out <- NULL
  j <- 1
  ## j only advances when a replicate produces valid output, so the loop
  ## keeps drawing fresh meta-seeds until nsm valid replicates are found
  while(j <= nsm){ ## Version 4.7: changed from "for" to "while"
    seed.meta <- round(runif(1,0.5,100000+0.5))
    allinputs <- nepva.siminputs.consistent(seed.meta = seed.meta, sim.n = sim.n, full=full) ## 17 November 2019: added "sim.n" argument
    ## Version 4.8 - added "full" argument
    inputs <- allinputs$inputlist
    zout <- testrun.nepva(inputs = inputs[[1]], runtype = "simplescenarios")
    if(zout$check.validoutput){ ## v4.7: now only run consistency check if the first (full) inputs were valid
      ## for(k in 1:128){ print.noquote(paste(k, testrun.nepva(inputs = inputs[[k]], runtype = "simplescenarios"),collapse="-")) }
      ## browser()
      ncombi <- nrow(allinputs$mstruc)
      new <- NULL
      for(k in 1:ncombi){
        ## progress report: replicate index, combination index, timestamp
        print.noquote(paste(j," ",k," ",date()))
        ## Note: this is not the most efficient way to do this,
        ## as running of "inputs.1" is repeated multiple times...
        tmp <- testcomp.nepva(inputs.1 = inputs[[1]], inputs.2 = inputs[[k]], runtype = "simplescenarios")
        tmp$combi <- k
        new <- rbind(new, tmp)
      }
      ## replicate-level identifiers, then input structure, then comparisons
      outij <- data.frame(runtype = rep(runtype, ncombi),
                          seed.meta = rep(seed.meta, ncombi),
                          combi = 1:ncombi)
      outij <- cbind(outij, allinputs$mstruc)
      outij <- cbind(outij, new)
      out <- rbind(out, outij)
      j <- j + 1
    }
    else{ ## v4.7: if invalid inputs, print error message
      print.noquote(as.character(zout$errmess))
    }
  }
  out
}
## ###############################################################################################
## Compare the results obtained by running the NE PVA tool twice
## Rewritten 29 September 2019 to simplify functionality
##
## Note: only designed to work with "runtype = 'simplescenarios'"
## ###############################################################################################
## Compare the results obtained by running the NE PVA tool on two input
## specifications that should be equivalent.
##
## Returns a one-row data frame with the validity checks for both runs
## and, when both runs are valid and structurally identical, summary
## statistics of the output (tmed/tsdd) and of the absolute (amax/amed)
## and relative (rmax/rmed) differences between the raw population arrays.
## Note: only designed to work with runtype = "simplescenarios".
testcomp.nepva <- function(inputs.1, inputs.2, runtype){
  if(runtype == "simplescenarios"){
    ## raw output is required so the full population arrays can be compared
    inputs.1$output.raw <- TRUE
    inputs.2$output.raw <- TRUE
  }
  obj1 <- ftry(fn = nepva.batchmode, inputs = inputs.1, runtype = runtype)
  obj2 <- ftry(fn = nepva.batchmode, inputs = inputs.2, runtype = runtype)
  chk1 <- runcheck.nepva(obj1, inputs = inputs.1, runtype = runtype)
  chk2 <- runcheck.nepva(obj2, inputs = inputs.2, runtype = runtype)
  ## suffix the check columns so the two runs can sit side by side
  colnames(chk1) <- paste0(colnames(chk1), 1)
  colnames(chk2) <- paste0(colnames(chk2), 2)
  chk <- cbind(chk1, chk2)
  if(chk$check.validoutput1 & chk$check.validoutput2){
    ## Note: "CompareStructure" checks whether two objects have identical dimensions &
    ## structure, but does **not** check names (which we would ideally also do...)
    chk$samestruc <- CompareStructure(obj1, obj2)
    if(chk$samestruc){
      if(runtype == "simplescenarios"){
        adiff <- obj2$raw$nbyage - obj1$raw$nbyage
        denom <- obj1$raw$nbyage
        rdiff <- adiff / denom
        chk$tmed <- median(obj2$raw$nbyage) ## added Version 4.8
        ## Bug fix: "nybage" was a typo for "nbyage" -- the misspelt list
        ## element does not exist, so sd() was being applied to NULL.
        chk$tsdd <- sd(obj2$raw$nbyage)
        chk$amax <- max(abs(adiff))
        chk$amed <- median(abs(adiff))
        ## relative differences are only defined where the denominator is positive
        chk$rmax <- max(abs(rdiff[denom > 0]))
        chk$rmed <- median(abs(rdiff[denom > 0]))
      }
      ## chk <- cbind(chk, new)
    }
  }
  else{
    ## at least one run failed validation: comparison statistics undefined
    chk$samestruc <- FALSE
    chk$tmed <- NA
    chk$tsdd <- NA
    chk$amax <- NA
    chk$amed <- NA
    chk$rmax <- NA
    chk$rmed <- NA
  }
  chk
}
## ###############################################################################################
## BLOCK 3. Utility functions
## ###############################################################################################
ftry <- function(fn,...){ try(fn(...), silent = TRUE) }
## ###############################################################################################
## A function to extract the error message that has been created by running a function using "try"
## -- output will be missing (NA) if the function ran successfully, without producing an error message,
## and will otherwise be a character string containing the error message
## Extract the error message from an object produced by running a function
## with try(): returns NA when the call succeeded, otherwise the condition
## text with the "<simple" marker and all newlines stripped out.
get.errmess <- function(z){
  if(! inherits(z, "try-error")){
    return(NA)
  }
  msg <- as.character(attr(z, "condition"))
  msg <- gsub("<simple", "", msg)
  gsub("\\n", "", msg)
}
## ###############################################################################################
## Compare the *structure* of two R objects.
##
## For lists the comparison is recursive: each leaf is annotated with its
## mode and length, and the nested annotations are compared pairwise. For
## non-list objects the class vectors must match, and then the dimensions
## (or the length, when dimensions are NULL) are compared. Element names
## are NOT compared.
##
## @param x,y Objects to compare.
## @return TRUE if x and y have the same structure, FALSE otherwise.
CompareStructure <- function(x, y) {
  # recursively compare two nested structure annotations, pairwise
  TypeCompare <- function(xSTR, ySTR) {
    if (length(xSTR) != length(ySTR)) {
      return(FALSE)
    }
    all(mapply(
      xSTR,
      ySTR,
      FUN = function(xValue, yValue) {
        if (is.list(xValue) && is.list(yValue)) {
          # both sub-lists: recurse
          all(TypeCompare(xValue, yValue))
        } else if (is.list(xValue) == is.list(yValue)) {
          # both leaf annotations: compare exactly
          identical(xValue, yValue)
        } else {
          # one is a list and the other is not
          FALSE
        }
      }
    ))
  }
  if (is.list(x) && is.list(y)) {
    # annotate every leaf of a list with its mode and length
    annotate <- function(obj) {
      rapply(
        obj,
        f = function(values) c(mode(values), length(values)),
        how = "list"
      )
    }
    return(TypeCompare(annotate(x), annotate(y)))
  }
  # Bug fix: class() can return a vector of length > 1 (e.g. a matrix has
  # class c("matrix", "array")), and `if (class(x) != class(y))` is an
  # error in R >= 4.2 when the condition has length > 1; compare the full
  # class vectors with identical() instead.
  if (!identical(class(x), class(y))) {
    return(FALSE)
  }
  # dimensions of the object, falling back to its length when dim is NULL
  dims_or_length <- function(obj) {
    d <- dim(obj)
    if (is.null(d)) length(obj) else d
  }
  TypeCompare(dims_or_length(x), dims_or_length(y))
}
## ###############################################################################################
## Added 13 January 2020 - utility functions need for Kate to run manual testing of outputs against Shiny
## Generate input lists, and where possible output CSV files, associated with a set of input specification:
## Generate input lists from a table of input specifications, run the NE
## PVA tool on each, and save under 'outpath': the specifications
## (inputspecs.csv), each successful output table (outputsK.csv) and the
## full list of inputs (inputslist.RData).
## Added 13 January 2020 for manual testing of outputs against Shiny.
nepva.save.and.run.valid <- function(inputspecs, outpath){
  write.csv(inputspecs, file = paste0(outpath, "inputspecs.csv"), quote=FALSE, row.names=FALSE)
  inputslist <- as.list(NULL)
  ## seq_len() is safe when inputspecs has zero rows (1:nrow would give 1:0)
  for(k in seq_len(nrow(inputspecs))){
    inputs <- nepva.siminputs.valid(runtype = inputspecs$runtype[k], seed.meta = inputspecs$seed.meta[k])
    inputslist[[k]] <- inputs
    out <- ftry(fn = nepva.batchmode, inputs = inputs, runtype = inputspecs$runtype[k])
    if(! inherits(out, "try-error")){
      if(is.null(out$tab)){ ## Clause added Version 4.12
        tab <- out
      }
      else{
        tab <- out$tab
      }
      write.csv(tab, file = paste0(outpath, "outputs", k, ".csv"), quote=FALSE, row.names=FALSE)
    }
  }
  ## Bug fix: paste() (default sep = " ") inserted a space into the saved
  ## file name; paste0() matches the convention used for the CSVs above.
  save(inputslist, file = paste0(outpath, "inputslist.RData"))
  NULL
}
## Simplify error statuses, where evaluating performance:
## Collapse the detailed error messages recorded for PVA runs into a small
## set of status categories: "run.full" (valid output), "run.partial" (ran
## without error but output invalid), "error.e1"/"error.e2"/"error.e3"
## (known failure modes) and "error.other" (anything else).
fixstatus.pva <- function(out){
  msg.overflow <- "Error in leslie.update(demobase.ests = demobase.ests[j; ; ]; nbyage.prev = nbyage.prev; : Population size explosion - will lead to numerical overflow"
  msg.burnin <- "Error in inits.burned(nbyage.burned = nbyage.burned; inipop.totals = inipop.totals): Error! Zero values during burn-in..."
  msg.badprod <- "Error in leslie.update(demobase.ests = demobase.ests[j; ; ]; nbyage.prev = nbyage.prev; : Invalid productivity rates simulated!"
  msg.badsurv <- "Error in leslie.update(demobase.ests = demobase.ests[j; ; ]; nbyage.prev = nbyage.prev; : Invalid survival probabilities simulated!"
  status.levels <- c("run.full", "run.partial", "error.e1", "error.e2", "error.e3", "error.other")
  ## default every row to "error.other", then refine
  out$status <- factor("error.other", levels = status.levels)
  out$status[out$check.validoutput] <- "run.full"
  out$status[out$check.noerrors & (! out$check.validoutput)] <- "run.partial"
  failed <- (! out$check.noerrors)
  out$status[failed & out$errmess == msg.overflow] <- "error.e1"
  out$status[failed & out$errmess == msg.burnin] <- "error.e2"
  out$status[failed & out$errmess == msg.badprod] <- "error.e3"
  out$status[failed & out$errmess == msg.badsurv] <- "error.e3"
  out
}
## Select a random subset of valid inputs (of each type), to use for manual checking:
## Select a random subset of valid input specifications, stratified by run
## type and error status, for manual checking.
##
## @param inputspecs Data frame with 'runtype' and 'status' columns.
## @param nman Matrix of subset sizes: rows index the error types, columns
##   index the run types.
## @param seed.subset RNG seed, for reproducible subsets.
## @return The selected rows of 'inputspecs', renumbered from 1.
goodsubset.inputs.valid <- function(inputspecs, nman, seed.subset){
  set.seed(seed.subset)
  man.runtypes <- c("simplescenarios", "validation", "sensitivity.local")
  man.errtypes <- c("run.full", "run.partial", "error.e1", "error.e2", "error.e3")
  mm <- NULL
  for(i in seq_along(man.runtypes)){
    for(j in seq_along(man.errtypes)){
      ox <- (inputspecs$runtype == man.runtypes[i] & inputspecs$status == man.errtypes[j])
      if(any(ox)){
        candidates <- which(ox)
        ## Bug fix: sample(x, size) with a length-one numeric x samples from
        ## 1:x (see ?sample), so a single matching row with index > 1 could
        ## select unrelated rows; sample.int over the candidate positions is
        ## safe for any number of matches.
        mm <- c(mm, candidates[sample.int(length(candidates), size = nman[j,i])])
      }
    }
  }
  new <- inputspecs[mm,]
  ## seq_len() is safe when no rows were selected (1:nrow would give 1:0)
  row.names(new) <- seq_len(nrow(new))
  new
}
## ###############################################################################################
|
e222b18e602828390598e85def7d009899c7c529 | 9201c9925feb97530455dcff559e5de0be356c79 | /plot3.R | 3450731b2773bb0d54643ebf8d1c574077e2e3b8 | [] | no_license | henrique1837/Exploratory-Data-Analysis | 151b3a60bb83c1d5f70bae48bcde6cea4ffe6603 | 9aa0a5f24b6be67e534bb11f95053b045c606221 | refs/heads/master | 2021-01-10T10:39:34.257586 | 2016-01-10T21:23:06 | 2016-01-10T21:23:06 | 49,384,018 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,040 | r | plot3.R | #Read Data downloaded
arquivo <- './exR/household_power_consumption.txt'
dados <- read.table(arquivo, header=FALSE, sep=';', skip='1')
#Set colum names
colunas <- readLines(arquivo,1)
colunas <- strsplit(colunas,';',fixed=TRUE)
names(dados) <- colunas[[1]]
#Read only Dates between 1/2/2007 and 2/2/2007 and formating Date and Time
dados2 <- dados[dados$Date %in% c('1/2/2007','2/2/2007'),]
#Formating
dados2$DateTime <- strptime(paste(dados2$Date, dados2$Time), '%d/%m/%Y %H:%M:%S')
#Constructing
plot(dados2$DateTime, dados2$Sub_metering_1, type = 'l', xlab = '', ylab = 'Energy sub metering')
points(dados2$DateTime, dados2$Sub_metering_2, type = 'l', xlab = '', ylab = 'Energy sub metering', col = 'red')
points(dados2$DateTime, dados2$Sub_metering_3, type = 'l', xlab = '', ylab = 'Energy sub metering',col = 'blue')
legend('topright', lty = 1, col = c('black', 'red', 'blue'), legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
#Saving file as PNG
dev.copy(png, file='plot3.png', height=480, width=480)
dev.off() |
04781df8211cf29169d1ef4264fcbc93c37a65c9 | e20a0d8db429b9f6905ec962baec713a6a760531 | /R/.Rprofile | 5f64e9bc44fcb86fb10bc37a449ddfb718215792 | [] | no_license | TheSeaGiraffe/dotfiles | e6df811982531eb8ec690665d6f942fc82886f20 | 0a9b0e1a2c3951f078f887c7d0f97af71dec6a88 | refs/heads/master | 2021-04-15T07:55:58.966207 | 2017-10-25T07:32:12 | 2017-10-25T07:32:12 | 126,684,712 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,704 | rprofile | .Rprofile | # Include local R library in library path
.libPaths(c('~/.Rlibs', .libPaths()))
#Load packages
#library(pacman)
#p_load(ggplot2, gridExtra, grid, reshape2, dplyr, utils)
#library(utils)
# Set install.packages to install to ~/.Rilbs
options(lib='~/.Rlibs')
# Don't automatically convert strings to factors
options(stringsAsFactors = F)
# Set R terminal width to 80
options(width = 80)
# Overwrite 'quit' function to disable save prompt upon quitting
# Figure out exactly what this does
assignInNamespace(
'q',
function(save = 'no', status = 0, runLast = T)
{
.Internal(quit(save, status, runLast))
},
'base'
)
# Set CRAN mirror
local({
r <- getOption('repos')
r['CRAN'] <- 'https://cran.csiro.au'
options(repos = r)
})
# Create a new invisible environment for your personal functions to go in
.myFuns <- new.env()
# Single character shortcuts for summary() and head()
.myFuns$s <- base::summary
.myFuns$h <- utils::head
# Function for a pager like less
.myFuns$less <- function(x) {
file <- tempfile()
sink(file)
on.exit(sink())
print(x)
file.show(file, delete.file = T)
}
# Breaks for ggplot2 histograms
# Sturges
.myFuns$stBreaks <- function(dataVar) {
dataVar <- na.omit(dataVar)
pretty(range(dataVar), n = nclass.Sturges(dataVar), min.n = 1)
}
# Freedman-Diaconis
.myFuns$fdBreaks <- function(dataVar) {
dataVar <- na.omit(dataVar)
pretty(range(dataVar), n = nclass.FD(dataVar), min.n = 1)
}
# Attach the environment above
attach(.myFuns)
# .First() run at the start of every R session
#.First <- function() {
#cat('Successfully loaded .Rprofile at', date(), '\n')
#}
# .Last() run at the end of the session
#.Last <- function() {
#cat('\nGoodbye at', date(), '\n')
#}
|
486ad12bbbb611e6228447642339a502fc9509d9 | d81656c6ee1d317722ef878ac3c8184825b342e3 | /R/clustView.ui.R | cc7d6dbc696e7dc70cad84e4026482157bb71f51 | [] | no_license | plger/clustView | d9c5540320716f0eefb4b667bf67d0fb30fe7835 | 3addd521ceaf57ec0cf4125d1ae107c3a714a464 | refs/heads/master | 2020-04-08T15:55:19.505780 | 2018-12-18T11:33:04 | 2018-12-18T11:33:04 | 159,497,315 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,181 | r | clustView.ui.R | #' clustView.ui
#'
#' UI function of `clustView`.
#'
#' @export
clustView.ui <- function(){
library(shiny)
library(DT)
library(shinycssloaders)
library(shinydashboard)
dashboardPage(
dashboardHeader(title="clustViewer"),
dashboardSidebar(
sidebarMenu(id="tabs",
menuItem("Clustree", tabName="tree"),
menuItem("Clusters overview", tabName="overview"),
menuItem("Cluster details", tabName="details"),
menuItem("Download", tabName="download")
),
selectInput('prefix', 'clustering', choices=c(), selectize=T),
selectInput('resolution', 'resolution', choices=c(), selectize=T),
selectInput('space', 'space', choices=c(), selectize=T)
),
dashboardBody(
tags$head(tags$style(type="text/css", '
.inlineDiv label { display: table-cell; vertical-align: middle; }
.inlineDiv .form-group { display: table-row; }
')),
tabItems(
tabItem("tree",
box(width=12,
tags$div(style="font-weight: bold;", textOutput('clustree_msg')),
withSpinner(plotOutput("clustree", height='600px', click="clustree_click"))
)
),
tabItem("overview",
box(width=12,
withSpinner(plotOutput('tsne_overview', height='700px', click="overviewPlot_click"))
)
),
tabItem("details",
fluidRow(
column( width=6, div(class="inlineDiv", selectInput('cluster', ' Cluster ', choices=c(), selectize=F) ) ),
column( width=6,
div(style="display: inline-block; vertical-align:top; width: 250px;", textInput('newname', label=NULL, placeholder = 'Enter new name')),
div(style="display: inline-block; vertical-align:top;", actionButton('save_newname', 'Rename cluster') ),
tags$p(textOutput('rename_msg'))
)
),
box( withSpinner(plotOutput('tsne_detail', click="detailPlot_click")) ),
box( title = "markers", solidHeader=T, collapsible=T,
div(style = 'height: 420px; overflow-x: scroll', tableOutput('markers') )
),
box( title = "Prediction from dataset A" ), # not yet implemented
box( title = "Prediction from dataset B" ),
uiOutput("go_ui"),
div(style="clear: both;")
),
tabItem("download",
box( tags$p("Download the Seurat object (with eventual modifications) in RDS format."),
downloadButton("downloadRDS", "Download RDS") )
)
) # end tabItems
)
)
} |
19167571fe2878f64ae7d892e1b65e21eaf41b0a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/reconstructr/examples/time_on_page.Rd.R | f6f350a9dc6b4175cec47db8b8d84470864aadda | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 500 | r | time_on_page.Rd.R | library(reconstructr)
### Name: time_on_page
### Title: Calculate time-on-page metrics
### Aliases: time_on_page
### ** Examples
#Load and sessionise the dataset
data("session_dataset")
sessions <- sessionise(session_dataset, timestamp, uuid)
# Calculate overall time on page
top <- time_on_page(sessions)
# Calculate time-on-page on a per_session basis
per_session <- time_on_page(sessions, by_session = TRUE)
# Use median instead of mean
top_med <- time_on_page(sessions, median = TRUE)
|
3702b2b731d85ce2609bd5059c0f71ded595c14c | ed2e3edc0b4cbf668cef83d1caab49a842ab3595 | /man/groc.Rd | c03b1cb1556019adf7c1034c6f7277b4d8b4fbb8 | [] | no_license | cran/groc | 9ed7bd0b43120ef08fabe0412d2b0f4c7a939ea6 | bd7e2c009b82e964f1d4aa28159e3f87ef9eed37 | refs/heads/master | 2021-01-25T08:28:37.222742 | 2020-12-18T06:30:16 | 2020-12-18T06:30:16 | 17,696,513 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,414 | rd | groc.Rd | \name{groc}
\encoding{utf8}
\alias{groc}
\alias{groc.default}
\title{groc method}
\description{
Generalized regression on orthogonal components.
}
\usage{
\method{groc}{default}(formula, ncomp, data, subset, na.action, plsrob =
FALSE, method = c("lm", "lo", "s", "lts"), D = NULL,
gamma = 0.75, Nc = 10, Ng = 20, scale = FALSE, Cpp =
TRUE, model = TRUE, x = FALSE, y = FALSE, sp = NULL, ...)
groc(\dots)
}
\arguments{
\item{formula}{a model formula. Most of the \code{lm} formula
constructs are supported. See below.}
\item{ncomp}{the number of components (orthogonal components) to include in the model.}
\item{data}{an optional data frame with the data to fit the model from.}
\item{subset}{an optional vector specifying a subset of observations
to be used in the fitting process.}
\item{na.action}{a function which indicates what should happen when
the data contain missing values.}
\item{plsrob}{logical. If \code{TRUE}, we use the \code{D=covrob} measure of
dependence with the least trimmed squares method="lts".}
\item{method}{character giving the name of the method to use. The
user can supply his own function. The methods available are linear
models, "lm", local polynomials, "lo", smoothing splines, "s", and least trimmed squares, "lts".}
\item{D}{function with two arguments, each one being a vector, which
measures the dependence between two variables using n observations from them. If \code{NULL}, the covariance measure will be used. The user can supply his own function.}
\item{gamma}{parameter used with the option \code{plsrob=TRUE}. It defines the quantile used to compute the "lts" regression. The default \code{gamma=0.75} gives a breakdown of 25\% for a good compromise between robustness and efficiency. The value \code{gamma=0.5} gives the maximal breakdown of 50\%.}
\item{Nc}{Integer, Number of cycles in the grid algorithm.}
\item{Ng}{Integer, Number of points for the grid in the grid algorithm.}
\item{scale}{Logical, Should we scale the data.}
  \item{Cpp}{Logical, if \code{TRUE} this function will use a C++
    implementation of the grid algorithm. The value \code{FALSE} should not
    normally be used, except to gain a better understanding of the grid
    algorithm or to compare the speed of computation between the R and C++
    implementations of this algorithm.}
\item{model}{a logical. If \code{TRUE}, the model frame is returned.}
\item{x}{a logical. If \code{TRUE}, the model matrix is returned.}
\item{y}{a logical. If \code{TRUE}, the response is returned.}
  \item{sp}{A vector of smoothing parameters can be provided here.
    Smoothing parameters must be supplied in the order in which the smooth
    terms appear in the model formula. Negative elements indicate that the
    parameter should be estimated, and hence a mixture of fixed and
    estimated parameters is possible. \code{length(sp)} should be equal to
    \code{ncomp}, the number of underlying smoothing parameters.
  }
\item{\dots}{further arguments to be passed to or from methods.}
}
%\details{
%TODO
%}
\value{
\item{Y}{vector or matrix of responses.}
\item{fitted.values}{an array of fitted values.}
\item{residuals}{residuals}
\item{T}{a matrix of orthogonal components (scores). Each column corresponds to a component.}
\item{R}{a matrix of directions (loadings). Each column is a direction used to obtain the corresponding component (scores).}
\item{Gobjects}{contain the objects produced by the fit of the responses on the orthogonal components.}
\item{Hobjects}{contain the objects produced by the "lts" fit of each deflated predictors on the orthogonal components. \code{Hobjects} are produced when \code{plsrob=TRUE}.}
\item{B}{matrix of coefficients produced by the "lm" fit of each deflated predictors on the last component. \code{B} is produced when \code{plsrob=FALSE}.}
\item{Xmeans}{a vector of means of the X variables.}
\item{Ymeans}{a vector of means of the Y variables.}
\item{D}{Dependence measure used.}
  \item{V}{a matrix whose columns contain the right singular vectors of
    the data. Computed during preprocessing, when the data are reduced to
    principal component scores because the number of observations is less
    than the number of predictors.}
\item{dnnames}{dimnames of 'fitted.values'}
\item{ncomp}{the number of components used in the modelling.}
\item{method}{the method used.}
\item{scale}{Logical. \code{TRUE} if the responses have been scaled.}
\item{call}{the function call.}
\item{terms}{the model terms.}
\item{plsrob}{Logical. If \code{plsrob=TRUE}, a robust partial least squares fit.}
\item{model}{if \code{model=TRUE}, the model frame.}
}
\references{
Martin Bilodeau, Pierre Lafaye de Micheaux, Smail Mahdi (2015), The R
Package groc for Generalized Regression on Orthogonal Components,
\emph{Journal of Statistical Software}, 65(1), 1-29, \cr \url{https://www.jstatsoft.org/v65/i01/}
}
\author{Martin Bilodeau (\email{bilodeau@dms.umontreal.ca}) and Pierre Lafaye de Micheaux (\email{lafaye@unsw.edu.au}) and
Smail Mahdi (\email{smail.mahdi@cavehill.uwi.edu})
}
\examples{
\dontrun{
library(MASS)
########################
# Codes for Example 1 #
########################
require("groc")
data("wood")
out <- groc(y ~ x1 + x2 + x3 + x4 + x5, ncomp = 1, data = wood,
D = corrob, method = "lts")
corrob(wood$y, fitted(out)) ^ 2
plot(out)
########################
# Codes for Example 2 #
########################
data("trees")
out <- groc(Volume ~ Height + Girth, ncomp = 1, D = spearman,
method = "s", data = trees)
cor(trees$Volume, fitted(out)) ^ 2
plot(out$T, trees$Volume, xlab = "First component",
ylab = "Volume", pch = 20)
lines(sort(out$T), fitted(out)[order(out$T)])
out <- boxcox(Volume ~ Height + Girth, data = trees,
lambda = seq(-0.5, 0.5, length = 100), plotit = FALSE)
lambda <- out$x[which.max(out$y)]
out <- lm(Volume ^ lambda ~ Height + Girth, data = trees)
cor(trees$Volume, fitted(out)^(1/lambda)) ^ 2
########################
# Codes for Example 3 #
########################
data("wood")
plsr.out <- plsr(y ~ x1 + x2 + x3 + x4 + x5, data = wood)
groc.out <- groc(y ~ x1 + x2 + x3 + x4 + x5, data = wood)
apply(abs((fitted(plsr.out) - fitted(groc.out)) /
fitted(plsr.out)), 3, max) * 100
########################
# Codes for Example 4 #
########################
set.seed(1)
n <- 200
x1 <- runif(n, -1, 1)
x2 <- runif(n, -1, 1)
y <- x1 * x2 + rnorm(n, 0, sqrt(.04))
data <- data.frame(x1 = x1, x2 = x2, y = y)
plsr.out <- plsr(y ~ x1 + x2, data = data)
groc.out <- groc(y ~ x1 + x2, D = dcov, method = "s", data = data)
plsr.v <- crossval(plsr.out, segment.type = "consecutive")
groc.v <- grocCrossval(groc.out, segment.type = "consecutive")
groc.v$validation$PRESS
plsr.v$validation$PRESS
gam.data <- data.frame(y = y, t1 = groc.out$T[, 1], t2 = groc.out$T[, 2])
gam.out <- gam(y ~ s(t1) + s(t2), data = gam.data)
par(mfrow = c(1, 2))
plot(gam.out)
par(mfrow = c(1, 1))
PRESS <- 0
for(i in 1 : 10){
data.in <- data[-(((i - 1) * 20 + 1) : (i * 20)), ]
data.out <- data[((i - 1) * 20 + 1) : (i * 20), ]
ppr.out <- ppr(y ~ x1 + x2, nterms = 2, optlevel = 3, data = data.in)
PRESS <- PRESS + sum((predict(ppr.out, newdata = data.out)-data.out$y) ^ 2)
}
PRESS
########################
# Codes for Example 5 #
########################
data("yarn")
dim(yarn$NIR)
n <- nrow(yarn)
system.time(plsr.out <- plsr(density ~ NIR, ncomp = n - 2, data = yarn))
system.time(groc.out <- groc(density ~ NIR, Nc = 20, ncomp = n - 2, data = yarn))
max(abs((fitted(plsr.out) - fitted(groc.out)) / fitted(plsr.out))) * 100
plsr.v <- crossval(plsr.out, segments = n, trace = FALSE)
plsr.v$validation$PRESS
groc.v <- grocCrossval(groc.out, segments = n, trace = FALSE)
groc.v$validation$PRESS
groc.v$validation$PREMAD
########################
# Codes for Example 6 #
########################
data("prim7")
prim7.out <- groc(X1 ~ ., ncomp = 3, D = dcov, method = "s", data = prim7)
prim7.out$R
pca <- princomp(~ ., data = as.data.frame(prim7[, -1]))
prim7.pca <- data.frame(X1 = prim7$X1, scores = pca$scores)
prim7.pca.out <- groc(X1 ~ ., ncomp = 3, D = dcov, method = "s",
data = prim7.pca)
pca$loadings %*% prim7.pca.out$R
groc.v <- grocCrossval(prim7.out, segment.type = "consecutive")
groc.v$validation$PRESS
plsr.out <- plsr(X1 ~ ., ncomp = 3, data = prim7)
plsr.v <- crossval(plsr.out, segment.type = "consecutive")
plsr.v$validation$PRESS
PRESS <- 0
for(i in 1 : 10){
data.in <- prim7[-(((i - 1) * 50 + 1) : (i * 50)), ]
data.out <- prim7[((i - 1) * 50 + 1) : (i * 50), ]
ppr.out <- ppr(X1 ~ ., nterms = 3, optlevel = 3, data = data.in)
PRESS <- PRESS + sum((predict(ppr.out, newdata = data.out) - data.out$X1) ^ 2)
}
PRESS
########################
# Codes for Example 7 #
########################
n <- 50 ; B <- 30
mat.cor <- matrix(0, nrow = B, ncol = 3) ; mat.time <- matrix(0, nrow = B, ncol = 3)
for (i in 1:B) {
X <- matrix(runif(n * 5, -1, 1), ncol = 5)
A <- matrix(runif(n * 50, -1, 1), nrow = 5)
y <- (X[,1] + X[,2])^2 + (X[,1] + 5 * X[,2])^2 + rnorm(n)
X <- cbind(X, X %*% A)
D <- data.frame(X = X, y = y)
mat.time[i,1] <- system.time(out1 <- plsr(y ~ X, , ncomp = 2, data = D))[1]
mat.time[i,2] <- system.time(out2 <- ppr(y ~ X, , nterms = 2, data = D))[1]
mat.time[i,3] <- system.time(out3 <- groc(y ~ X, D = dcov, method = "s", ncomp = 2, data = D))[1]
mat.cor[i,] <- cor(y, cbind(fitted(out1)[,,2], fitted(out2), fitted(out3)[,,2]))
}
colMeans(mat.cor)
colMeans(mat.time)
########################
# Codes for Example 8 #
########################
data("oliveoil")
n <- nrow(oliveoil)
plsr.out <- plsr(sensory ~ chemical, data = oliveoil, method = "simpls")
groc.out <- groc(sensory ~ chemical, data = oliveoil)
max(abs((fitted(plsr.out) - fitted(groc.out)) / fitted(plsr.out))) * 100
groc.v <- grocCrossval(groc.out, segments = n)
groc.v$validation$PRESS
colMeans(groc.v$validation$PRESS)
Y <- oliveoil$sensory
for (j in 1 : ncol(Y)) print(cor(Y[, j], fitted(groc.out)[, j, 2]))
########################
# Codes for Example 9 #
########################
require("ppls")
data("cookie")
X <- as.matrix(log(cookie[1 : 40, 51 : 651]))
Y <- as.matrix(cookie[1 : 40, 701 : 704])
X <- X[, 2 : 601] - X[, 1 : 600]
data <- data.frame(Y = I(Y), X = I(X))
n <- nrow(data)
q <- ncol(Y)
xl <- "Wavelength index"
yl <- "First differences of log(1/reflectance)"
matplot(1:ncol(X), t(X), lty = 1, xlab = xl, ylab = yl, type = "l")
out1 <- plsr(Y ~ X, ncomp = n - 2, data = data)
cv <- crossval(out1, segments = n)
cv.mean <- colMeans(cv$validation$PRESS)
plot(cv.mean, xlab = "h", ylab = "Average PRESS", pch = 20)
h <- 3
for (j in 1 : q) print(cor(Y[, j], fitted(out1)[, j, h]))
set.seed(1)
out2 <- groc(Y ~ X, ncomp = h, data = data, plsrob = TRUE)
for (j in 1 : q) print(corrob(Y[, j], fitted(out2)[, j, h]))
plot(out2)
########################
# Codes for Example 10 #
########################
set.seed(2)
n <- 30
t1 <- sort(runif(n, -1, 1))
y <- t1 + rnorm(n, mean = 0, sd = .05)
y[c(14, 15, 16)] <- y[c(14, 15, 16)] + .5
data <- data.frame(x1 = t1, x2 = 2 * t1, x3 = -1.5 * t1, y = y)
out <- groc(y ~ x1 + x2 + x3, ncomp = 1, data = data, plsrob = TRUE)
tau <- scaleTau2(residuals(out), mu.too = TRUE)
std.res <- scale(residuals(out), center = tau[1], scale = tau[2])
index <- which(abs(std.res)>3)
prm.res <- read.table("prmresid.txt")
plot(t1, y, pch = 20)
matlines(t1, cbind(t1,fitted(out), y - prm.res), lty = 1 : 3)
legend(.4, -.5 , legend = c("true model","groc", "prm"), lty = 1 : 3)
text(t1[index], y[index], index, cex = .8, pos = 3)
########################
# Codes for Example 11 #
########################
data("pulpfiber")
X <- as.matrix(pulpfiber[, 1:4])
Y <- as.matrix(pulpfiber[, 5:8])
data <- data.frame(X = I(X), Y = I(Y))
set.seed(55481)
out.rob <- groc(Y ~ X, data = data, plsrob = TRUE)
plot(out.rob, cex = .6)
out.simpls <- groc(Y ~ X, data = data)
cv.rob <- grocCrossval(out.rob,segment.type = "consecutive")
PREMAD.rob <- cv.rob$validation$PREMAD[,4]
PREMAD.rob
cv.simpls <- grocCrossval(out.simpls,segment.type = "consecutive")
PREMAD.simpls <- cv.simpls$validation$PREMAD[,4]
PREMAD.simpls
(PREMAD.rob - PREMAD.simpls) / PREMAD.simpls * 100
}
}
\keyword{distribution} % Probability Distributions and Random Numbers
\keyword{htest} % Statistical Inference
|
10203986ea99360d1eb7078b1a60d0b81a364298 | dd380873cfb9355a760260f16bdc08e3f4a7550d | /data_analysis/compare_beta_omega.r | c00cff733f7d08b8532a86e43156be154b4cafa7 | [] | no_license | gerbiljames/swin | 59977af61dc1ec843088d91664660c70d3916e24 | 6db6773a0befde74dcec71fad257d537cc005a69 | refs/heads/master | 2021-01-01T16:06:42.840237 | 2015-06-28T17:26:24 | 2015-06-28T17:26:24 | 29,692,557 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,591 | r | compare_beta_omega.r | #!/usr/bin/env Rscript
# stringr/gtools/effsize provide string helpers, mixedsort() for natural
# ordering of file names, and the Vargha-Delaney A measure used at the end.
library(stringr)
library(gtools)
library(effsize)
# Colour pool for the plots; beta series use colours[[1]] ("blue") and
# omega series use colours[[7]] ("red") in the plotting sections below.
colours <- c("blue", "green", "orange", "purple", "yellow",
             "black", "red", "pink", "brown", "lightblue")
logs_dir <- "../src/PiSwarmSimulator/logs/"
# Collect the per-run CSV logs for each controller variant.
beta_file_paths <- list.files(path=logs_dir, pattern="^adv_beta", full.names=TRUE)
omega_file_paths <- list.files(path=logs_dir, pattern="^adv_omega", full.names=TRUE)
# Natural sort so e.g. run_10 comes after run_9 rather than after run_1.
beta_file_paths <- mixedsort(beta_file_paths)
omega_file_paths <- mixedsort(omega_file_paths)
# The log files carry no header row; column names are assigned just below.
beta_data <- lapply(beta_file_paths, read.csv, header=FALSE)
omega_data <- lapply(omega_file_paths, read.csv, header=FALSE)
col_names <- c("time", "beacon_distance", "centroid_distance", "lost_robots")
# Apply the standard column names to every run's data frame.
# seq_along() is safe when no log files were found: the previous
# 1:length(x) form iterates over c(1, 0) for an empty list and fails.
for (i in seq_along(beta_data)) {
  colnames(beta_data[[i]]) <- col_names
}
for (i in seq_along(omega_data)){
  colnames(omega_data[[i]]) <- col_names
}
# For each metric, join every beta run by its time stamps and average the
# metric across runs; the per-metric means are then merged into a single
# data frame keyed on "time".
mean_beta_data <- NULL
for (data_type in c("beacon_distance", "centroid_distance", "lost_robots")) {
  joined_beta_set <- NULL
  for (data_set in beta_data){
    # Keep only the time stamp plus the current metric from this run.
    trimmed_data <- data.frame(time=data_set$time, data_set[data_type])
    if (is.null(joined_beta_set)) {
      joined_beta_set <- trimmed_data
    } else {
      joined_beta_set <- merge(joined_beta_set, trimmed_data, by="time")
    }
  }
  # Average this metric across runs (every column except "time").
  if (is.null(mean_beta_data)) {
    mean_beta_data <- data.frame(time=joined_beta_set$time, rowMeans(joined_beta_set[,-1]))
  } else {
    new_data <- data.frame(time=joined_beta_set$time, rowMeans(joined_beta_set[,-1]))
    mean_beta_data <- merge(mean_beta_data, new_data, by="time")
  }
}
# Same aggregation as for the beta runs above, applied to the omega runs.
# NOTE(review): this duplicates the beta loop verbatim; a shared helper
# taking the run list would remove the duplication — confirm before refactoring.
mean_omega_data <- NULL
for (data_type in c("beacon_distance", "centroid_distance", "lost_robots")) {
  joined_omega_set <- NULL
  for (data_set in omega_data){
    # Keep only the time stamp plus the current metric from this run.
    trimmed_data <- data.frame(time=data_set$time, data_set[data_type])
    if (is.null(joined_omega_set)) {
      joined_omega_set <- trimmed_data
    } else {
      joined_omega_set <- merge(joined_omega_set, trimmed_data, by="time")
    }
  }
  # Average this metric across runs (every column except "time").
  if (is.null(mean_omega_data)) {
    mean_omega_data <- data.frame(time=joined_omega_set$time, rowMeans(joined_omega_set[,-1]))
  } else {
    new_data <- data.frame(time=joined_omega_set$time, rowMeans(joined_omega_set[,-1]))
    mean_omega_data <- merge(mean_omega_data, new_data, by="time")
  }
}
# Give both averaged data frames the standard column names.
colnames(mean_beta_data) <- col_names
colnames(mean_omega_data) <- col_names
# Plot mean centroid-to-beacon distance over time for both controllers,
# saved as a PDF.
pdf("figures/comparison_beacon_distance.pdf")
plot(mean_beta_data$time, mean_beta_data$beacon_distance, type="l", xlab="Time (Seconds)",
     ylab="Centroid Distance from Beacon(cm)", col=colours[[1]])
lines(mean_omega_data$time, mean_omega_data$beacon_distance, col=colours[[7]])
legend("topright", c("beta", "omega"), col=c(colours[[1]], colours[[7]]), lty=1)
# Assign dev.off()'s return value to a throwaway variable to keep the
# console quiet. (Bug fix: the original line ended with a stray ')',
# a syntax error that stopped the whole script from parsing.)
rubbish <- dev.off()
# Mean robot-to-centroid distance over time, beta vs omega.
pdf("figures/comparison_centroid_distance.pdf")
plot(mean_beta_data$time, mean_beta_data$centroid_distance, type="l", xlab="Time (Seconds)",
     ylab="Mean Robot Distance from Centroid (cm)", col=colours[[1]], ylim=c(0, 80))
lines(mean_omega_data$time, mean_omega_data$centroid_distance, col=colours[[7]])
legend("right", c("beta", "omega"), col=c(colours[[1]], colours[[7]]), lty=1)
rubbish <- dev.off()
# Mean number of lost robots over time, beta vs omega.
pdf("figures/comparison_lost_robots.pdf")
plot(mean_beta_data$time, mean_beta_data$lost_robots, type="l", xlab="Time (Seconds)",
     ylab="Lost Robots", col=colours[[1]], ylim=c(0, 20))
lines(mean_omega_data$time, mean_omega_data$lost_robots, col=colours[[7]])
legend("topright", c("beta", "omega"), col=c(colours[[1]], colours[[7]]), lty=1)
rubbish <- dev.off()
# Header for the effect-size statistic printed next.
print("Vargha-Delaney A measure for centroid_distance")
print(VD.A(mean_beta_data$centroid_distance, mean_omega_data$centroid_distance)) |
ddccdcde5be5a47bcccd9f68bceb11e6d7be8f9f | 9c53f6a0e7c059f46c9e446e1396ede06d4a0958 | /Week4/Code/HO-17.R | 064b08ee25b135d4c7c960a31743b343d18205ea | [] | no_license | tisssu/CMEECourseWork | 9f5dd832b7d227fccd85ea27199953858428d2ae | 31482f38cb0fe0a60025ce864f59a1372e583f32 | refs/heads/master | 2020-03-30T19:31:48.387316 | 2019-08-29T13:02:41 | 2019-08-29T13:02:41 | 151,547,136 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 434 | r | HO-17.R | rm(list=ls())
graphics.off()
# 3x2 contingency table: hair colour (rows) by eye colour (columns).
hairEyes<-matrix(c(34,59,3,10,42,47),ncol=2,dimnames=list(Hair=c("Black","Brown","Blond"),Eyes=c("Brown","Blue")))
hairEyes
# Row, column and grand totals, used to build expected counts under independence.
rowTot <- rowSums(hairEyes)
colTot = colSums(hairEyes)
tabTot<-sum(hairEyes)
# Expected cell counts: (row total * column total) / grand total.
Expected<-outer(rowTot,colTot)/tabTot
Expected
#calculate X^2
cellChi=(hairEyes-Expected)^2/Expected
tabChi = sum(cellChi)
tabChi
# p-value from the chi-squared distribution; df = (3-1)*(2-1) = 2.
1-pchisq(tabChi,df = 2)
# Cross-check the hand calculation against R's built-in test.
hairChi = chisq.test(hairEyes)
print(hairChi)
|
6f10eb586dad05b3cc9dda74703fbdee8e4eae67 | 3c3b9355189c483b1a6d13cc4a7a3c1ba3c66842 | /inst/templates/GOMFOFFSPRING.Rd | e7918fb3962e1feb771ab85143e8de8d014fbac1 | [] | no_license | SiYangming/AnnBuilder | 69cedc909988ca4323015ab34e80bc0f33bc0c1d | a353c7f1f6434233f9511b0e35b8b0928689734c | refs/heads/master | 2023-03-01T01:00:15.232713 | 2021-02-05T14:10:21 | 2021-02-05T14:10:21 | 336,283,722 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,408 | rd | GOMFOFFSPRING.Rd | \name{GOMFOFFSPRING}
\alias{GOMFOFFSPRING}
\title{Annotation of GO Identifiers to their Molecular Function Offspring}
\description{
This data set describes associations between GO molecular function (MF)
terms and their offspring MF terms, based on the directed acyclic
graph (DAG) defined by the Gene Ontology Consortium. The format is an R
environment mapping the GO MF terms to all offspring terms, where an
offspring term is a more specific GO term that is preceded
by the given GO term in the DAG (in other words, the children and all
their children, etc.).
}
\details{
Each GO MF term is mapped to a vector of offspring GO MF terms.
Molecular function is defined as the tasks performed by individual
gene products; examples are transcription factor and DNA helicase as
defined by Gene Ontology Consortium.
Mappings were based on data provided by:
#GOSOURCE#
Package built: #DATE#
}
\references{
\url{http://www.geneontology.org/} and
\url{http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene}
}
\examples{
require("GO", character.only = TRUE) || stop("GO unavailable")
# Convert the environment object to a list
xx <- as.list(GOMFOFFSPRING)
# Remove GO identifiers that do not have any offspring
xx <- xx[!is.na(xx)]
if(length(xx) > 0){
# Get the offspring GO identifiers for the first two elements of xx
goids <- xx[1:2]
}
}
\keyword{datasets}
|
82b14d1b2093526be817b4a2a8caa1a03c444c10 | 463ee9b27bfa20430e7b1589109b45b6dc5e0528 | /man/mpm.Rd | 0e682d92eb891560f394d989e2353369d996680f | [] | no_license | ianjonsen/mpm | c01c18e6b58deb7026cc8e13df3196e00b38e902 | 9b6cee656c99e30d9561a5c21685eb0ff79a83a0 | refs/heads/master | 2021-01-24T09:31:14.129774 | 2019-09-27T20:53:35 | 2019-09-27T20:53:35 | 123,017,931 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 957 | rd | mpm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mpm.r
\name{mpm}
\alias{mpm}
\title{Move Persistence Model}
\usage{
mpm(data, optim = c("nlminb", "optim"), verbose = FALSE,
control = NULL, inner.control = NULL)
}
\arguments{
\item{data}{a data frame of observations (see details)}
\item{optim}{numerical optimizer}
\item{verbose}{report progress during minimization}
\item{control}{list of control parameters for the outer optimization (type ?nlminb or ?optim for details)}
\item{inner.control}{list of control parameters for the inner optimization}
}
\value{
a list with components
\item{\code{fitted}}{a dataframe of fitted locations}
\item{\code{par}}{model parameter summary}
\item{\code{data}}{input dataframe}
\item{\code{tmb}}{the tmb object}
\item{\code{opt}}{the object returned by the optimizer}
}
\description{
fit a random walk with time-varying move persistence to location data
without measurement error
}
|
283f2fb0bcf53727d19a6f909b5c87640722ad04 | 1cce9f907f8b22ccb57303b4e7d419e8d4ffe3cd | /R/plotOneFit.R | cf70373060a6aa807d16ee93cbcd70699cd8201b | [
"BSD-3-Clause"
] | permissive | timpeters82/consensus | 81d39b8a27c8191e3bf8e2c43df9a88d008cd10d | 80e938a71d85efe500ad55fe09bcdbbff662b079 | refs/heads/master | 2021-05-10T15:51:40.417398 | 2020-05-22T02:49:49 | 2020-05-22T02:49:49 | 118,564,547 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 939 | r | plotOneFit.R | plotOneFit <- function(multimeas, idx, pal=palette(), ...){
if(length(pal) < length(multimeas@data)){
stop("Palette size must be at least the number of platforms/conditions")
}
if(!idx %in% rownames(multimeas@data[[1]])){
stop("Gene ID is not recognised. Check rownames of the data you passed to MultiMeasure().")
}
block <- getBlock(multimeas, idx)
plotrange <- range(block)
#Add 10% at bottom for legend
plotrange[1] <- plotrange[1] - diff(plotrange)*0.1
means <- colMeans(block)
plot(means, seq_len(length(means)), ylim=plotrange, xlab="Sample means", ylab="Sample measurements", main=idx, type="n")
for (i in seq_len(nrow(block))){
points(means, block[i,], pch=16, col=pal[i], cex=1.3)
fit.one <- lm(as.numeric(block[i,]) ~ means)
abline(fit.one, col=pal[i])
}
legend(min(means), min(block), names(multimeas), text.col=pal, horiz=TRUE, bty="n", ...)
}
|
89b686932761d61dcb724918a1a1c7297aaab6c1 | c1b923adaaa7eea133eca4eaa065ab0c8bb3e36d | /server.R | 85bff08683b045639b00a40eaa031d203fc994de | [] | no_license | Silvosus/DevelopingDataProducts | d939404542b3547f16165e1680d1f4694ed5c733 | 11cd32cd4ce673ea9710f8580a74664212a80f3d | refs/heads/master | 2021-01-22T22:13:10.893677 | 2017-04-02T16:29:38 | 2017-04-02T16:29:38 | 85,518,908 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,930 | r | server.R | # This server.R file is part of the fourth Developing Data Products' assignment
# Shiny server for a Central Limit Theorem demo. For the selected
# distribution it repeatedly resamples a vector of density/probability
# values and plots a histogram of the per-sample standard deviations.
# NOTE(review): the samples are drawn from the vector of density values
# rather than from random variates of the distribution — confirm this is
# the intended illustration.
library(shiny)
#library(ggplot2)
function(input, output) {
    output$distPlot <- renderPlot({
        if (input$dist == 'bern') {
            #bernoulli
            # Fixed seed so the rendered plot is reproducible across sessions.
            set.seed(428)
            p = seq(0 , 1, length = 1000)
            # Variance curve p(1-p) of a Bernoulli variable over p in [0, 1].
            y = p * (1 - p)
            # One row per replicate; each row is a resample of size input$samplesize.
            s <- t(replicate(input$rep, sample(y, size = input$samplesize)))
            S <- apply(s, 1, sd)
            hist(S, main = strwrap(paste("Histogram of the standard deviation of ",input$rep," samples of Bernoulli distributions"),width = 40),
                 xlab = "Standard Deviation")
            # Red vertical line marks the mean of the sampled standard deviations.
            abline(v=mean(S),col = 2)
            # Show only the help text for the active distribution; blank the others.
            output$helpbern <- renderText({"To your left, you can select the number of samples to catch up from a Bernoulli distribution, and each sample size."})
            output$helpbinom <- renderText({""})
            output$helpchi <- renderText({""})
        }
        if (input$dist == 'binom') {
            #binomial
            set.seed(428)
            x <- 0:100
            # Binomial probabilities for input$cant trials and success prob input$prob.
            y <- dbinom(x,size = as.numeric(input$cant), as.numeric(input$prob))
            s <- t(replicate(input$rep, sample(y, size = input$samplesize)))
            S <- apply(s, 1, sd)
            hist(S, main = strwrap(paste("Histogram of the standard deviation of ",input$rep," samples of Binomial distributions"),width = 40),
                 xlab = "Standard Deviation")
            abline(v=mean(S),col = 2)
            output$helpbern <- renderText({""})
            output$helpbinom <- renderText({"To your left, you can 1. Select the number of draws with replacement to retrieve from a Binomial distribution, and each sample size.\n2. Select the number of draws with replacement\n3.Select the success probability in the binomial experiment."})
            output$helpchi <- renderText({""})
        }
        if (input$dist == 'chi') {
            #Chi-squared
            set.seed(428)
            x <- seq(-20,20,by = .4)
            # Chi-squared density with input$grados degrees of freedom
            # (zero for the negative half of the x grid).
            y <- dchisq(x, df = as.numeric(input$grados))
            s <- t(replicate(input$rep, sample(y, size = input$samplesize)))
            S <- apply(s, 1, sd)
            hist(S, main = strwrap(paste("Histogram of the standard deviation of ",input$rep," samples of Chi-squared distributions"),width = 40),
                 xlab = "Standard Deviation")
            abline(v=mean(S),col = 2)
            output$helpbern <- renderText({""})
            output$helpbinom <- renderText({""})
            # NOTE(review): "Chi-squred" below is a typo in a user-facing string;
            # fixing it changes runtime output, so it is left untouched here.
            output$helpchi <- renderText({"To your left, you can 1. Select the number of samples to catch up from a Chi-squred distribution, and each sample size.\n2. Select the number of degrees of freedom of the Chi-squared distribution"})
        }
    })
    # Static explanatory text shown alongside the plot.
    output$text <- renderText({
        "The central limit theorem establishes that, for the most commonly studied scenarios, when independent random variables are added, their sum tends toward a normal distribution even if the original variables themselves are not normally distributed."
    })
}
cb9fdc361da22e2e89b641d1970737810f3796b7 | 7d5d8492c2d88b88bdc57e3c32db038a7e7e7924 | /R-packages/GapAnalysis/R/gapAreas.R | 86eb6c849be34ade4e94bbef59303f366102fbe0 | [] | no_license | CIAT-DAPA/dapa-climate-change | 80ab6318d660a010efcd4ad942664c57431c8cce | 2480332e9d61a862fe5aeacf6f82ef0a1febe8d4 | refs/heads/master | 2023-08-17T04:14:49.626909 | 2023-08-15T00:39:58 | 2023-08-15T00:39:58 | 39,960,256 | 15 | 17 | null | null | null | null | UTF-8 | R | false | false | 1,814 | r | gapAreas.R | # Author: Julian Ramirez, dawnpatrolmustaine@gmail.com
# Date : December 2009
# Version 0.1
# Licence GPL v3
#' Identify gap areas by combining a point-density raster with an
#' environmental-distance raster.
#'
#' Output cell values: 0 = no gap, 1 = low sampling density only
#' (density < gthresh), 2 = high environmental distance only
#' (distance > ethresh), 3 = both. NA cells in the inputs stay NA.
#'
#' @param pointdens a RasterLayer (or path to a raster file) of point densities
#' @param gthresh non-negative density threshold; cells below it are flagged (default 10)
#' @param evdist a RasterLayer (or path to a raster file) of environmental distances
#' @param ethresh non-negative distance threshold; cells above it are flagged (default 10)
#' @param outfile name of the raster file to write (required)
#' @return the resulting RasterLayer, as written to outfile
gapAreas <- function(pointdens, gthresh=10, evdist, ethresh=10, outfile='') {
  if (outfile == '') {
    stop('Please provide a valid name for your output file')
  }
  # is.numeric() (unlike the previous class()-based test) also accepts
  # integer thresholds, and the messages now describe the argument
  # correctly (they previously said "Radius").
  if (!is.numeric(gthresh)) {
    stop('Threshold must be a number')
  } else if (gthresh < 0) {
    stop('Threshold must be greater than or equal to 0')
  }
  if (!is.numeric(ethresh)) {
    stop('Threshold must be a number')
  } else if (ethresh < 0) {
    stop('Threshold must be greater than or equal to 0')
  }
  # Each raster argument may be an in-memory RasterLayer or a file path;
  # inherits() also accepts subclasses of RasterLayer.
  if (!inherits(pointdens, "RasterLayer") && !file.exists(pointdens)) {
    stop('The file or object corresponding to point densities does not exist')
  } else if (is.character(pointdens)) {
    pointdens <- raster(pointdens)
  }
  if (!inherits(evdist, "RasterLayer") && !file.exists(evdist)) {
    stop('The file or object corresponding to environmental distances does not exist')
  } else if (is.character(evdist)) {
    evdist <- raster(evdist)
  }
  if (!canProcessInMemory(pointdens, n=3)) {
    stop('Cannot allocate the rasters in memory')
  }
  # Recode density: 1 where density < gthresh (under-sampled), 0 elsewhere;
  # mask() restores the original NA cells afterwards.
  pointdens <- readAll(pointdens)
  msk <- pointdens
  pointdens[which(values(pointdens) >= gthresh)] <- NA
  pointdens[which(values(!is.na(pointdens)))] <- 1
  pointdens[which(values(is.na(pointdens)))] <- 0
  pointdens <- mask(pointdens, msk)
  rm(msk)
  # Recode distance: 2 where distance > ethresh (environmentally distinct),
  # 0 elsewhere; again restore the original NA cells via mask().
  evdist <- readAll(evdist)
  msk <- evdist
  evdist[which(values(evdist) <= ethresh)] <- NA
  evdist[which(values(!is.na(evdist)))] <- 2
  evdist[which(values(is.na(evdist)))] <- 0
  evdist <- mask(evdist, msk)
  rm(msk)
  # The sum encodes the gap type: 0, 1, 2 or 3 (= 1 + 2).
  rslt <- pointdens + evdist
  rslt <- writeRaster(rslt, outfile, overwrite=TRUE)
  return(rslt)
}
|
6f91961ccd621b934b60b688e6326492db4f078c | 9cfbc60f6152a3cd9399eeb4b913f2b6dbe5a86e | /agentmodel/man/novel.modi.Rd | ca0317f32f232fb326335cda28607068a7807188 | [] | no_license | joannasimms/peatlandagentmodel | 5d9bccdb355f535a8b33a9afce88aa1dab811f8f | a58b832ecfb2eb6fe452439c4be31606e40b11da | refs/heads/master | 2023-07-25T02:35:16.323792 | 2021-09-06T19:35:35 | 2021-09-06T19:35:35 | 401,614,130 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 237 | rd | novel.modi.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coefficients.R
\name{novel.modi}
\alias{novel.modi}
\title{Novel crop modifier}
\usage{
novel.modi(pop)
}
\description{
Demographic changes taken from LUKE.
}
|
a8e7877ef5d003e89319595c907fbd0c482975df | e031ec65e8c0648ca1a0cf41279a9e405d34ebf6 | /man/r.auc.gini.Rd | 089dcf478ae4563aa0f5ed52b623f2b9b82e6fa3 | [] | no_license | rocalabern/rmodel | f77486fd914941e939ff2e57f143c3e453277c0b | c0e663746d97a2a42237861ad90eeb73e2312b45 | refs/heads/master | 2020-04-06T06:49:26.113322 | 2016-09-05T14:11:06 | 2016-09-05T14:11:06 | 15,023,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 196 | rd | r.auc.gini.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rmodel_modelevaluation.R
\name{r.auc.gini}
\alias{r.auc.gini}
\title{r.auc.gini}
\usage{
r.auc.gini(score, target)
}
|
d4cdf3a86cd3993d9c70288a222d5d8c92936297 | c0c01af0eb7874f9b49f9e8bc6b7f1002ffcf254 | /cachematrix.R | d65abcf40df1f2b1612102b327050f58c8525bfe | [] | no_license | helixstring/ProgrammingAssignment2 | da06435f3f41568ecc91bb560d15d34eb44a29c6 | fb5a48238ca04aea499dd361f80602e04a6ed199 | refs/heads/master | 2021-01-19T11:28:58.078288 | 2017-04-11T21:30:50 | 2017-04-11T21:30:50 | 87,969,487 | 0 | 0 | null | 2017-04-11T19:03:53 | 2017-04-11T19:03:53 | null | UTF-8 | R | false | false | 1,957 | r | cachematrix.R | ## This is a function containing two subfunctions. The 1st one is called makeCacheMatrix.
## It basically is like makeVector in the example of the assignment, which creats a special
## matrix containing functions to: 1 set the value of the matrix 2 get the value of the matrix
## 3 set the value of the inverse 4 get the value of the inverse. The 2nd one is called
## cacheSolve. It is very much alike the cacheMean in the example. It caculates the inverse of
## matrix defined in the fist function. If the inverse is already calculated, it gets the
## inverse directly. Otherwise, it calculate the inverse, set the value of the inverse by
## setinverse function within the 1st function.
## The fist function makeCacheMatrix first defines the value of the matrix. It is special
## because it also contain functions. The default value of matrix is blank. m is set as NULL
## unless you really cacheSolve(x) using the cacheSolve function. If you already cacheSolve
## it, then next time you type yourmatrix$getinverse(), you can call it directly.
# Build a matrix wrapper that can memoise its inverse.
#
# Returns a list of four accessor closures sharing this function's
# environment:
#   set(y)          -- replace the stored matrix and drop any cached inverse
#   get()           -- return the stored matrix
#   setinverse(inv) -- store a computed inverse in the cache
#   getinverse()    -- return the cached inverse, or NULL when not yet cached
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cache
    },
    get = function() {
      x
    },
    setinverse = function(inv) {
      cached_inverse <<- inv
    },
    getinverse = function() {
      cached_inverse
    }
  )
}
## The cacheSolve function is used to calculate the inverse of the matrix defined in the
## first function. If the inverse already been calculated before, this function will give
## message "getting cached data" and then give the value. Otherwise, it starts to calculate
## the inverse of the matrix using solve() function and return the calculated value.
# Return the inverse of the special "matrix" created by makeCacheMatrix().
#
# When a cached inverse exists it is returned directly (announcing
# "getting cached data"); otherwise the inverse is computed with solve(),
# stored back into the cache, and returned. Extra arguments are forwarded
# to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, memoise, then return the fresh inverse.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
ee00fbc3be63435209438a07ed1956caaa89899e | 2632c34e060fe3a625b8b64759c7f1088dca2e28 | /man/comp_choirbm_glm.Rd | 4ba0c3bbca3565178df09fdc9d62d91bebbd161f | [
"MIT"
] | permissive | emcramer/CHOIRBM | 0319103968ac662cbec472585b9b2accfdea96b1 | 178c6833e1235c8d7f4d0fae9f550860eb7f36a3 | refs/heads/master | 2022-11-25T08:12:15.576043 | 2022-10-28T19:57:40 | 2022-10-28T19:57:40 | 329,775,974 | 5 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,053 | rd | comp_choirbm_glm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comp_choirbm_glm.R
\name{comp_choirbm_glm}
\alias{comp_choirbm_glm}
\title{Examine the effect of a continuous variable on CBM location endorsement}
\usage{
comp_choirbm_glm(in_df, comp_var, method = "bonferroni", ...)
}
\arguments{
\item{in_df}{a data.frame with at least one column for the CBM as a
delimited string, and another column as the continuous variable for
modeling.}
\item{comp_var}{the name of the variable to model as a string.}
\item{method}{the multiple-testing correction method to apply; defaults to "bonferroni".}
\item{...}{additional parameters passed to glm.}
}
\value{
a data.frame with the following columns: id, term, estimate,
std.error, statistic, p.value. Each row is the result of one glm
using the continuous variable to predict CBM location endorsement.
}
\description{
Examine the effect of a continuous variable on CBM location endorsement
}
\examples{
\dontrun{
data(validation)
set.seed(123)
sampled_data <- validation[sample(1:nrow(validation), 100, replace = FALSE),]
model_ouput <- comp_choirbm_glm(sampled_data, "age")
}
}
|
a2f473c63366824b8d768a67ae5ea192d5a014e6 | 7594a119bfad01361c95676992b597f0e8619414 | /data/taxi_trip_demo.r | 6ce5419fa2229efa9c0803f1e336f4fca05c1647 | [] | no_license | DeMoehn/Cloudant-nyctaxi | 1a026a55ca77255d90fc1104d80dc2e9f75ebb0f | ad6352218e80c5636b97133bc5a5d27603dc92d3 | refs/heads/master | 2021-01-17T07:33:46.669560 | 2016-06-14T09:30:15 | 2016-06-14T09:30:15 | 40,601,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,849 | r | taxi_trip_demo.r | library(ibmdbR)
#Init
library(ggplot2)
#Connect to the database
idaInit(idaConnect("BLUDB","",""))
# Lazy reference to the in-database taxi table; rows are pulled only on demand.
q <- ida.data.frame('"NYCTAXIDATA"')
names(q)
#Select only trips from Madison Square area to JFK
# Bounding boxes: pickup near Madison Square, dropoff near JFK airport.
bdf <- q[(q$PICKUP_LATITUDE>40.759988)&(q$PICKUP_LATITUDE<40.765693)&
         (q$PICKUP_LONGITUDE>-73.976693)&(-73.9677>q$PICKUP_LONGITUDE)&
         (q$DROPOFF_LATITUDE>40.628024)&(q$DROPOFF_LATITUDE<40.672566)&
         (q$DROPOFF_LONGITUDE>-73.858281)&(-73.715544>q$DROPOFF_LONGITUDE)
         ,]
dim(bdf)
#Load the data into R
# date() before/after gives a rough timing of the materialisation step.
date()
df <- as.data.frame(bdf)
date()
#Preprocess taxi data - Do date / time conversions
df$date <- strptime(df$PICKUP_DATETIME,'%Y-%m-%d %H:%M:%S')
df$hour <- format(df$date,'%H')
df$min <- format(df$date,'%M')
df$dayyear <- as.numeric(format(df$date,'%j'))
df$dayweeknum <- df$dayyear%%7
df$dayweek <- format(df$date,'%a')
df$day <- as.numeric(format(df$date,'%d'))
df$month <- as.numeric(format(df$date,'%m'))
df$dayweek <- as.factor(df$dayweek)
# Fractional hour of day, e.g. 14.5 for 14:30.
df$timeofday <- (as.numeric(df$hour)*60+as.numeric(df$min))/60.0
df$trip_distance <- as.numeric(df$TRIP_DISTANCE)
# Trip time converted from seconds to minutes.
df$trip_time <- as.numeric(df$TRIP_TIME_IN_SECS)/60.0
df$speed <- as.numeric(df$TRIP_DISTANCE)/as.numeric(df$TRIP_TIME_IN_SECS)
df$EST <- format(df$date,'%Y-%m-%d')
#Remove outliers
# Keeps only trips longer than 15 miles (plausible Manhattan-to-JFK runs).
df <- df[df$TRIP_DISTANCE>15,]
#Plot trip time
ggplot(df, aes(x=trip_time)) + stat_bin(aes(y=..count../sum(..count..))) + ylab('') + xlab('Trip time (minutes)')
#Plot trip time depending on time of day
# NOTE(review): the first plot below uses ALL days yet is titled "(Weekdays)",
# while the second filters out Sat/Sun but has the generic title -- the two
# titles look swapped; confirm before changing the strings.
ggplot(df,aes(timeofday,trip_time)) + geom_point() + ggtitle('Trip time IBM Manhattan office to JFK Airport (Weekdays)') + xlab('Time of day (hour)') + ylab('Trip time (minutes)') + layer(geom="smooth") + ylim(0,100)+ xlim(0,23) + geom_rug(col="darkred",alpha=.1)
ggplot(df[(df$dayweek!='Sat')&(df$dayweek!='Sun'),],aes(timeofday,trip_time)) + geom_point() + ggtitle('Trip time IBM Manhattan office to JFK Airport') + xlab('Time of day (hour)') + ylab('Trip time (minutes)') + layer(geom="smooth") + ylim(0,100)+ xlim(0,23) + geom_rug(col="darkred",alpha=.1)
#Sunday
ggplot(df[df$dayweek=='Sun',],aes(timeofday,trip_time)) + ggtitle('Trip time IBM Manhattan office to JFK Airport (Sunday)') + xlab('Time of day (hour)') + ylab('Trip time (minutes)') + geom_point()+layer(geom="smooth") + ylim(0,100) + xlim(0,23)+ geom_rug(col="darkred",alpha=.1)
################################################
#Load Weather data into table "nycweather2013"
################################################
dfWeather <- as.data.frame(ida.data.frame('"NYCWEATHER2013"'))
head(dfWeather)
# Join taxi trips to daily weather on the date string, drop extreme precipitation.
df2 <- merge(df,dfWeather,by="EST")
df2 <- df2[df2$Precipitation<20,]
head(df2)
# ('Niederschlag' is German for precipitation; it is a runtime axis label,
# so it is left unchanged here.)
ggplot(df2, aes(x=Precipitation)) + stat_bin(aes(y=..count../sum(..count..))) + ylab('') + xlab('Niederschlag')
# GAM of trip time with a per-weekday smooth of time-of-day plus a
# precipitation smooth.
g <- gam(trip_time~s(timeofday,by=dayweek)+s(Precipitation,k=5),data=df2)
plot(g)
|
ad6e471c53f441ef96798720716c47a3647d75fb | 14e847798b6f1fe97a563a7fd899576235d03acd | /server.R | a91f43bbe9a6e49eaf99cb250567b2dc2b1dc654 | [] | no_license | matschmitz/MREG | 17b2f2d2dd83fc67fe395826f76d92770372beff | 04cab2fabd6c711f611787ef1929459ecb2de068 | refs/heads/master | 2022-12-30T07:44:07.135046 | 2020-10-21T18:31:31 | 2020-10-21T18:31:31 | 306,112,648 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,542 | r | server.R | library(shiny)
# Shiny server for the MREG demo: renders an interactive 3D plotly surface
# of the two-predictor model Y = b0 + b1*X1 + b2*X2 + b3*X1*X2, with the
# coefficients taken from the UI inputs b0..b3.
function(input, output) {
  # Reactive accessors for the four model coefficients set in the UI.
  getb0now <- reactive({input$b0})
  getb1now <- reactive({input$b1})
  getb2now <- reactive({input$b2})
  getb3now <- reactive({input$b3})
  output$mainPlot <- renderPlotly({
    b0 <- getb0now()
    b1 <- getb1now()
    b2 <- getb2now()
    b3 <- getb3now()
    # b0 <- 0
    # b1 <- 1
    # b2 <- 1
    # b3 <- 0
    # Evaluate the model on a 20x20 grid over [-1, 1] x [-1, 1].
    GG <- expand.grid(
      x1 = seq(-1, 1, length.out = 20),
      x2 = seq(-1, 1, length.out = 20)
    ) %>% data.table()
    GG[, y := b0 + b1*x1 + b2*x2 + b3*x1*x2]
    # Reshape the long grid into the matrix layout plotly expects for a surface.
    z <- spread(GG, key = x2, value = y) %>% .[, 2:ncol(.)] %>% as.matrix %>% t
    # Plot model and data
    plot_ly() %>%
      add_surface(
        x = unique(GG$x1), y = unique(GG$x2), z = z,
        # colors = c("dodgerblue4", 'dodgerblue3'),
        colors = c("#006400", "#458B00"),
        showscale = FALSE,
        opacity = .8,
        hoverinfo = "skip",
        # Contour highlighting is toggled from the UI.
        # NOTE(review): x-contours follow input$projectX2 and y-contours
        # input$projectX1 -- verify this cross-pairing is intended.
        contours = list(
          x = list(show = FALSE,
                   highlight = input$projectX2, #
                   highlightcolor = "white",
                   highlightwidth = 5,
                   color = "azure"),
          y = list(show = FALSE,
                   highlight = input$projectX1, #
                   highlightcolor = "white",
                   highlightwidth = 5,
                   color = "azure"),
          z = list(show = FALSE,
                   highlight = FALSE)
        )) %>%
      layout(
        # Title shows the current model equation rendered via MathJax.
        # NOTE(review): the format string ends "$$)" -- the ')' after the
        # closing $$ looks like a stray character in the rendered title;
        # confirm before changing this runtime string.
        title = paste0(withMathJax(sprintf(
          "$$Y = %s %s %s \\textit{X}_{1} %s %s\\textit{X}_{2} %s %s\\textit{X}_{1}\\textit{X}_{2}$$)",
          b0, ifelse(b1>=0, "+", ""), b1, ifelse(b2>=0, "+", ""), b2, ifelse(b3>=0, "+", ""), b3))),
        scene = list(
          xaxis = list(title = "X1",
                       titlefont = list(color = "rgb(153, 0, 0)"),
                       tickfont = list(color = "grey"),
                       showspikes = FALSE),
          yaxis = list(title = "X2",
                       titlefont = list(color = "rgb(153, 0, 0)"),
                       tickfont = list(color = "grey"),
                       showspikes = FALSE),
          zaxis = list(title = "Y",
                       titlefont = list(color = "rgb(153, 0, 0)"),
                       tickfont = list(color = "grey"),
                       showspikes = FALSE),
          camera = list(eye = list(x = 2))),
        autosize = TRUE)
  })
}
e4a99c7c3269c2ac2da750add6cf3c4377d0df7e | 7b842e47b36c5eccaee6b71c77e22519b49c0168 | /R/07-datagroup-obj.R | 059e7d2e39776582b2e42b46374744cd2c162bbb | [] | no_license | cran/geoknife | 5dc92ca0aa7e7afe2afac3fd848e3b7fc99c07c4 | e6dba004a958f5954317cfcd7faaec1d8d094ae9 | refs/heads/master | 2023-07-20T09:34:10.506446 | 2023-07-06T07:00:12 | 2023-07-06T07:00:12 | 48,080,629 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,152 | r | 07-datagroup-obj.R | #' datagroup class
#'
#' contains collections of webdata that can be processed with
#' \code{\link{geoknife}}
#'
#' @slot group a list of webdata compatible elements
#' @rdname datagroup-class
# S4 class with a single slot, "group": a list of webdata-compatible elements.
setClass(
  Class = "datagroup",
  representation = representation(
    group = 'list')
)
# Initializer: stores the supplied list (empty by default) in the group slot.
setMethod(f="initialize",signature="datagroup",
          definition=function(
            .Object,
            group = list()){
            .Object@group <- group
            return(.Object)
          })
#' create datagroup object
#' @description A class representing a geoknife job (\code{datagroup}).
#'
#' @return the datagroup object
#' @author Jordan S Read
#' @rdname datagroup-methods
#' @export
setGeneric("datagroup", function(...) {
  standardGeneric("datagroup")
})
#' @param x a datagroup object
#' @param i index specifying elements to extract or replace.
#' @param j not implemented
#' @param drop not implemented
#' @param ... additional arguments passed to initialize method
#' @rdname datagroup-methods
#' @aliases datagroup,datagroup-methods
setMethod("datagroup", signature(), function(...) {
  ## create a new datagroup object (arguments go to the initialize method)
  datagroup <- new("datagroup",...)
  return(datagroup)
})
# Coercion to webdata: uses the first dataset's url and warns when the
# group holds more than one dataset (the rest are dropped).
setAs('datagroup', 'webdata', function(from){
  if (length(from@group) > 1){
    warning('coercing datagroup into webdata. More than one dataset specified, using the first.')
  }
  .Object <- do.call(what = "webdata", args = list(url = from@group[[1]]$url))
  return(.Object)
})
#' get abstract from a datagroup
#'
#' extracts the abstract information from a datagroup object
#'
#' @param .Object a datagroup object
#'@rdname abstract-datagroup
#'@aliases
#'abstract
#'title
#'@export
setGeneric(name="abstract",def=function(.Object){standardGeneric("abstract")})
#'@rdname abstract-datagroup
#'@aliases abstract
# Collects the $abstract field from every element of the group.
setMethod(f = "abstract",signature(.Object = "datagroup"),
          definition = function(.Object){
            return(sapply(.Object@group, function(x) x$abstract))
          })
#' @rdname abstract-datagroup
#' @aliases
#' abstract
#' title
#'@export
setGeneric(name="title",def=function(.Object){standardGeneric("title")})
#'@rdname abstract-datagroup
#'@aliases
#'abstract
#'title
# Collects the $title field from every element of the group.
setMethod(f = "title",signature(.Object = "datagroup"),
          definition = function(.Object){
            return(sapply(.Object@group, function(x) x$title))
          })
#'@rdname datagroup-methods
#'@aliases datagroup,datagroup-methods
# length() of a datagroup is the number of datasets it holds.
setMethod(f = "length",signature(x = "datagroup"),
          definition = function(x){
            return(length(x@group))
          })
#'@rdname datagroup-methods
#'@aliases datagroup,datagroup-methods
# Subset a datagroup, returning a new datagroup. `i` may be numeric
# positions or dataset titles (matched via title()).
setMethod("[", signature(x='datagroup',i="ANY",j='ANY'), function(x, i, j, ..., drop = TRUE) {
  if (is.character(i)) {
    # Map titles to positions; use `<-` (not `=`) and braces for clarity.
    i <- which(title(x) %in% i)
  }
  return(datagroup(x@group[i]))
})
#'@rdname datagroup-methods
#'@aliases datagroup,datagroup-methods
setMethod("[[", signature('datagroup',i="ANY",j='ANY'), function(x, i, j, ..., drop = TRUE) {
  # Extract a single dataset (the underlying list element) from the group.
  return(x@group[[i]])
})
c759fea39ff7c70e4fe8c269e8b7774c2c674c40 | fc35a2c3874c7a710154772e56fecac1ff379a6d | /CleanCode.R | 63718851f658e8af6d266ed886b39d2f61f98500 | [] | no_license | busatos/DSPG | ad107859d12588688e72d7b42216dc65e5a73c23 | cfe091e90e912eb9e75c1d9dcde6ed29b52fdb22 | refs/heads/master | 2022-12-09T22:07:07.741404 | 2020-08-19T23:55:11 | 2020-08-19T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,884 | r | CleanCode.R | library(plyr)
library(tidyr)
library(naniar)
library(ggpmisc)
library(tidyverse)
library(lubridate)
library(RColorBrewer)
library(directlabels)
library(gridExtra)
library(gtable)
library(grid)
library(lubridate)
library(readr)
library(broom)
library(hydrostats)
library(stargazer)
library(GGally)
library(zoo)
library(kableExtra)
library(knitr)
library(reactable)
library(htmlwidgets)
setwd("~/DSPG")
# Data import -----------------------------------------------------------------

# Hourly PGE water-quality data
HourlyPGEData <- read_csv("newpgeData.csv", col_types = cols(Season = col_factor()))

# Daily USGS temperature data, split by gage location
USGSData <- read_csv("AllUSGSData.csv", col_types = cols(Season = col_factor()))
MadrasData <- USGSData %>% filter(Location == "Madras") %>% select(-`Discharge (cfs)`)
MoodyData <- USGSData %>% filter(Location == "Moody") %>% select(-`Discharge (cfs)`)
CulverData <- USGSData %>% filter(Location == "Culver") %>% select(-`Discharge (cfs)`)

# ODFW fish-count data (monthly and yearly). In the yearly file, "Actual*"
# columns are totals taken directly from ODFW; "Total" is the sum of the
# provided monthly counts.
ODFWDataMonthly <- read_csv("ODFWData.csv", col_types = cols(Season = col_factor()))
ODFWDataYearly <- read_csv("ODFWDataYearly.csv")

# PGE daily fish-count data, 2014-2020. Column names are supplied manually,
# so the file's own header row is dropped via the [2:959, ] subset.
PGEFishData <- read_csv("PGEFishData.csv",
                        col_names = c("Date_time", "Hatchery Summer Steelhead", "Summer Steelhead","Summer Steelhead RM",
                                      "Summer Steelhead LM", "Hatchery Spring Chinook", "Wild Spring Chinook","Spring Chinook RM",
                                      "Spring Chinook LM", "No Mark Sockeye", "Sockeye RM", "Sockeye LM", "Fall Chinook","Bull Trout",
                                      "Rainbow Trout", "Total", "Year", "Season", "Month"),
                        col_types = cols(Season = col_factor(), Date_time = col_datetime()))[2:959,]
PGEFishData$Date_time <- ymd(PGEFishData$Date_time)

# ODEQ water-quality parameter data
ODEQData <- read_csv("ODEQData.csv", col_types = cols(Season = col_factor()))

# John Day data
JDReddsCountData <- read_csv("JohnDayReddCounts.csv")[1:13,1:17]
JohnDayBargeData <- read_csv("JohnDayBargeRates.csv", skip = 1)[,1:14]
# BUG FIX: the original name vector had 15 entries (a trailing "") for the 14
# retained columns, which errors on assignment; the empty name is dropped.
colnames(JohnDayBargeData) <- c("Year","W_Observed","H_Observed","pHOSObserved","W_Captured","H_Captured","%H_Captured","NOSA",
                                "W_JDD","H_JDD","%H_JDD","PercentWBarged","PercentHBarged","Num_H")

# Bonneville Dam data
BonnevilleData <- read_csv("BonnevilleDamData.csv")
### ANALYSIS ------------------------------------------------------------------

## Seasonal and yearly analysis

# Median seasonal water temperature per year at each gage
MadrasOLS <- MadrasData %>% group_by(Year, Season) %>% summarize(`Temperature` = median(Temperature, na.rm = T))
MoodyOLS <- MoodyData %>% group_by(Year, Season) %>% summarize(`Median Seasonal Temperature` = median(Temperature))

# Seasonal fish totals.
# BUG FIX: the original referenced `ODFWData`, which is never created in this
# script -- the monthly ODFW table is loaded as `ODFWDataMonthly` above.
ols2data <- ODFWDataMonthly %>% group_by(Year, Season) %>% summarize(`Fall Chinook` = sum(`Fall Chinook`),
                                                                     `Hatchery Summer Steelhead` = sum(`Hatchery Summer Steelhead`),
                                                                     `Wild Summer Steelhead` = sum(`Wild Summer Steelhead`))

# Join temperature to counts; drop incomplete years (2017, 2020) and winter
lmdata <- MadrasOLS %>% left_join(ols2data, by = c("Year","Season")) %>% filter(Year > 1976 & Season != "Winter" & Year != 2017 &
                                                                                 Year != 2020)
lmdata2 <- MoodyOLS %>% left_join(ols2data, by = c("Year","Season")) %>% filter(Year > 1976)
# Total count across the three species columns (cols 4-6)
lmdata$Total <- rowSums(lmdata[,4:6], na.rm = T)
lmdata2$Total <- rowSums(lmdata2[,4:6], na.rm = T)

# Fixed-effects panel models (season as the individual index).
# FIX: the plm package was used below but never attached anywhere in the script.
library(plm)
fixed <- plm(Total ~ Temperature,
             data = lmdata, index = c("Season", "Year"), model = "within")
fixed.time <- plm(Total ~ Temperature + I(Temperature^2) + factor(Year) - 1,
                  data = lmdata, index = c("Season", "Year"), model = "within")
summary(fixed.time)
pFtest(fixed.time, fixed)          # are the time (year) effects jointly significant?
plmtest(fixed, c("time"), type = "bp")  # Breusch-Pagan test for time effects

# John Day data analysis: what drives the observed proportion of hatchery-origin
# spawners (pHOS)?
lm1 <- lm(pHOSObserved ~ log(Num_H) + NOSA, data = JohnDayBargeData)
lm2 <- lm(pHOSObserved ~ `PercentHBarged` + NOSA, data = JohnDayBargeData) # Current model
stargazer(lm1, lm2, type = "text") # Check slide 20 for source on regression
# Statistical evidence ---------------------------------------------------------
# Test for seasonal temperature differences across three eras:
# PreDam (<= 1956), PreSWW (1957-2009), PostSWW (>= 2010).
MadrasDataYearly <- MadrasData %>% group_by(Year, Season) %>% summarise(Temperature = mean(Temperature)) %>%
  mutate(Group = case_when(Year <= 1956 ~ "PreDam", Year <= 2009 ~ "PreSWW", Year >= 2010 ~ "PostSWW"))
MadrasDataYearly$Group <- factor(MadrasDataYearly$Group, levels = c("PreDam", "PreSWW", "PostSWW"))

# BUG FIX: `formula` was used below before being defined anywhere earlier in
# the script; define the quadratic trend used by geom_smooth()/stat_poly_eq()
# here (same form as the formula used for the later plots).
formula <- y ~ x + I(x^2)
ggplot(data = MadrasDataYearly, aes(x = Year, y = Temperature)) + geom_smooth(method = "lm", formula = formula, se = F) +
  geom_line(aes(color = Season)) + facet_grid(Season ~ Group, scales = "free") +
  stat_poly_eq(aes(label = paste(..rr.label..)), formula = formula, parse = T) # TODO: redo with rolling 7-day average maximum

# One data set per season
MadrasDataYearlyFall <- MadrasDataYearly %>% filter(Season == "Fall")
MadrasDataYearlyWinter <- MadrasDataYearly %>% filter(Season == "Winter")
MadrasDataYearlySpring <- MadrasDataYearly %>% filter(Season == "Spring")
MadrasDataYearlySummer <- MadrasDataYearly %>% filter(Season == "Summer")

# Era effect on temperature, one model per season.
# FIX: the original fitted each lm() twice (once inside summary(), once for
# stargazer); fit each model once and reuse it.
Falllm <- lm(Temperature ~ Group, data = MadrasDataYearlyFall)
Winterlm <- lm(Temperature ~ Group, data = MadrasDataYearlyWinter)
Springlm <- lm(Temperature ~ Group, data = MadrasDataYearlySpring)
Summerlm <- lm(Temperature ~ Group, data = MadrasDataYearlySummer)
# Summarized individually
summary(Falllm)
summary(Winterlm)
summary(Springlm)
summary(Summerlm)
# View all at once
stargazer(Falllm, Winterlm, Springlm, Summerlm, type = "html") # Order here is 1:Fall, 2:Winter, 3:Spring, 4:Summer
### PLOTS ----------------------------------------------------------------------

# Long-format version of the PGE fish counts (one row per species/date)
PGEFishDataGathered <- PGEFishData %>% gather(Variable, Value, -Date_time, -Year, -Season, -Month)

# Counts for rainbow trout, hatchery steelhead, hatchery spring chinook and
# fall chinook, by season and year.
# BUG FIX: the original used `Variable == c(...)`, which recycles the 4-value
# right-hand side and silently keeps only every 4th matching row; %in% is the
# correct set-membership test.
PGEFishData %>% gather(Variable, Value, -Date_time, -Year, -Season, -Month) %>%
  filter(Variable %in% c("Hatchery Summer Steelhead","Hatchery Spring Chinook", "Fall Chinook", "Rainbow Trout")) %>%
  ggplot(aes(Season, as.numeric(Value), color = Variable, fill = Variable)) + geom_col() + facet_grid(Variable ~ Year) +
  theme_bw() + ggtitle("PGE Fish Count Data") + labs(y = "Number of Fish Captured") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5),
        plot.title = element_text(hjust = 0.5),
        legend.position = "none")

# ODFW yearly fish-count trends at Sherars Falls, with the 2010 selective water
# withdrawal (SWW) installation marked
ODFWFishPlot <- ODFWDataYearly %>% select(ActualHSS, Year, ActualWSS, ActualFC) %>% pivot_longer(-Year, names_to = "Variable",
                                                                                                 values_to = "Count")
ggplot(data = ODFWFishPlot, aes(x = Year, y = Count, color = Variable)) + geom_line() +
  labs(y = "Fish Count", x = "Date", title = "ODFW Fish Counts at Sherars Falls (RM 43)", color = "Fish") +
  geom_vline(xintercept = 2010) + annotate(geom = "text", x = 2015.5, y = 3000, label = "SWW Installation") +
  scale_color_manual(labels = c("Fall Chinook","Hatchery Summer Steelhead","Wild Summer Steelhead"),
                     values = c("blue","red","green")) +
  theme_bw() + theme(plot.title = element_text(hjust = 0.5),
                     legend.position = "bottom",
                     legend.title = element_blank())

# Overplot of ALL variables by season and year from the PGE data; noisy, and
# most species have negligible counts -- kept for exploratory reference
PGEFishData %>% gather(Variable, Value, -Date_time, -Year, -Season, -Month) %>%
  ggplot(aes(Season, as.numeric(Value), color = Variable, fill = Variable)) + geom_col() + facet_grid(Variable ~ Year) +
  theme_bw() + ggtitle("PGE Fish Count Data") + labs(y = "Number of Fish Captured") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5),
        plot.title = element_text(hjust = 0.5),
        legend.title = element_blank())
# Plot of Fall and Summer ODFW Fish Data by Column
# Quadratic trend formula shared by the geom_smooth()/stat_poly_eq() calls below.
formula <- y ~ x + I(x^2)
# NOTE(review): `MergedFishData` is never created in this script -- it is
# presumably a merge of the ODFW and PGE counts built elsewhere; confirm its
# origin before running this chunk.
MergedFishData %>% filter(Year < 2014 & Season != "Spring") %>% group_by(Year) %>% ggplot(aes(x = Year, y = Total, fill = Season, color = Season)) +
  geom_col(show.legend = F, position = "dodge") + geom_smooth(method = "lm", formula = formula, show.legend = F, color = "black") +
  geom_vline(aes(xintercept = 2014), linetype = "dashed") +
  facet_wrap( ~ Season) +
  stat_poly_eq(aes(label = ..eq.label..), method = "lm", parse = T, formula = formula)
# Season interaction term plot shows the lack of data we are struggling with.
# (Relies on `lmdata` from the ANALYSIS section above: seasonal totals plus
# median temperature, shown as large red points.)
ggplot(data = lmdata, aes(x = Year, y = Total, color = Season)) + geom_point(aes(x = Year, y = Total)) +
  geom_point(aes(x = Year, y = `Temperature`), color = "red", size = 3) +
  geom_smooth(method = "lm", se = F, formula = formula) + facet_wrap( ~ Season) + geom_vline(aes(xintercept = 2010)) +
  stat_poly_eq(aes(label = paste(..eq.label.., ..adj.rr.label.., sep = "~~~~")), formula = formula, parse = T, angle = -30)
# Plot of pHOSObserved vs. log of number of hatchery barged
formula = y ~ x + I(x^2)
ggplot(JohnDayBargeData, aes(log(Num_H), pHOSObserved)) + geom_point() + geom_smooth(method = "lm", formula = formula, se = F) +
  stat_poly_eq(aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~~")), formula = formula, parse = T)
# Plot of proportion of fish barged vs HSS from ODFW Data Yearly.
# NOTE(review): `testdf2` is not defined anywhere in this script -- the inline
# comment below acknowledges it needs to be rebuilt before this chunk runs.
ggplot(testdf2, aes(Proportion, HSS)) + geom_point() + geom_smooth(method = "lm", formula = formula, se = F) +
  stat_poly_eq(aes(label = paste(..eq.label.., ..adj.rr.label.., sep = "~~~~")), formula = formula, parse = T) #Figure out how to rebuild testdf2
# Plot of ODFW HSS Yearly Numbers by Year vs Bonneville Barged Numbers.
# Joins the Bonneville Dam barging records onto the yearly ODFW counts so the
# two series can be drawn on the same axes.
BonnevilleDatavsODFW <- BonnevilleData %>% left_join(ODFWDataYearly, by = c("Year"))
# Red series: ODFW hatchery summer steelhead count; black series: number of
# hatchery fish barged past Bonneville. Years are coerced to Jan-1 dates so a
# date axis can be used.
ggplot(data = BonnevilleDatavsODFW) + geom_line(aes(as.Date(paste0(Year, "-01-01")), ActualHSS), color = "red") +
  geom_point(aes(as.Date(paste0(Year, "-01-01")),ActualHSS), color = "red") +
  geom_point(aes(as.Date(paste0(Year, "-01-01")),Hatchery), color = "black") +
  geom_line(aes(as.Date(paste0(Year, "-01-01")),Hatchery), color = "black") # ActualHSS is ODFW Count Hatchery is Bonneville Barged Numbers
# Plot of ODFW Yearly vs Hatchery counts from Bonneville data (quadratic trend)
formula = y ~ x + I(x^2)
ggplot(data = BonnevilleDatavsODFW, aes(Hatchery, ActualHSS)) + geom_point() + geom_smooth(method = "lm", se = F, formula = formula) +
  stat_poly_eq(aes(label = paste(..eq.label.., ..adj.rr.label.., sep = "~~~~")), formula = formula, parse = T)
# Plot of Hatchery vs Fall Chinook, negatively associated as expected (linear fit)
ggplot(data = BonnevilleDatavsODFW, aes(Hatchery, ActualFC)) + geom_point() + geom_smooth(method = "lm", se = F, formula = y ~ x) +
  stat_poly_eq(aes(label = paste(..eq.label.., ..adj.rr.label.., sep = "~~~~")), formula = y ~ x, parse = T)
# Plot of missing USGS data: widen so each gage becomes a column, then average
# any duplicate timestamps per day.
MissingDataPlot <- pivot_wider(USGSData, names_from = Location, values_from = Temperature, values_fn = max)
MissingDataPlot2 <- MissingDataPlot %>% group_by(Date_time) %>% summarise(Culver = mean(Culver, na.rm = T),
                                                                          Moody = mean(Moody, na.rm = T),
                                                                          Madras = mean(Madras, na.rm = T))
# NOTE(review): `getSeason()` is not defined in this script; it is presumably a
# helper mapping dates to seasons defined elsewhere -- confirm before running.
MissingDataPlot2 <- MissingDataPlot2 %>% mutate(Year = year(Date_time), Season = getSeason(Date_time), Julian = yday(Date_time))
# Percent of each year's temperature record that is missing, per gage
MissingDataPlot2 %>% select(Moody, Madras, Culver, Year) %>% gg_miss_fct(Year) +
  labs(title = "Percent of Yearly Temperature Data Available", x = "Date", y = "Location", fill = "% Missing Yearly") +
  scale_fill_gradient(high = "#132B43",
                      low = "#56B1F7") +
  theme(plot.title = element_text(hjust = 0.5))
# Missing-data summaries: PGE fish counts, ODFW counts, ODEQ/PGE water quality -

# Long-format PGE counts (one row per species/date observation)
PGEFishDataGathered <- PGEFishData %>% gather(Variable, Value, -Date_time, -Year, -Season, -Month)
`%notin%` <- Negate(`%in%`)  # convenience operator: complement of %in%
# Bookkeeping columns that are not fish species
notfishList <- c("Season", "Year", "Total", "Month", "Date_time")

# Number of missing observations per species.
# FIX: selecting with a bare external character vector (select(-notfishList))
# is ambiguous/deprecated in dplyr; all_of() makes the external lookup explicit.
PGEFishData %>% select(-all_of(notfishList)) %>% gg_miss_var()

# Which years the PGE data are missing
PGEFishData %>% select(-Total, -Season, -Month, -Date_time) %>% gg_miss_fct(Year) +
  scale_fill_gradient2(low = "white", high = "black") + labs(title = "PGE Fish Count Data Availability", fill = "% Missing") +
  theme(axis.title.y = element_blank())

# Missingness of the monthly ODFW counts, by year
ODFWDataMonthly %>% select(-Season, -Month, -Date_time) %>% gg_miss_fct(Year) +
  labs(title = "ODFW Fish Count Data Availability") + scale_fill_gradient2(low = "white", high = "black") + theme_bw() +
  theme(plot.title = element_text(hjust = 0.5),
        axis.title.y = element_blank(),
        legend.position = "none")
ODFWDataMonthly %>% gather(Variable, Value, -Date_time, -Year, -Season, -Month) %>%
  ggplot(aes(x = Year, y = Value)) + geom_miss_point() + scale_color_manual(values = c("white", "black")) + theme_dark() +
  labs(x = "Date", y = "Fish Count", color = "Missing Observations", title = "ODFW Fish Count Data Availability") +
  theme(plot.title = element_text(hjust = 0.5))

# ODEQ water-quality parameter coverage
ODEQMissingPlot <- ODEQData %>% gather(Variable, Value, -Location, -Date_time, -Year, -Season, -Julian)
ODEQData %>% select(-Date_time, -Season, -Julian, -Location) %>% gg_miss_fct(Year) + scale_fill_viridis_c() +
  labs(title = "ODEQ Water Quality Parameter Data Coverage", x = "Date", y = "Variable", fill = "% Missing Yearly") +
  theme(plot.title = element_text(hjust = 0.5))

# PGE water-quality parameter coverage
HourlyPGEData %>% select(-Date_time, -Season, -Julian) %>% gg_miss_fct(Year) + scale_fill_viridis_b() +
  labs(title = "PGE Water Quality Parameter Data Coverage", x = "Date", y = "Variable", fill = "% Missing Yearly") +
  theme(plot.title = element_text(hjust = 0.5),
        legend.position = "none")
# Correlation matrix / correlogram for the seasonal Madras data ---------------
# (FIX: the original recreated `MadrasDataYearly` here and then immediately
# overwrote it below without using it; the dead duplicate has been removed.)
CorrelogramData <- MadrasData %>%
  mutate(Group = case_when(Year <= 1956 ~ "PreDam", Year <= 2009 ~ "PreSWW", Year >= 2010 ~ "PostSWW")) %>%
  mutate(Temperature2 = rollmean(Temperature, k = 7, fill = NA))  # 7-day rolling mean
colnames(CorrelogramData) <- c("Date","Temp","Location","Year","Season","Julian","Period","Temperature")
CorrelogramData$Period <- factor(CorrelogramData$Period, levels = c("PreDam", "PreSWW", "PostSWW"))
# Pairwise plot of Date, Season, Period and raw temperature, coloured by era
CorrelogramData %>% ggpairs(columns = c(1,5,7,2), aes(color = Period))

# Comparing Pre-Dam, Pre-SWW, Post-SWW at Madras
ggplot(data = CorrelogramData, aes(x = Date, y = Temperature)) +
  geom_line(color = "darkcyan") + facet_wrap( ~ Period, scales = "free_x") +
  labs(y = "Temperature (Celsius °)", title = "7 Day Rolling Average Temperature at Madras Gage") + theme_bw() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5))

# Table of seasonal mean temperatures by era.
# BUG FIX: the original passed trim = 2 to mean() -- trim must lie in
# [0, 0.5], and values >= 0.5 make mean() silently return the median, so the
# "Mean Temperature" column actually held medians; median() has no trim
# argument at all. Both stray arguments are removed.
MadrasDataYearly <- MadrasData %>% group_by(Year, Season) %>%
  summarise(`Mean Temperature` = mean(Temperature, na.rm = T),
            `Median Temperature` = median(Temperature, na.rm = T)) %>%
  mutate(Group = case_when(Year <= 1956 ~ "PreDam", Year <= 2009 ~ "PreSWW", Year >= 2010 ~ "PostSWW"))
MadrasDataYearly <- MadrasDataYearly %>% drop_na() %>%
  pivot_wider(names_from = Season, values_from = c("Mean Temperature", "Median Temperature"))
# BUG FIX: the original assigned 6 names to the 10-column pivoted table (a
# length-mismatch error) and only then subset; subset to Year, Group and the
# four seasonal mean columns first, then rename.
MadrasDataYearly <- MadrasDataYearly[,1:6]
colnames(MadrasDataYearly) <- c("Year", "Period", "Winter Mean Temperature", "Spring Mean Temperature",
                                "Summer Mean Temperature", "Fall Mean Temperature")
rtable <- reactable(MadrasDataYearly, defaultPageSize = 40)
html <- "rtable.html"
saveWidget(rtable,html)
# NOTE(review): webshot() requires the 'webshot' package, which is never
# attached in this script -- confirm it is installed/loaded before running.
webshot(html, "Table1.png")
# Stats to back up previous table/chart (models fitted in the era-comparison
# section earlier in the script)
stargazer(Falllm,Winterlm,Springlm,Summerlm, type = "html", out = "Models.htm", covariate.labels = c("Pre-SWW","Post-SWW"))
stargazer(Falllm,Winterlm,Springlm,Summerlm, type = "text")
# Plots for Sophia: selected pre-dam (1953, 1955), pre-SWW (2008, 2009) and
# post-SWW (2016, 2019) years at the Madras gage.
MadrasDataMedians <- MadrasData %>% group_by(Year, Season) %>%
  summarize(median = median(`Temperature`, na.rm = T), mean = mean(`Temperature`, na.rm = T)) %>%
  filter(Year == 1953 | Year == 1955 | Year == 2008 | Year == 2009 | Year == 2016 | Year == 2019)
# Seasonal Mean Temperature pre and post dam comparison (dodged bars per year)
MadrasDataMedians %>% ggplot(aes(Season, mean)) + geom_bar(aes(fill = as.factor(Year)), position = "dodge", stat = "identity") +
  labs(y = "Mean Temperature", fill = "Year") + scale_fill_brewer(palette = "Dark2") + theme_bw()
# Shared axis-label colours for the faceted plot below
temperatureColor <- "#C92A2A"
fishColor <- rgb(0.2, 0.6, 0.9, 1)
# Pre and post dam temperature comparison: daily temperature traces for the six
# selected years, faceted by year, with day-of-year mapped onto a date axis.
longtermtempplot <- MadrasData %>% filter(Year == 1953 | Year == 1955 | Year == 2008 | Year == 2009 | Year == 2016 | Year == 2019) %>%
  ggplot(aes(x = as.Date(Julian, origin = "1952-01-01"), y = Temperature, color = Year)) + geom_line(show.legend = F) +
  facet_wrap( ~ as.factor(Year), ncol = 2) + theme_bw() +
  scale_x_date(date_labels = "%b") + ggtitle("Temperature Before and After Dam Installation") + labs(x = "Date") +
  theme(axis.title.y = element_text(color = temperatureColor, size = 13),
        axis.title.x = element_text(color = fishColor, size = 13),
        plot.title = element_text(hjust = 0.5))
# NOTE(review): this palette names '1956', but the filter above selects 1955 --
# confirm which year was intended. Also, the plot maps Year to the *color*
# aesthetic (and Year is numeric), so scale_fill_manual() below has no visible
# effect; scale_color_manual() over factor(Year) was presumably intended.
colorset = c('1953' = "red", '1956' = "red", '2008' = "goldenrod", '2009' = "goldenrod", '2016' = "forestgreen", '2019' = "forestgreen")
longtermtempplot + scale_fill_manual(values = colorset)
|
a53d394ca1d87a30009b5ab968b1991e7ae060d3 | 8fa0976a8976d738b9ce034717c357d31ff7d261 | /utilities/house/nytimes.R | ed3394ff8b7058909e1abfef4d870a2d654607d3 | [] | no_license | USStudiesCentre/115th-senate | ef63d83692c173559193c071ba9aeb9dceb5d790 | d37df5e62a182844fcde6bedddadb3b5841ed372 | refs/heads/master | 2021-01-21T12:27:31.071960 | 2017-09-11T05:55:02 | 2017-09-11T05:55:02 | 102,070,424 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 881 | r | nytimes.R | ## loop over the unique entries in member data base
## Fetch NYTimes bio info for every member of the 112th House and persist each
## member's records into the `nyTimesMemberInfo` MySQL table.
## (processMemberData.R is expected to provide getMemberData(),
## extractNYTimesMemberData() and haveTable().)
source("processMemberData.R")
legisData <- getMemberData()

library(RMySQL)
# NOTE(review): `drv` is never defined in this script -- it is presumably a
# MySQL driver (e.g. MySQL()) set up elsewhere; confirm before running.
for(id in legisData$nameid){
  # Returns NULL when the NYTimes API has no record for this member
  out <- extractNYTimesMemberData(id, congress = 112, chamber = "House")
  if(!is.null(out)){
    con <- dbConnect(drv, group = "ideal")
    if(haveTable("nyTimesMemberInfo")){
      # Table already exists: append this member's rows
      res <- dbWriteTable(conn = con,
                          name = "nyTimesMemberInfo",
                          value = out,
                          row.names = FALSE,
                          append = TRUE)
    } else {
      # First write: create the table.
      # BUG FIX: the original wrote `tmp[[i]]` here -- neither `tmp` nor `i`
      # exists in this script; the data to persist is `out`, exactly as in the
      # append branch above.
      res <- dbWriteTable(conn = con,
                          name = "nyTimesMemberInfo",
                          value = out,
                          row.names = FALSE,
                          overwrite = TRUE)
    }
    dbDisconnect(con)
  }
}
|
8812936c3668b81932e6f32ca5315e5d694d2d8e | 4fda4291ed5f30c9110866db54d966766dd27a56 | /man/pnsdrm.Rd | 96e0f512b5ceb74d185dcd90ad136b9f1781bc04 | [] | no_license | cran/mrdrc | ad24d0609eecf34b926c70415a3125453425958b | 3dc543f1ebc5a45c676cb748c003fe4c2ce60484 | refs/heads/master | 2021-01-22T07:32:24.973345 | 2011-04-12T00:00:00 | 2011-04-12T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,681 | rd | pnsdrm.Rd | \name{pnsdrm}
\alias{pnsdrm}
\alias{pnsdrm.calc}
\alias{pns.plot1}
\title{Parametric, non-parametric or semi-parametric dose-response modelling}
\description{
Parametric, non-parametric or semi-parametric dose-response modelling of both continuous and quantal data.
}
\usage{
pnsdrm(predictor, response, weights, type = c("continuous", "binomial"),
model = c("semi-parametric", "non-parametric", "parametric"),
fct = NULL, robust = FALSE, respLev = c(10, 20, 50),
reference = NULL, level = 0.95, logex = FALSE)
pnsdrm.calc(predictor, response, weights, type = c("continuous", "binomial"),
model = c("semi-parametric", "non-parametric", "parametric"),
fct = NULL, robust = FALSE, respLev = c(10, 20, 50),
reference = NULL, level = 0.95, logex = FALSE)
}
\arguments{
\item{predictor}{numeric vector of concentrations/doses.}
\item{response}{numeric vector of response values (proportions in case of quantal data).}
\item{weights}{numeric vector of weights needed for quantal data.}
\item{type}{character string specifying the type of response.}
\item{model}{character string specifying the model to be fit.}
\item{fct}{a built-in function or a list of built-in functions from the package 'drc'.}
\item{robust}{logical specifying whether or not a robust approach should be used. Only for the
semi-parametric approach.}
\item{respLev}{numeric vector of requested ED level.}
\item{reference}{optional reference value for the lower limit.}
\item{level}{numeric specifying the confidence level.}
\item{logex}{logical indicating whether or not a logarithmic x axis should be used.}
}
\details{
The parametric estimation is based on the model fitting function \code{\link[drc]{drm}} in the package 'drc'.
The non-parametric estimation relies on the 'locfit' package.
The semi-parametric approach is mainly based on the development in Nottingham and Birch (2000), whereas the
non-parametric approach uses on the package 'EffectiveDose' which implements the method introduced in
Dette \emph{et al} (2004).
\code{plot} and \code{print} methods are available.
}
\value{
A list containing the requested ED values and additional information about the underlying
model fit(s).
}
\references{
Dette, H., Neumeyer, N. and Pilz, K. F. (2004) A Note on Nonparametric Estimation of the Effective Dose
in Quantal Bioassay, \emph{J. Amer. Statist. Assoc.}, \bold{100}, 503--510.
Nottingham, Q. and Birch, J. B. (2000) A Semiparametric Approach to Analysing Dose-Response Data,
\emph{Statist. Med.}, \bold{19}, 389--404.
}
\author{
Christian Ritz (wrapper functions)
Mads Jeppe Tarp-Johansen (internal functions)
}
%\note{
% The implementation of this function as well as all other functions in the package 'mrdrc' has been funded by
% European Centre for the Validation of Alternative Methods, EU Joint Research Centre under lot 3 of the
% project "Quality assessment and novel statistical analysis techniques for toxicological data".
%}
%\seealso{
% More examples are found in the help pages for \code{\link{bin.mat}} and \code{\link{exp.a}}.
%}
\examples{
## Analysing deguelin (in the package 'drc')
## Semi-parametric model
deguelin.mrr1 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = LL.2())
deguelin.mrr1
plot(deguelin.mrr1)
## The same
gmFct <- getMeanFunctions(fname = "LL.2")
deguelin.mrr1b <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = gmFct)
deguelin.mrr1b
plot(deguelin.mrr1b)
## The same again
deguelin.mrr1c <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = list(LL2.2()))
deguelin.mrr1c
plot(deguelin.mrr1c)
deguelin.mrr1d <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = W1.2())
deguelin.mrr1d
plot(deguelin.mrr1d)
## The same
gmFct <- getMeanFunctions(fname = "W1.2")
deguelin.mrr1e <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = gmFct)
deguelin.mrr1e
plot(deguelin.mrr1e)
### Parametric models
#deguelin.mrr2 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "parametric", fct = list(LL.2(), W1.2(), W2.2()))
#deguelin.mrr2
#plot(deguelin.mrr2)
### The same parametric models
#deguelin.mrr2b <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "parametric", fct = list(W2.2(), LL.2(), W1.2()))
#deguelin.mrr2b
#plot(deguelin.mrr2b)
## Non-parametric approach -- currently not available
#deguelin.mrr3 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "non-parametric")
#deguelin.mrr3
#plot(deguelin.mrr3)
## Semi-parametric model with reference level 0.3
deguelin.mrr4 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = LL.2(), reference = 0.3)
deguelin.mrr4
plot(deguelin.mrr4)
## Semi-parametric models
deguelin.mrr5 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = list(LL.2(), W1.2(), W2.2()))
deguelin.mrr5
plot(deguelin.mrr5)
## Analysing ryegrass (in the package 'drc')
ryegrass.mrr1 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = LL.5())
ryegrass.mrr1
plot(ryegrass.mrr1)
plot(ryegrass.mrr1, log = "x")
ryegrass.mrr2 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = list(LL.3(), LL.4(), LL.5()))
ryegrass.mrr2
plot(ryegrass.mrr2)
#ryegrass.mrr3 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
#model = "parametric", fct = list(LL.3(), LL.4(), LL.5()))
#ryegrass.mrr3
#plot(ryegrass.mrr3)
ryegrass.mrr4 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = list(L.4(), LL.4(), W1.4(), W2.4()))
ryegrass.mrr4
plot(ryegrass.mrr4)
## Analysing lettuce (in the package 'drc')
lettuce.mrr1 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
model = "semi-parametric", fct = LL.3())
lettuce.mrr1
plot(lettuce.mrr1)
lettuce.mrr2 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
model = "semi-parametric", fct = BC.4())
lettuce.mrr2
plot(lettuce.mrr2)
#lettuce.mrr3 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
#model = "semi-parametric", fct = LL.3(), robust = TRUE)
#lettuce.mrr3
#plot(lettuce.mrr3)
}
\keyword{models}
\keyword{nonlinear}
|
110e4e4b2c57f4c6e58a8923152dc3e77b3ee023 | dcdce563dc89b47cea2bcfee8ee608c291fdc2e9 | /man/sgnf.Rd | 2a6476a0da6d183522cb987d5ba14c1238673874 | [] | no_license | cran/CombMSC | cb4412f580013286fb28a69303cc8077be926476 | b17252d5f1924a5a7122fb20c6b9e307b41c80cc | refs/heads/master | 2021-01-22T05:15:50.935459 | 2019-01-04T17:23:10 | 2019-01-04T17:23:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 615 | rd | sgnf.Rd | \name{sgnf}
\alias{sgnf}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Significance of an msc object }
\description{
A convenience function which calculates, for each summary function of the msc object,
the difference between the best pure MSC and the best combined MSC (i.e., how much better
  we can make each summary function by considering combined MSCs instead of only the pure MSCs).
The results may be useful if one is interested in testing the hypothesis that pure MSCs are as
good as any convex combination of MSCs.
}
\author{Andrew K. Smith}
\keyword{print} |
d99446646119e2b876d0607316bfbfc538f5e0e0 | 7c45e9b0e2c61db6eb6774575c1b879a5b899d2e | /exportando/matrizConfusion_to_matrizDiferencias.R | 784b5f664a7c28f77fe6850c155a2193408bf4fd | [] | no_license | danyche2005/clasificadoresJerarquicosAddClass | d6a88d4d5a67182440022dd6f224a4833a524273 | c135fc8028c670c296c4c415bbd984c12f0cb4be | refs/heads/master | 2021-09-15T04:41:16.092429 | 2018-05-26T09:54:31 | 2018-05-26T09:54:31 | 109,820,550 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,680 | r | matrizConfusion_to_matrizDiferencias.R | mconfusion2oMDiferencias <- function(tablaConfusion,metodo="chi"){
  # Body of mconfusion2oMDiferencias(tablaConfusion, metodo):
  # turns a square confusion matrix into a pairwise class "difference"
  # (distance) matrix using one of seven methods. The distances are written
  # into the lower triangle of the returned matrix (callers wrap the result in
  # as.dist(), which ignores the diagonal/upper triangle).
  # NOTE(review): if `metodo` matches none of the branches below,
  # `matrizDistancias` is never created and the final return() errors.
  # NOTE(review): the 1:(nrofilas-1) pairwise loops assume at least a 2x2
  # matrix; a 1x1 input would produce the degenerate sequence 1:0.

  # Transpose so the normalisations below operate row-wise on the transposed
  # matrix.
  tablaConfusion<-t(tablaConfusion)
  # Method 1 ("propio"): proposed similarity-based distance --
  # similarity of classes i,j = mean of their two mutual (row-normalised)
  # confusion rates; distance = 1 - similarity.
  if(metodo=="propio"){
    # Build the vector of row totals (grown element by element from NULL):
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    matrizTotales<-{}
    for(j in 1:nrofilas){
      matrizTotales[j]<-sum(tablaConfusion[j,])
    }
    ##print(matrizTotales)
    # Build the row-normalised matrix
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    matrizNormalizada<-tablaConfusion
    for(i in 1:nrocolumnas){
      for(j in 1:nrofilas){
        # Avoid division by zero for empty rows
        if(matrizTotales[j]==0){
          matrizNormalizada[j,i]<-0
        }else{
          matrizNormalizada[j,i]<-tablaConfusion[j,i]/matrizTotales[j]
        }
      }
    }
    ##print(matrizNormalizada)
    # Build the similarity matrix (lower triangle only; j > i)
    nrocolumnas<-ncol(matrizNormalizada)
    nrofilas<-nrow(matrizNormalizada)
    matrizSimilitud<-matrizNormalizada
    matrizSimilitud[,]<-0
    for(i in 1:nrocolumnas){
      for(j in 1:nrofilas){
        if(j>i){
          # Mutual confusion rates between classes i and j
          confusiones<-matrizNormalizada[j,i]+matrizNormalizada[i,j]
          # NOTE(review): `aciertos` (the two diagonal hit rates) is computed
          # but never used in this branch.
          aciertos<-matrizNormalizada[j,j]+matrizNormalizada[i,i]
          matrizSimilitud[j,i]<-confusiones/2
        }
      }
    }
    ##print(matrizSimilitud)
    # Distance = 1 - similarity (diagonal/upper triangle end up as 1)
    matrizDistancias<-1-matrizSimilitud
    ##print(matrizDistancias)
  }
  # Method 1.1 ("propioModificado"): same similarity construction, but the
  # matrix is normalised by its grand total instead of by row totals.
  if(metodo=="propioModificado"){
    # Dimensions (kept for symmetry with the other branches):
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    # Grand total of the whole matrix
    valorTotal<- sum(tablaConfusion[,])
    # Normalise by the grand total
    matrizNormalizada<-tablaConfusion/valorTotal
    print(matrizNormalizada)
    # Build the similarity matrix (lower triangle only; j > i)
    nrocolumnas<-ncol(matrizNormalizada)
    nrofilas<-nrow(matrizNormalizada)
    matrizSimilitud<-matrizNormalizada
    matrizSimilitud[,]<-0
    for(i in 1:nrocolumnas){
      for(j in 1:nrofilas){
        if(j>i){
          confusiones<-matrizNormalizada[j,i]+matrizNormalizada[i,j]
          # NOTE(review): `aciertos` is computed but unused, as in "propio".
          aciertos<-matrizNormalizada[j,j]+matrizNormalizada[i,i]
          matrizSimilitud[j,i]<-confusiones/2
        }
      }
    }
    print(matrizSimilitud)
    matrizDistancias<-1-matrizSimilitud
    print(matrizDistancias)
  }
  # Method 2 ("euclidea"): Euclidean distance between row-normalised rows.
  if(metodo=="euclidea"){
    # Build the vector of row totals:
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    matrizTotales<-{}
    for(j in 1:nrofilas){
      matrizTotales[j]<-sum(tablaConfusion[j,])
    }
    print(matrizTotales)
    # Build the row-normalised matrix
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    matrizNormalizada<-tablaConfusion
    for(i in 1:nrocolumnas){
      for(j in 1:nrofilas){
        # Avoid division by zero for empty rows
        if(matrizTotales[j]==0){
          matrizNormalizada[j,i]<-0
        }else{
          matrizNormalizada[j,i]<-tablaConfusion[j,i]/matrizTotales[j]
        }
      }
    }
    print(matrizNormalizada)
    # Pairwise Euclidean distances between rows, collected in lower-triangle
    # (column-major) order
    nrocolumnas<-ncol(matrizNormalizada)
    nrofilas<-nrow(matrizNormalizada)
    distancias<-{}
    # Euclidean distance:
    for(j in 1:(nrofilas-1)){
      ini<-j+1
      for(k in ini:nrofilas){
        vectResta<-matrizNormalizada[j,]-matrizNormalizada[k,]
        vectCuadrado<-(vectResta)^2
        vectTot<-sum(vectCuadrado)
        distancia<-(vectTot)^(1/2)
        distancias<-c(distancias,distancia)
      }
    }
    # Assemble the distance matrix: initialise to 1, then fill the lower
    # triangle with the collected pairwise distances
    matrizDistancias<-matrizNormalizada
    matrizDistancias[,]<-1
    k<-1
    for(i in 1:(nrocolumnas-1)){
      ini<-i+1
      for(j in ini:nrofilas){
        matrizDistancias[j,i]<-distancias[k]
        k<-k+1
      }
    }
    print(matrizDistancias)
  }
  # Method 2.1 ("euclideaModificado"): Euclidean distance between rows of the
  # matrix normalised by its grand total.
  if(metodo=="euclideaModificado"){
    # Dimensions:
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    # Normalise by the grand total of the whole matrix
    valorTotal<- sum(tablaConfusion[,])
    matrizNormalizada<-tablaConfusion/valorTotal
    print(matrizNormalizada)
    # Pairwise Euclidean distances between rows
    nrocolumnas<-ncol(matrizNormalizada)
    nrofilas<-nrow(matrizNormalizada)
    distancias<-{}
    # Euclidean distance:
    for(j in 1:(nrofilas-1)){
      ini<-j+1
      for(k in ini:nrofilas){
        vectResta<-matrizNormalizada[j,]-matrizNormalizada[k,]
        vectCuadrado<-(vectResta)^2
        vectTot<-sum(vectCuadrado)
        distancia<-(vectTot)^(1/2)
        distancias<-c(distancias,distancia)
      }
    }
    # Assemble the distance matrix (init to 1, fill the lower triangle)
    matrizDistancias<-matrizNormalizada
    matrizDistancias[,]<-1
    k<-1
    for(i in 1:(nrocolumnas-1)){
      ini<-i+1
      for(j in ini:nrofilas){
        matrizDistancias[j,i]<-distancias[k]
        k<-k+1
      }
    }
    print(matrizDistancias)
  }
  # Method 3 ("bray"): Bray-Curtis dissimilarity between raw (unnormalised)
  # rows: sum(|x_j - x_k|) / (rowTotal_j + rowTotal_k).
  if(metodo=="bray"){
    # Build the vector of row totals:
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    matrizTotales<-{}
    for(j in 1:nrofilas){
      matrizTotales[j]<-sum(tablaConfusion[j,])
    }
    print(matrizTotales)
    # No normalisation is performed for this method
    matrizNormalizada<-tablaConfusion
    # Pairwise Bray-Curtis dissimilarities
    nrocolumnas<-ncol(matrizNormalizada)
    nrofilas<-nrow(matrizNormalizada)
    distancias<-{}
    # Bray-Curtis distance:
    for(j in 1:(nrofilas-1)){
      ini<-j+1
      for(k in ini:nrofilas){
        vectResta<-abs(matrizNormalizada[j,]-matrizNormalizada[k,])
        total<-sum(vectResta)
        distancia<-total/(matrizTotales[j]+matrizTotales[k])
        distancias<-c(distancias,distancia)
      }
    }
    # Assemble the distance matrix (init to 1, fill the lower triangle)
    matrizDistancias<-matrizNormalizada
    matrizDistancias[,]<-1
    k<-1
    for(i in 1:(nrocolumnas-1)){
      ini<-i+1
      for(j in ini:nrofilas){
        matrizDistancias[j,i]<-distancias[k]
        k<-k+1
      }
    }
    print(matrizDistancias)
  }
  # Method 4 ("chi"): chi-square distance between row profiles --
  # sqrt(sum((p_j - p_k)^2 / columnAverage)), where columnAverage is the
  # column total divided by the grand total.
  if(metodo=="chi"){
    # Row totals:
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    matrizTotalesFilas<-{}
    for(j in 1:nrofilas){
      matrizTotalesFilas[j]<-sum(tablaConfusion[j,])
    }
    print(matrizTotalesFilas)
    # Column totals:
    matrizTotalesCol<-{}
    for(i in 1:nrocolumnas){
      matrizTotalesCol[i]<-sum(tablaConfusion[,i])
    }
    print(matrizTotalesCol)
    # Grand total (sum of the row totals)
    totalFilasCol<-sum(matrizTotalesFilas)
    # Build the row-normalised matrix (row profiles)
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    matrizNormalizada<-tablaConfusion
    for(i in 1:nrocolumnas){
      for(j in 1:nrofilas){
        # Avoid division by zero for empty rows
        if(matrizTotalesFilas[j]==0){
          matrizNormalizada[j,i]<-0
        }else{
          matrizNormalizada[j,i]<-tablaConfusion[j,i]/matrizTotalesFilas[j]
        }
      }
    }
    print(matrizNormalizada)
    # Column averages (column total / grand total)
    vectPromedios<-{}
    for(i in 1:nrocolumnas){
      vectPromedios[i]<-matrizTotalesCol[i]/totalFilasCol
    }
    print(vectPromedios)
    # Pairwise chi-square distances between row profiles
    nrocolumnas<-ncol(matrizNormalizada)
    nrofilas<-nrow(matrizNormalizada)
    distancias<-{}
    # Chi-square distance:
    for(j in 1:(nrofilas-1)){
      ini<-j+1
      for(k in ini:nrofilas){
        vectResta<-matrizNormalizada[j,]-matrizNormalizada[k,]
        vectCuadrado<-(vectResta)^2
        vectTot<-vectCuadrado/vectPromedios
        valSuma<-sum(vectTot)
        distancia<-(valSuma)^(1/2)
        distancias<-c(distancias,distancia)
      }
    }
    # Assemble the distance matrix (init to 1, fill the lower triangle)
    matrizDistancias<-matrizNormalizada
    matrizDistancias[,]<-1
    k<-1
    for(i in 1:(nrocolumnas-1)){
      ini<-i+1
      for(j in ini:nrofilas){
        matrizDistancias[j,i]<-distancias[k]
        k<-k+1
      }
    }
    print(matrizDistancias)
  }
  # Method 4.1 ("chiModificado"): chi-square-style distance on the raw counts,
  # weighting each squared difference by the raw column total.
  if(metodo=="chiModificado"){
    # Column totals:
    nrocolumnas<-ncol(tablaConfusion)
    nrofilas<-nrow(tablaConfusion)
    matrizTotalesCol<-{}
    for(i in 1:nrocolumnas){
      matrizTotalesCol[i]<-sum(tablaConfusion[,i])
    }
    print(matrizTotalesCol)
    # No normalisation is performed for this method
    matrizNormalizada<-tablaConfusion
    print(matrizNormalizada)
    # Pairwise distances between raw rows
    nrocolumnas<-ncol(matrizNormalizada)
    nrofilas<-nrow(matrizNormalizada)
    distancias<-{}
    # Chi-square distance (column-total weighting):
    for(j in 1:(nrofilas-1)){
      ini<-j+1
      for(k in ini:nrofilas){
        vectResta<-matrizNormalizada[j,]-matrizNormalizada[k,]
        vectCuadrado<-(vectResta)^2
        vectTot<-vectCuadrado/matrizTotalesCol
        valSuma<-sum(vectTot)
        distancia<-(valSuma)^(1/2)
        distancias<-c(distancias,distancia)
      }
    }
    # Assemble the distance matrix (init to 1, fill the lower triangle)
    matrizDistancias<-matrizNormalizada
    matrizDistancias[,]<-1
    k<-1
    for(i in 1:(nrocolumnas-1)){
      ini<-i+1
      for(j in ini:nrofilas){
        matrizDistancias[j,i]<-distancias[k]
        k<-k+1
      }
    }
    print(matrizDistancias)
  }
  # Return the distance matrix built by whichever branch ran
  return(matrizDistancias)
}
#Tests of the methods:
# tablaConfusion<-matrix(c(59, 2, 3, 7, 15, 1,65,1,12,9,17,9,52,2,10,12,8,3,81,9,3,6,5,2,73), nrow=5, ncol=5)
#
# mresp1<-mconfusion2oMDiferencias(tablaConfusion,metodo = "propio")
# d<-as.dist(round(mresp1,3))
# d
# mresp2<-mconfusion2oMDiferencias(tablaConfusion,metodo = "propioModificado")
# d<-as.dist(round(mresp2,3))
# d
# mresp3<-mconfusion2oMDiferencias(tablaConfusion,metodo = "euclidea")
# d<-as.dist(round(mresp3,3))
# d
# mresp4<-mconfusion2oMDiferencias(tablaConfusion,metodo = "euclideaModificado")
# d<-as.dist(round(mresp4,3))
# d
# mresp5<-mconfusion2oMDiferencias(tablaConfusion,metodo = "bray")
# d<-as.dist(round(mresp5,3))
# d
# mresp6<-mconfusion2oMDiferencias(tablaConfusion,metodo = "chi")
# d<-as.dist(round(mresp6,3))
# d
# mresp7<-mconfusion2oMDiferencias(tablaConfusion,metodo = "chiModificado")
# d<-as.dist(round(mresp7,3))
# d
#
#
|
df46df9362747254413f4db69da888f15abd72a1 | dabbcf74c9ab630c9946091d1cd964ffceff1a62 | /Datacamp_Sandbox/ImportingData_Sandbox.R | f6e4dfeb43b109b824166f3da50d809773386152 | [] | no_license | FilipRychnavsky/SandBox_for_R | 9afb3d4c8c4c9b5398bc84cfbc3336ebc7298c44 | b53c2a5c6cc81e8253b149fd984ccb262def5349 | refs/heads/master | 2023-02-17T21:24:17.362750 | 2021-01-19T20:46:07 | 2021-01-19T20:46:07 | 81,656,191 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 42 | r | ImportingData_Sandbox.R | #install.packages("readr")
library(readr)
|
8baf006a03a957ec498a14969eef6f269c194205 | 0ee28ea496025a24b5a91af6bbdc93bb3d24a179 | /gRumble/man/RotData.Rd | b43bda1f46071a159766dd378d092fa1202f3c26 | [] | no_license | MBayOtolith/gRumble | 43c2142ae9f3f26baa3d39b4c23afd09c22795d0 | e0bb07972a37a55f25ef85de46d090aa3bec6d19 | refs/heads/master | 2021-06-19T02:56:12.168349 | 2020-04-21T20:54:42 | 2020-04-21T20:54:42 | 90,801,975 | 4 | 0 | null | 2019-01-15T19:23:49 | 2017-05-09T23:50:57 | R | UTF-8 | R | false | false | 763 | rd | RotData.Rd | \name{RotData}
\alias{RotData}
\title{
Rotate XYZ data
}
\description{
Rotate a series of XYZ data points by an equal number of XYZ rotation angles
}
\usage{
RotData(xyz,Xr,Yr,Zr)
}
\arguments{
\item{xyz}{
3 column XYZ data to be rotated
}
\item{Xr}{
rotation around X, roll
}
\item{Yr}{
rotation around Y, pitch
}
\item{Zr}{
rotation around Z, Yaw
}
}
\details{
Rotation angles are given in radians (e.g. 45 degrees = pi/4), with one angle per row of \code{xyz} for each of \code{Xr}, \code{Yr} and \code{Zr}.
}
\value{
returns a three column xyz matrix of the rotated data.
}
\author{
Connor F. White
}
\examples{
#Generate XYZ location data
xyz<-matrix(rep(c(0,0,-1),10),ncol=3)
#Rotate around the Z axis by 45 degrees (pi/4)
angs<-cbind(rep(0,10),rep(0,10),rep(pi/4,10))
RotData(xyz=xyz,Xr=angs[,1],Yr=angs[,2],Zr=angs[,3])
}
|
2c2aa655bd97a981e8ee43eea7b27213a1900550 | f99e6d183f97de1bae65a24d15738b1fae956398 | /man/parse_section.conduit_surcharge_summary.Rd | 37170a6d04c9163a9d1a1f9191b9d8d0be795384 | [] | no_license | dleutnant/swmmr | 30b985d4f14adadc432f7aac4fae57e3bf29a006 | 820fe76a47cf7a0ca2547ce35920aa30de94207b | refs/heads/master | 2023-05-25T21:32:34.072990 | 2022-02-02T09:43:32 | 2022-02-02T09:43:32 | 42,933,610 | 39 | 16 | null | 2023-05-11T03:50:29 | 2015-09-22T12:56:05 | R | UTF-8 | R | false | true | 328 | rd | parse_section.conduit_surcharge_summary.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_section.R
\name{parse_section.conduit_surcharge_summary}
\alias{parse_section.conduit_surcharge_summary}
\title{import helper}
\usage{
\method{parse_section}{conduit_surcharge_summary}(x, ...)
}
\description{
import helper
}
\keyword{internal}
|
b5595805497183368e255454a20bb18a045db2cd | d62c4ae3e2b435d543d256cd639395a7c2649c39 | /3def5bca-443c-4104-943e-e6412c443d5e/R/Temp/aLvZUWYvpFt86.R | 988ebc0824bf3645ae4bcf46fd259ad6e9735095 | [] | no_license | madhurashah/semoss-school-management | 635e544db2d7405a646dec17e6318f5cb8a26755 | 823238f46ea33672aabfac524767245b80e3c72d | refs/heads/master | 2020-08-24T07:47:17.347343 | 2019-10-22T10:34:56 | 2019-10-22T10:34:56 | 216,787,720 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 234 | r | aLvZUWYvpFt86.R | with(a86a739866fef48508e1c741e5aa12857, {ROOT <- 'D:/xampp/htdocs/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/School Management__02a1ac40-a208-4774-ad41-b2882fed4529/version/3def5bca-443c-4104-943e-e6412c443d5e';rm(list=ls())}); |
complete <- function(directory, id = 1:332) {
  # Count the number of completely observed (no-NA) rows in each monitor's
  # CSV file.
  #
  # Args:
  #   directory: path to the folder holding files named "001.csv" .. "332.csv".
  #   id: integer vector of monitor ids to summarise. Defaults to all 332
  #       monitors; the old `if (missing(id)) stop(...)` guard made that
  #       default unreachable (missing() is TRUE whenever the caller omits
  #       the argument, even when a default exists), so it was removed.
  #
  # Returns:
  #   A data.frame with one row per requested id and columns `id` and
  #   `nobs` (number of fully observed rows in that monitor's file).
  if (missing(directory)) {
    stop("need directory!!")
  }
  nobs <- vapply(id, function(monitor) {
    # File names are the monitor id zero-padded to three digits, e.g. 7 -> "007.csv".
    path <- file.path(directory, sprintf("%03d.csv", monitor))
    dat <- read.csv(path, header = TRUE)
    # complete.cases() flags rows with no missing value in any column.
    sum(complete.cases(dat))
  }, integer(1))
  data.frame(id = id, nobs = nobs)
}
|
a25ade6b449747805beae89e6fd26997c1d0f840 | e8aa3475ff59827015f6e319e07452e26bc7ebfc | /extras.dse4KSS/check.dse2.R | ee9ba1ecad482d2ce3d714ba018e8592e5f952dc | [] | no_license | zejiang-unsw/esd_Rshiny | 76bc8c387f233ff6bf4f4dfeb9fda02ba7b98924 | 65ad96bbd4ce4b03f5d5b29255f12fb033e525f5 | refs/heads/master | 2020-12-22T19:45:20.233992 | 2019-02-08T13:08:52 | 2019-02-08T13:08:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 649 | r | check.dse2.R | ## Test the downscaling for wet-day frequency
library(esd)
# Variable tag used below to filter the result-file names:
# 't2m' (two-metre / near-surface air temperature).
param <- 't2m'
# i: which of the matching files to load; ip: which PCA/EOF pattern to inspect.
i <- 1
ip <- 1
# Collect downscaled KSS result files, then keep only those whose name
# contains the variable tag and that come from the EOF-based setup.
files <- list.files(pattern='dse.kss',path='~/R/Rshiny/dse4KSS/data',full.names=TRUE)
files <- files[grep(param,files)]; files <- files[grep('eof',files)];
print(files[i])
# NOTE(review): load() is assumed to define the object `Z` (a list with
# 'info'/'pca'/'eof' entries plus one element per ensemble member) — that
# is how Z is used below; confirm against the files in the data folder.
load(files[i])
# PCA series for pattern `ip`, used as the reference curve in the plot.
pca <- zoo(Z$pca[,ip])
print(Z$info); print(attr(Z,'predictor_file')); print(attr(Z,'predictor_lon')); print(attr(Z,'predictor_lat'))
# Drop the metadata entries so only the ensemble members remain in Z.
Z$info <- NULL; Z$pca <- NULL; Z$eof <- NULL
# Build a matrix with one column per ensemble member: column `ip` of each
# member is extracted, rows follow the time index of the first member.
x <- unlist(lapply(Z,function(x) coredata(x)[,ip]))
dim(x) <- c(length(Z[[1]][,1]),length(Z))
x <- zoo(x,order.by=index(Z[[1]]))
# All ensemble members as semi-transparent red lines, with the PCA
# series overlaid for comparison.
plot(x,plot.type='single',col=rgb(1,0.3,0.3,0.1))
lines(pca)
|
1a822022f2101eeb03a1af8bf13e76291f2ec60f | 27881be50d11de35841e83342a5df3ee355c17fc | /R/fit.gam.R | afb9c7424f20c658547cee267173a04b68697ee9 | [] | no_license | cran/robustgam | 6c6b28e19020725f7b18980656d9ea2b0b8c5b2b | d7237e085bfedf13586a8a3681729f071f003dd7 | refs/heads/master | 2016-09-06T18:19:32.482474 | 2013-05-07T00:00:00 | 2013-05-07T00:00:00 | 17,699,303 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 117 | r | fit.gam.R | fit.gam.sp1 <- function (Ry, RB, RrS, Rfamily)
.Call("fit_gam_sp_cpp", Ry, RB, RrS, Rfamily, PACKAGE = "robustgam")
|
8b7d358d0d0a0bb614a455154c1fbf3f8eb22b73 | 2193494cca453ee22ef9e5e42f87bcd2c28e435b | /CRISPRi/Guide_design/Step2_Get_All_potential_CRISPR_guides.R | 20e1c5fd07be985898fe2d99454969392ed8e995 | [
"MIT"
] | permissive | Christopher-Todd/Todd-eLife-2019 | 63f527dbfc44e4b19d7cc83c37e196be7927f92a | b928214cd4862d11a013c6ea32cebff3198060fd | refs/heads/master | 2020-05-18T16:55:15.591713 | 2019-05-31T07:16:52 | 2019-05-31T07:16:52 | 184,538,582 | 5 | 2 | null | 2019-05-23T12:45:08 | 2019-05-02T07:25:22 | R | UTF-8 | R | false | false | 2,426 | r | Step2_Get_All_potential_CRISPR_guides.R | #### User Inputs ####
#Set working directory
# NOTE(review): setwd() in a script hurts portability; kept as-is because
# the paths below are written relative to this layout.
setwd("~/Guide_design")
#path to input fasta file
repeat.fa.file="~/Guide_design/RLTR13D5.fa"
#set output file name
output.name = "all_guide_hit_mat.txt"
#### Script ####
##getting and formating guides from fasta file
# Read the FASTA file one line per element; header lines start with '>'.
fasta = scan(repeat.fa.file, sep='\n', character())
newseq = grep('>',fasta)
# One entry per FASTA record: the concatenated, upper-cased sequence.
# NOTE(review): `seq` shadows base::seq() for the rest of this script.
seq = character(length(newseq))
for (i in 1:length(newseq)) {
  # Sequence lines for record i run from just after its header line to
  # just before the next header (or to end of file for the last record).
  first = newseq[i]+1
  if (i==length(newseq)) {
    last = length(fasta)
  } else {
    last = newseq[i+1]-1
  }
  seq[i] = toupper(paste(fasta[first:last],collapse=''))
}
# Name each sequence by the first token of its header: splitting
# ">name rest" on '[> ]' gives c("", "name", "rest"), so element 2 is the name.
names(seq) = unlist(lapply(strsplit(fasta[newseq],'[> ]'),function(x) x[2]))
##reverse complement sequence function
# Reverse-complement a DNA string.
#
# Args:
#   dna: a single character string over the alphabet A/C/G/T.
#        Now case-insensitive: the old gsub() chain (upper -> lower-case
#        complement -> toupper) only complemented upper-case input and
#        silently returned lower-case input reversed but NOT complemented.
# Returns:
#   The reverse complement, upper-cased.
rev.comp<-function(dna){
  # Split into single characters, reverse, and rejoin.
  bases <- strsplit(dna, split = "")[[1]]
  reversed <- paste(rev(bases), collapse = "")
  # chartr() translates every base to its complement in a single pass,
  # replacing the four-step gsub() case-swapping trick.
  chartr("ACGT", "TGCA", toupper(reversed))
}
#getting all the possible guide options
#loop through each fasta seq
# For every repeat sequence, collect each 20-nt candidate guide that sits
# immediately 5' of an NGG PAM, scanning both strands.
seq.guides=list()
for(n in 1:length(seq)){
  rep.seq=seq[n]
  #get forward seqs
  # Each "GG" occurrence marks a candidate PAM; the guide is the 20 nt
  # ending one base before the PAM's N, i.e. positions pam.pos-21 .. pam.pos-2.
  for.rep.seq=unlist(gregexpr("GG",rep.seq))
  n.guides=length(for.rep.seq)
  for.seq.guides=c()
  for(i in 1:n.guides){
    pam.pos=for.rep.seq[i]
    for.seq.guides[i]=substr(rep.seq,pam.pos-21,pam.pos-2)
  }
  #cut to only full length guides
  # Guides whose window runs off the 5' end come back shorter than 20 nt
  # (also covers the "" produced when gregexpr found no match and
  # returned -1), so they are dropped here.
  for.seq.guides=for.seq.guides[nchar(for.seq.guides)==20]
  #get reverse seqs
  # Repeat the same scan on the reverse complement to cover PAMs on the
  # minus strand.
  rev.rep=rev.comp(rep.seq)
  rev.rep.seq=unlist(gregexpr("GG",rev.rep))
  n.guides=length(rev.rep.seq)
  rev.seq.guides=c()
  for(i in 1:n.guides){
    pam.pos=rev.rep.seq[i]
    rev.seq.guides[i]=substr(rev.rep,pam.pos-21,pam.pos-2)
  }
  #cut to only full length guides
  rev.seq.guides=rev.seq.guides[nchar(rev.seq.guides)==20]
  # Deduplicated union of plus- and minus-strand guides for this repeat.
  seq.guides[[n]]=unique(append(for.seq.guides,rev.seq.guides))
}
# Clean up the loop temporaries so only seq, seq.guides etc. remain.
rm(for.rep.seq,for.seq.guides,i,last,n,first,n.guides,newseq,pam.pos,rep.seq,rev.rep,rev.seq.guides,rev.rep.seq)
# Pool of distinct candidate guides across all repeats.
all.guide.options=unique(unlist(seq.guides))
names(seq.guides)=names(seq)
# guide.present[[i]] is a logical vector over repeats: TRUE when guide i
# occurs in that repeat's guide set. NOTE(review): grepl() deparses each
# list element of seq.guides to one string before matching; this is safe
# here only because guides consist of A/C/G/T and contain no regex
# metacharacters.
guide.present=list()
for(i in 1:length(all.guide.options)){
  guide.present[[i]]=grepl(all.guide.options[i],seq.guides)
  # Progress report: current guide index and percentage completed.
  print(paste(" ",i,"/",length(all.guide.options)," ",i/length(all.guide.options)*100,"%"," ",sep=" "))
}
# Hit matrix: rows = guides, columns = repeat names, entries TRUE/FALSE;
# written out as tab-separated text.
mat=do.call(rbind,guide.present)
colnames(mat)=names(seq)
rownames(mat)=all.guide.options
write.table(mat,output.name,sep="\t",col.names = T,row.names = T,quote = F)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.