blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4ca1a643c48fdab2b3afdc39ab35dcfc46cbd3cb | 570d4141186786df5179cc4346dd3808c1c41f26 | /qc/06-protcoding2.R | 063d8b644c67973c8668dbe8e130da359618f599 | [
"MIT"
] | permissive | ArtemSokolov/amp-ad | 552fee92c0ec30539386745210f5ed2292931144 | dd5038f2497698b56a09471c89bb710329d3ef42 | refs/heads/master | 2021-06-21T21:04:44.368314 | 2019-09-10T17:40:48 | 2019-09-10T17:40:48 | 114,150,614 | 0 | 4 | MIT | 2019-09-10T17:40:49 | 2017-12-13T17:39:02 | HTML | UTF-8 | R | false | false | 2,583 | r | 06-protcoding2.R | ## Another pass at the analysis of protein coding regions
##
## by Artem Sokolov
library( tidyverse )
library( synapseClient )
synapseLogin()
## Retrieves a file from synapse to local disk and returns its local path
syn <- function( id, dlc = "~/data/AMP-AD/QC/06" )
{
    ## Fetch (and locally cache) the Synapse entity under dlc, then
    ## return the on-disk location of the downloaded file
    entity <- synGet( id, downloadLocation = dlc )
    entity@filePath
}
## Custom ggplot theme that boldifies text elements
bold_theme <- function()
{
    ## Helper: bold element_text of the given point size
    boldtxt <- function( sz, ... ) element_text( size = sz, face = "bold", ... )
    theme_bw() +
        theme( axis.text    = boldtxt(12),
               axis.title   = boldtxt(14),
               legend.text  = boldtxt(12),
               legend.title = boldtxt(14),
               strip.text   = boldtxt(12) )
}
## Identifies the IDs of all relevant
## Hard-coded lookup table of the Synapse IDs for each (Dataset, Task)
## background_auc.csv entity.
idsBK <- function()
{
    ## Structure originally derived via dput() of the following:
    ## source( "../R/resmine.R" )
    ## X1 <- allSettings( "rosmap" ) %>% filter( Method == "sklLR" )
    ## X2 <- allSettings( "syn15660477" ) %>% filter( Method == "sklLR" )
    ## XX <- bind_rows( X1, X2 ) %>% select( -Region, -Strategy, -Method ) %>%
    ##   mutate( Files = map( settings_md5, synBySettingsMd5 ) ) %>%
    ##   mutate( BKid = map_chr(Files, ~filter(.x, name=="background_auc.csv")$id) ) %>%
    ##   select( -Files, -settings_md5 )
    dataset <- rep( c("ROSMAPpc", "ROSMAP"), each = 3 )
    task    <- rep( c("AB", "AC", "BC"), times = 2 )
    bkid    <- c( "syn15589822", "syn15589816", "syn15589810",
                  "syn15661345", "syn15661346", "syn15661344" )
    structure( list(Dataset = dataset, Task = task, BKid = bkid),
               class = c("tbl_df", "tbl", "data.frame"),
               row.names = c(NA, -6L),
               .Names = c("Dataset", "Task", "BKid") )
}
## Main figure: background AUC as a function of gene-set size, comparing
## the full ROSMAP feature space ("28.4k") against protein-coding-only
## ("ProtCod"), one facet per task (AB/AC/BC per idsBK()).
mainPlot <- function()
{
## Load all the relevant entities
## (download each background_auc.csv from Synapse and parse it)
X <- idsBK() %>% mutate( AUCs = map(BKid, ~read_csv(syn(.x), col_types=cols())) )
## Reshape everything into a single data frame
## NOTE(review): bare unnest relies on the legacy tidyr behaviour of
## unnesting every list-column -- confirm against the tidyr version in use.
XX <- X %>% mutate( AUCs = map( AUCs, gather, Size, AUC ) ) %>% unnest %>%
mutate_at( "Size", as.integer ) %>% select( -BKid )
## Tweak the names by hand (dataset id -> display label)
RR <- XX %>% mutate( `Gene Set` = c("ROSMAP" = "28.4k", "ROSMAPpc" = "ProtCod")[Dataset] )
## Compute summary distributions at key set sizes (boxplot overlay only)
SS <- RR %>% filter( Size %in% c( 100, 300, 500, 700, 900 ) )
## Plot the results: smoothed trend over all sizes + boxplots at key sizes
ggplot( RR, aes( x=Size, y=AUC, color=`Gene Set`) ) + theme_bw() +
geom_boxplot( aes(group=interaction(Size, `Gene Set`)), data=SS ) +
geom_smooth(se = FALSE) + facet_wrap( ~Task ) + bold_theme() +
scale_color_manual( values=c("28.4k"="tomato", "ProtCod"="steelblue") )
}
|
53dc27bdd005c933cd56cc7243f49c885bbef89b | 0b12fa717362a0b5d2810a55580e1b4563415ae6 | /modules/aml_sidebar_module.R | f044cce2269fb7ca1c85678c69474556e5afaca0 | [] | no_license | GeorgeOduor/amltools | 7ebe158ed5ba934d65d401b0f279b40092a79309 | 0724778fe57a8cb31da8fbb916282e53a2edf853 | refs/heads/master | 2022-11-13T07:18:56.294413 | 2020-07-05T10:18:33 | 2020-07-05T10:18:33 | 276,906,767 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 515 | r | aml_sidebar_module.R | aml_sidebar_UI <- function(id) {
ns <- NS(id)
tagList(
uiOutput(ns("sidebar_aml"))
)
}
## Server half of the AML sidebar shiny module: renders the sidebar menu
## dynamically (paired with aml_sidebar_UI's uiOutput(ns("sidebar_aml"))).
aml_sidebar <- function(input, output, session) {
ns = session$ns
output$sidebar_aml <- renderUI({
## NOTE(review): the menu id is a bare string; inside a module, an input
## read via input$mainsidebar usually requires id = ns("mainsidebar") --
## confirm how the selected tab is consumed.
sidebarMenu(id = "mainsidebar",
hr(),
menuItem("Home",icon = icon("home"),tabName = "hometab",selected = T),
## External link item pointing at a locally hosted app
menuItem(text = "Loan Limits",icon = icon("money"),
href = "http://127.0.0.1:2022",startExpanded = T)
)
})
}
|
ea3088c7466cc5823be4a55c6d8f43fb55a204f7 | 7a0e2bd98da6be2d7aa5005f21872d7c88a4d544 | /man/bakedpi.Rd | 12b1e0b3f23f210a1780672648d48488f2339a4b | [] | no_license | hansenlab/yamss | 1d4f337c8a96046c9bc7f73e9aef73a501f22460 | be8606da930bd60fab2c09aba4123b3d3068f094 | refs/heads/master | 2022-03-29T16:32:25.181063 | 2022-02-03T16:49:01 | 2022-02-03T16:49:01 | 69,065,057 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,661 | rd | bakedpi.Rd | \name{bakedpi}
\alias{bakedpi}
\title{Process raw data to compute density estimate.}
\usage{
bakedpi(cmsRaw, dbandwidth = c(0.005, 10), dgridstep = c(0.005, 1),
outfileDens = NULL, dortalign = FALSE, mzsubset = NULL, verbose = TRUE)
}
\arguments{
\item{cmsRaw}{An object of class \code{CMSraw}.}
\item{dbandwidth}{A length-2 vector indicating the kernel density bandwidth
in the M/Z and retention time (scan) directions. Default: \code{c(0.005,10)}}
\item{dgridstep}{A length-2 vector indicating the grid step sizes. Default:
\code{c(0.005,1)}.}
\item{outfileDens}{Name of a file to save density estimate. If NULL,
no output is saved.}
\item{dortalign}{A logical value. Should retention time correction be
performed?}
\item{mzsubset}{A length-2 vector indicating a subset of the M/Z range to
process. \code{NULL} otherwise.}
\item{verbose}{Should the function be verbose?}
}
\value{
An object of class \code{CMSproc} containing background corrected intensities,
the bivariate kernel density estimate, and quantiles of the nonzero values in
the density estimate.
}
\description{
The \code{bakedpi} method stands for bivariate approximate kernel density
estimation for peak identification. It performs background correction,
retention time correction, and bivariate kernel density estimation.
}
\details{
\code{bakedpi} first performs region-specific background correction. An
optional retention time correction step follows in which M/Z region-specific
shifts are computed to align the raw data. Next the two-dimensional density
estimate is computed. The purpose of this function is to take the raw data
read in by \code{readMSdata} and perform the steps necessary for bivariate
kernel density estimation. The output of this function is used by
\code{slicepi} to detect peaks and provide peak quantifications.
}
\examples{
## A very small dataset
data(cmsRawExample)
cmsProc1 <- bakedpi(cmsRawExample,
dbandwidth = c(0.01, 10), dgridstep = c(0.01, 1),
dortalign = TRUE, mzsubset = c(500,510))
## A longer example which takes a few minutes to run.
## This is still a smaller mz-slice of the full data.
\dontrun{
if (require(mtbls2)) {
data(mtbls2)
filepath <- file.path(find.package("mtbls2"), "mzML")
files <- list.files(filepath, pattern = "MSpos-Ex1", recursive = TRUE, full.names = TRUE)
colData <- DataFrame(sampClasses = rep(c("wild-type", "mutant"), each = 4))
cmsRaw <- readMSdata(files = files, colData = colData, verbose = TRUE)
cmsProc2 <- bakedpi(cmsRaw, dbandwidth = c(0.01, 10), dgridstep = c(0.01, 1),
outfileDens = NULL, dortalign = TRUE, mzsubset = c(500, 520))
}
}
} |
34131cecc792930ca51316cd28e4f075d2e6a092 | b8d522f0bca0b379e982c5b094243f8a75979472 | /R/repMask.R | 2e6394c3ac31d4af6d8e77eb20004de4cfa8afad | [] | no_license | aryeelab/scmeth | 45d2dc4ce77b2e42c67737a1eeffa8a4570ceab3 | b7b86da69dcfb13a14b8ab5099649c2d41469d55 | refs/heads/master | 2021-03-24T09:43:58.358028 | 2019-06-10T21:12:11 | 2019-06-10T21:12:11 | 75,974,085 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,009 | r | repMask.R | #'Provides Coverage metrics in the repeat masker region
#'@param bs bsseq object
#'@param organism scientific name of the organism of interest,
#'e.g. Mmusculus or Hsapiens
#'@param genome reference alignment, i.e. mm10 or hg38
#'@return Data frame with sample name and coverage in repeat masker regions
#'@examples
#'library(BSgenome.Mmusculus.UCSC.mm10)
#'library(AnnotationHub)
#'load(system.file("extdata", 'bsObject.rda', package='scmeth'))
#'repMask(bs, Mmusculus, 'mm10')
#'@importFrom DelayedArray colSums
#'@importFrom bsseq getCoverage
#'@export
repMask <- function(bs, organism, genome){
    ## Coerce seqlevel naming to UCSC so `bs` matches the UCSC-style
    ## RepeatMasker annotation retrieved below.
    GenomeInfoDb::seqlevelsStyle(bs) <- "UCSC"
    hub <- AnnotationHub::AnnotationHub()
    ## Look up the RepeatMasker ("rmsk") track for this organism/genome.
    repeatGr <- hub[[names(AnnotationHub::query(hub,
        c("rmsk", GenomeInfoDb::organism(organism), genome)))]]
    ## Flag loci overlapping any repeat region.  (Renamed from `rep` to
    ## avoid shadowing base::rep inside this function.)
    inRepeat <- GenomicRanges::countOverlaps(bs, repeatGr) > 0
    cov <- bsseq::getCoverage(bs)
    ## Per-sample count of CpGs with >= 1x coverage among loci OUTSIDE
    ## repeat regions.
    ## NOTE(review): the roxygen title says "coverage in repeat masker
    ## regions", but the subset used is !inRepeat -- confirm intent.
    covDf <- data.frame(coveredCpgs = DelayedArray::colSums(cov[!inRepeat, ] >= 1))
    return(covDf)
}
|
bfcf2f8c77b9e876c1734328f1ddcadfed62cd3e | c2f9dc47aebb1b4a79acf054f158ba1fa7a4b89f | /Plugin.r | 9b475ccdb4ce1fa076832611fe0f5d9399e2b7e6 | [] | no_license | blackberry26/SMPR | b7c003ad8fe6ca0c7869c692e52683f25084b37b | 3336fce79a4f4d3b07c35926965438c43c002b16 | refs/heads/master | 2020-03-29T22:48:22.852797 | 2019-01-25T21:14:06 | 2019-01-25T21:14:06 | 150,441,215 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,849 | r | Plugin.r | estimateMu <- function(objects)
{
## Column means of `objects`, returned as a 1 x p row matrix.
## Vectorized with colMeans(); equivalent to filling
## mu[1, col] <- mean(objects[, col]) one column at a time.
mu <- matrix(colMeans(objects), 1, ncol(objects))
return(mu)
}
## Estimate the covariance matrix of a (multivariate) normal sample,
## given an already-estimated mean vector `mu` (a 1 x p matrix).
## Vectorized: centering the rows of `objects` around `mu` and taking
## crossprod() is equivalent to the original elementwise sum
##   sum_i t(x_i - mu) %*% (x_i - mu) / (n - 1).
estimateCovarianceMatrix <- function(objects, mu)
{
n <- nrow(objects)
centered <- sweep(objects, 2, as.numeric(mu))
## unname() keeps the result dimnames-free, matching the original
## matrix(0, ...) accumulator.
sigma <- unname(crossprod(centered)) / (n - 1)
return (sigma)
}
## Coefficients of the plug-in (quadratic) discriminant boundary between
## two Gaussian classes with estimated means mu1, mu2 (1 x 2 row matrices,
## as produced by estimateMu) and covariances sigma1, sigma2.
getPlugInDiskriminantCoeffs <- function(mu1, sigma1, mu2,
sigma2)
{
## Line equation: a*x1^2 + b*x1*x2 + c*x2 + d*x1 + e*x2
invSigma1 <- solve(sigma1)
invSigma2 <- solve(sigma2)
## Constant term: log-determinant ratio plus quadratic forms of the means
f <- log(abs(det(sigma1))) - log(abs(det(sigma2))) +
mu1 %*% invSigma1 %*% t(mu1) - mu2 %*% invSigma2 %*%
t(mu2);
## Quadratic part comes from the difference of the inverse covariances
alpha <- invSigma1 - invSigma2
a <- alpha[1, 1]
b <- 2 * alpha[1, 2]
c <- alpha[2, 2]
## Linear part
beta <- invSigma1 %*% t(mu1) - invSigma2 %*% t(mu2)
d <- -2 * beta[1, 1]
e <- -2 * beta[2, 1]
return (c("x^2" = a, "xy" = b, "y^2" = c, "x" = d, "y"
= e, "1" = f))
}
## Number of objects in each class
ObjectsCountOfEachClass <- 100
## MASS provides mvrnorm() for sampling a multivariate normal distribution
library(MASS)
## Generate test data: two bivariate Gaussian classes
Sigma1 <- matrix(c(10, 0, 0, 1), 2, 2)
Sigma2 <- matrix(c(8, 0, 0, 5), 2, 2)
Mu1 <- c(11, 3)
Mu2 <- c(3, 8)
xy1 <- mvrnorm(n=ObjectsCountOfEachClass, Mu1, Sigma1)
xy2 <- mvrnorm(n=ObjectsCountOfEachClass, Mu2, Sigma2)
## Stack both classes into one sample; third column is the class label
xl <- rbind(cbind(xy1, 1), cbind(xy2, 2))
## Plot the training sample
colors <- c("red","blue","green")
plot(xl[,1], xl[,2], pch = 21, bg = colors[xl[,3]], asp = 1)
## Estimation: per-class mean vectors and covariance matrices
objectsOfFirstClass <- xl[xl[,3] == 1, 1:2]
objectsOfSecondClass <- xl[xl[,3] == 2, 1:2]
mu1 <- estimateMu(objectsOfFirstClass)
mu2 <- estimateMu(objectsOfSecondClass)
sigma1 <- estimateCovarianceMatrix(objectsOfFirstClass,
mu1)
sigma2 <- estimateCovarianceMatrix(objectsOfSecondClass,
mu2)
coeffs <- getPlugInDiskriminantCoeffs(mu1, sigma1, mu2,
sigma2)
## Draw the discriminant function (green line) as the zero contour of
## the quadratic form evaluated over a grid
x <- y <- seq(-10, 20, len=100)
z <- outer(x, y, function(x, y) coeffs["x^2"]*x^2 +
coeffs["xy"]*x*y
+ coeffs["y^2"]*y^2 + coeffs["x"]*x
+ coeffs["y"]*y + coeffs["1"])
contour(x, y, z, levels=0, drawlabels=FALSE, lwd = 3, col =
"green", add = TRUE)
|
c2778c623f14833f6f81865b17821a163a9b9a8c | f9fb8361c3ab28ba67f6345978ef01ac5a997599 | /Class_Practice/NetworkAnalysis/germany_passing.R | 6d0a7b7aaa57bbae49e4142ae6c7d6bafd9f6876 | [] | no_license | sahNarek/CSE_270_Practice | cb14736d0253cf578154ef0b407502235f109a3e | 9347947eae4d532f6cd32dd3482c0b89e7761394 | refs/heads/master | 2020-07-29T15:02:38.199858 | 2019-12-10T18:27:56 | 2019-12-10T18:27:56 | 209,854,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,079 | r | germany_passing.R | library(SportsAnalytics270)
library(igraph)
library(network)
library(intergraph)
library(circlize)
## Germany's 2014 World Cup final passing data (player x player pass counts)
data("wc_14_final")
wc_14_final$germany_passing
## Build a directed, weighted igraph from the passing matrix, then
## convert it to a network object for plotting
i_ger <- igraph::graph.adjacency(as.matrix(wc_14_final$germany_passing),
mode = "directed", weighted = T, diag = F)
net_ger <- intergraph::asNetwork(i_ger)
## Attach player metadata as vertex attributes
network::set.vertex.attribute(net_ger, "player.name",
wc_14_final$germany_team$Player_name)
get.vertex.attribute(net_ger, "player.name")
network::set.vertex.attribute(net_ger, "player.position",
as.character(wc_14_final$germany_team$Position))
get.vertex.attribute(net_ger, "player.position")
network::set.vertex.attribute(net_ger, "passes.completed",
wc_14_final$germany_team$Pass_completed)
## Pass-completion rate per player
wc_14_final$germany_team$Completion <- wc_14_final$germany_team$Pass_completed /
wc_14_final$germany_team$Pass_attempted
network::set.vertex.attribute(net_ger, "completion",
wc_14_final$germany_team$Completion)
## Restrict to vertices 1-11 (the starting eleven)
net_st <- get.inducedSubgraph(net_ger, v = 1:11)
## NOTE(review): `names` shadows base::names for the rest of this script
names <- get.vertex.attribute(net_st, "player.name")
coords <- plot(net_st, suppress.axes = F)
ger_team <- wc_14_final$germany_team
ger_team[8,]
## Hand-tuned layout coordinates (roughly on-pitch positions)
coords[1,] <- c(-6, 1.5)
coords[2,] <- c(-4, 3)
coords[8,] <- c(-4, 0)
coords[10,] <- c(-5, 2)
coords[3,] <- c(-5, 1)
coords[4,] <- c(-3, 2)
coords[11,] <- c(-3, 1)
coords[5,] <- c(-2, 3)
coords[7,] <- c(-2, 0)
coords[9,] <- c(-2, 1.5)
coords[6,] <- c(-1, 1.5)
plot(net_st, coord = coords, suppress.axes = F, displaylabels = T,
label = net_st %v% "player.name")
plot(net_st, coord = coords, edge.lwd ="weight",
vertex.col = "player.position")
plot(net_st, coord = coords, vertex.col = "player.position",
vertex.cex = 2*(net_st %v% "completion"),
label = net_st %v% "player.name")
## Chord diagram of pass volumes between starters
st_mat <- as.matrix(net_st, matrix.type = "adjacency", attrname = "weight")
colnames(st_mat) <- net_st %v% "player.name"
rownames(st_mat) <- colnames(st_mat)
chordDiagram(st_mat, directional = T)
104fb76ae82c3fb81fe1fca609783dda58ff686d | da2c93c12cebc4205cf617eb791d9eaca9fb02a3 | /man/icbiplot.Rd | 775043b67a00d9061f3296a08eeab4ef42dfd76c | [] | no_license | cran/icensBKL | 7b30793201fc0d18cc45efefaa60a436e4dd6cc6 | a767317fd024ec338ba1b770734c17f55739a674 | refs/heads/master | 2022-09-23T22:31:46.955546 | 2022-09-19T13:26:13 | 2022-09-19T13:26:13 | 110,893,823 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,261 | rd | icbiplot.Rd | \name{icbiplot}
\alias{icbiplot}
\title{
Interval-censored biplot
}
\description{
Principal Component Analysis for interval-censored data as described
in Cecere, Groenen and Lesaffre (2013).
}
\usage{
icbiplot(L, R, p = 2, MaxIter = 10000, tol = 1e-06, plotit = TRUE, seed = NULL, \dots)
}
\arguments{
\item{L}{
Matrix of dimension number of individuals/samples by number of variables with left endpoints of observed intervals.
}
\item{R}{
Matrix of dimension number of individuals/samples by number of variables with right endpoints of observed intervals.
}
\item{p}{
Dimension of the solution. Default value is \eqn{p = 2}.
}
\item{MaxIter}{
Maximum number of iterations in the iterative minimazation algorithm
}
\item{tol}{
Tolerance when convergence is declared
}
\item{plotit}{
Logical value. Default equals TRUE. A biplot in dimension 2 is plotted.
}
\item{seed}{
The seed for the random number generator. If NULL, current R system seed is used.
}
\item{\dots}{
further arguments to be passed.
}
}
\value{
Returns a list with the following components
\item{X}{matrix of number of individuals times 2 (p) with coordinates representing the individuals}
\item{Y}{matrix of number of variables times 2 (p) with coordinates representing the variables}
\item{H}{matrix of number of individuals times number of variables with approximated events}
\item{DAF}{Disperssion accounted for (DAF) index}
\item{FpV}{matrix showing the fit per variable}
\item{iter}{number of iterations performed}
}
\references{
Cecere, S., Groenen, P. J. F., and Lesaffre, E. (2013).
The interval-censored biplot.
\emph{Journal of Computational and Graphical Statistics},
\bold{22}(1), 123-134.
}
\author{
Silvia Cecere, port into icensBKL by Arnošt Komárek \email{arnost.komarek@mff.cuni.cz}
}
\examples{
data("tandmob", package = "icensBKL")
Boys <- subset(tandmob, fGENDER=="boy")
L <- cbind(Boys$L14, Boys$L24, Boys$L34, Boys$L44)
R <- cbind(Boys$R14, Boys$R24, Boys$R34, Boys$R44)
L[is.na(L)] <- 0
R[is.na(R)] <- 20 ## 20 = infinity in this case
icb <- icbiplot(L, R, p = 2, MaxIter = 10000, tol = 1e-6,
plotit = TRUE, seed = 12345)
}
\keyword{survival}
\keyword{dplot}
|
edb7e3d5a47faa2ce265d0ef0b4c1cf3725983fa | c8b2e7eb5986165270a6c9c2e7d593597006c521 | /engagement_topics.R | d66b13ab74371dbd5d9020da46af77618002a301 | [] | no_license | milkha/FBElec16 | 094708f199a1237b7a2183dea261edf88844db10 | bf613bdffd6006f94fa06253e4bef2d8388f7bf5 | refs/heads/master | 2020-05-27T22:22:28.468637 | 2017-03-01T20:43:54 | 2017-03-01T20:43:54 | 83,597,089 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,752 | r | engagement_topics.R | library(tm)
library(topicmodels)
## Document-term matrix and fitted LDA model (c10.rds) for the posts
dtm <- readRDS("dtm.rds")
lda.clinton <- readRDS("c10.rds")
clinton.topics <- as.matrix(topics(lda.clinton))
clinton.terms <- as.matrix(terms(lda.clinton,20))
## Sparse-matrix entries whose column (term) index matches a 'clinton'
## term; cidx = the documents containing any of them
idx<-unlist(lapply(which(grepl('clinton', dtm$dimnames$Terms)), function(x) which(dtm$j %in% x)))
cidx <- sort(unique(dtm$i[idx]))
## fb_data: [[1]] = posts per page, [[3]] = engagement counts
fb_data <- readRDS('fb_data.rds')
posts<-fb_data[[1]]
engagement <- (unlist(fb_data[[3]]))
## Assign each post to its page (assumes exactly 10 pages, in order)
p_id <- array(NA, length(engagement))
p_id[1:length(posts[[1]])] <- 1
last_id <- length(posts[[1]])
for (i in 2:10) {
p_id[(last_id+1):(last_id+length(posts[[i]]))] <- i;
last_id <- last_id+length(posts[[i]]);
}
## Keep only the Clinton-mentioning posts
engagement <- engagement[cidx];
nposts <- length(engagement)
npages <- 10;
ntopics <- 10;
p_id <- p_id[cidx];
## One-hot topic membership per post
t_id <- array(0, c(nposts, ntopics))
for (i in 1:nposts)
t_id[i, clinton.topics[i]] <- 1;
## Center log-engagement within each page
for (i in 1:npages)
engagement[p_id==i] = log(engagement[p_id==i]) - mean(log(engagement[p_id==i]))
library(rstan)
rstan_options(auto_write = TRUE);
options(mc.cores = parallel::detectCores());
sm <- stan_model("engagement_topics.stan");
## NOTE(review): sampling() is called without a `data` argument -- this
## only works if the Stan program needs no data; confirm against
## engagement_topics.stan.
fit_clinton <- sampling(sm,iter=500)
saveRDS(fit_clinton,"fit_clinton.rds")
### PLOTS
## Horizontal point-range ("credible interval") plot of per-topic
## coefficients for one page.  A second variant of this function (darkred
## points, hline at the mean) appears later in this file.
credplot.gg <- function(d){
# d is a data frame with 4 columns
# d$x gives variable names
# d$y gives center point
# d$ylo gives lower limits
# d$yhi gives upper limits
# d$source labels the y axis (page name)
require(ggplot2)
p <- ggplot(d, aes(x=x))+
geom_pointrange(aes(y=y, ymin=ylo, ymax=yhi), fatten = 1, size = 1, color='darkblue')+
geom_hline(yintercept = 0, linetype=2)+
coord_flip()+
xlab('')+
ylab(d$source) +
# NOTE(review): theme_set() globally changes the active theme as a side
# effect (its return value -- the PREVIOUS theme -- is what gets added
# to p); confirm this was intended over theme_gray(base_size = 15).
theme_set(theme_gray(base_size = 15))
return(p)
}
source("multiplot.R")
## Per-page topic-coefficient quantiles from the rstan summary
## (columns 5-7; confirm which quantiles these map to in the summary)
sf <- summary(fit_clinton)$summary
topic_coef<-c();
for (i in 1:10)
topic_coef[[i]]<-sf[sprintf("t_coef[%d,%d]", i,1:10),c(5,6,7)]
p_names <- c("ABC", "BBC","CBS", "CNN", "Fox News", "NBC",
"NPR", "New York Times", "Washington Post", "Wall Street Journal")
## One interval panel per page, laid out in a 2-column grid, saved to PNG
png ("clinton_topics.png", height=10, width=7, units = 'in', res = 200)
plots<-c()
for (i in 1:10) {
df_plot<-data.frame(list(ylo=topic_coef[[i]][,1], yhi=topic_coef[[i]][,3], y=topic_coef[[i]][,2],
x=sprintf("topic%2d",1:10), source=p_names[i]))
plots[[i]]<-credplot.gg(df_plot)
}
multiplot(plotlist = plots, cols = 2)
invisible(dev.off())
#### trump
library(tm)
library(topicmodels)
dtm <- readRDS("dtm.rds")
lda.trump <- readRDS("t10.rds")
trump.topics <- as.matrix(topics(lda.trump))
trump.terms <- as.matrix(terms(lda.trump,20))
## Sparse-matrix entries whose term matches 'trump'; tidx = those documents
idx<-unlist(lapply(which(grepl('trump', dtm$dimnames$Terms)), function(x) which(dtm$j %in% x)))
tidx <- sort(unique(dtm$i[idx]))
fb_data <- readRDS('fb_data.rds')
posts<-fb_data[[1]]
## NOTE(review): engagement is log-transformed here AND again inside the
## per-page centering loop below (double log); the Clinton section keeps
## it raw at this point -- confirm which is intended.
engagement <- log(unlist(fb_data[[3]]))
## Assign each post to its page (assumes exactly 10 pages, in order)
p_id <- array(NA, length(engagement))
p_id[1:length(posts[[1]])] <- 1
last_id <- length(posts[[1]])
for (i in 2:10) {
p_id[(last_id+1):(last_id+length(posts[[i]]))] <- i;
last_id <- last_id+length(posts[[i]]);
}
engagement <- engagement[tidx];
npages <- 10;
ntopics <- 10;
p_id <- p_id[tidx];
## Drop sparse terms, then keep only documents that still have entries
dtm.trump <- dtm[ sort(unique(dtm$i[idx])),]
dtm.trump <- removeSparseTerms(dtm.trump,0.995)
ui = unique(dtm.trump$i)
engagement<-engagement[ui]
p_id <- p_id[ui]
nposts <- length(engagement)
## One-hot topic membership per post
t_id <- array(0, c(nposts, ntopics))
for (i in 1:nposts)
t_id[i, trump.topics[i]] <- 1;
## Center (already-logged, see note above) engagement within each page
for (i in 1:npages)
engagement[p_id==i] = log(engagement[p_id==i]) - mean(log(engagement[p_id==i]))
library(rstan)
rstan_options(auto_write = TRUE);
options(mc.cores = parallel::detectCores());
sm <- stan_model("engagement_topics.stan");
## NOTE(review): sampling() is called without a `data` argument --
## confirm the Stan program needs none.
fit_trump <- sampling(sm,iter=500)
saveRDS(fit_trump,"fit_trump.rds")
### PLOTS
## Horizontal point-range plot, second variant: darkred points and a
## dashed hline at the MEAN of d$y (the earlier variant in this file uses
## darkblue and a zero line).  Redefining the name overwrites that variant.
credplot.gg <- function(d){
# d is a data frame with 4 columns
# d$x gives variable names
# d$y gives center point
# d$ylo gives lower limits
# d$yhi gives upper limits
# d$source labels the y axis (page name)
require(ggplot2)
p <- ggplot(d, aes(x=x))+
geom_pointrange(aes(y=y, ymin=ylo, ymax=yhi), fatten = 1, size = 1, color='darkred')+
geom_hline(yintercept = mean(d$y), linetype=2)+
coord_flip()+
ylab(d$source) +
xlab('')+
# NOTE(review): theme_set() globally changes the active theme as a side
# effect; its return value (the previous theme) is what gets added to p.
theme_set(theme_gray(base_size = 15))
return(p)
}
## Per-page topic-coefficient quantiles (summary columns 5-7; confirm
## which quantiles these map to in the rstan summary)
sf <- summary(fit_trump)$summary
topic_coef<-c();
for (i in 1:10)
topic_coef[[i]]<-sf[sprintf("t_coef[%d,%d]", i,1:10),c(5,6,7)]
p_names <- c("ABC", "BBC","CBS", "CNN", "Fox News", "NBC",
"NPR", "New York Times", "Washington Post", "Wall Street Journal")
## One interval panel per page, 2-column grid, saved to PNG
png ("trump_topics.png", height=10, width=7, units = 'in', res = 200)
plots<-c()
for (i in 1:10) {
df_plot<-data.frame(list(ylo=topic_coef[[i]][,1], yhi=topic_coef[[i]][,3], y=topic_coef[[i]][,2],
x=sprintf("topic%2d",1:10), source=p_names[i]))
plots[[i]]<-credplot.gg(df_plot)
}
multiplot(plotlist = plots, cols = 2)
invisible(dev.off())
|
a2f4db64f860c1ff8cbbba494741f6051a5403c2 | 1eec56205889241fc9e47443f5534216acbac52c | /man/smargins_v2.Rd | 775e6ff31bf80d46533ff8cf88d8a1b5dd9df512 | [] | no_license | izahn/smargins | fac638a907e5ca75452bd399de27d007162ea68e | 949f8fc9e4729c19a22e2512c701fae2c422e69e | refs/heads/master | 2021-05-07T02:52:32.980547 | 2019-09-10T19:33:53 | 2019-09-10T19:33:53 | 110,725,408 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,201 | rd | smargins_v2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tmp.R
\name{smargins_v2}
\alias{smargins_v2}
\title{Temporary smargins variant}
\usage{
smargins_v2(model, vars, xvalues = NULL, n = 5000, coef.fun = coef,
sim.fun = I, linkinv.fun = family(model)$linkinv, vcov.fun = vcov,
model.frame.fun = model.frame, model.matrix.fun = function(x, data) {
model.matrix(formula(x), data = data) })
}
\arguments{
\item{model}{A fitted model object.}
\item{vars}{Character vector naming variables}
\item{xvalues}{Named list of values.}
\item{n}{Number of simulations to run.}
\item{coef.fun}{Function to use for computing coefficients.}
\item{sim.fun}{Function to use for adjusting simulations.}
\item{linkinv.fun}{Function to use for computing the inverse link.}
\item{vcov.fun}{Function to use for computing the variance-covariance matrix.}
\item{model.frame.fun}{Function to use for extracting the model.frame.}
\item{model.matrix.fun}{Function to use for extracting the model.matrix.}
}
\value{
A data.frame containing predictor variable values and
expected values of the dependent variable.
}
\description{
Temporary smargins variant
}
\author{
Ista Zahn
}
|
6ef53ae2317efeea8ab15ee71f2458abc641b3c8 | 96c4acaae35f9bed561260764bb86e5ed6a4662a | /TDS.ERPs/Participant Info.R | a157e5d295da4909ae705c87748669f9fbca24a9 | [] | no_license | SFord88/TDS-Package | 51624bf493944e0db23be4ae145aae3c03f69924 | 246a33760d076f56ae9f685e52ad3ea0e7d0ed79 | refs/heads/master | 2020-05-16T20:08:04.318938 | 2019-04-25T20:59:20 | 2019-04-25T20:59:20 | 183,265,388 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 406 | r | Participant Info.R | ## Participant info
## NOTE(review): assumes a data frame `TDS` (participants in rows 2:27) is
## already present in the workspace -- nothing in this script creates it.
## Load plyr for count()
library(plyr)
## Count number of participants
print(count(TDS[2:27,1]))
## Create gender variable (column 5)
gender <- TDS[2:27,5]
## Count number of females and number of male participants
count(gender)
## Create age variable (column 6, coerced to numeric)
age <- as.numeric(TDS[2:27, 6])
## Find mean age of participants
mean(age, na.rm=TRUE)
## Find age range
range(age, na.rm= TRUE)
|
aca6ab03f33185c2d7c638776a2c371c8da77f10 | 100ece4813a2fbb2c62d697def4c622f2a22b290 | /global.R | 0174770746cf6782f2deff319ef262adb4d2e915 | [] | no_license | bah-interactivemedia/Orioles-Hackathon | a3ae9cdb5a24465b080a51f9dc3c062b839377a9 | b7ad50e48f9d7339fc59190972ade1abea0b3fcd | refs/heads/master | 2016-08-12T01:19:30.009966 | 2016-03-02T16:39:17 | 2016-03-02T16:39:17 | 51,162,141 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,464 | r | global.R | library(dplyr)
library(tidyr)
library(shiny)
options(stringsAsFactors = F)
# setwd('C:/Users/B1GRU/Documents/Booz/Bahlmer/Flash Drive/Flash Drive/Data')
## Plot a dendrogram with per-leaf label colours.
## hclust: an hclust object; lab: leaf labels; lab.col: one colour per
## label; hang: how far below each leaf the label sits, as a fraction of
## the maximum tree height.  Extra args in ... pass to plot()/text().
myplclust <- function(hclust, lab = hclust$labels, lab.col = rep(1, length(hclust$labels)),
hang = 0.1, ...) {
## Recover each leaf's (x, y): negative entries of hclust$merge are
## singleton leaves, and each merge height is duplicated so every leaf
## picks up the height of the node it joins.
y <- rep(hclust$height, 2)
x <- as.numeric(hclust$merge)
y <- y[which(x < 0)]
x <- x[which(x < 0)]
x <- abs(x)
## Put leaves in index order so they can be reordered by hclust$order
y <- y[order(x)]
x <- x[order(x)]
plot(hclust, labels = FALSE, hang = hang, ...)
## Draw rotated labels just below each leaf, in the requested colours
text(x = x, y = y[hclust$order] - (max(hclust$height) * hang), labels = lab[hclust$order],
col = lab.col[hclust$order], srt = 90, adj = c(1, 0.5), xpd = NA, ...)
}
# pitch <- read.csv('Pitchfx.csv')
## App data: pitch-level sample, pitching splits, and the precomputed
## favourite-pitch table
pitchType <- read.csv('sampleData.csv')
pitch_splits <- read.csv('Pitching_Splits.csv')
## Lookup table: pitcher_id -> Name
pitcher_id_name <- pitch_splits %>%
select(pitcher_id, Name) %>%
unique
fav_pitches <- read.csv('fav_pitches.csv')
## Provenance of fav_pitches.csv (kept for reference; requires the
## commented-out `pitch` table above):
# fav_pitches <- pitch %>%
# mutate(pitch_type = ifelse(pitch_type == '', 'blank', pitch_type)) %>%
# group_by(pitcher_id, pitch_type) %>%
# summarise(count = n()) %>%
# group_by(pitcher_id) %>%
# mutate(Total = sum(count)) %>%
# spread(pitch_type, count, fill = 0) %>%
# mutate_each(funs(. / Total), -c(pitcher_id, Total)) %>%
# left_join(pitcher_id_name, by = 'pitcher_id') %>%
# select(Name, everything())
#
# write.csv(fav_pitches, 'fav_pitches.csv', row.names = F)
|
83e17468954574c759ab14fd0058a0dbebe3e69b | fbfcb908f975799b43a64c51c9a380701626d488 | /man/autoSum.Rd | 8746f54534f9117bd533b8a908baebc0093c1786 | [] | no_license | BlasBenito/distantia | d29d099ae8740bfadf2a480f18ffdb4ffdbe5f41 | 0d4469f417a7e7970757ee7943ca0016c181f7ec | refs/heads/master | 2022-02-24T04:30:53.308716 | 2022-02-07T14:32:31 | 2022-02-07T14:32:31 | 187,805,264 | 7 | 4 | null | null | null | null | UTF-8 | R | false | true | 3,601 | rd | autoSum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoSum.R
\name{autoSum}
\alias{autoSum}
\title{Computes sum of distances between consecutive samples in a multivariate time-series.}
\usage{
autoSum(
sequences = NULL,
least.cost.path = NULL,
time.column = NULL,
grouping.column = NULL,
exclude.columns = NULL,
method = "manhattan",
parallel.execution = TRUE
)
}
\arguments{
\item{sequences}{dataframe with one or several multivariate time-series identified by a grouping column.}
\item{least.cost.path}{a list usually resulting from either \code{\link{leastCostPath}} or \code{\link{leastCostPathNoBlocks}}.}
\item{time.column}{character string, name of the column with time/depth/rank data. The data in this column is not modified.}
\item{grouping.column}{character string, name of the column in \code{sequences} to be used to identify separate sequences within the file. This argument is ignored if \code{sequence.A} and \code{sequence.B} are provided.}
\item{exclude.columns}{character string or character vector with column names in \code{sequences}, or \code{squence.A} and \code{sequence.B} to be excluded from the analysis.}
\item{method}{character string naming a distance metric. Valid entries are: "manhattan", "euclidean", "chi", and "hellinger". Invalid entries will throw an error.}
\item{parallel.execution}{boolean, if \code{TRUE} (default), execution is parallelized, and serialized if \code{FALSE}.}
}
\value{
A list with slots named according to \code{grouping.column} if there are several sequences in \code{sequences} or a number if there is only one sequence.
}
\description{
Computes the sum of distances between consecutive samples in a multivariate time-series. Required to compute the measure of dissimilarity \code{psi} (Birks and Gordon 1985). Distances can be computed through the methods "manhattan", "euclidean", "chi", and "hellinger", and are implemented in the function \code{\link{distance}}.
}
\details{
Distances are computed as:
\itemize{
\item \code{manhattan}: \code{d <- sum(abs(x - y))}
\item \code{euclidean}: \code{d <- sqrt(sum((x - y)^2))}
\item \code{chi}: \code{
xy <- x + y
y. <- y / sum(y)
x. <- x / sum(x)
d <- sqrt(sum(((x. - y.)^2) / (xy / sum(xy))))}
\item \code{hellinger}: \code{d <- sqrt(1/2 * sum(sqrt(x) - sqrt(y))^2)}
}
Note that zeroes are replaced by 0.00001 when \code{method} equals "chi" or "hellinger".
}
\examples{
\donttest{
#loading data
data(sequenceA)
data(sequenceB)
#preparing datasets
AB.sequences <- prepareSequences(
sequence.A = sequenceA,
sequence.A.name = "A",
sequence.B = sequenceB,
sequence.B.name = "B",
merge.mode = "complete",
if.empty.cases = "zero",
transformation = "hellinger"
)
#computing distance matrix
AB.distance.matrix <- distanceMatrix(
sequences = AB.sequences,
grouping.column = "id",
method = "manhattan",
parallel.execution = FALSE
)
#computing least cost matrix
AB.least.cost.matrix <- leastCostMatrix(
distance.matrix = AB.distance.matrix,
diagonal = FALSE,
parallel.execution = FALSE
)
AB.least.cost.path <- leastCostPath(
distance.matrix = AB.distance.matrix,
least.cost.matrix = AB.least.cost.matrix,
parallel.execution = FALSE
)
#autosum
AB.autosum <- autoSum(
sequences = AB.sequences,
least.cost.path = AB.least.cost.path,
grouping.column = "id",
parallel.execution = FALSE
)
AB.autosum
}
}
\seealso{
\code{\link{distance}}
}
\author{
Blas Benito <blasbenito@gmail.com>
\itemize{
\item Birks, H.J.B. and Gordon, A.D. (1985) Numerical Methods in Quaternary Pollen Analysis. Academic Press.
}
}
|
40e708cd62425004e01b2b4555791395758d8038 | 4aab2fe7dc1cdbe82d37c0520f8a83cd8937bc53 | /RERConvergeScript.R | efdfe27c2fae59c303c931bab183c26126968148 | [] | no_license | Moreau-Lab/Genomics | 7e5c8a7273162dcfae21436fd3e50f4b967a711a | 4d1eab65c6f0fdbc9b3a0c72ab6b3dbcad4a3bf3 | refs/heads/master | 2020-09-11T14:16:14.195613 | 2020-07-10T15:19:32 | 2020-07-10T15:19:32 | 221,995,192 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,170 | r | RERConvergeScript.R | # Load packages:
library(tidyverse)
library(RERconverge)
library(ape)
# Make a phylo object of the master phylogeny:
AllSpecies <- read.tree(file = "KnownAntPhylogeny.txt")
# Read in the gene trees, reconciled against the master phylogeny:
Test <- readTrees(file = "RERInput4610", masterTree = AllSpecies)
# Estimate relative evolutionary rates (RERs):
# Species (tip labels) to include in the residual computation:
SpeciesToUse <- c("mpha", "aech", "acep", "cflo", "cobs", "hsal", "lhum", "pbar", "sinv", "cbir", "fexs", "veme", "waur", "pgra", "nful", "acol", "ccos","tcur", "dqua", "tcor", "tsep", "tzet")
# BUG FIX: the trees object is `Test` (there is no `Test2` in this script), and
# `SpeciesToUse` is an unnamed character vector, so `names(SpeciesToUse)` was
# NULL and silently disabled the species filter. Pass the vector itself.
RelativeEvolutionaryRates <- getAllResiduals(Test, useSpecies = SpeciesToUse, transform = "sqrt", weighted = TRUE, scale = TRUE)
# NOTE(review): the next line referenced `MammalToyTrees`, which is never
# defined in this script (leftover from the RERconverge walkthrough), so it
# always errored. Disabled but kept for reference:
# MammalRERs <- getAllResiduals(MammalToyTrees, transform = "sqrt", weighted = TRUE, scale = TRUE)
# You can save this output if desired by:
saveRDS(RelativeEvolutionaryRates, file = "RelativeEvolutionaryRates.rds")
# Visualizing relative evolutionary rates for one orthogroup:
TestPlot <- treePlotRers(treesObj = Test, rermat = RelativeEvolutionaryRates, index = "OG12878", type = "c", nlevels = 3)
# Read in phenotype csv:
AntPhenotypes <- read_csv("AntGenomesPhenotypes428.csv")
|
b82b0354284c371fa8ad4c43ec001e8b1b6f6c70 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/4772_1/rinput.R | 7b176b9e5cc5a70488e1ff752cd2e9f62255f434 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the tree for locus 4772_1, remove its root, and write the unrooted
# version back out for codeml.
phylo_tree <- read.tree("4772_1.txt")
phylo_unrooted <- unroot(phylo_tree)
write.tree(phylo_unrooted, file="4772_1_unrooted.txt")
6e565ee1d8d6d0d317ec26c3ae3d4a0c662df136 | f9bc24751d593694fbc98648519df43c70d253ee | /inst/integrationTests/test_externalFileHandle.R | 447a347277a24b98e4e86b80f9103a9d10149bd5 | [] | no_license | brian-bot/rSynapseClient | cf607b242fa292902f832d6a5ecffceeba80eaef | cef1a6bb1f28034a9de826f3e92f1b1139e56c61 | refs/heads/master | 2020-04-05T22:52:30.912248 | 2017-04-28T17:45:58 | 2017-04-28T17:45:58 | 3,354,254 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,900 | r | test_externalFileHandle.R | # integration test for external file handle
#
# Author: bhoff
###############################################################################
## Fixture setup: create a fresh Synapse project and stash it in the
## synapseClient cache so the tests below can retrieve it.
.setUp <- function() {
	synapseClient:::.setCache("testProject", createEntity(Project()))
}
## Fixture teardown: delete the cached test project and undo any mocking.
.tearDown <- function() {
	cachedProject <- synapseClient:::.getCache("testProject")
	deleteEntity(cachedProject)
	synapseClient:::.unmockAll()
}
# Write `content` to `filePath` (no trailing newline) and return the path.
# Both arguments are optional: defaults are a short test string and a fresh
# temporary file.
createFile<-function(content, filePath) {
	if (missing(content)) content<-"this is a test"
	if (missing(filePath)) filePath<- tempfile()
	connection<-file(filePath)
	# Close/destroy the connection even if writeChar() fails, so it never leaks.
	on.exit(close(connection), add = TRUE)
	writeChar(content, connection, eos=NULL)
	filePath
}
# End-to-end test: store a local file in Synapse as an *external* file handle
# (synapseStore = FALSE, file:// URL), then fetch it back and verify that the
# entity metadata and the file-handle fields round-trip.
integrationTestExternalLinkLocalFile<-function() {
	project <- synapseClient:::.getCache("testProject")
	pid<-propertyValue(project, "id")  # NOTE(review): pid is assigned but never used
	# create a file to be uploaded
	synapseStore<-FALSE
	localfile<-createFile()
	localfile<-normalizePath(localfile, winslash="/")
	# strips a leading "C:" drive prefix — presumably so the file:// URL is
	# accepted on Windows; TODO confirm against the Synapse URL parser
	if (substr(localfile,1,2)=="C:") localfile=substr(localfile,3,nchar(localfile))
	filePath<-paste0("file://", localfile)
	file<-File(filePath, synapseStore, parentId=propertyValue(project, "id"))
	# now store it
	storedFile<-synStore(file)
	# check that it worked
	checkTrue(!is.null(storedFile))
	id<-propertyValue(storedFile, "id")
	checkTrue(!is.null(id))
	checkEquals(propertyValue(project, "id"), propertyValue(storedFile, "parentId"))
	checkEquals(filePath, getFileLocation(storedFile))
	checkEquals(synapseStore, storedFile@synapseStore)
	# now download it. This will pull a copy into the cache
	downloadedFile<-synGet(id)
	checkEquals(id, propertyValue(downloadedFile, "id"))
	checkEquals(FALSE, downloadedFile@synapseStore)
	fh<-downloadedFile@fileHandle
	# the external file handle must point at the original URL and report the
	# local file's size and md5 checksum
	checkEquals(filePath, fh$externalURL)
	checkEquals(file.info(localfile)$size, fh$contentSize)
	checkEquals(tools::md5sum(path.expand(localfile))[[1]], fh$contentMd5)
}
|
6c57f30a91a205cb4f81a6a67aea6d7e718e0145 | d4790beac58cf10c5e0fc8a8a052ad0cba67049c | /R/data.R | 5c224194454e4dddc082b98f68d769b35417b926 | [] | no_license | Jas-debugging/diffusion | a09b850e3fbaa025f9a62cc93fdb5bfb801578e2 | f553058f01b97cf028c0ec86e2ef9ed13831f955 | refs/heads/master | 2023-07-06T14:16:41.068110 | 2021-08-12T13:57:57 | 2021-08-12T13:57:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,163 | r | data.R | #' A protein-protein physical interaction network (PPI network)
#'
#' An igraph object containing a protein-protein physical interaction network
#' (PPI network). The network is obtained as described in the article
#' cited in the source section. However, it was reduced in such a way
#' that only contains genes/proteins expressed in the adipose tissue.
#'
#' @format An igraph object containing 18062 binary interactions between 4317
#' proteins
#'
#' @docType data
#' @keywords datasets
#' @name PPI_Network
#' @usage data(PPI_Network)
#' @source Valdeolivas, A., Tichit, L., Navarro, C., Perrin, S., Odelin, G.,
#' Levy, N., … Baudot, A. (2017). Random Walk With Restart On Multiplex And
#' Heterogeneous Biological Networks. bioRxiv, 1–31.
#' https://doi.org/10.1101/134734
#' \url{https://www.biorxiv.org/content/early/2017/08/30/134734}
NULL
#' A pathway network (Pathway network)
#'
#' An igraph object containing a Pathway network.
#' The network is obtained as described in the article
#' cited in the source section. However, it was reduced in such a way
#' that only contains genes/proteins expressed in the adipose tissue.
#' @format An igraph object containing 62602 binary interactions between 3533
#' proteins
#'
#' @docType data
#' @keywords datasets
#' @name Pathway_Network
#' @usage data(Pathway_Network)
#' @source Valdeolivas, A., Tichit, L., Navarro, C., Perrin, S., Odelin, G.,
#' Levy, N., … Baudot, A. (2017). Random Walk With Restart On Multiplex And
#' Heterogeneous Biological Networks. bioRxiv, 1–31.
#' https://doi.org/10.1101/134734
#' \url{https://www.biorxiv.org/content/early/2017/08/30/134734}
NULL
#' A disease-disease similarity network.
#'
#' An igraph object containing a disease-disease similarity network.
#' The network is obtained as described in the article
#' cited in the source section.
#'
#' @format An igraph object containing 28246 binary relationships between 6947
#' diseases.
#'
#' @docType data
#' @keywords datasets
#' @name Disease_Network
#' @usage data(Disease_Network)
#' @source Valdeolivas, A., Tichit, L., Navarro, C., Perrin, S., Odelin, G.,
#' Levy, N., … Baudot, A. (2017). Random Walk With Restart On Multiplex And
#' Heterogeneous Biological Networks. bioRxiv, 1–31.
#' https://doi.org/10.1101/134734
#' \url{https://www.biorxiv.org/content/early/2017/08/30/134734}
NULL
#' Diseases and their causative genes
#'
#' A dataset containing some diseases and their causative genes.
#' The dataset is obtained as described in the article
#' cited in the source section.
#'
#' @format A data frame with 4496 rows and 2 variables:
#' \describe{
#' \item{hgnc_symbol}{Gene name, in HGNC format}
#' \item{mim_morbid}{Disease id, in mim code}
#' }
#'
#' @docType data
#' @keywords datasets
#' @name GeneDiseaseRelations
#' @usage data(GeneDiseaseRelations)
#' @source Valdeolivas, A., Tichit, L., Navarro, C., Perrin, S., Odelin, G.,
#' Levy, N., … Baudot, A. (2017). Random Walk With Restart On Multiplex And
#' Heterogeneous Biological Networks. bioRxiv, 1–31.
#' https://doi.org/10.1101/134734
#' \url{https://www.biorxiv.org/content/early/2017/08/30/134734}
NULL
|
40b22ccdc703fcfd510bec183601ae070a015657 | 2f9ff50ef8a7dc1b43fcc1fadd0d00a3b3cda611 | /R/BehavioralEconomicsR.R | 358f39780efd4c6bd4efa0ac34f014baf649ca82 | [
"MIT"
] | permissive | BESTDATASCIENCE/BehavioralEconomicsR | 7c08ba089ad6458d3c51ce6e43be142f6f13084d | 4099a5fc198544df20a0f5544e00e197c72f6a8b | refs/heads/master | 2020-03-22T23:15:56.183168 | 2018-10-12T01:45:36 | 2018-10-12T01:45:36 | 140,799,445 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 712 | r | BehavioralEconomicsR.R | #' Paquete de R disenado para trabajar de forma sencilla data de investigaciones de Economia Conductual. Creado por BEST
#'
#' @name BehavioralEconomicsR
#' @docType package
NULL
#' Irrational Exuberance data set (ie_data)
#'
#' Data set for working with Behavioral Economics research data in a simple
#' way.
#'
#' @docType data
#' @keywords datasets
#' @name ie_data
#' @usage data(ie_data)
#' @format Data set from the book Irrational Exuberance by Robert Shiller, Nobel laureate in Economics for his contributions to behavioral finance. This data set is updated monthly by the author himself and Yale University.
#'
NULL
|
9d9d2e950e4e9f9df7a31ff4c6ab68f82532a704 | eb48ffe6a8e9f440ac991bcb4f7b6dd47f69d92c | /man/rm.na.Rd | e7781a3cce6a2de18736eed9d46f5ee76c0fcc6a | [] | no_license | xingyanwang-david/rareGWAMA | 5674ac285c2a0ef07df8c1363965e421033218b6 | 72e962dae19dc07251244f6c33275ada189c2126 | refs/heads/master | 2022-02-17T20:27:25.333156 | 2019-09-12T00:29:17 | 2019-09-12T00:29:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 217 | rd | rm.na.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare.input.R
\name{rm.na}
\alias{rm.na}
\title{Remove NAs}
\usage{
rm.na(x)
}
\arguments{
\item{x}{input;}
}
\description{
Remove NAs
}
|
8227774ed65cd2dbf32167df9ea6c96082d0746e | 6520167b619895d5a3f0c86d5119c7a4729cd52c | /ToolsComparison/Tools/Cibersort/CIBERSORT_Modified.R | e3d23af6d8caa6a7bf2cdd6d02117dc4e871c16c | [
"MIT"
] | permissive | frenkiboy/GEDIT | 8109b8f61fa1395dd2475f833004e475c545f4fd | 51fd5cb0cbcd7e6ebdf6b49c00d1ddd1e15d812d | refs/heads/master | 2023-02-23T17:38:25.404881 | 2021-01-25T00:23:56 | 2021-01-25T00:23:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 140 | r | CIBERSORT_Modified.R | CIBERSORT Code ommited from this directory for potential copyright reasons. See https://cibersort.stanford.edu/download.php to request code
|
1f3a5dd5b27b2989b236b98a73b2cd284903ea0e | 343c6301990352fe4ec142ffbb140864ea9a94b0 | /source/analyze_watson_data.R | bbf6f29d9faa8d1caeb6be8efeb47fb15d8ad20e | [] | no_license | dhudsmith/trolls | 9247b48fceac451b916f1e5dc39a0df7c68c6892 | f95d63118826ebbf3ce539cca394cbef6c51f0c3 | refs/heads/master | 2020-07-27T14:45:32.667518 | 2020-05-07T13:43:35 | 2020-05-07T13:43:35 | 209,128,754 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,872 | r | analyze_watson_data.R | library(dplyr)
library(ggplot2)
library(reshape2)
# NOTE(review): hard-coded, machine-specific working directory.
setwd('~/Code/trolls/')
# read in the data
# Watson NLU emotion/sentiment scores: 1000 IRA troll tweets and 1000
# control (media) tweets.
df_ira <- read.csv('data/watson_ira_1000.csv')
df_control <- read.csv('data/watson_media_1000.csv')
# Clean and combine
# Keep sentiment plus the five emotion columns, tag each row with its source.
df_ira_clean <-
  df_ira %>%
  select(sentiment_score, anger, disgust, fear, joy, sadness) %>%
  mutate(
    source = 'ira'
  )
df_control_clean <-
  df_control %>%
  select(sentiment_score, anger, disgust, fear, joy, sadness) %>%
  mutate(
    source = 'control'
  )
# sentiment_score is dropped again here, so downstream data are emotions only.
df_clean <- rbind(df_ira_clean, df_control_clean) %>% select(-sentiment_score)
# reshape the data for easier analysis
# Long format: one row per (source, emotion variable, value).
df_clean_long <-
  df_clean %>%
  melt(id.vars=c('source'))
# add a logit column for emotions variables
# Scores of exactly 0 or 1 logit-transform to +/-Inf; the two ifelse() lines
# clamp those to +/-100 so downstream plots and tests stay finite.
df_clean_long <-
  df_clean_long %>%
  mutate(
    value_logit = gtools::logit(value),
    value_logit = ifelse(value_logit < -100,-100, value_logit),
    value_logit = ifelse(value_logit > 100,100, value_logit)
  )
## make pictures!
# emotion scores
# Density of the raw emotion scores, one facet per emotion, colored by source.
df_clean_long %>%
  ggplot(aes(x=value, color=source)) +
  facet_grid(rows='variable', scales='free_x') +
  geom_density()
# logit transformed emotions
# Same plot on the logit scale; x is truncated to (-7, 4) for readability.
df_clean_long %>%
  ggplot(aes(x=value_logit, color=source)) +
  facet_grid(rows='variable', scales='free_x') +
  geom_density() +
  scale_x_continuous(limits=c(-7,4))
## t-tests
#
# Welch two-sample t-test of the logit-transformed emotion score between the
# two levels of `source`.
#
# df_emotion: data frame with a numeric `value_logit` column and a two-level
#             `source` column; incomplete rows are dropped via na.omit.
# Returns an object of class "htest".
get_t_test_results <- function(df_emotion){
  # Name the `data` argument explicitly instead of relying on positional
  # matching into the formula method.
  t.test(value_logit ~ source, data = df_emotion, na.action = na.omit)
}
# One Welch t-test (ira vs control) per emotion, on rows with a non-missing
# logit score. Results auto-print at top level; nothing is stored.
df_clean_long %>% filter(variable=='anger' & !is.na(value_logit)) %>% get_t_test_results()
df_clean_long %>% filter(variable=='disgust' & !is.na(value_logit)) %>% get_t_test_results()
df_clean_long %>% filter(variable=='fear' & !is.na(value_logit)) %>% get_t_test_results()
df_clean_long %>% filter(variable=='joy' & !is.na(value_logit)) %>% get_t_test_results()
df_clean_long %>% filter(variable=='sadness' & !is.na(value_logit)) %>% get_t_test_results()
|
89184d396a7a8b18ca22888e75e0250e2a5efad1 | 9a2ef368d44c289d041fc606f29c4bdeec54442a | /man/mlp_net-MSE-gradients.Rd | 74a0d7593bf395474576de45a17fc845f42dbff2 | [] | no_license | cran/FCNN4R | 870e9c9d2ce3569a188603c2e53fa8a59a61030a | 91a366ead6f49c15b5e866d1dcf2c9c327432343 | refs/heads/master | 2016-09-16T16:43:28.342250 | 2016-03-09T00:57:57 | 2016-03-09T00:57:57 | 39,093,754 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,802 | rd | mlp_net-MSE-gradients.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlp_net.R
\name{mlp_net-MSE-gradients}
\alias{mlp_grad}
\alias{mlp_gradi}
\alias{mlp_gradij}
\alias{mlp_jacob}
\alias{mlp_mse}
\alias{mlp_net-MSE-gradients}
\title{Computing mean squared error, its gradient, and output derivatives}
\usage{
mlp_mse(net, input, output)
mlp_grad(net, input, output)
mlp_gradi(net, input, output, i)
mlp_gradij(net, input, i)
mlp_jacob(net, input, i)
}
\arguments{
\item{net}{an object of \code{mlp_net} class}
\item{input}{numeric matrix, each row corresponds to one input vector,
the number of columns must be equal to the number of neurons
in the network input layer}
\item{output}{numeric matrix with rows corresponding to expected outputs,
the number of columns must be equal to the number of neurons
in the network output layer, the number of rows must be equal to the number
of input rows}
\item{i}{data row index}
}
\value{
\code{mlp_mse} returns mean squared error (numeric value).
\code{mlp_grad} returns two-element lists with the first
field (\code{grad}) containing numeric vector with gradient and the second
(\code{mse}) - the mean squared error.
\code{mlp_gradi} returns numeric vector with gradient.
\code{mlp_gradij} returns numeric matrix with gradients of outputs in
consecutive columns.
\code{mlp_jacob} returns numeric matrix with derivatives of outputs in
consecutive columns.
}
\description{
The functions use fast FCNN kernel routines and are intended for implementing
teaching and pruning algorithms.
}
\details{
\code{mlp_mse} returns the mean squared error (MSE). MSE is understood
as half of the squared error averaged over all outputs and data records.
\code{mlp_grad} computes the gradient of MSE w.r.t. network weights.
This function is useful when implementing batch teaching algorithms.
\code{mlp_gradi} computes the gradient of MSE w.r.t. network weights at the \code{i}th
data record. This is normalised by the number of outputs only,
the average over all rows (all i) returns the same as \code{grad(input, output)}.
This function is useful for implementing on-line teaching algorithms.
\code{mlp_gradij} computes gradients of network outputs,
i.e the derivatives of outputs w.r.t. active weights, at given data row.
The derivatives of outputs are placed in subsequent columns of the returned
matrix. Scaled by the output errors and averaged they give the same
as \code{gradi(input, output, i)}. This function is useful in implementing
teaching algorithms using second order corrections and Optimal Brain Surgeon
pruning algorithm.
\code{mlp_jacob} computes the Jacobian of network outputs, i.e the derivatives
of outputs w.r.t. inputs, at given data row.
The derivatives of outputs are placed in subsequent columns of the returned
matrix.
}
|
92d73dc4aad333a40dfe906584a8fd4cd81575d1 | 1947642e415118426f8e4d96eedc6d64a12672d6 | /man/forceround2.Rd | e57b3438f23f09baeab2c74aea116118709a4fd9 | [] | no_license | cquigley/rPerfFunc | 9917b4332391332842d69566aea0f63f95eb5ec3 | 478aff105bbcacb0e6e56346062c57763ec1647c | refs/heads/master | 2021-01-17T13:25:46.809452 | 2016-08-04T19:17:14 | 2016-08-04T19:17:14 | 64,801,245 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 262 | rd | forceround2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/performance.R
\name{forceround2}
\alias{forceround2}
\title{Force round to 2 decimals}
\usage{
forceround2(n)
}
\description{
force round to 2 decimals
replace NAs
}
|
d09f24e840b86919ec4d3f94dbadcecb7412ee03 | 7b3747265bc72f28c6af7e35ccf1275c183a9494 | /EXIO_init.R | 5a6e64606359ee2aa61e99a0eeef8814bb4ba4d3 | [] | no_license | Jihoon/DLE_scripts | 2bb9f8c03a8aec972313fc9722e5c53391e002c5 | 2225630c710822812b2f9ba884aab99cabddd119 | refs/heads/master | 2023-03-03T11:32:37.095031 | 2022-01-14T08:54:29 | 2022-01-14T08:54:29 | 89,709,501 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,589 | r | EXIO_init.R | # Read in neccessary EXIO tables
# And derive indirect energy intensities per EXIO sector
# setwd("H:/MyDocuments/IO work/")
# Root folders of the EXIOBASE 2.2.2 product-by-product IOT and the
# multi-regional supply/use tables (network drive paths).
path_iot <- "P:/ene.general/DecentLivingEnergy/IO/Data - EXIOBASE/EXIOBASE2/mrIOT_PxP_ita_coefficient_version2.2.2/"
path_sut <- "P:/ene.general/DecentLivingEnergy/IO/Data - EXIOBASE/EXIOBASE2/mrSUT_version2.2.2/"
# NOTE(review): EXIO3_path is defined but not used anywhere in this script.
EXIO3_path = "H:/MyDocuments/Analysis/Final energy/Arkaitz/IOT/"
# From IoT folder
### final_demand
# 1. From EXIO2
final_demand <- read.table(paste(path_iot, "mrFinalDemand_version2.2.2.txt", sep=""), header=FALSE, sep="\t", dec=".", skip=2)
# Header row, columns 4-10: the seven final-demand category names.
final_demand.name <- read.table(paste(path_iot, "mrFinalDemand_version2.2.2.txt", sep=""),
                       header=FALSE, sep="\t", dec=".", skip=1, nrow=1, stringsAsFactors=FALSE)[4:10]
# Drop the three leading label columns; keep only the numeric values.
final_demand <- final_demand[,c(-1,-2,-3)]
### Leontief inverse: L
# 1. From EXIO2
L_inverse <- read.table(paste(path_iot, "L_inverse.txt", sep=""), header=FALSE, sep=",", dec=".")
### Other EXIO data
factor_input <- read.table(paste(path_iot, "mrFactorInputs_version2.2.2.txt", sep=""), header=FALSE, sep="\t", dec=".", skip=2)
factor_input <- factor_input[,c(-1,-2)]
iot <- read.table(paste(path_iot, "mrIot_version2.2.2.txt", sep=""), header=FALSE, sep="\t", dec=".", skip=2)
iot <- iot[,c(-1,-2,-3)]
supplym <- read.table(paste(path_sut, "mrSupply_version2.2.2.txt", sep=""), header=FALSE, sep="\t", dec=".")
fd_materials <- read.table(paste(path_iot, "mrFDMaterials_version2.2.0.txt", sep=""), header=FALSE, sep="\t", dec=".", skip=2)
fd_materials <- fd_materials[,c(-1,-2)]
# Material extension with more energy carrier resolution from NTNU (ver 2.2.0)
# However these extensions are in TJ unit, which need to be divided by total use by product to get intensities.
materials <- read.table(paste(path_iot, "mrMaterials_version2.2.0.txt", sep=""), header=FALSE, sep="\t", dec=".", skip=2)
material.name <- materials[,1]
materials <- materials[,c(-1,-2)]
# materials_reduc <- read.table(paste(path_iot, "mrMaterials_version2.2.2.txt", sep=""), header=FALSE, sep="\t", dec=".", skip=2)
# materials_reduc <- materials_reduc[,c(-1,-2)]
final_demand_material <- read.table(paste(path_iot, "mrFDMaterials_version2.2.0.txt", sep=""), header=FALSE, sep="\t", dec=".", skip=2)
final_demand_material <- final_demand_material[,c(-1,-2)]
# Keep only greenhouse-gas rows (CH4/CO2/N2O); their labels go to GHG_item.
emissions <- read.table(paste(path_iot, "mrEmissions_version2.2.2.txt", sep=""), header=FALSE, sep="\t", dec=".",
                skip=2, nrows=85, stringsAsFactors = FALSE)
emissions <- emissions %>% select(-V2, -V3) %>% filter(grepl('CH4|CO2|N2O', V1))
GHG_item <- emissions$V1
emissions <- emissions %>% select(-V1)
# Same extraction from the 2.3.0 extension, restricted to emissions to air.
path_iot_2.3 <- 'C:/Users/min/SharePoint/DLE - Documents/IO/Data - EXIOBASE/extension2.3.0/'
emissions_2.3 <- read.table(paste(path_iot_2.3, "mrEmissions_pxp_version2.3.0.txt", sep=""), header=FALSE, sep="\t", dec=".",
                    skip=2, nrows=204, stringsAsFactors = FALSE)
emissions_2.3 <- emissions_2.3 %>% select(-V3) %>% filter(V2==" air") %>% filter(grepl('CH4|CO2|N2O', V1))
GHG_item_2.3 <- emissions_2.3$V1
emissions_2.3 <- emissions_2.3 %>% select(-V1, -V2)
# Global-warming-potential factors (CH4 = 34, N2O = 298).
# NOTE(review): the name is spelled "N20" (digit zero) in both places, so the
# lookups are self-consistent, but N2O is presumably what is meant.
gwp <- c(CH4=34, N20=298)
# Convert CH4 rows (2,5,8,11) and N2O rows (3,6,9) to CO2-equivalents.
# NOTE(review): these indices are hard-coded against the filtered row order above.
emissions_2.3[c(2,5,8,11),] <- emissions_2.3[c(2,5,8,11),] * gwp["CH4"]
emissions_2.3[c(3,6,9),] <- emissions_2.3[c(3,6,9),] * gwp["N20"]
# From SUT folder
# 1. From EXIO2
tot_use <- read.table(paste(path_sut, "mrUse_version2.2.2.txt", sep=""), header=FALSE, sep="\t", dec=".", skip=2)
tot_use <- tot_use[,c(-1,-2,-3)]
# Get total use by product
tot_demand <- rowSums(final_demand) + rowSums(tot_use) # X vector (global)
b <- rowSums(final_demand[,-seq(7, exio.fd.len, 7)]) # Excluding (export for global demand = 0)
# NOTE(review): exio.fd.len, EX_catnames and IND_idx_ex are not defined in this
# script; presumably they come from an earlier script in the workflow — confirm.
fd.sum <- data.frame(name=EX_catnames, ind.use=rowSums(tot_use[IND_idx_ex,]),
                     hh.fd=rowSums(final_demand[IND_idx_ex,seq(1, exio.fd.len, 7)]),
                     oth.fd=rowSums(final_demand[IND_idx_ex,-seq(1, exio.fd.len, 7)])) %>%
  mutate(tot.use = hh.fd + oth.fd + ind.use)
# To clean up the memory
# NOTE(review): indirect_E_int and indirect_pE_int.elec.prirow are saved below
# but never created in this script; save() will error unless they already exist
# in the workspace — confirm where they are produced.
save(L_inverse, file="./Saved tables/L_inverse.Rda")
save(iot, file="./Saved tables/iot.Rda")
save(indirect_E_int, file="./Saved tables/indirect_E_int.Rda")
save(tot_use, file="./Saved tables/tot_use.Rda")
save(supplym, file="./Saved tables/supplym.Rda")
save(final_demand, file="./Saved tables/final_demand.Rda")
save(tot_demand, file="./Saved tables/tot_demand.Rda")
save(materials, file="./Saved tables/materials.Rda")
save(fd_materials, file="./Saved tables/fd_materials.Rda")
save(indirect_pE_int.elec.prirow, file="./Saved tables/indirect_pE_int.elec.prirow.Rda")
|
ad16d6864d475f559f72e95e1794a8c258f9abc1 | 1becc546dddb6d4b689cd40bc4f91ad6d455f49d | /run_analysis.R | 5b45db9ab40a530f3b1a6cc56c3c33195ab25d64 | [] | no_license | krisstee/data-science-coursera-week4 | c66f8f2c1a04dae8e3d89938394f0211ffc3b337 | 514f4e42ae73d9a928ee477dc70d23bf98861836 | refs/heads/master | 2020-05-15T15:08:03.682519 | 2019-04-20T20:51:10 | 2019-04-20T20:51:10 | 182,361,886 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,534 | r | run_analysis.R | # Author: Kristi A.
# JHU Data Science - Getting and Cleaning Data
# Week 4 assignment
#
# Downloads the UCI HAR smartphone data set, merges the training and test
# splits, keeps the mean/std measurements, applies readable activity names,
# and writes a tidy data set of per-subject, per-activity averages.
library(dplyr)
# NOTE(review): assumes this folder exists under the current working directory.
setwd("data-science-coursera-week4")
dataset_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# download and unpack data set
# NOTE(review): method = "curl" requires curl on the PATH (not available on
# stock Windows) — confirm this is intended.
download.file(dataset_url, "week4.zip", method = "curl")
unzip("week4.zip")
# BUG FIX: the original paths used "UCI\ HAR\ Dataset" (shell-style escaped
# spaces); "\ " is not a valid escape sequence in R strings and is a parse
# error, so the script could never run as written.
data_dir <- "UCI HAR Dataset"
# collect training data
training_data <- read.table(file.path(data_dir, "train", "X_train.txt"), header = FALSE)
training_labels <- read.table(file.path(data_dir, "train", "y_train.txt"), header = FALSE)
training_subjects <- read.table(file.path(data_dir, "train", "subject_train.txt"), header = FALSE)
# collect test data
test_data <- read.table(file.path(data_dir, "test", "X_test.txt"), header = FALSE)
test_labels <- read.table(file.path(data_dir, "test", "y_test.txt"), header = FALSE)
test_subjects <- read.table(file.path(data_dir, "test", "subject_test.txt"), header = FALSE)
# get names of collected features
features <- read.table(file.path(data_dir, "features.txt"), header = FALSE)
# label training set
names(training_subjects) <- "subjects"
names(training_labels) <- "activity"
names(training_data) <- features$V2
# label test set
names(test_subjects) <- "subjects"
names(test_labels) <- "activity"
names(test_data) <- features$V2
# combine training data
training_set <- cbind(training_subjects, training_data, training_labels)
# combine test data
test_set <- cbind(test_subjects, test_data, test_labels)
# combine the training and test data
full_dataset <- rbind(training_set, test_set)
# collect mean and standard deviation measurements
# (name-based grep: this also keeps meanFreq and angle(...Mean) columns)
mean_dataset <- full_dataset[, grep("[mM]ean", names(full_dataset))]
std_dataset <- full_dataset[, grep("[sS]td", names(full_dataset))]
activities_subjects <- full_dataset[, c("subjects", "activity")]
mean_std_dataset <- cbind(activities_subjects, mean_dataset, std_dataset)
# rename activities using the lookup shipped with the data set
activity_labels <- read.table(file.path(data_dir, "activity_labels.txt"), header = FALSE)
mean_std_dataset <- mean_std_dataset %>% arrange(activity) %>%
  mutate(activity = as.character(factor(activity,
                                        levels = 1:6,
                                        labels = activity_labels$V2)))
# create separate dataset containing the average of each variable for each
# subject and activity
tidy_set <- mean_std_dataset %>% group_by(subjects, activity) %>%
  summarize_all(mean)
# write completed table to a file
write.table(tidy_set, "tidy_dataset.txt", row.names = FALSE)
|
ce526c82417ab67616e03c4ebd7aef93d25a9b93 | fa4b331d6804c877eb62fc9566c3a652bccd08f1 | /man/odbcPreviewQuery.Rd | 6e2e6414f4038d149db684b9792f14511df4637c | [
"MIT"
] | permissive | r-dbi/odbc | 0091c72371abfe95f6d2e5ea940ab06c134e2063 | 56eef6949b4c63468015cd533bd6539f952877cd | refs/heads/main | 2023-08-31T15:19:29.556401 | 2023-08-04T00:49:58 | 2023-08-04T00:49:58 | 63,273,973 | 252 | 98 | NOASSERTION | 2023-09-04T18:48:42 | 2016-07-13T19:32:07 | C++ | UTF-8 | R | false | true | 929 | rd | odbcPreviewQuery.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Viewer.R
\name{odbcPreviewQuery}
\alias{odbcPreviewQuery}
\alias{odbcPreviewQuery.OdbcConnection}
\alias{odbcPreviewQuery.Microsoft SQL Server}
\alias{odbcPreviewQuery.Oracle}
\title{Create a preview query.}
\usage{
odbcPreviewQuery(connection, rowLimit, name)
\method{odbcPreviewQuery}{OdbcConnection}(connection, rowLimit, name)
\method{odbcPreviewQuery}{`Microsoft SQL Server`}(connection, rowLimit, name)
\method{odbcPreviewQuery}{Oracle}(connection, rowLimit, name)
}
\arguments{
\item{connection}{A connection object, as returned by \code{dbConnect()}.}
\item{rowLimit}{The maximum number of rows to display.}
\item{name}{Name of the object to be previewed}
}
\description{
The query is optimized using the rowLimit argument. This is an S3 generic
because some back-ends do not parse the LIMIT syntax. It is internal; users
are not expected to interact with this method.
}
|
a98a66e5f0f3e3e9bbe35a1f6250b68027b8709d | 84cf9b0a9edc96bac8a7ffaa73391370c0a39b40 | /test_1.R | 4ca2fa5dbd31faa0ff483ad0a912b5cc5b796758 | [] | no_license | hyrbrgh/version-control-test | aca12198b639d763a337e5b42697c7fa1958bc8e | 477a50c0f8a32a95350e9ca175526b3e1da6819c | refs/heads/master | 2021-01-13T08:26:59.041128 | 2016-10-22T22:05:31 | 2016-10-22T22:05:31 | 71,668,258 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 82 | r | test_1.R | # sample code
# this code is just a sample
# it is to help me learn
test <- 1:10
|
cf89bc954c6f3538a7bbe472b54624fcc62ae7f7 | 5906b6e56fd54b7a038961372318632a8f4009d1 | /man/phyEstimate.Rd | aea4cd3512e8d76a471bed0c428a0cb644438c94 | [] | no_license | skembel/picante | dc8c8b38c45f6d2088563d4e9119a0aa21e8f115 | b891440afaa83185442f98d45db90a515cf6ab8a | refs/heads/master | 2023-09-04T02:58:33.047287 | 2023-07-10T15:17:01 | 2023-07-10T15:17:01 | 13,666,942 | 25 | 14 | null | 2023-07-10T15:12:30 | 2013-10-18T02:14:54 | R | UTF-8 | R | false | true | 3,177 | rd | phyEstimate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phyEstimate.R
\name{phyEstimate}
\alias{phyEstimate}
\alias{phyEstimateDisc}
\title{Phylogenetic estimation of traits for unobserved taxa}
\usage{
phyEstimate(phy, trait, method = "pic", ...)
}
\arguments{
\item{phy}{phylo object}
\item{trait}{vector or data.frame containing trait values}
\item{method}{ancestral state estimation method used by \code{ace}
(default="pic")}
\item{...}{Additional arguments passed to \code{ace}}
\item{best.state}{estimate best-supported trait state for discrete
variables? (default=TRUE)}
\item{cutoff}{support cutoff required to declare a best.state}
}
\value{
phyEstimate produces a data frame with columns: \item{est}{
Estimated trait value } \item{se}{ Standard error of estimated trait value
} phyEstimateDisc produces a data frame with columns: \item{states 1..N}{ A
column with statistical support is produced for each discrete trait state }
\item{estimated.state}{ If best.state=TRUE, a column with the state with
the highest support } \item{estimated.state.support}{ Statistical support
for the state with the highest support }
}
\description{
Uses phylogenetic ancestral state reconstruction to estimate trait values
for unobserved taxa.
}
\details{
These functions use phylogenetic ancestral state estimation to infer trait
values for novel taxa on a phylogenetic tree, for continuous
(\code{phyEstimate}) and discrete (\code{phyEstimateDisc}) traits.
The required input is a phylogenetic tree object plus a vector or
data.frame containing estimated trait values for a subset of the taxa in
the phylogenetic tree. Trait values for taxa that are present in the tree
but not the trait data will be estimated using ancestral state estimation
(Garland and Ives 2000). Briefly, for each taxon present in the tree but
not the trait data, the phylogeny is rerooted at the most recent common
ancestor of the novel taxon and the rest of the phylogeny, and the trait
value of the novel taxon is estimated from the reconstructed trait value at
the root of the rerooted phylogeny.
For \code{phyEstimateDisc}, the state with the highest support will be
reported if argument \code{best.state=TRUE}. If the best-supported state's
support is less than the specified \code{cutoff}, no best state is reported
and a \code{NA} value will be returned.
}
\examples{
#generate random phylogeny
randtree <- rcoal(50)
#simulate trait evolution for a subset of taxa on phylogeny
randtraits <- sample(rTraitCont(randtree, sigma=10, root.value=100), 40)
#estimate trait values for "missing" taxa using PIC method
phyEstimate(randtree, randtraits, method="pic")
}
\references{
T. Garland Jr., and A.R. Ives. 2000. Using the past to predict
the present: confidence intervals for regression equations in phylogenetic
comparative methods. American Naturalist 155:346-364.
S.W. Kembel, M. Wu, J.A. Eisen, and J.L. Green. 2012. Incorporating 16S
gene copy number information improves estimates of microbial diversity and
abundance. PLoS Computational Biology 8(10):e1002743.
}
\author{
Steven Kembel \href{mailto:steve.kembel@gmail.com}{steve.kembel@gmail.com}
}
\keyword{univar}
|
95c0732731488ebad86c2512789afc4197e0f4c2 | e4e79cc749ade9df9d1e48d972ac9683dee85933 | /original/IntelliKeys/WindowsOld/Control Panel/REALbasic Plugin and bundle/QT6/Interfaces & Libraries/QTDevWin/RIncludes/MacTypes.r | 8606b72a4f8bca5c41b0f0c74e564b638fd68900 | [
"MIT"
] | permissive | ATMakersOrg/OpenIKeys | f3420332714a844262bd3ebfbcf09405b7b3c05b | 629f88a35322245c623f59f387cc39a2444f02c4 | refs/heads/master | 2022-03-03T16:05:58.189792 | 2019-10-29T00:30:55 | 2019-10-29T00:30:55 | 107,173,263 | 12 | 4 | MIT | 2019-10-29T00:30:55 | 2017-10-16T19:24:54 | C | WINDOWS-1252 | R | false | false | 3,026 | r | MacTypes.r | /*
File: MacTypes.r
Contains: Basic Macintosh data types.
Version: Technology: Mac OS 9
Release: QuickTime 6.0.2
Copyright: © 1985-2001 by Apple Computer, Inc., all rights reserved.
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef __MACTYPES_R__
#define __MACTYPES_R__
#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif
#ifndef __SCRIPT_R__
#include "Script.r" /* to get Region codes for the 'vers' resource */
#endif
/* QuickDraw text-style flags (bit values; combine by adding) */
#define normal 0
#define bold 1
#define italic 2
#define underline 4
#define outline 8
#define shadow 0x10
#define condense 0x20
#define extend 0x40
/* Version Release Stage Codes (values for the release-stage byte of 'vers') */
#define developStage 0x20
#define alphaStage 0x40
#define betaStage 0x60
#define finalStage 0x80
/*----------------------------STR - Pascal-Style String--------------------------------*/
type 'STR ' {
    pstring; /* String (length byte followed by the text) */
};
/*----------------------------STR# - Pascal-Style String List---------------------------*/
type 'STR#' {
    integer = $$Countof(StringArray); /* number of strings that follow */
    array StringArray {
        pstring; /* String */
    };
};
/*----------------------------RECT - single rectangle-----------------------------------*/
type 'RECT' { rect; };
/*----------------------------vers - Version--------------------------------------------*/
type 'vers' {
    hex byte; /* Major revision in BCD */
    hex byte; /* Minor revision in BCD */
    hex byte development = 0x20, /* Release stage */
        alpha = 0x40,
        beta = 0x60,
        final = 0x80, /* or */ release = 0x80;
    hex byte; /* Non-final release # */
    integer; /* Region code */
    pstring; /* Short version number */
    pstring; /* Long version number */
};
/*----------------------------utxt - Unicode text (analogous to 'TEXT')-----------------*/
type 'utxt' {
    wide array UnicodeText {
        unsigned hex integer; /* one 16-bit Unicode unit */
    };
};
/*----------------------------utx# - Unicode string list (analogous to 'STR#')----------*/
type 'utx#' {
    unsigned integer = $$Countof(StringArray); /* number of Unicode strings */
    array StringArray {
        unsigned integer = $$Countof(UnicodeText); /* units in this string */
        wide array UnicodeText {
            unsigned hex integer;
        };
    };
};
#endif /* __MACTYPES_R__ */
|
ceb11fffc5127d6f5535ca818ebf1fc740919928 | a07326a1d02cc345cc94faf86894e7317ffea68f | /app/server.R | aa9b45490c0f46345b4fa8bf099bdcf451cae0f8 | [] | no_license | Juju1OO1/final_project | c91dc305ff27cc0bb47ceb14f2bb682991077bdb | 12f6a487f805b960eff0e87f9bdc1c8eba6bc675 | refs/heads/main | 2023-05-31T07:40:21.537295 | 2021-07-12T07:29:27 | 2021-07-12T07:29:27 | 375,265,452 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 267 | r | server.R | # makecondition -----------
library(shiny)
library(ggplot2)
# server --------
# Shiny server: renders a histogram of Old Faithful eruption durations,
# with the number of bins driven by the `bins` input control.
server <- function(input, output){
  output$distPlot <- shiny::renderPlot({
    bin_count <- as.numeric(input$bins)
    ggplot(data = faithful) +
      geom_histogram(aes(x = eruptions), bins = bin_count)
  })
}
|
37ba6ebb64e13f3fb6c20c4d0a003d9ede10e7bc | 744eb9be6455f86de1c53c571a5051a8ce7d1ac4 | /pca3d.R | a2d9578c9bf75ef08d9f8537332f4c0d596ebe51 | [] | no_license | derek-corcoran-barrios/Meta | 77037a4bb48a9ef8a2a216369609e90c470f3e0f | 7794a07c4ddb4f1c3136786882bd3f62095157ab | refs/heads/master | 2021-03-19T13:24:17.685664 | 2017-03-27T01:00:13 | 2017-03-27T01:00:13 | 82,729,469 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 458 | r | pca3d.R |
coord <- read.csv("~/Documents/Art classification proyect/ART/coord.csv")
Art <- read.csv("~/Documents/Art classification proyect/ART/Art.csv")
pca.3d <- cbind(coord$Dim.1, coord$Dim.2, coord$Dim.3, Art$movimiento)
colnames(pca.3d) <- c("X", "Y", "Z", "Mov")
library(rgl)
attach(data.frame(pca.3d))
plot3d(X,Y,Z,col=Mov, size=4, type="s")
if (!rgl.useNULL())
+ movie3d(spin3d(axis = c(1, 0.2, 0.5)), duration = 20,
dir = getwd())
|
ce102741775410d95213080a8f0865698a273af1 | 50284f0424b73a2fabe8705809c3fa018867ccd6 | /R/qqplot_fancy.R | 7a93ff7fde62a13e902e05a7a6bdfc8a3be82cf4 | [] | no_license | InfProbSciX/PeRLib | 2756830a8b897f3fad94dc417b2eb8eeafb64b8c | 9f68dc8c903950de2b2025fbaa79de6ae3a21b90 | refs/heads/master | 2020-03-22T08:21:55.349877 | 2018-07-04T20:11:32 | 2018-07-04T20:11:32 | 139,762,021 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,278 | r | qqplot_fancy.R | #' Make a pretty QQ-Plot
#'
#' \code{qqplot_fancy} Extends the qqplot() function by using ggplot2 for making a pretty qqplot and adding a best-fitting distribution functionality.
#'
#' @param sample A numeric vector with the "actual" observations.
#' @param dist Either (1) a string (default "normal") specifying a known distribution Or (2) another vector of theoretical/model observations. The distribution can be one of beta, exponential, gamma, log-normal, lognormal, normal, pareto, t or weibull.
#' @param col (optional) A string specifying point colour. Defaults to a random colour from the ggplots color wheel.
#' @param start (optional) A list of named starting/initial values for fitditr.
#' @param xlab (optional) A string containing the x axis label.
#' @param ylab (optional) A string containing the y axis label.
#' @param title (optional) A string containing the title.
#' @examples
#' qqplot_fancy(rnorm(1000)) ## pretty line
#' qqplot_fancy(rexp(1000), "exponential") ## pretty line
#' qqplot_fancy(rexp(1000), rexp(1000)) ## pretty line
#' qqplot_fancy(rnorm(10), col = "blue") ## pretty blue line
#' @export
qqplot_fancy <- function(sample, dist = "normal", col = sample(ggcols(50)[c(1:5, 17:38, 47:49)], 1), start = NULL, xlab = NULL, ylab = NULL, title = NULL){
  # QQ-plot of `sample` against either (1) a second empirical sample passed
  # in `dist`, or (2) a named distribution fitted to `sample` by maximum
  # likelihood. NOTE(review): `ggcols` is an external helper not defined in
  # this file (presumably the package's ggplot colour wheel) -- confirm.
  # Case 1: `dist` is itself a vector of theoretical/model observations.
  if(length(dist) >= 2){
    if(length(dist) == length(sample)){
      Theo <- dist
    }else{
      # Unequal lengths: take matching plotting-position quantiles.
      Theo <- quantile(dist, ppoints(length(sample)))
    }
    fitr.dist <- "TheoreticalSample"
  }else{
    # Case 2: fit the requested family. Some families need default starting
    # values and/or the data nudged off the support boundary before fitting.
    if(dist == "beta" & is.null(start)){
      start <- list(shape1 = 1, shape2 = 1)
      # Rescale into (0, 1) -- the beta support -- avoiding exact 0 and 1.
      sample <- (sample/max(sample))
      sample[sample == 1] <- sample[sample == 1] - 0.00001
      sample[sample == 0] <- sample[sample == 0] + 0.00001
    }else if(dist == "pareto" & is.null(start)){
      start <- list(shape = 1, scale = 10)
    }
    if((dist == "gamma" | dist == "exponential" | dist == "weibull" | dist == "log-normal" | dist == "lognormal") & is.null(start)){
      # These fits require strictly positive data; shift exact zeros.
      sample[sample == 0] <- sample[sample == 0] + 0.00001
    }
    # Parallel vectors: the family name accepted by MASS::fitdistr and the
    # matching "q<dist>" quantile-function suffix used below.
    fitr.dists <- c("beta",
                    "exponential",
                    "gamma",
                    "log-normal",
                    "lognormal",
                    "normal",
                    "t",
                    "weibull")
    eval.dists <- c("beta",
                    "exp",
                    "gamma",
                    "lnorm",
                    "lnorm",
                    "norm",
                    "t",
                    "weibull")
    n <- length(sample)
    if(dist == "pareto"){
      # Pareto is fitted via fitdistrplus with actuar's d/p/q functions
      # bound locally so fitdist() can find them.
      ppareto <- actuar::ppareto
      dpareto <- actuar::dpareto
      qpareto <- actuar::qpareto
      suppressWarnings(params <- fitdistrplus::fitdist(sample, "pareto", start = start)$estimate)
      fitr.dist <- dist
      eval.dist <- dist
    }else if(sum(as.numeric(dist == fitr.dists)) == 0){
      # NOTE(review): after printing this message execution continues and
      # `fitr.dist`/`eval.dist` stay undefined, so the fitdistr() call below
      # will error -- consider stop()ping here instead.
      print("Distribution not recognised. Try: beta, exponential, gamma, log-normal, lognormal, normal, t or weibull")
    }else{
      fitr.dist <- fitr.dists[dist == fitr.dists]
      eval.dist <- eval.dists[dist == fitr.dists]
    }
    if(dist != "pareto")
      suppressWarnings(params <- MASS::fitdistr(sample, fitr.dist, start = start)$estimate)
    # Build and evaluate "Theo <- q<dist>(ppoints(n), <fitted params>)".
    # NOTE(review): eval(parse(...)) is fragile; a do.call() on the quantile
    # function would be safer. Also, the final else branch below is
    # unreachable: "pareto" already satisfies fitr.dist != "t".
    if(fitr.dist != "t")
      eval(parse(text = paste0("Theo <- q", eval.dist, "(ppoints(n), ", toString(paste(paste0(attr(params, "names"), " = ", params), sep = ", ")), ")")))
    else if(fitr.dist != "pareto"){
      # Student t: location/scale-shift the standard qt() quantiles using
      # the fitted m (params[1]), s (params[2]) and df (params[3]).
      eval(parse(text = paste0("Theo <- params[1]+(params[2] * q", eval.dist, "(ppoints(n), ", toString(paste(paste0(attr(params[3], "names"), " = ", params[3]), sep = ", ")), "))")))
    }else{
      eval(parse(text = paste0("Theo <- q", eval.dist, "(ppoints(n), ", toString(paste(paste0(attr(params, "names"), " = ", params), sep = ", ")), ")")))
    }
  }
  # Default axis labels and title.
  if(is.null(xlab))
    xlab <- paste0("Theoretical Quantiles ", "(", fitr.dist, ")")
  if(is.null(ylab))
    ylab <- "Actual Quantiles (data)"
  if(is.null(title))
    title <- "QQPlot"
  # Sorted sample vs. sorted theoretical quantiles with the identity line.
  ggplot2::ggplot(data.frame("Actual" = sort(sample), "Theoretical" = sort(Theo)),
                  ggplot2::aes(x = Theoretical, y = Actual)) +
    ggplot2::geom_point(alpha = 0.8, color = col) +
    ggplot2::geom_abline(slope = 1, intercept = 0) +
    ggplot2::labs(title = title, x = xlab, y = ylab)
}
|
b390d4f24fe7bc7d6c8f91f0feeb745562b79ded | e56da52eb0eaccad038b8027c0a753d9eb2ff19e | /man/Renumber.Rd | 24a276778607d93e08c8acdb78bb166a445cf969 | [] | no_license | ms609/TreeTools | fb1b656968aba57ab975ba1b88a3ddf465155235 | 3a2dfdef2e01d98bf1b58c8ee057350238a02b06 | refs/heads/master | 2023-08-31T10:02:01.031912 | 2023-08-18T12:21:10 | 2023-08-18T12:21:10 | 215,972,277 | 16 | 5 | null | 2023-08-16T16:04:19 | 2019-10-18T08:02:40 | R | UTF-8 | R | false | true | 1,791 | rd | Renumber.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phylo.R
\name{Renumber}
\alias{Renumber}
\title{Renumber a tree's nodes and tips}
\usage{
Renumber(tree)
}
\arguments{
\item{tree}{A tree of class \code{\link[ape:read.tree]{phylo}}.}
}
\value{
\code{Renumber()} returns a tree of class \code{phylo}, numbered in a
\link{Cladewise} fashion consistent with the expectations of \pkg{ape} functions.
}
\description{
\code{Renumber()} numbers the nodes and tips in a tree to conform with the
\code{phylo} standards.
}
\details{
The \pkg{ape} class \code{phylo} is not formally defined, but expects trees' internal
representation to conform to certain principles: for example, nodes should
be numbered sequentially, with values increasing away from the root.
\code{Renumber()} attempts to reformat any tree into a representation that will
not cause \pkg{ape} functions to produce unwanted results or to crash R.
}
\examples{
tree <- RandomTree(letters[1:10])
Renumber(tree)
}
\seealso{
\code{Preorder()} provides a faster and simpler alternative, but also
rotates nodes.
Other tree manipulation:
\code{\link{AddTip}()},
\code{\link{CollapseNode}()},
\code{\link{ConsensusWithout}()},
\code{\link{DropTip}()},
\code{\link{EnforceOutgroup}()},
\code{\link{ImposeConstraint}()},
\code{\link{KeptPaths}()},
\code{\link{KeptVerts}()},
\code{\link{LeafLabelInterchange}()},
\code{\link{MakeTreeBinary}()},
\code{\link{RenumberTips}()},
\code{\link{RenumberTree}()},
\code{\link{RootTree}()},
\code{\link{SortTree}()},
\code{\link{Subtree}()},
\code{\link{TipTimedTree}()},
\code{\link{TrivialTree}}
}
\author{
\href{https://orcid.org/0000-0001-5660-1727}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\concept{tree manipulation}
|
ff84a918aae7944c9ab5473a8cd11610c2536539 | 570bd0fea4dcd3d84ef9f9981d073911702bbbc3 | /teaching/expdes/old/lastyear/retro.copies.R | a0d5c54a73278058a92638474e3017f124fa0c25 | [] | no_license | coleoguy/coleoguy.github.io | ab193cced8a974046fc5eafea365c2c88c4b84f0 | fdeb359db691c2c99a018a4d5312f83013c9399f | refs/heads/master | 2023-08-31T15:28:24.557517 | 2023-08-27T19:16:33 | 2023-08-27T19:16:33 | 33,425,755 | 0 | 6 | null | null | null | null | UTF-8 | R | false | false | 1,123 | r | retro.copies.R | dat <- read.csv("retrogene.csv")
# Null model: for each of 10,000 replicates, scatter 142 retrogene parent and
# daughter copies across 10 linkage groups in proportion to each group's
# share (rows 1-2 of `dat`, normalised by the totals in column 12), then
# compare the observed counts (rows 3-4 of `dat`) to the simulated
# distributions.
parents <- daughters <- matrix(0, 10000, 10)
colnames(parents) <- colnames(daughters) <- colnames(dat)[2:11]
# The sampling weights do not change between replicates, so compute them
# once instead of on every iteration.
par.probs <- unlist(dat[1, 2:11] / dat[1, 12])
dau.probs <- unlist(dat[2, 2:11] / dat[2, 12])
for(i in 1:10000){
  print(i)
  cur.parents <- sample(1:10, size = 142, replace = TRUE, prob = par.probs)
  # FIX: daughters were previously drawn with `par.probs`, leaving the
  # computed `dau.probs` unused; daughters must use the daughter weights.
  cur.daughters <- sample(1:10, size = 142, replace = TRUE, prob = dau.probs)
  cur.parents <- as.data.frame(table(cur.parents))
  cur.daughters <- as.data.frame(table(cur.daughters))
  # FIX: table() yields factors; indexing matrix columns with a factor uses
  # its integer codes, which misalign whenever a linkage group draws zero
  # copies. Convert back to the sampled group numbers first.
  parents[i, as.numeric(as.character(cur.parents$cur.parents))] <- cur.parents$Freq
  daughters[i, as.numeric(as.character(cur.daughters$cur.daughters))] <- cur.daughters$Freq
}
# Simulated parent-count distributions with the observed value in red.
par(mfcol=c(2,5))
for(i in 1:10){
  hist(parents[,i], main=paste("Lg",i,sep=""))
  abline(v=dat[3,(i+1)], col="red")
}
# Empirical upper-tail p-values: fraction of replicates with at least as
# many copies as observed.
for(i in 1:10){
  print(sum(parents[,i] >= dat[3,(i+1)])/ 10000)
}
for(i in 1:10){
  print(sum(daughters[,i] >= dat[4,(i+1)])/ 10000)
}
# Parent vs. daughter weights, and gene count vs. chromosome size.
plot(par.probs~dau.probs)
abline(a=0,b=1)
fit <- lm(unlist(dat[1,2:11])~unlist(dat[2,2:11]))
plot(unlist(dat[1,2:11])~unlist(dat[2,2:11]),
     xlab="chromosome size",
     ylab="number of genes")
abline(fit)
|
defcceb9d7ca1a0cae5f3d1bcf2ca0c61d41dbfe | d9e2ad6cf05909880592de47e072c2c3d7484b1a | /best.R | ba2944976c1b8039d6138ce1ef009bf1b5c966d7 | [] | no_license | rmuscat/rprogrammingcourse | b2b983c07f68ae9a37120831ed9615904924e87a | f56365109c3c84be3eb7d8e4dec466b2984000f0 | refs/heads/master | 2021-01-18T21:33:25.499009 | 2014-12-16T07:27:11 | 2014-12-16T07:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,605 | r | best.R | best <- function(state, outcome) {
  ## Read outcome data
  # NOTE(review): setwd() inside a function permanently changes the caller's
  # working directory and is never restored -- confirm this is intended.
  setwd("C:\\Users\\Robert\\Coursera\\2. R Programming\\A3")
  outcomeList <- c("heart attack","heart failure","pneumonia")
  # Read everything as character; the rate column is converted to numeric
  # below under suppressWarnings (non-numeric entries become NA).
  outcomeData <- read.csv("outcome-of-care-measures.csv",colClasses="character")
  ## Check that state and outcome are valid
  library(datasets)
  data(state)  # provides state.abb, the vector of two-letter state codes
  if (!state %in% state.abb) {
    stop ("invalid state")
  }
  if (!outcome %in% outcomeList) {
    stop ("invalid outcome")
  }
  ## Relevant columns of the outcome file:
  #2. Hospital Name: varchar (50) Lists the name of the hospital.
  #7. State: varchar (2) Lists the 2 letter State code in which the hospital is located.
  #11. Hospital 30-Day Death (Mortality) Rates from Heart Attack: Lists the risk adjusted rate (percentage) for each hospital.
  #17. Hospital 30-Day Death(Mortality) Rates from Heart Failure: Lists the risk adjusted rate (percentage) for each hospital.
  #23. Hospital 30-Day Death (Mortality) Rates from Pneumonia: Lists the risk adjusted rate (percentage) for each hospital.
  # Map the requested outcome to its column index in the file.
  outcomeCol = switch(outcome,"heart attack" = 11,"heart failure" = 17,"pneumonia" = 23)
  # Build a name/state/rate frame restricted to the requested state.
  outcomeDF <- data.frame(outcomeData[which(outcomeData[,7]==state),][,2],
                          outcomeData[which(outcomeData[,7]==state),][,7],
                          suppressWarnings(as.numeric(outcomeData[which(outcomeData[,7]==state),][,outcomeCol])),stringsAsFactors=FALSE)
  colnames(outcomeDF) <- c("name","state","outcome")
  minimumOD <- min(outcomeDF$outcome,na.rm=TRUE)
  # Ties for the minimum are broken alphabetically by hospital name.
  return ((sort(outcomeDF[which(outcomeDF$outcome==minimumOD),]$name))[1])
  ## Return hospital name in that state with lowest 30-day death
  ## rate
}
003feed9bf8866a33f154959df4f817b3196ac15 | 35b5db382d27cea75316ceb90f37faccaa59425c | /plot1.R | 243872788e7d10358799a0ffbea5d95360a460e7 | [] | no_license | maxreimerson/ExData_Plotting1 | 486b8f9e9c5608308c199d6c3db9722ec5e27d92 | 953d15a14a7db3ced5b9bdf4199c6dc307fbbd15 | refs/heads/master | 2021-01-11T15:01:44.888547 | 2017-01-29T07:54:41 | 2017-01-29T07:54:41 | 80,282,651 | 0 | 0 | null | 2017-01-28T12:14:33 | 2017-01-28T12:14:33 | null | UTF-8 | R | false | false | 646 | r | plot1.R | setwd("~/coursera/data/")
# Register a tiny S4 class so read.csv can parse the d/m/Y date column
# directly through colClasses.
setClass('myDate')
setAs("character","myDate", function(from) { as.Date(from, format="%d/%m/%Y") } )
# Semicolon-separated household power data; "?" marks missing values.
df <- read.csv("household_power_consumption.txt",
               header = TRUE, sep = ';',
               colClasses = c('myDate','character', 'numeric','numeric','numeric','numeric','numeric','numeric','numeric'),
               na.strings = c('?'))
# Keep only the two days of interest (1-2 February 2007).
df <- subset(df, df$Date >= "2007-02-01" & df$Date <= "2007-02-02")
# Write the figure into the repository directory.
setwd("~/coursera/repos/ExData_Plotting1")
hist(df$Global_active_power, col = "red", xlab = "Global Active Power(kilowatts)", main = "Global Active Power")
# Copy the on-screen histogram to plot1.png and close the png device.
dev.copy(png, "plot1.png")
dev.off()
|
bdeef3c801353921e44ea1203eed0ba423431330 | e5415904a4d103d45d04957a3aadf22eeaf1fbe3 | /man/google_mobility.Rd | dc1abbfcaddbcdcae79767930f7d9cd849be886f | [
"MIT"
] | permissive | kjhealy/covmobility | ccca030e3469b848ee42bfca2ab4debb8ad135a0 | 1b982c3e5615363f6492db05971d81cb9390f867 | refs/heads/main | 2023-03-19T06:45:09.201464 | 2021-03-09T20:50:06 | 2021-03-09T20:50:06 | 300,909,097 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,041 | rd | google_mobility.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{google_mobility}
\alias{google_mobility}
\title{Google Mobility Data}
\format{
A data frame with 27,094,812 rows and 11 variables:
\describe{
\item{\code{country_region_code}}{character Country Code}
\item{\code{country_region}}{character Country or Region name}
\item{\code{sub_region_1}}{character Subregion (e.g. US state) name}
\item{\code{sub_region_2}}{character Subregion (e.g. US county) name}
\item{\code{metro_area}}{Metropolitan area name}
\item{\code{iso3166_2}}{character ISO 3166-2 Country/Region code}
\item{\code{census_fips_code}}{character US Census FIPS code}
\item{\code{place_id}}{character Place ID (hashed)}
\item{\code{date}}{double Date in yyyy-mm-dd format}
\item{\code{type}}{character Type of location. Values are retail, grocery (and pharmacy), parts, transit (hubs/stations), workplaces, and residential}
\item{\code{pct_diff}}{integer Percent change from baseline activity}
}
}
\source{
Google LLC "Google COVID-19 Community Mobility Reports." https://www.google.com/covid19/mobility/ Accessed: 2021-03-09
}
\usage{
google_mobility
}
\description{
Data from Google's Community Mobility Reports on relative changes in movement trends by location type.
}
\details{
Table: Data summary\tabular{ll}{
\tab \cr
Name \tab google_mobility \cr
Number of rows \tab 27094812 \cr
Number of columns \tab 11 \cr
_______________________ \tab \cr
Column type frequency: \tab \cr
Date \tab 1 \cr
character \tab 9 \cr
numeric \tab 1 \cr
________________________ \tab \cr
Group variables \tab None \cr
}
\strong{Variable type: Date}\tabular{lrrlllr}{
skim_variable \tab n_missing \tab complete_rate \tab min \tab max \tab median \tab n_unique \cr
date \tab 0 \tab 1 \tab 2020-02-15 \tab 2021-03-05 \tab 2020-09-01 \tab 385 \cr
}
\strong{Variable type: character}\tabular{lrrrrrrr}{
skim_variable \tab n_missing \tab complete_rate \tab min \tab max \tab empty \tab n_unique \tab whitespace \cr
country_region_code \tab 17244 \tab 1.00 \tab 2 \tab 2 \tab 0 \tab 134 \tab 0 \cr
country_region \tab 0 \tab 1.00 \tab 4 \tab 22 \tab 0 \tab 135 \tab 0 \cr
sub_region_1 \tab 459858 \tab 0.98 \tab 3 \tab 74 \tab 0 \tab 1860 \tab 0 \cr
sub_region_2 \tab 4500534 \tab 0.83 \tab 2 \tab 56 \tab 0 \tab 9915 \tab 0 \cr
metro_area \tab 26945472 \tab 0.01 \tab 21 \tab 34 \tab 0 \tab 65 \tab 0 \cr
iso3166_2 \tab 22258770 \tab 0.18 \tab 4 \tab 6 \tab 0 \tab 2224 \tab 0 \cr
census_fips_code \tab 21336414 \tab 0.21 \tab 5 \tab 5 \tab 0 \tab 2838 \tab 0 \cr
place_id \tab 48912 \tab 1.00 \tab 27 \tab 27 \tab 0 \tab 13249 \tab 0 \cr
type \tab 0 \tab 1.00 \tab 5 \tab 11 \tab 0 \tab 6 \tab 0 \cr
}
\strong{Variable type: numeric}\tabular{lrrrrrrrrrl}{
skim_variable \tab n_missing \tab complete_rate \tab mean \tab sd \tab p0 \tab p25 \tab p50 \tab p75 \tab p100 \tab hist \cr
pct_diff \tab 10131130 \tab 0.63 \tab -13.02 \tab 31.05 \tab -100 \tab -31 \tab -10 \tab 5 \tab 1206 \tab ▇▁▁▁▁ \cr
}
Location accuracy and the understanding of categorized places varies from region to region, so Google does not recommend using this data to compare changes between countries, or between regions with different characteristics (e.g. rural versus urban areas). Regions or categories are omitted if Google does not have have sufficient statistically significant levels of data for it. Changes for each day are compared to a baseline value for that day of the week. The baseline is the median value, for the corresponding day of the week, during the 5-week period Jan 3–Feb 6, 2020. What data is included in the calculation depends on user settings, connectivity, and whether it meets our privacy threshold. If the privacy threshold isn’t met (when somewhere isn’t busy enough to ensure anonymity) we don’t show a change for the day. As a result, you may encounter empty fields for certain places and dates. We calculate these insights based on data from users who have opted-in to Location History for their Google Account, so the data represents a sample of our users.
As with all samples, this may or may not represent the exact behavior of a wider population. Google updated the way we calculate changes for Groceries & pharmacy, Retail & recreation, Transit stations, and Parks categories. For regions published before May 2020, the data may contain a consistent shift either up or down that starts between April 11–18, 2020.
On October 5, 2020, Google added an improvement to the dataset to ensure consistent data reporting in the Groceries & pharmacy, Retail & recreation, Transit, Parks, and Workplaces categories. The update applies to all regions, starting on August 17, 2020. For more detailed information on considerations to bear in mind before using this data,
see \href{https://support.google.com/covid19-mobility/answer/9824897?hl=en&ref_topic=9822927}{this overview from Google}.
}
\author{
Kieran Healy
}
\keyword{datasets}
|
ef536142746a0818a13b15b756863dba57b81325 | 062429fb6bc0d8a75a45a56b5d2e17ca660b8b6a | /Professor-View/utils.R | 8c98a2b6ef4e42ed76f6b81ec3920cbe61757fc6 | [] | no_license | owbezick/Mastery-System-Gradebook | b3ba277252b7cce3e2cca613488e89adef0a2619 | fa3ca21c59f3e92c65243129b27f28ffae686a91 | refs/heads/master | 2022-12-05T10:44:17.464761 | 2020-09-08T14:46:30 | 2020-09-08T14:46:30 | 263,782,893 | 1 | 0 | null | 2020-05-30T13:32:12 | 2020-05-14T01:19:00 | R | UTF-8 | R | false | false | 302 | r | utils.R |
grade_max <- function(x){
max <- "NA"
for (grade in x){
if (max != "M" & grade == "M"){
max <- "M"
} else if (max != "J" & max != "M" & grade == "J"){
max <- "J"
} else if (max != "A" & max != "J" & max != "M" & grade == "A"){
max <- "A"
}
}
return(max)
}
|
ff0016efbe8d0ee31e3b3dcd18fccead89edd73b | 64f4fc78642f0d59d49fc400ad66ce039cdd3fd2 | /eulerProblems.R | 2c1b84188572cafc6b412a8eec19f3da471d6acd | [] | no_license | leguiato/projectEuler | 490f077571c48dd6f2d009b3cdc3268092d7f1aa | 3eb45bd3631d11bd096e3b9af07704ec4ee7098a | refs/heads/master | 2020-05-18T02:11:50.483747 | 2015-01-26T22:51:32 | 2015-01-26T22:51:32 | 29,275,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,245 | r | eulerProblems.R | ### Euler Prject Problem 1
##If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
##The sum of these multiples is 23.
##Find the sum of all the multiples of 3 or 5 below 1000.
#load library
source('C:/Users/Tony Leguia/Desktop/projectEuler/eulerLibrary.R')
# solve problem by running eulerProb1(1000)
# Sum of all natural numbers below `num` that are multiples of 3 or 5
# (Project Euler problem 1), e.g. eulerProb1(10) == 23.
#
# FIX: the original looped over `1:num-1`, which by operator precedence is
# (1:num) - 1, i.e. 0:(num-1); it produced the right sum only because 0
# contributes nothing. The range is now explicit, the accumulator loop is
# replaced by a vectorised filter, and the dependence on the external
# isMultiple3()/isMultiple5() helpers is dropped.
eulerProb1 <- function(num){
  n <- seq_len(max(num - 1, 0))
  sum(n[n %% 3 == 0 | n %% 5 == 0])
}
#Test for Euler Problem 1
# Smoke test for eulerProb1(): prints each label together with TRUE/FALSE.
testProb1 <- function() {
  small_ok <- eulerProb1(10) == 23
  print(c("Is eulerProb1(10)==23? ", small_ok))
  full_ok <- eulerProb1(1000) == 233168
  print(c("Is eulerProb1(1000)==233168? ", full_ok))
}
### Euler Project Problem 2
##Each new term in the Fibonacci sequence is generated by adding the previous two terms.
##By starting with 1 and 2, the first 10 terms will be:
## 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
##By considering the terms in the Fibonacci sequence whose values do not exceed
##four million, find the sum of the even-valued terms.
# Sum the even Fibonacci terms below four million (Project Euler problem 2).
# Relies on the external fibonacci() helper sourced from eulerLibrary.R.
eulerProb2 <- function(num) {
  fib <- fibonacci(num)
  wanted <- fib %% 2 == 0 & fib < 4000000
  sum(fib[wanted])
}
# Smoke test for eulerProb2().
# FIX: the printed label previously read "eulerProb1(1000000)" although the
# code calls eulerProb2(800); the label now matches the actual call.
testProb2 <- function(){
  print(c("Is eulerProb2(800)==4613732? ", 4613732 == eulerProb2(800)))
}
bb4bd003a09885ca72c114a9a37727bb3cc706d5 | 710b9c5b5a125fc072162ba01ae417a46797ba69 | /R/RcppExports.R | a1751ab2bd4678ca9c9bcdccb91e22ff8e84bd35 | [] | no_license | PatrickSaoud/TStools | 5891e9b1f87ec90aa5cb490b5983eccd51b44b14 | 222cc772ef126242548bf80d1fc600787fb53b99 | refs/heads/master | 2021-01-12T20:59:20.421421 | 2015-12-04T02:59:58 | 2015-12-04T02:59:58 | 47,412,476 | 0 | 0 | null | 2015-12-04T15:34:41 | 2015-12-04T15:34:41 | null | UTF-8 | R | false | false | 2,099 | r | RcppExports.R | # This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): auto-generated R-to-C++ glue from Rcpp::compileAttributes();
# regenerate it from the C++ attribute annotations rather than editing by
# hand. Each wrapper forwards its arguments unchanged to the compiled
# routine of the same name in the TStools shared library.

# Forwards to the compiled 'TStools_initparams' routine.
initparams <- function(Ttype, Stype, datafreq, obsR, yt, damped, phi, smoothingparameters, initialstates, seasonalcoefs) {
    .Call('TStools_initparams', PACKAGE = 'TStools', Ttype, Stype, datafreq, obsR, yt, damped, phi, smoothingparameters, initialstates, seasonalcoefs)
}

# Forwards to the compiled 'TStools_etsmatrices' routine.
etsmatrices <- function(matxt, vecg, phi, Cvalues, ncomponentsR, seasfreq, Ttype, Stype, nexovars, matxtreg, estimpersistence, estimphi, estiminit, estiminitseason, estimxreg) {
    .Call('TStools_etsmatrices', PACKAGE = 'TStools', matxt, vecg, phi, Cvalues, ncomponentsR, seasfreq, Ttype, Stype, nexovars, matxtreg, estimpersistence, estimphi, estiminit, estiminitseason, estimxreg)
}

# Forwards to the compiled 'TStools_fitterwrap' routine.
fitterwrap <- function(matxt, matF, matw, yt, vecg, Etype, Ttype, Stype, seasfreq, matwex, matxtreg) {
    .Call('TStools_fitterwrap', PACKAGE = 'TStools', matxt, matF, matw, yt, vecg, Etype, Ttype, Stype, seasfreq, matwex, matxtreg)
}

# Forwards to the compiled 'TStools_forecasterwrap' routine.
forecasterwrap <- function(matxt, matF, matw, h, Ttype, Stype, seasfreq, matwex, matxtreg) {
    .Call('TStools_forecasterwrap', PACKAGE = 'TStools', matxt, matF, matw, h, Ttype, Stype, seasfreq, matwex, matxtreg)
}

# Forwards to the compiled 'TStools_errorerwrap' routine.
errorerwrap <- function(matxt, matF, matw, yt, h, Etype, Ttype, Stype, seasfreq, trace, matwex, matxtreg) {
    .Call('TStools_errorerwrap', PACKAGE = 'TStools', matxt, matF, matw, yt, h, Etype, Ttype, Stype, seasfreq, trace, matwex, matxtreg)
}

# Forwards to the compiled 'TStools_optimizerwrap' routine.
optimizerwrap <- function(matxt, matF, matw, yt, vecg, h, Etype, Ttype, Stype, seasfreq, trace, CFt, normalizer, matwex, matxtreg) {
    .Call('TStools_optimizerwrap', PACKAGE = 'TStools', matxt, matF, matw, yt, vecg, h, Etype, Ttype, Stype, seasfreq, trace, CFt, normalizer, matwex, matxtreg)
}

# Forwards to the compiled 'TStools_costfunc' routine.
costfunc <- function(matxt, matF, matw, yt, vecg, h, Etype, Ttype, Stype, seasfreq, trace, CFt, normalizer, matwex, matxtreg, bounds, phi, Theta) {
    .Call('TStools_costfunc', PACKAGE = 'TStools', matxt, matF, matw, yt, vecg, h, Etype, Ttype, Stype, seasfreq, trace, CFt, normalizer, matwex, matxtreg, bounds, phi, Theta)
}
|
277f652404ca8f98d02762e99a8db0d79c71ef2e | 67222f69dd1a5b5ced1d28df833a303924dbde35 | /2. Algorithms on Datasets/Hypothesis Testing/LabTAT_HypothesisTesting/LabTAT_HypothesisTesting.R | 1e4a2b80f9d1550651fa732fda6d207da3aaf027 | [] | no_license | mandarmakhi/DataScience-R-code | 4f75906507e303fb9b438b99a5eab0a74bcc77f6 | 8c1728b306e53668b1814283da9936503e0554b9 | refs/heads/master | 2023-01-19T04:55:11.171455 | 2020-11-28T07:59:55 | 2020-11-28T07:59:55 | 263,417,867 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 822 | r | LabTAT_HypothesisTesting.R |
#Hypothesis Testing
#Analyze the data and determine whether there is any difference in average TAT among the different laboratories at 5% significance level
#H0: = 0 (Average TAT are same)
#Ha: !=0 (Average TAT are not same)
LabTAT <- read.csv("C:/Users/Mandar/Desktop/data/assignments/Hypothesis Testing/LabTAT.csv")
View(LabTAT)
lab1 <- LabTAT$Laboratory.1
lab2 <- LabTAT$Laboratory.2
lab3 <- LabTAT$Laboratory.3
lab4 <- LabTAT$Laboratory.4
summary(LabTAT)
sd(lab1)
sd(lab2)
sd(lab3)
sd(lab4)
combined_group <- data.frame(cbind(lab1,lab2,lab3,lab4))
combined_group
stacked_group <- stack(combined_group)
stacked_group
anova_result <- aov(stacked_group$values~. , data = stacked_group)
anova_result
summary(anova_result)
#The p value is less than 0.05 i.e 5% significance level so we reject H0 and the result is
|
91a8debb2e7794816dd861b1251f10b2b65265be | cc740bb752b3fab2d7b7019c7b5a6b9099a99568 | /R/CMRAnnotation.R | d077105c7046d16e86fe720c9258829ff995e489 | [] | no_license | cma2015/PEA | bf7eca23be6cc09308cfc9156ff46b1e4e890b90 | be1f9762f9c6eac526f1e16d7467d2ff2d1df00b | refs/heads/master | 2023-03-05T23:36:50.394537 | 2023-02-27T14:44:11 | 2023-02-27T14:44:11 | 104,353,389 | 5 | 5 | null | null | null | null | UTF-8 | R | false | false | 20,834 | r | CMRAnnotation.R |
#' @export
runTopGO <- function(geneID, statistic = "fisher", algorithm = "elim",
topNodes = 20,
dataset = "athaliana_eg_gene", plot = TRUE){
# if(!require(biomaRt)){
# source("https://bioconductor.org/biocLite.R")
# biocLite("biomaRt")
# }
#
# if(!require(topGO)){
# source("https://bioconductor.org/biocLite.R")
# biocLite("topGO")
# }
mart <- useMart(biomart = "plants_mart", dataset = dataset, host = 'plants.ensembl.org')
GTOGO <- getBM(attributes = c( "ensembl_gene_id", "go_id"), mart = mart)
GTOGO <- GTOGO[GTOGO$go_id != '', ]
geneID2GO <- by(GTOGO$go_id, GTOGO$ensembl_gene_id, function(x) as.character(x))
all.genes <- sort(unique(as.character(GTOGO$ensembl_gene_id)))
int.genes <- geneID
int.genes <- intersect(int.genes, names(geneID2GO))
int.genes <- factor(as.integer(all.genes %in% int.genes))
names(int.genes) = all.genes
go.obj.BP <- new("topGOdata", ontology='BP',
allGenes = int.genes,
annot = annFUN.gene2GO,
gene2GO = geneID2GO)
go.obj.MF <- new("topGOdata", ontology='MF',
allGenes = int.genes,
annot = annFUN.gene2GO,
gene2GO = geneID2GO)
go.obj.CC <- new("topGOdata", ontology='CC',
allGenes = int.genes,
annot = annFUN.gene2GO,
gene2GO = geneID2GO)
##########retrieve the gene list related to a GO ID######################
allGO.BP <- genesInTerm(object = go.obj.BP)
allGO.MF <- genesInTerm(object = go.obj.MF)
allGO.CC <- genesInTerm(object = go.obj.CC)
#########retrive the significant GO terms
results.BP <- runTest(go.obj.BP, algorithm = algorithm, statistic = statistic)
results.tab.BP <- GenTable(object = go.obj.BP, elimFisher = results.BP,
topNodes = topNodes)
gene.BP <- genesInTerm(object = go.obj.BP, whichGO = results.tab.BP$GO.ID)
inter.gene.BP <- lapply(X = gene.BP, FUN = function(x) intersect(x, geneID))
inter.gene.BP <- unlist(lapply(inter.gene.BP, function(x) paste(x, collapse = ';')))
results.tab.BP$significantGene <- inter.gene.BP
if(length(which(results.tab.BP$elimFisher == "< 1e-30")) != 0){
results.tab.BP[which(results.tab.BP$elimFisher == "< 1e-30"), ]$elimFisher <- 1e-30
}
results.MF <- runTest(go.obj.MF, algorithm = algorithm, statistic = statistic)
results.tab.MF <- GenTable(object = go.obj.MF, elimFisher = results.MF,
topNodes = topNodes)
gene.MF <- genesInTerm(object = go.obj.MF, whichGO = results.tab.MF$GO.ID)
inter.gene.MF <- lapply(X = gene.MF, FUN = function(x) intersect(x, geneID))
inter.gene.MF <- unlist(lapply(inter.gene.MF, function(x) paste(x, collapse = ';')))
results.tab.MF$significantGene <- inter.gene.MF
if(length(which(results.tab.MF$elimFisher == "< 1e-30")) != 0){
results.tab.MF[which(results.tab.MF$elimFisher == "< 1e-30"), ]$elimFisher <- 1e-30
}
results.CC <- runTest(go.obj.CC, algorithm = algorithm, statistic = statistic)
results.tab.CC <- GenTable(object = go.obj.CC, elimFisher = results.CC,
topNodes = topNodes)
gene.CC <- genesInTerm(object = go.obj.CC, whichGO = results.tab.CC$GO.ID)
inter.gene.CC <- lapply(X = gene.CC, FUN = function(x) intersect(x, geneID))
inter.gene.CC <- unlist(lapply(inter.gene.CC, function(x) paste(x, collapse = ';')))
results.tab.CC$significantGene <- inter.gene.CC
if(length(which(results.tab.CC$elimFisher == "< 1e-30")) != 0){
results.tab.CC[which(results.tab.CC$elimFisher == "< 1e-30"), ]$elimFisher <- 1e-30
}
if(plot){
df <- data.frame(Category = c(rep("BP", topNodes), rep("CC", topNodes), rep("MF", topNodes)),
x = c(results.tab.BP$Significant, results.tab.CC$Significant,
results.tab.MF$Significant),
y = c(-log10(as.numeric(results.tab.BP$elimFisher)),
-log10(as.numeric(results.tab.CC$elimFisher)),
-log10(as.numeric(results.tab.MF$elimFisher))),
size = c(-log10(as.numeric(results.tab.BP$elimFisher)),
-log10(as.numeric(results.tab.CC$elimFisher)),
-log10(as.numeric(results.tab.MF$elimFisher)))
)
kk <- ggplot(data = df, aes(x = x, y = y)) +
geom_point(aes(color = Category, size = size)) +
scale_size_continuous(range = c(2,10)) +
labs(x = "The number of significant genes", y = "The adjusted p-values for each GO term")
print(kk)
}
results <- list(BP = results.tab.BP, CC = results.tab.CC, MF = results.tab.MF)
results
}
#' @export
searchMotifPos <- function(sequence, motif = "[ag][ag]ac[act]", cenPos = 2){
  ## Locate all occurrences of `motif` in a FASTA file and return, per
  ## sequence, the motif *centre* positions (match start + cenPos; the
  ## default cenPos = 2 centres the RRACH m6A consensus on the A).
  ## Uses seqinr's read.fasta()/words.pos(); returns a named list (or, for
  ## multi-sequence input, whatever sapply() simplifies to).
  Seqs <- read.fasta(sequence, as.string = T)
  if(length(Seqs) != 1){
    resPos <- sapply(Seqs, function(x) words.pos(motif, x))
    resPos <- sapply(resPos, function(x) x+cenPos)
  }else{
    # Single sequence: sapply() may return either a plain vector (one
    # match) or a one-column matrix (several matches), so branch on dim().
    res <- sapply(Seqs, function(x) words.pos(motif, x))
    resPos <- list()
    if(is.null(dim(res))){
      resPos[[names(Seqs)]] <- res[1] + cenPos
    }else{
      resPos[[names(Seqs)]] <- res[,1] + cenPos
    }
  }
  resPos
}
#' @export
# Scan FASTA peak sequences for `motif`, extract the 5-nt window centred on
# each motif occurrence, build a 4 x 5 position frequency matrix over
# a/c/g/t (any "N" columns are dropped from the frequencies), draw the
# sequence logo and return the PWM object (seqLogo::makePWM).
motifScan <- function(sequence, motif = "[ag][ag]ac[act]"){
  peakSeq <- read.fasta(file = sequence, as.string = T)
  # FIX: the user-supplied `motif` was silently ignored -- searchMotifPos()
  # was called with its default pattern. Forward it so the argument works.
  motifPos <- searchMotifPos(sequence = sequence, motif = motif)
  # Drop sequences without any motif occurrence.
  kk <- unlist(lapply(X = motifPos, function(x) length(x)))
  motifPos <- motifPos[which(kk != 0)]
  ll <- 1
  resSeq <- list()
  # Collect the +/-2 nt window around every motif centre.
  # FIX: seq_along() instead of 1:length() so an empty list is a no-op.
  for(i in seq_along(motifPos)){
    curID <- names(motifPos)[i]
    curPos <- motifPos[[i]]
    if(length(curPos) != 0){
      for(j in seq_along(curPos)){
        curSeq <- substr(peakSeq[[curID]], curPos[j]-2, curPos[j]+2)
        resSeq[[ll]] <- curSeq
        ll <- ll + 1
      }
    }
  }
  # Column-wise base frequencies over the collected 5-mers.
  pfw <- matrix(0, 4, ncol = 5)
  rownames(pfw) <- c("a", "c", "g", "t")
  for(i in 1:ncol(pfw)){
    curSeq <- substr(resSeq, i, i)
    curFreq <- table(curSeq)/length(curSeq)
    curFreq <- curFreq[which(names(curFreq) != "N")]
    pfw[names(curFreq), i] <- curFreq
  }
  pwm <- makePWM(pfw)
  seqLogo(pwm)
  pwm
}
#' @export
# De novo motif discovery on a FASTA file via rGADEM's GADEM() algorithm.
# Returns the list of position weight matrices (one per discovered motif),
# optionally drawing a sequence logo for each when `plot` is TRUE.
# Extra arguments in `...` are passed straight through to GADEM().
motifDetect <- function(sequence, plot = T, ...){
  seqs <- readDNAStringSet(filepath = sequence, format = 'fasta')
  gademOut <- GADEM(seqs, verbose = 1, ...)
  # Pull the @pwm slot out of every discovered motif.
  resList <- lapply(gademOut@motifList, function(m) m@pwm)
  if(plot){
    for(pwmCur in resList){
      seqLogo(pwm = pwmCur)
    }
  }
  resList
}
# Compute the 5'UTR / CDS / 3'UTR ranges of one transcript on its own
# (spliced, 1-based) cDNA coordinate system.
#
# Args:
#   curID: transcript identifier, matched against column V6 of GTF.
#   GTF:   annotation table in GTF column order -- V3 = feature type,
#          V4/V5 = start/end, V6 = transcript ID.
# Returns a numeric vector of length 6:
#   c(5'UTR start, 5'UTR end, CDS start, CDS end, 3'UTR start, 3'UTR end),
#   where an absent UTR is encoded as c(0, 0).
.getUTR <- function(curID, GTF){
  # Start/end pairs of every feature belonging to this transcript.
  cur5UTRMat <- subset(GTF, GTF$V3 == "five_prime_utr" & GTF$V6 == curID)[,4:5]
  cur3UTRMat <- subset(GTF, GTF$V3 == "three_prime_utr" & GTF$V6 == curID)[,4:5]
  curIDRange <- subset(GTF, GTF$V3 == "CDS" & GTF$V6 == curID)[,4:5]
  # Total (spliced) length of each region; 0 when the region is absent.
  if(nrow(cur5UTRMat) >= 1){
    len.5 <- sum(cur5UTRMat$V5 - cur5UTRMat$V4 + 1)
  }else{
    len.5 <- 0
  }
  if(nrow(cur3UTRMat) >= 1){
    len.3 <- sum(cur3UTRMat$V5 - cur3UTRMat$V4 + 1)
  }else{
    len.3 <- 0
  }
  cds.len <- sum(curIDRange$V5 - curIDRange$V4 + 1)
  # Lay the regions out 5'UTR -> CDS -> 3'UTR on the cDNA axis.
  if(len.5 != 0){
    UTR5Range <- c(1, len.5)
    CDSRange <- c((len.5 + 1), (len.5 + cds.len))
  }else{
    UTR5Range <- c(0, 0)
    CDSRange <- c(1, cds.len)
  }
  if(len.3 != 0){
    UTR3Range <- c((cds.len + len.5 + 1), (len.5 + len.3 + cds.len))
  }else{
    UTR3Range <- c(0, 0)
  }
  # Explicit return value (the original ended in an assignment, whose value
  # R only returns invisibly).
  c(UTR5Range, CDSRange, UTR3Range)
}
#' Per-transcript UTR/CDS boundaries from a GTF annotation file
#'
#' @param GTF Path to a GTF file.  Feature types containing "5"/"five" or
#'   "3"/"three" are normalised to five_prime_utr / three_prime_utr.
#' @param cpus Number of CPUs; values > 1 parallelise over transcripts
#'   using the snowfall package.
#' @return A character matrix, one row per transcript (rownames are the
#'   transcript IDs), with columns five_UTR_Start/End, CDS_Start/End and
#'   three_UTR_Start/End in transcript-local coordinates (see .getUTR).
#' @export
getUTR <- function(GTF, cpus = 1){
  GTF <- read.table(GTF, sep = "\t", header = F,
                    stringsAsFactors = F, quote = "")
  # Normalise the heterogeneous UTR labels used by different GTF sources.
  GTF$V3[grep(pattern = "5|five", x = GTF$V3)] <- "five_prime_utr"
  GTF$V3[grep(pattern = "3|three", x = GTF$V3)] <- "three_prime_utr"
  GTF$V9 <- gsub(pattern = "\"", replacement = "", x = GTF$V9)
  curGTF <- subset(GTF, GTF$V3 == "CDS" | GTF$V3 == "five_prime_utr" | GTF$V3 == "three_prime_utr")
  # Column V6 is repurposed to hold the transcript ID parsed from the
  # attribute column (V9).
  curGTF$V6 <- sapply(curGTF$V9, .extractTranscriptID)
  transcriptID <- unique(curGTF[,6])
  if(cpus == 1){
    UTRMat <- t(sapply(X = transcriptID, FUN = .getUTR, GTF = curGTF))
  }else{
    # snowfall-based parallel map over transcripts.
    sfInit(parallel = TRUE, cpus = cpus)
    sfExport(".getUTR", namespace = "PEA")
    UTRMat <- t(sfSapply(transcriptID, .getUTR, GTF = curGTF))
    sfStop()
  }
  # cbind() with the rownames coerces everything to character; the ID
  # column is then moved into the rownames and dropped again.
  UTRMat <- cbind(rownames(UTRMat), UTRMat)
  colnames(UTRMat) <- c("cDNA_ID",
                        "five_UTR_Start",
                        "five_UTR_End",
                        "CDS_Start",
                        "CDS_End",
                        "three_UTR_Start",
                        "three_UTR_End")
  rownames(UTRMat) <- UTRMat[,1]
  UTRMat <- UTRMat[, -1]
  UTRMat
}
# Map one genomic interval onto transcript (cDNA) coordinates.
#
# Args:
#   posVec:  c(chromosome, genomic start, genomic end) for one record.
#   exonGTF: table of GTF "exon" rows; column 1 = chromosome, 4/5 = exon
#            start/end, 6 = transcript ID (filled in by G2T()), 7 = strand.
# Returns a matrix with one row per transcript whose exons contain either
# endpoint -- columns: transcript ID, transcript-local start, end -- or
# NULL when neither endpoint falls inside any exon.
.G2T <- function(posVec, exonGTF){
  curCHR <- posVec[1]
  curExon <- exonGTF[which(exonGTF[,1] == curCHR), ]
  curPos1 <- as.numeric(posVec[2])
  curPos2 <- as.numeric(posVec[3])
  # Exon rows containing the interval start (index1) and end (index2).
  index1 <- which((as.numeric(curExon[,4]) <= curPos1) & (as.numeric(curExon[,5]) >= curPos1))
  index2 <- which((as.numeric(curExon[,4]) <= curPos2) & (as.numeric(curExon[,5]) >= curPos2))
  index <- unique(c(index1, index2))
  if(length(index) == 0){
    resMat <- NULL
  }else{
    resSamples1 <- NULL
    resSamples2 <- NULL
    resMat <- matrix(NA, nrow = length(index), ncol = 3)
    for(j in 1:length(index)){
      curidx <- index[j]
      curExonParen <- curExon[curidx, 6]
      curStrand <- curExon[curidx, 7]
      curExonStart <- as.numeric(curExon[curidx, 4])
      curExonStop <- as.numeric(curExon[curidx, 5])
      # All exons of the parent transcript, ordered 5'->3' (descending
      # genomic start on the minus strand).
      curTranscript <- curExon[which(curExon[,6] == curExonParen), , drop = FALSE]
      if(curStrand == "-"){
        curTranscript <- curTranscript[order(as.numeric(curTranscript[,4]), decreasing = T), , drop = FALSE]
      }
      if(is.element(curidx, index1) & is.element(curidx, index2)){
        # Both endpoints exonic for this transcript: convert each endpoint
        # to an exon-local offset, then add the cumulative length of the
        # preceding exons.
        curExonidx1 <- which(as.numeric(curTranscript[,4]) <= curPos1 & as.numeric(curTranscript[,5]) >= curPos1)
        curExonidx2 <- which(as.numeric(curTranscript[,4]) <= curPos2 & as.numeric(curTranscript[,5]) >= curPos2)
        if(curStrand == "-"){
          tmpPos1 <- as.numeric(curTranscript[curExonidx1, 5]) - curPos2 + 1
          tmpPos2 <- as.numeric(curTranscript[curExonidx2, 5]) - curPos1 + 1
        }else{
          tmpPos1 <- curPos1 - as.numeric(curTranscript[curExonidx1, 4]) + 1
          tmpPos2 <- curPos2 - as.numeric(curTranscript[curExonidx2, 4]) + 1
        }
        ExonLen <- as.numeric(curTranscript[,5]) - as.numeric(curTranscript[,4]) + 1
        ExonLen <- c(0, ExonLen)
        resPos1 <- sum(ExonLen[1:curExonidx1]) + tmpPos1
        resPos2 <- sum(ExonLen[1:curExonidx2]) + tmpPos2
        # resSamples1 <- c(resSamples1, paste0(curExonParen, "_", as.numeric(resPos1)))
        # resSamples2 <- c(resSamples2, paste0(curExonParen, "_", as.numeric(resPos2)))
      }else if(is.element(curidx, index1)){
        # Only the interval start is exonic: the end is clipped to the
        # boundary of the exon containing the start.
        curExonidx1 <- which(as.numeric(curTranscript[,4]) <= curPos1 & as.numeric(curTranscript[,5]) >= curPos1)
        curExonidx2 <- curExonidx1
        if(curStrand == "-"){
          tmpPos1 <- as.numeric(curTranscript[curExonidx1, 5]) - curPos1 + 1
          tmpPos2 <- 0
        }else{
          tmpPos1 <- curPos1 - as.numeric(curTranscript[curExonidx1, 4]) + 1
          tmpPos2 <- as.numeric(curTranscript[curExonidx2, 5]) - as.numeric(curTranscript[curExonidx2, 4]) + 1
        }
        ExonLen <- as.numeric(curTranscript[,5]) - as.numeric(curTranscript[,4]) + 1
        ExonLen <- c(0, ExonLen)
        resPos1 <- sum(ExonLen[1:curExonidx1]) + tmpPos1
        resPos2 <- sum(ExonLen[1:curExonidx2]) + tmpPos2
        # resSamples1 <- c(resSamples1, paste0(curExonParen, "_", as.numeric(resPos1)))
        # resSamples2 <- c(resSamples2, paste0(curExonParen, "_", as.numeric(resPos2)))
      }else{
        # Only the interval end is exonic (mirror case of the branch above).
        curExonidx2 <- which(as.numeric(curTranscript[,4]) <= curPos2 & as.numeric(curTranscript[,5]) >= curPos2)
        curExonidx1 <- curExonidx2
        if(curStrand == "-"){
          tmpPos1 <- as.numeric(curTranscript[curExonidx1, 5]) - as.numeric(curTranscript[curExonidx1, 4]) + 1
          tmpPos2 <- as.numeric(curTranscript[curExonidx2, 5]) - curPos2 + 1
        }else{
          tmpPos1 <- 0
          tmpPos2 <- curPos2 - as.numeric(curTranscript[curExonidx2, 4]) + 1
        }
        ExonLen <- as.numeric(curTranscript[,5]) - as.numeric(curTranscript[,4]) + 1
        ExonLen <- c(0, ExonLen)
        resPos1 <- sum(ExonLen[1:curExonidx1]) + tmpPos1
        resPos2 <- sum(ExonLen[1:curExonidx2]) + tmpPos2
      }
      # Report the interval with start <= end regardless of strand.
      resStart <- min(c(resPos1, resPos2))
      resStop <- max(c(resPos1, resPos2))
      resMat[j,] <- c(curExonParen, resStart, resStop)
    }
  }
  resMat
}
# Parse the transcript ID out of one GTF attribute string, e.g.
# 'gene_id g1; transcript_id t1;' -> "t1".
.extractTranscriptID <- function(x){
  fields <- unlist(strsplit(x, ";"))
  tid_field <- fields[grepl("transcript_id", fields)]
  # Drop the field label and any spaces, leaving only the bare ID.
  gsub(pattern = "transcript_id| ", replacement = "", x = tid_field)
}
#' Convert genomic (BED-style) intervals to transcript coordinates
#'
#' @param bedPos Matrix/data frame whose first three columns are
#'   chromosome, start and end; any extra columns are dropped.
#' @param GTF Path to a GTF file; its exon records define the mapping.
#' @return A matrix with columns transcript ID, start, end (one row per
#'   overlapping transcript, see .G2T), or NULL when the chromosomes in
#'   bedPos and the GTF do not overlap at all.
#' @export
G2T <- function(bedPos, GTF){
  GTF <- read.table(GTF, sep = "\t", quote = "", header = F, stringsAsFactors = F)
  # Sanity check: the two inputs must use the same chromosome naming.
  interChr <- intersect(GTF[,1], bedPos[,1])
  if(length(interChr) == 0){
    cat("The chromosomes are not consistent in the GTF and bedPos, please provide right input!")
    return(NULL)
  }
  GTF$V9 <- gsub(pattern = "\"", replacement = "", x = GTF$V9)
  exonGTF <- GTF[which(GTF$V3 == "exon"),]
  # Column 6 of the exon table is overwritten with the transcript ID
  # parsed from the attribute column (V9); .G2T relies on this.
  exonGTF[,6] <- apply(exonGTF[, 9, drop = FALSE], 1, .extractTranscriptID)
  if(ncol(bedPos) > 3){
    bedPos <- bedPos[,1:3]
  }
  resPos <- apply(bedPos, 1, .G2T, exonGTF = exonGTF)
  resPos <- do.call(rbind, resPos)
  resPos
}
# Parse (gene ID, transcript ID) pairs from the "transcript" records of a
# GTF file.  Returns a two-column character matrix: column 1 = gene ID,
# column 2 = transcript ID.
# NOTE(review): the substr() offsets (9 and 15) assume the attribute
# string starts exactly with 'gene_id ...; transcript_id ...' -- confirm
# for GTFs with a different attribute order.
.geneID <- function(GTF){
  GTF <- read.table(file = GTF, header = F, sep = "\t", stringsAsFactors = F, quote = "")
  GTF$V9 <- gsub(pattern = "\"", replacement = "", x = GTF$V9)
  GTF <- subset(x = GTF, GTF$V3 == "transcript")
  # First two attribute fields are taken as gene_id and transcript_id.
  curMat <- strsplit(GTF$V9, ";")
  curMat <- lapply(curMat, FUN = function(x) x[1:2])
  curMat <- do.call(what = rbind, curMat)
  # Strip the field labels by fixed character position.
  curMat[,1] <- substr(curMat[,1], 9, nchar(curMat[,1]))
  curMat[,2] <- substr(curMat[,2], 15, nchar(curMat[,2]))
  resMat <- curMat[,1:2]
  resMat[,1] <- gsub(pattern = " ", replacement = "", resMat[,1])
  resMat[,2] <- gsub(pattern = " ", replacement = "", resMat[,2])
  resMat
}
# Translate one transcript ID to its gene ID using a lookup matrix whose
# rownames are transcript IDs and whose first column holds the gene IDs.
# Unknown transcripts map to NA.
.T2G <- function(transcript, geneMat){
  if (transcript %in% rownames(geneMat)) {
    geneMat[transcript, 1]
  } else {
    NA
  }
}
#' Annotate CMR (chemical modification of RNA) sites
#'
#' Dispatches on `annotation`: "location" summarises where the CMRs fall
#' on the transcript (5'UTR/CDS/3'UTR) and plots their meta-gene
#' distribution, "motifScan"/"motifDetect" run motif analyses on the
#' CMR-related sequences, and "GO" performs GO enrichment of the genes
#' carrying CMRs.
#' @export
CMRAnnotation <- function(cmrMat = NULL, genomic = F, UTRMat = NULL, GTF = NULL, SNR = T,
                          annotation = c("location", "motifScan", "motifDetect", "GO"),
                          cmrSeq = NULL, RNAseq = NULL, motifPos = NULL, plot = T, ...){
  # Only one annotation mode is handled per call.
  if(length(annotation) > 1){
    cat("Warnings: multiple annotation was provided, the first one will be used!")
    annotation <- annotation[1]
  }
  # Genomic coordinates are first lifted to transcript coordinates.
  if(genomic){
    cmrMat <- G2T(bedPos = cmrMat, GTF = GTF)
    #peakMat <- do.call(peakMat, rbind)
  }else{
    cmrMat <- cmrMat
  }
  if(annotation == "location"){
    if(is.null(GTF)){
      stop("Please provide the GTF!", "\n")
    }
    # (gene, transcript) pairs indexed by transcript ID.
    geneID <- .geneID(GTF = GTF)
    geneID <- unique(geneID)
    geneID[,2] <- gsub(" ", "", x = geneID[,2], fixed = TRUE)
    rownames(geneID) <- geneID[,2]
    if(is.null(UTRMat)){
      UTRMat <- getUTR(GTF = GTF)
    }
    class(UTRMat) <- "numeric"
    if(!SNR){
      # Interval (peak) input: expand each peak into the single-nucleotide
      # motif positions it contains before locating them on the transcript.
      if(is.null(RNAseq)){
        stop("Please provide the RNA sequence!")
      }
      peaks <- cmrMat
      if(is.null(motifPos)){
        motifPos <- searchMotifPos(sequence = RNAseq, ...)
      }
      # One peak row -> matrix of (transcript ID, motif position) rows.
      .convertSNR <- function(inputVec, motifPos){
        curID <- inputVec[1]
        curStart <- as.numeric(inputVec[2])
        curEnd <- as.numeric(inputVec[3])
        curMotif <- motifPos[[curID]]
        curPos <- curMotif[which(curMotif >= curStart & curMotif <= curEnd)]
        tmpMat <- matrix(NA, nrow = length(curPos), 2)
        tmpMat[,1] <- curID
        tmpMat[,2] <- curPos
        tmpMat
      }
      resPosMat <- apply(cmrMat, 1, .convertSNR, motifPos = motifPos)
      resPosMat <- do.call(rbind, resPosMat)
      colnames(resPosMat) <- c("IDs", "Position")
      peakMat <- resPosMat
    }else{
      peakMat <- cmrMat
    }
    ################CMR distribution################################
    # Classify one single-nucleotide position into 5'UTR / CDS / 3'UTR
    # using the transcript's row of UTRMat (see getUTR column layout).
    .UTRPosition <- function(inputVec, UTRMat){
      posSampleID <- inputVec[1]
      posSamplePos <- as.numeric(inputVec[2])
      curUTR <- as.numeric(UTRMat[posSampleID, ])
      if(posSamplePos >= curUTR[5] & posSamplePos <= curUTR[6]){
        res <- "three prime UTR"
      }else if(posSamplePos >= curUTR[3] & posSamplePos <= curUTR[4]){
        res <- "CDS"
      }else{
        res <- "five prime UTR"
      }
      res
    }
    # Keep only positions on transcripts that have a UTRMat entry.
    peakMat <- peakMat[which(peakMat[,1] %in% rownames(UTRMat)),]
    finalPosition <- apply(peakMat, 1, .UTRPosition, UTRMat = UTRMat)
    peakMat <- cbind(peakMat, finalPosition)
    resPos <- table(finalPosition)
    ########################CMR normalized distribution################
    # Rescale each position so the 5'UTR maps to [0,1), the CDS to [1,2)
    # and the 3'UTR to [2,3) for the meta-gene density plot below.
    .normalizeDis <- function(inputVec, UTRMat){
      curID <- inputVec[1]
      curPos <- as.numeric(inputVec[2])
      curRegion <- inputVec[3]
      if(curRegion == "five prime UTR"){
        curRes <- curPos/UTRMat[curID, 2]
      }else if(curRegion == "CDS"){
        curRes <- (curPos - UTRMat[curID,2] + 1)/((UTRMat[curID, 4] - UTRMat[curID, 3]) + 1) + 1
      }else{
        curRes <- (curPos - UTRMat[curID,5] + 1)/((UTRMat[curID, 6] - UTRMat[curID, 5]) + 1) + 2
      }
      curRes
    }
    positive.sample.position <- apply(peakMat, 1, .normalizeDis, UTRMat = UTRMat)
    if(plot){
      if(SNR){
        # Three panels: region pie chart, meta-gene density, CMRs/transcript.
        par(mfrow = c(1,3))
        par(mar=c(2, 2, 2, 2))
        pie(resPos, col = c('yellow', "green", 'red'), labels = names(resPos),
            main = "CMR distribution along the transcript")
        plot(density(positive.sample.position), main =
               "Distribution of CMR in the cDNA",
             col = "red", lwd = 2, xaxt = "n")
        rug(seq(0, 1, 0.001), col = "lightgreen")
        rug(seq(1, 2, 0.001), col = "cadetblue3")
        rug(seq(2, 3, 0.001), col = "red")
        legend("topleft", col = c("lightgreen", "cadetblue3", "red"), lwd = c(5, 5, 5),
               legend = c("5'UTR", "CDS", "3'UTR"))
        aa <- table(peakMat[,1])
        bb <- table(aa)
        barplot(bb, col = rainbow(length(bb)),
                main = "Transcripts with different CMRs.")
      }else{
        # Four panels: additionally, number of motifs per peak.
        par(mfrow = c(2,2))
        par(mar=c(2, 2, 2, 2))
        pie(resPos, col = c('yellow', "green", 'red'), labels = names(resPos),
            main = "CMR distribution along the transcript")
        plot(density(positive.sample.position), main =
               "Distribution of CMR in the cDNA",
             col = "red", lwd = 2, xaxt = "n")
        rug(seq(0, 1, 0.001), col = "lightgreen")
        rug(seq(1, 2, 0.001), col = "cadetblue3")
        rug(seq(2, 3, 0.001), col = "red")
        legend("topleft", col = c("lightgreen", "cadetblue3", "red"), lwd = c(5, 5, 5),
               legend = c("5'UTR", "CDS", "3'UTR"))
        # Count how many motif positions fall inside each original peak.
        .peakmotifNumber <- function(inputVec, motifPos){
          curID <- inputVec[1]
          curMotif <- motifPos[[curID]]
          motifNumber <- length(which(curMotif >= as.numeric(inputVec[2]) & curMotif <= as.numeric(inputVec[3])))
          motifNumber
        }
        peakMotifNumber <- apply(cmrMat, 1, .peakmotifNumber, motifPos = motifPos)
        tt <- table(peakMotifNumber)
        barplot(tt, col = rainbow(length(tt)), main = "Peaks with different CMRs")
        aa <- table(peakMat[,1])
        bb <- table(aa)
        barplot(bb, col = rainbow(length(bb)), main = "Transcripts with different CMRs.")
      }
    }
    resList <- list(cmrMat = cmrMat, position = finalPosition,
                    distribution = positive.sample.position)
    return(resList)
  }
  if(annotation == "motifScan"){
    if(is.null(cmrSeq)){
      stop("Please provide the CMR-related sequences.")
    }
    results <- motifScan(sequence = cmrSeq, ...)
    return(results)
  }
  if(annotation == "motifDetect"){
    if(is.null(cmrSeq)){
      stop("Please provide the CMR-related sequences.")
    }
    results <- motifDetect(sequence = cmrSeq, ...)
    return(results)
  }
  if(annotation == "GO"){
    if(is.null(GTF)){
      stop("Please provide the GTF file name!")
    }
    geneID <- .geneID(GTF = GTF)
    resGene <- geneID[which(geneID[,2] %in% cmrMat[,1]),1]
    resGO <- runTopGO(geneID = resGene, ...)
    return(resGO)
  }
  # NOTE(review): if `annotation` matched none of the four modes, resList
  # is undefined here and this line raises an error -- confirm whether an
  # explicit stop() with a clear message is intended instead.
  resList
}
|
9a0e9a556e85181d15f5a21f4ee1dd534514e69c | 2319dc0887e4e1f7d0848b1276838a30166030fd | /data-raw/books_data.R | 884a7cb7d8a22f8b6bc80364365d85642595bb35 | [
"MIT"
] | permissive | TNaidoo/booksr | b6499e27e4a374ff280da15df2a311ecc304f657 | c00e8024d36a967de090f29cef528cb805036b21 | refs/heads/main | 2023-01-11T11:38:44.069573 | 2020-11-20T16:58:29 | 2020-11-20T16:58:29 | 314,609,664 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 530 | r | books_data.R | ## code to prepare `books_data` dataset goes here
library(dplyr)
# Read the raw books CSV and coerce the numeric columns (they may arrive
# as character/factor) plus the publication date (m/d/Y format).
books_data <- utils::read.csv("books.csv", header = TRUE) %>%
  dplyr::mutate(average_rating = as.numeric(as.character(average_rating)),
                num_pages = as.numeric(num_pages),
                ratings_count = as.numeric(ratings_count),
                text_reviews_count = as.numeric(text_reviews_count),
                publication_date = as.Date(publication_date, tryFormats = c("%m/%d/%Y")))
# Save the cleaned data frame as the package's `books_data` dataset.
usethis::use_data(books_data, overwrite = TRUE)
|
0c76e5a6b3af24540c1eae36ae1d7a999c3e1ae0 | bb9741193ef932fcf02b1d821921f1116ba36fbd | /SGDCI_R_package/R/SARRegression2.R | a3f8ee4210ddc295e1c6ea5183a8aea85fd4ebb3 | [] | no_license | ganluannj/Spatial_SGD_Inference | 1213b6f0544265de9aef55f563ee71cb6675ff8e | a83edc1aee6a1858942936519c7d2258be84a154 | refs/heads/master | 2023-04-24T08:30:29.794070 | 2021-05-11T14:09:35 | 2021-05-11T14:09:35 | 265,364,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,067 | r | SARRegression2.R |
#' @name SAR
#' @title Parameter estimation and confidence intervals for the spatial
#'   autoregressive (SAR) model with stochastic gradient descent (SGD)
#' @description \code{SARSGD} makes one SGD pass over the N observations to
#'   estimate the SAR parameters (beta, sigma^2, rho); \code{SARSGDCI}
#'   additionally runs B perturbed SGD chains to construct confidence
#'   intervals for those parameters.
SARSGD<-function(X, Y, W, lr0, C=2/3, burnin=0.2, K=30){
  # X is the covariate matrix of size N*p
  # Y is the matrix of the response variable, size N*1
  # W is the neighborhood matrix
  # lr0 is the initial learning rate; the step size for the k-th
  # observation is lr0 * k^(-C) (see LRSGD below)
  # burnin is the proportion of early iterates discarded before averaging
  # K is the number of series terms used for diag(A^(-1)) in Update()
  # sample size and number of covariates
  N = dim(X)[1]
  p = dim(X)[2]
  # diagonal elements of I, W, W^2, ..., W^(K-1), reused by every update
  Wdialist=Powerdiagnoal(W, K)
  # initialize parameters; phi = log(sigma^2) and theta = asin(rho) are
  # the unconstrained reparameterisations actually updated by SGD
  set.seed(10)
  beta0=runif(p)
  rho0=runif(1)
  sigsq0 = runif(1,0,1)
  phi0=log(sigsq0)
  theta0=asin(rho0)
  Par0=c(beta0, sigsq0, rho0, phi0, theta0)
  Parlist=matrix(rep(0, (N+1)*(p+4)), nrow=p+4)
  # set the first column of Parlist to the initial parameter vector
  Parlist[,1]=Par0
  LRSGD=lr0*(1:N)^(-C)
  # SGD pass: one stochastic update per observation
  for (j in 1:N){
    LR=LRSGD[j]
    Parlist[,j+1]=Update(Par=Parlist[,j], W=W, Wdialist = Wdialist,
                         X=X, Y=Y, LR=LR, j=j, K=K)
  }
  # discard the burn-in iterates and average the rest
  Nburn=as.integer(N*burnin)
  Parlist=Parlist[,(Nburn+2):(N+1)]
  Estimate=rowMeans(Parlist)
  parameters=c()
  for (i in 0:(p-1)){
    parameters=c(parameters, paste0('beta',toString(i)))
  }
  parameters=c(parameters, 'sigmasq', 'rho')
  # only beta, sigma^2 and rho are reported (phi and theta are internal)
  Result=data.frame(parameters=parameters, estimate=Estimate[1:(p+2)])
  return (Result)
}
# SGD estimation of the SAR model plus confidence intervals built from B
# perturbed SGD chains (random-weighting bootstrap).
#   per_dis:   distribution of the perturbation weights, 'Poi' (Poisson(1))
#              or 'exp' (Exponential(1))
#   CI.level:  nominal coverage of the intervals
#   CI.method: 'sd' (normal interval from the perturbed-chain SD) or
#              'quantile' (reflected empirical quantiles)
# Returns list(SGDEstimate, CI).
SARSGDCI<-function(X, Y, W, lr0, C=2/3, burnin=0.2, K=30, B=200,
                   per_dis='Poi',CI.level=0.95, CI.method='sd'){
  # get the sample size
  N = dim(X)[1]
  p = dim(X)[2]
  # diagonal elements of I, W, ..., W^(K-1)
  Wdialist=Powerdiagnoal(W, K)
  # initialize parameters (shared by all B+1 chains)
  set.seed(10)
  beta0=runif(p)
  rho0=runif(1)
  sigsq0 = runif(1,0,1)
  phi0=log(sigsq0)
  theta0=asin(rho0)
  Par0=c(beta0, sigsq0, rho0, phi0, theta0)
  ParCI0=rep(Par0,B+1)
  # each column stacks B+1 parameter vectors: chain 1 is the unperturbed
  # SGD chain, chains 2..B+1 are the perturbed ones
  ParCIlist=matrix(rep(0, (N+1)*(p+4)*(B+1)), ncol=N+1)
  ParCIlist[,1]=ParCI0
  LRSGD=lr0*(1:N)^(-C)
  # perturbed-SGD pass: fresh perturbation weights for every observation,
  # with weight 1 for the unperturbed chain
  for (j in 1:N){
    LR=LRSGD[j]
    if(per_dis=='Poi'){
      faclist=c(1, rpois(B, 1))
    }
    if (per_dis=='exp'){
      faclist=c(1,rexp(B,1))
    }
    ParCIlist[,j+1]=Updateper(ParCI=ParCIlist[,j], W=W, Wdialist = Wdialist,
                              X=X, Y=Y, LR=LR, j=j, K=K, faclist = faclist)
  }
  # remove the burnin
  Nburn=as.integer(N*burnin)
  ParCIlist=ParCIlist[,(Nburn+2):(N+1)]
  # point estimate from the unperturbed chain (first p+4 rows)
  Estimatelist=ParCIlist[1:(p+4),]
  Estimate=rowMeans(Estimatelist)
  # drop the last two entries (internal phi and theta)
  Estimate=Estimate[1:(p+2)]
  # remaining rows belong to the B perturbed chains
  ParCIlist=ParCIlist[(p+5):((p+4)*(B+1)),]
  # averaged (post-burn-in) estimate of each perturbed chain
  ParCImean=rowMeans(ParCIlist)
  ParCImean=matrix(ParCImean, nrow=p+4, byrow = FALSE)
  # drop the last two rows (phi and theta)
  ParCImean=ParCImean[1:(p+2),]
  if (CI.method=='sd'){
    # normal interval using the perturbed-chain standard deviation
    SD=apply(X=ParCImean, MARGIN = 1, FUN='sd')
    z=qnorm(1-(1-CI.level)/2)
    # lower bound
    lower=Estimate - z*SD
    # upper bound
    upper=Estimate + z*SD
  }
  if (CI.method=='quantile'){
    # reflected empirical-quantile interval
    lo = (1-CI.level)/2
    up = CI.level+lo
    # lower bound
    lower = 2*Estimate - apply(X=ParCImean,
                               MARGIN = 1, FUN=function(x) Quantile(x,up))
    # upper bound
    upper = 2*Estimate - apply(X=ParCImean,
                               MARGIN = 1, FUN=function(x) Quantile(x,lo))
  }
  # create a vector of parameter names
  parameters=c()
  for (i in 0:(p-1)){
    parameters=c(parameters, paste0('beta',toString(i)))
  }
  parameters=c(parameters, 'sigmasq','rho')
  SGDEstimate=data.frame(parameters=parameters, estimate=Estimate)
  CI=data.frame(parameters=parameters, lowbd=lower, upbd=upper, conf_level = rep(CI.level, p+2))
  return(list(SGDEstimate, CI))
}
###########################################################
#####     one stochastic update of all parameters  ########
###########################################################
Update<-function (Par, W, Wdialist, X, Y, LR, j, K=K, fac=1){
  # Par is a vector of length p+4:
  #   Par[1:p]   beta estimate
  #   Par[p+1]   sigma^2 estimate
  #   Par[p+2]   rho estimate
  #   Par[p+3]   phi = log(sigma^2)  (unconstrained, actually updated)
  #   Par[p+4]   theta = asin(rho)   (unconstrained, actually updated)
  # W is the neighborhood matrix; Wdialist is a list whose i-th element
  # holds the diagonal of W^(i-1); X, Y are the data; LR the learning
  # rate; j the index of the observation used for this update; K the
  # number of series terms for diag(A^(-1)); fac a multiplicative
  # perturbation weight (1 for the unperturbed chain).
  p=length(Par)-4
  betaest=Par[1:p]
  sigsqest=Par[p+1]
  rhoest=Par[p+2]
  phiest=Par[p+3]
  thetaest=Par[p+4]
  # powers rho^0, ..., rho^(K-1)
  rholist=Rholistgen(rhoest, K)
  # j-th diagonal element of A^(-1) via the truncated series
  InvAjj = InvAdig(rholist, Wdialist, j)
  # weighted mean of the neighbors' responses
  Neighmean = W[j,]%*%Y
  # residual of observation j and the beta gradient step
  littlex=X[j,]
  sqpart=(Y[j] - rhoest*Neighmean-sum(littlex*betaest))[1,1]
  derbeta= sqpart*littlex/sigsqest
  betaesttemp=betaest+LR*derbeta*fac
  # gradient step for phi (chain rule through sigma^2 = exp(phi))
  dersigsq=-1/(2*sigsqest) + 1/(2*sigsqest**2)*sqpart**2
  derphi=dersigsq*exp(phiest)
  phiest=phiest+LR*derphi*fac
  # gradient step for theta (chain rule through rho = sin(theta))
  derrho=-(1/rhoest*InvAjj-1/rhoest)+1/sigsqest*sqpart*Neighmean
  dertheta=derrho*cos(thetaest)
  thetaest=thetaest+LR*dertheta*fac
  # re-assemble the parameter vector; sigma^2 and rho are recovered from
  # the updated unconstrained parameters
  Par2=rep(0,p+4)
  Par2[1:p]=betaesttemp
  Par2[p+1]=exp(phiest)
  Par2[p+2]=sin(thetaest)
  Par2[p+3]=phiest
  Par2[p+4]=thetaest
  return (Par2)
}
###########################################################
#####  apply Update() to every (perturbed) chain  #########
###########################################################
# ParCI stacks length(faclist) parameter vectors end to end; segment q is
# updated with perturbation weight faclist[q] (weight 1 = unperturbed chain).
Updateper<-function(ParCI, W, Wdialist, X, Y, LR, j, K=K, faclist){
  n_chains <- length(faclist)
  par_len <- as.integer(length(ParCI) / n_chains)
  updated <- rep(0, length(ParCI))
  for (q in seq_len(n_chains)) {
    seg <- ((q - 1) * par_len + 1):(q * par_len)
    updated[seg] <- Update(Par = ParCI[seg], W = W, X = X, Y = Y,
                           Wdialist = Wdialist, LR = LR, j = j, K = K,
                           fac = faclist[q])
  }
  updated
}
###########################################################
#####  diagonals of successive powers of W        #########
###########################################################
# Returns a list of length K whose i-th element is the diagonal of
# W^(i-1); element 1 is therefore the diagonal of the identity matrix.
Powerdiagnoal<-function(W, K){
  n <- nrow(W)
  diagonals <- list()
  diagonals[[1]] <- rep(1, n)
  power <- diag(n)
  for (k in 2:K) {
    power <- power %*% W
    diagonals[[k]] <- diag(power)
  }
  diagonals
}
###########################################################
#####  geometric sequence of powers of rho        #########
###########################################################
# Returns c(Rho^0, Rho^1, ..., Rho^(K-1)).
# The original built this with `for (i in 2:K)`, which for K = 1 expands
# to the descending sequence 2:1 and then fails on V[1] <- Rho * V[0];
# cumprod() handles every K >= 1 correctly.
Rholistgen<-function(Rho, K){
  c(1, cumprod(rep(Rho, K - 1)))
}
# j-th diagonal element of A^(-1) via the truncated Neumann series:
# sum_k rho^k * diag(W^k)[j], where Rholist holds the rho powers and
# Wdialist the corresponding diagonals.
InvAdig<-function(Rholist, Wdialist,j){
  wj <- vapply(seq_along(Rholist), function(k) Wdialist[[k]][j], numeric(1))
  sum(Rholist * wj)
}
# Empirical lower-tail quantile: the value at rank floor(length(x) * q)
# of x sorted in ascending order (e.g. q = 0.05 gives a value near the
# 5% smallest observations).  The rank is clamped into [1, length(x)] so
# that very small q no longer indexes x[0] (which returns numeric(0)).
Quantile<-function(x, q){
  x <- x[order(x)]
  N <- as.integer(length(x) * q)
  N <- min(max(N, 1L), length(x))
  x[N]
}
# Row-standardised rook-contiguity matrix of an N x N lattice whose cells
# are numbered consecutively in blocks of N: cell i is linked to i-1
# within a block and to i-N in the previous block.  Requires the Matrix
# package (sparse storage).  Assumes integer N >= 2.
Neigb<-function(N){
  W=Matrix(nrow=N*N,ncol=N*N,data=0,sparse=TRUE)
  # first block: link consecutive cells
  for(i in 2:N){
    W[i-1,i]=W[i,i-1]=1
  }
  # remaining cells: always link to the cell N positions back, and to the
  # previous cell unless i starts a new block (i %% N == 1)
  for (i in (N+1):(N*N)){
    if (i%%N==1){
      W[i-N,i]=W[i,i-N]=1
    }
    else{
      W[i-N,i]=W[i,i-N]=1
      W[i-1,i]=W[i,i-1]=1
    }
  }
  # row-standardise so each row sums to 1
  return (W/rowSums(W))
}
###########################################################
# Simulation setup: a 10 x 10 lattice (N = 100 cells) SAR model with
# spatial dependence rho = 0.3, coefficients (0.5, 0.5, -0.5) and unit
# error variance.
N=100
rho=0.3
beta0=0.5
beta1=0.5
beta2=-0.5
sigsq=1
W=Neigb(N**0.5)
# Truncated Neumann-series approximation of (I - rho*W)^(-1):
# I + rho*W + rho^2*W^2 + ... + rho^(K-1)*W^(K-1).
# For a Matrix object, length(W) is nrow*ncol, so sqrt recovers the
# dimension (assumes W is square).
Inverse<-function(W, rho,K=50){
  N=as.integer(length(W)**0.5)
  Rholist<-Rholistgen(rho, K)
  # NOTE(review): library() inside a function is discouraged; Matrix
  # should be attached by the caller (or Matrix:: used explicitly).
  library(Matrix)
  # Temp starts as a sparse identity; Result accumulates the series.
  Temp=sparseMatrix(i=1:N, j=1:N, x=rep(1,N))
  Result=Temp
  for (i in 2:K){
    Temp=Temp%*%W
    Result=Result+Temp*Rholist[i]
  }
  return (Result)
}
# Simulate SAR data: A = I - rho*W and Y = A^(-1) (X beta + eps).
A=diag(N)-rho*W
InvA=Inverse(W, rho)
x0=rep(1,N)
set.seed(10)
x1=runif(N,-1,1)
x2=runif(N,-1,1)
X=matrix(c(x0,x1,x2),nrow=N, byrow = FALSE)
beta=matrix(c(beta0,beta1,beta2),nrow = 3)
Xbeta=X%*%beta
set.seed(10)
ytilde=rnorm(N, 0, sigsq^(1/2))
ytilde=matrix(ytilde,nrow=N)
ytilde=ytilde+Xbeta
Y=InvA%*%ytilde
# Time the plain SGD fit and the SGD-with-confidence-intervals fit.
S1=Sys.time()
SARSGD(X, Y, W, lr0=0.2, C=2/3, burnin=0.2, K=30)
Sys.time()-S1
S2=Sys.time()
SARSGDCI(X, Y, W, lr0=0.2)
Sys.time()-S2
##############################################################
###### Constructing W for all data points are neighbors ######
##############################################################
# Fully connected case: every pair of the N points are neighbours with
# equal weight 1/(N-1); W_ is the 3 x 3 lattice matrix for comparison.
N=9
ln=matrix(rep(1,N),nrow=1)
W=(t(ln)%*%ln-diag(N))/(N-1)
W_=Neigb(N**0.5)
# Stationary distribution pi of the Markov chain with row-stochastic
# transition matrix W0, i.e. the solution of pi' W0 = pi' with
# sum(pi) = 1, solved as the least-squares system
#   rbind(I - t(W0), 1) %*% pi = c(0, ..., 0, 1).
# BUG FIX: the original used I - W0 (the right null space); since every
# row-stochastic matrix satisfies (I - W0) %*% 1 = 0, it returned the
# uniform distribution regardless of W0.  The stationary distribution is
# the LEFT eigenvector, hence I - t(W0).
Statdist<-function(W0){
  # find the stationary distribution for transition matrix W0
  N <- nrow(W0)
  A <- rbind(diag(N) - t(W0), rep(1, N))
  b <- matrix(c(rep(0, N), 1), ncol = 1)
  # normal equations of the overdetermined system
  solve(t(A) %*% A) %*% (t(A) %*% b)
}
# Approximate the limiting matrix lim_k W0^k by the 200th power, built by
# repeated squaring (5 -> 10 -> 20 -> 40 -> 80 -> 160 -> 200).
# The original additionally computed W^400 from W^200 but never used it;
# that dead code is removed and the returned value (W0^200) is unchanged.
Limit<-function(W0){
  W5=W0%*%W0%*%W0%*%W0%*%W0
  W10=W5%*%W5
  W20=W10%*%W10
  W40=W20%*%W20
  W80=W40%*%W40
  W160=W80%*%W80
  W160%*%W40
}
# Check whether the rows of W0 have converged to a common limit: every
# row i is compared with row i+k (k = 1 or 2); returns FALSE as soon as
# any pair of entries differs by more than 1e-4, TRUE otherwise.
Limitcheck<-function(W0,k=1){
  N <- nrow(W0)
  for (i in seq_len(max(N - k, 0))) {
    if (any(abs(W0[i, ] - W0[i + k, ]) > 0.0001)) {
      return(FALSE)
    }
  }
  TRUE
}
# Sanity checks on the row-standardised lattice matrix: its stationary
# distribution and its limiting power.
Statdist(W_)
Limit(W_)
|
b0ed4de1f4c5044d88cd4bf13552c8e9907364b2 | a11e5a6b5f597027117e5c006baec6f031914e70 | /Functions/the311/R/query.R | 4f2a768375f4551a489a0a7507b0c6d7ab00a88b | [] | no_license | jverzani/RSquareEdge | 667b9bcf2180c28432e41a62efdd118c878c36f2 | 4e587a009ad21f05ce2dc02cbde076270e47f66c | refs/heads/master | 2020-12-09T02:59:47.373824 | 2016-08-13T17:15:49 | 2016-08-13T17:15:49 | 52,087,183 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,163 | r | query.R | ##' @include utils.R
NULL

## S3 generic rendering one value + field name as a SODA filter clause.
## Dispatch is on the class of `x` (character, POSIXct, BBOX); callers go
## through the `soda()` wrapper below rather than invoking methods directly.
Soda <- function(x,var,...) UseMethod("Soda")
## Character values -> a set-membership clause, e.g. `var in('a', 'b')`.
## BUG FIX: the original quoted `x`, a name not defined in this method
## (it only worked if a global `x` happened to exist); the parameter is
## `val` and is now used.
Soda.character <- function(val, var) {
  sprintf("%s in(%s)", var, paste(shQuote(val), collapse = ", "))
}
## POSIXct values -> a time-window clause: one timestamp gives
## `var >= 't'`; two (extras beyond the first two are dropped) give
## `var between 't1' and 't2'`.  Note the data value is the FIRST
## argument (S3 dispatches on the first argument).
## BUG FIX: the original quoted an undefined global `x` instead of the
## `vals` parameter.
Soda.POSIXct <- function(vals, var) {
  if (length(vals) > 1) vals <- vals[1:2]
  compare_with <- ifelse(length(vals) == 1, ">=", "between")
  sprintf("%s %s %s", var, compare_with, paste(shQuote(vals), collapse=" and "))
}
##' Build a BBOX bounding box from latitude and longitude vectors
##'
##' @param lats latitudes (any number of values; only min/max are used)
##' @param lons longitudes (any number of values; only min/max are used)
##' @return A numeric vector c(lat_min, lon_min, lat_max, lon_max) carrying
##'   the "BBOX" S3 class, which routes it to Soda.BBOX for rendering.
##' @export
bbox <- function(lats, lons) {
  lat_rng <- range(as.numeric(lats), na.rm=TRUE)
  lon_rng <- range(as.numeric(lons), na.rm=TRUE)
  structure(c(lat_rng[1], lon_rng[1], lat_rng[2], lon_rng[2]),
            class = c("BBOX", "numeric"))
}
## BBOX values -> a SODA `within_box(...)` geo filter on `var`, with the
## four coordinates rendered to six decimal places.
Soda.BBOX <- function(b, var) {
  corners <- paste(sprintf("%2.6f", b[1:4]), collapse = ", ")
  paste0("within_box(", var, ", ", corners, ")")
}
## Queries combine naturally with `&` and `|`.  To be able to define
## those operators, query strings are tagged with an S3 class instead of
## being returned as bare character vectors.

##' Wrap a query string in the Query S3 class
Query <- function(x) {
  structure(x, class = c("Query", class(x)))
}
##' Create a query, using S3 dispatch on the value's class
##'
##' @param x: value to dispatch on (character, POSIXct, BBOX, ...)
##' @param var: a field name (validated through verify_name)
##'
##' Examples: soda(c(now()-days(2), now()), "created_date") -- records within a time window
##'           soda(bbox(lats, lons), "location") -- return records within bounding box
##'           soda("Noise - Residential", "complaint_type") -- match this complaint_type
##'
##' Queries can be combined with `&` or `|`
##' @export
soda <- function(x, var, ...) {
  if (length(var) > 1)
    warning("Only one variable at a time, first one being used.")
  field <- verify_name(var[1])
  Query(Soda(x, field, ...))
}
## Logical composition of Query objects: `q1 & q2`, `q1 | q2`.
"&.Query" <- function(x, y) Query(paste0(x, " and ", y))
"|.Query" <- function(x, y) Query(paste0(x, " or ", y))
|
52645783260da98b595db408dfe0492e1a08d343 | 29585dff702209dd446c0ab52ceea046c58e384e | /CEC/inst/cec_tests/energy.calculation.test.mouseset1.R | cdbecf7ccabf4e2dc3fe950d5582e65f2f869a5f | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,371 | r | energy.calculation.test.mouseset1.R | testname <- "Energy calculation (mouseset1)"
# Load the mouse1 test data set shipped with the CEC package into B.
# NOTE(review): B is the value of the last expression; the test harness
# presumably evaluates setup() so that B becomes visible to the test
# functions below -- confirm against the runner.
setup <- function()
{
  B <- as.matrix(read.table(system.file("cec_tests", "mouse1.data", package="CEC")))
}
# Energy of a single-cluster CEC fit with a user-supplied ("cov")
# covariance matrix, pinned against a reference value.
test.type.covariance <- function() {
  fixed.cov <- matrix(c(2, 1, 1, 3), nrow = 2, ncol = 2)
  run <- cec(B, centers = 1, type = "cov", param = fixed.cov, iter.max = 0)
  CEC:::checkNumericVectorEquals(3.540174056, run$cost[1], msg = "Energy")
}
# Energy of a single-cluster CEC fit with the fixed-radius ("fix") model.
test.type.fixedr <- function()
{
  r <- 1.5
  expected.energy <- 3.416637007
  # Pass the named radius rather than repeating the literal 1.5 (the
  # original declared `r` but never used it); behaviour is unchanged.
  CE <- cec(B, centers=1, type="fix", param = r, iter.max=0)
  CEC:::checkNumericVectorEquals(expected.energy, CE$cost[1], msg="Energy")
}
# Energy of a single-cluster CEC fit with the spherical ("sp") model.
test.type.spherical <- function() {
  run <- cec(B, centers = 1, type = "sp", iter.max = 0)
  CEC:::checkNumericVectorEquals(3.403158062, run$cost[1], msg = "Energy")
}
# Energy of a single-cluster CEC fit with the diagonal ("diag") model.
test.type.diagonal <- function() {
  run <- cec(B, centers = 1, type = "diag", iter.max = 0)
  CEC:::checkNumericVectorEquals(3.396500695, run$cost[1], msg = "Energy")
}
# Energy of a single-cluster CEC fit with the unconstrained ("all") model.
test.type.all <- function() {
  run <- cec(B, centers = 1, type = "all", iter.max = 0)
  CEC:::checkNumericVectorEquals(3.396472329, run$cost[1], msg = "Energy")
}
|
3812b721bdc25c9fec2af48dbcd523abbe14f89f | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.customer.engagement/man/connect_update_user_identity_info.Rd | 716b4507e45c79ad4eda538ca79bdf6cfdb9523c | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 909 | rd | connect_update_user_identity_info.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_update_user_identity_info}
\alias{connect_update_user_identity_info}
\title{Updates the identity information for the specified user}
\usage{
connect_update_user_identity_info(IdentityInfo, UserId, InstanceId)
}
\arguments{
\item{IdentityInfo}{[required] The identity information for the user.}
\item{UserId}{[required] The identifier of the user account.}
\item{InstanceId}{[required] The identifier of the Amazon Connect instance. You can \href{https://docs.aws.amazon.com/connect/latest/adminguide/find-instance-arn.html}{find the instance ID}
in the Amazon Resource Name (ARN) of the instance.}
}
\description{
Updates the identity information for the specified user.
See \url{https://www.paws-r-sdk.com/docs/connect_update_user_identity_info/} for full documentation.
}
\keyword{internal}
|
31930a67c31357532fdf182272ae3066af9f0350 | 7cdfd206317b4b4d8acb8dd474c2672f87b54d98 | /plot3.R | 05075bbd88b5b8cf915b6bfbb638f121c11a4a3e | [] | no_license | decipleme/ExData_Plotting1 | 0f6c369951c25fa04dc49049291ad7642fd08049 | 7a3ae5527c2b7a731d71460c7fcef670fafc0997 | refs/heads/master | 2020-12-28T04:36:48.508781 | 2015-11-08T19:57:48 | 2015-11-08T19:57:48 | 45,795,302 | 0 | 0 | null | 2015-11-08T19:28:53 | 2015-11-08T19:28:52 | null | UTF-8 | R | false | false | 1,157 | r | plot3.R |
{
        # NOTE(review): the whole script is wrapped in a bare top-level
        # `{ ... }` block; it works when sourced but serves no purpose.
        # Read a two-day slice of the household power consumption data;
        # the skip/nrows values presumably select 2007-02-01/02 -- confirm.
        data <- read.table("household_power_consumption.txt", sep = ";", 
                           skip = 66637, nrows = 2880, na.strings = "?")
        colnames(data) <- c("Date", "Time", "Global_active_power", 
                            "Global_reactive_power", "Voltage", "Global_intensity", 
                            "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
        # combine Date and Time columns into POSIXlt timestamps
        time <- paste(as.character(data[,1]), as.character(data[,2]))
        time <- strptime(time, format = "%d/%m/%Y %H:%M:%S")
        # create a png file with default 480x480 size
        png(filename = "plot3.png")
        # plot the graph
        par(mfrow = c(1,1))
        # empty frame first, then one line per sub-metering channel
        plot(time, data$Sub_metering_1, type = "n", 
             xlab = "", ylab = "Energy sub metering")
        lines(time, data$Sub_metering_1, type = "l")
        #2
        lines(time, data$Sub_metering_2, type = "l", col = "red")
        #3
        lines(time, data$Sub_metering_3, type = "l", col = "blue")
        legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), 
               lty = 1, col = c("black", "red", "blue"))
        dev.off()
}
|
ce801db33bd109fff172f9c54e2ea4d147b57bbf | ce9bdf8164761b42efe0708bfe27e19da9744b39 | /DataExtraction.R | 374b28f094f0c7c68ca0385a60139ef1f4aa8431 | [] | no_license | Dilanka-Niro/Hydrology | 4f9959ce3a8c447e65eec66251026a608d3c0577 | ad9f9fa98e496a8c126dfbf2213e9d455854c516 | refs/heads/master | 2022-11-12T10:48:19.687269 | 2020-06-24T20:20:55 | 2020-06-24T20:20:55 | 276,183,519 | 0 | 0 | null | 2020-06-30T18:53:42 | 2020-06-30T18:53:41 | null | UTF-8 | R | false | false | 3,323 | r | DataExtraction.R | -------------------------------------EXTRACT AND TIDY CANADIAN HYDROMETRIC DATA------------------------------------------
#Set working directory -- this is a folder/place where all files from this project is saved
#Use getwd() to find filepath then enter it within the "" in line 5
work_directory <- "/Users/celynkhoo/R Projects/Hydrology"
setwd(work_directory)
library(tidyhydat)
library(dplyr) #data manipulation package
library(ggplot2) #data visualization tool
library(lubridate) #aids with date and time in R
# --------------------------------------------- GETTING HISTORICAL DATA ---------------------------------------------------
# (Header converted from a bare dashed line -- an R parse error -- into a comment.)
download_hydat() #downloads HYDAT database, the Canadian National Water Data Archive, data here has been validated/corrected
NL_stns <- hy_stations(prov_terr_state_loc = "NL") #Finding station information and numbers for NL regional data
#Pulling data from the stations you want. Things you would change in the code following are the station numbers and data type
Pipers_Hole <- hy_stn_data_range() %>%
  filter(DATA_TYPE == "Q", STATION_NUMBER == "02ZH001") %>%
  hy_daily_flows()
Come_by_Chance <- hy_stn_data_range() %>%
  filter(DATA_TYPE == "Q", STATION_NUMBER == "02ZH002") %>%
  hy_daily_flows()
Ugjoktok <- hy_stn_data_range() %>%
  filter(DATA_TYPE == "Q", STATION_NUMBER == "03NF001") %>%
  hy_daily_flows()
#Station information in list form including name, lat, long, drainage area
hy_stations(station_number = unique(Pipers_Hole$STATION_NUMBER)) %>%
  as.list()
hy_stations(station_number = unique(Come_by_Chance$STATION_NUMBER)) %>%
  as.list()
hy_stations(station_number = unique(Ugjoktok$STATION_NUMBER)) %>%
  as.list()
#Plotting the time series for the entire record with a snoother added, for other stations: replace 'Pipers_Hole' with new station
#Picking a time frame for this plot will be better
Pipers_Hole %>%
  ggplot(aes(x=Date, y=Value)) +
  geom_line() +
  geom_point() +
  geom_smooth() +
  labs(title = "Piper's Hole River", subtitle = "Station Number = 02ZH001", y = "Discharge (m^3/s)") +
  theme_minimal()
#Normalizing multiple stations by drainage area
stns <- c("02ZH001", "02ZH002", "03NF001")
runoff_data <- hy_daily_flows(station_number = stns, start_date = "2018-01-01", end_date = "2018-12-31") %>%
  left_join(hy_stations(station_number = stns) %>%
              select(STATION_NUMBER, STATION_NAME, DRAINAGE_AREA_GROSS), by = "STATION_NUMBER") %>%
  mutate(runoff = Value / DRAINAGE_AREA_GROSS * 86400 / 1e6 *1e3)
ggplot(runoff_data) +
  geom_line(aes(x=Date, y=runoff, colour = STATION_NAME)) +
  labs(title = "Normalized Discharge", y="Mean daily runoff (mm/day)", subtitle = "Data Source: Canadian National Water Data Archive") + scale_fill_gradient2() +
  theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom", legend.title = element_blank(), legend.direction = "vertical")
#Save as PNG 600 x 500
-------------------------------------------------GETTING REALTIME DATA--------------------------------------------------------
#Real-time data has not been validated and may have some missing values
realtime_dd(station_number = "02ZH001") #select specific realtime discharge station
realtime_plot(station_number = "02ZH001") #plots the most recent month
|
6f7549d31e48b441a72ccb42cdce4be5ec6f0d03 | 322acb29c4c89176b14eb81f0696bab787a5929f | /assignment4/test-assigntemt4.R | 9b0c5f312556f59a49cb9229618230ab0c8f5fe8 | [] | no_license | rshopa/computing-for-data-analysis | feecf75daf0ad90c87523af4218a146f21d52d1f | 09659dffa0f532ed026084a5749149878b1ab74c | refs/heads/master | 2021-01-10T04:31:01.001452 | 2016-02-10T12:26:32 | 2016-02-10T12:26:32 | 51,438,166 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 243 | r | test-assigntemt4.R | homicides <- readLines("../homicides.txt")
# How many of each cause of homicide?
# NOTE(review): count() is defined in count.R and presumably reads the global
# 'homicides' character vector loaded above -- confirm against count.R.
source('count.R')
count("other")          # tally for cause "other" (auto-printed at top level)
num <- count("unknown") # tally for cause "unknown"
print(num)
# Ages of homicide victims
# NOTE(review): agecount() is defined in agecount.R; presumably returns the
# number of victims of the given age -- confirm against agecount.R.
source('agecount.R')
agecount(3)             # victims aged 3 (auto-printed at top level)
num <- agecount(21)     # victims aged 21
print(num) |
5754ad4d9840da85501dde63d67eee82f274d271 | dfb18d1b4608c4404cc291a5232692abe04ca230 | /R/dendro.resample.R | 011d71fb893f101eda602e6ae8c500acccd23524 | [] | no_license | sugam72-os/dendRoAnalyst-1 | 0d2fb30336cc7fa287eb428472347a711bd6aaf9 | 9b009f909dddbbba0837f6e2aa18b97f0b2e17c8 | refs/heads/master | 2022-11-13T16:15:10.604682 | 2020-07-03T18:00:07 | 2020-07-03T18:00:07 | 285,296,186 | 1 | 0 | null | 2020-08-05T13:28:24 | 2020-08-05T13:28:23 | null | UTF-8 | R | false | false | 9,281 | r | dendro.resample.R | #' @title Resampling temporal resolution of dendrometer data
#'
#' @description This function is designed to change the temporal resolution of data. Depending on the objective, the user can define either maximum, minimum, or mean values to resample data in hourly, daily, weekly or monthly frequency.
#'
#' @param df dataframe with first column containing date and time in the format \code{yyyy-mm-dd HH:MM:SS}.
#'
#' @param by either \emph{H, D, W} or \emph{M} to resample data into hourly, daily, weekly or monthly resolution.
#'
#' @param value either \emph{max, min} or \emph{mean} for the resampling value.
#'
#' @return Dataframe with resampled data.
#'
#' @examples library(dendRoAnalyst)
#' data(nepa17)
#' # To resample monthly with maximum value
#' resample_M<-dendro.resample(df=nepa17[,1:2], by='M', value='max')
#' head(resample_M,10)
#'
#' @importFrom stats approx median na.exclude na.omit sd
#'
#'
#' @export
dendro.resample<-function(df, by, value){
  # Validate arguments up front so bad calls fail before any work is done.
  if(!by %in% c('H', 'D', 'W', 'M')){
    stop("You must provide arguement 'by' with either 'H' for Hourly, 'D' for Daily,'W' for weekly or 'M' for monthly")
  }
  if(!value %in% c('max', 'min', 'mean')){
    stop("You must provide arguement 'value' with either 'max' for maximum,'min' for minimum or 'mean' for mean")
  }
  data <- df
  # A single-series input (timestamp + one data column) gets the data column
  # duplicated temporarily so the column-wise aggregation below always sees
  # at least two value columns; the duplicate is dropped again at the end.
  single.series <- ncol(data) < 3
  if(single.series){
    data[, 3] <- data[, 2]
  }
  timestamp <- as.POSIXct(strptime(data[, 1], format = '%Y-%m-%d %H:%M:%S'), tz = 'UTC')
  if(is.na(timestamp[1])){
    stop('Date not in the right format')
  }
  # Numeric period index of every observation within its calendar year.
  yr <- as.numeric(format(timestamp, '%Y'))
  period <- switch(by,
                   'H' = as.numeric(format(timestamp, '%j')) * 100 +
                     as.numeric(format(timestamp, '%H')),             # day-of-year * 100 + hour
                   'D' = as.numeric(format(timestamp, '%j')),
                   'W' = pmax(as.numeric(format(timestamp, '%W')), 1), # week 0 merges into week 1
                   'M' = as.numeric(format(timestamp, '%m')))
  # Output label per row; a window is labelled by its first row, matching the
  # old behaviour. Hours are always zero-padded here (FIX: the old code padded
  # them only for value = 'max', so 'min'/'mean' produced e.g. "5:00:00").
  label <- if(by == 'H'){
    format(timestamp, '%Y-%m-%d %H:00:00')
  }else{
    format(timestamp, '%Y-%m-%d')
  }
  agg.fun <- switch(value, 'max' = max, 'min' = min, 'mean' = mean)
  # Row indices per window: years in order of appearance, periods ascending
  # within a year. Only observed windows are returned (FIX: empty interior
  # periods previously yielded junk rows of Inf/-Inf/NaN).
  groups <- list()
  for(y in unique(yr)){
    year.rows <- which(yr == y)
    for(p in sort(unique(period[year.rows]))){
      groups[[length(groups) + 1L]] <- year.rows[period[year.rows] == p]
    }
  }
  times <- vapply(groups, function(rows) label[rows[1]], character(1))
  vals <- t(vapply(groups,
                   function(rows) apply(data[rows, 2:ncol(data), drop = FALSE], 2,
                                        agg.fun, na.rm = TRUE),
                   numeric(ncol(data) - 1)))
  # Windows where a column is all NA yield -Inf (max), Inf (min) or NaN (mean);
  # report all of them as NA. (FIX: the old code only replaced '-Inf', so
  # 'min' leaked Inf and 'mean' leaked NaN.)
  vals[!is.finite(vals)] <- NA
  resamp <- data.frame(Time = times, vals, stringsAsFactors = FALSE)
  names(resamp) <- c('Time', colnames(data)[2:ncol(data)])
  if(single.series){
    # Drop the duplicated helper column and restore the caller's column names.
    resamp <- resamp[, 1:2]
    names(resamp) <- colnames(df)
  }
  rownames(resamp) <- seq_len(nrow(resamp))
  return(resamp)
}
|
ad6627f134238f86c43da1a6d08c7e89ed18cb19 | 4bd57b8501d4326ecc06c1d1ea499935e1668d95 | /MASH-dev/SeanWu/MBITES-Scripts/EXPERIMENT-Simulations.R | 2f3a2176601f41977103df8367992bf44e0cfefa | [] | no_license | aucarter/MASH-Main | 0a97eac24df1f7e6c4e01ceb4778088b2f00c194 | d4ea6e89a9f00aa6327bed4762cba66298bb6027 | refs/heads/master | 2020-12-07T09:05:52.814249 | 2019-12-12T19:53:24 | 2019-12-12T19:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,088 | r | EXPERIMENT-Simulations.R | ###############################################################################
# __ ___ ____ _____________________
# / |/ / / __ )/ _/_ __/ ____/ ___/
# / /|_/ /_____/ __ |/ / / / / __/ \__ \
# / / / /_____/ /_/ // / / / / /___ ___/ /
# /_/ /_/ /_____/___/ /_/ /_____//____/
#
# Simulations of Peri-domestic breeding resource-scapes
# MBITES Team
# May 2018
#
###############################################################################
# NOTE(review): clears the entire workspace; only safe as a standalone script
rm(list=ls());gc()
###############################################################################
# load landscape sets
###############################################################################
# landscapes/humans/mosquitoes are parallel lists with one element per
# simulated landscape (26 in total, judging from the index sets below)
dir_dev <- "/Users/slwu89/Desktop/git/MASH-Main/MASH-dev/"
landscapes <- readRDS(file = paste0(dir_dev,"DavidSmith/MBITES-Demo/periDomesticLandscapes.rds"))
humans <- readRDS(file = paste0(dir_dev,"DavidSmith/MBITES-Demo/periDomesticHumans.rds"))
mosquitoes <- readRDS(file = paste0(dir_dev,"DavidSmith/MBITES-Demo/periDomesticMosquitoes.rds"))
# bundle one self-contained input list per landscape and create its output dir
peridomestic_data <- vector(mode="list",length=length(landscapes))
directory <- "/Users/slwu89/Desktop/mbites/peridom/"
for(i in 1:length(peridomestic_data)){
  peridomestic_data[[i]]$id <- i
  peridomestic_data[[i]]$directory <- directory
  peridomestic_data[[i]]$landscape <- landscapes[[i]]$sites
  peridomestic_data[[i]]$humans <- as.data.frame(humans[[i]])
  peridomestic_data[[i]]$mosquitoes <- as.data.frame(mosquitoes[[i]])
  dir.create(path = paste0(directory,"landscape",i))
}
###############################################################################
# Run MBITES
###############################################################################
library(parallel)
cl <- parallel::makePSOCKcluster(names = 8) # 8 worker processes
# initialize MBITES parameters on cores
parallel::clusterEvalQ(cl = cl,expr = {
  library(MBITES)
  # initialize methods
  MBITES_Setup_MBDETES()
  PATHOGEN_Setup(pathogen_model = "null")
  # we want detailed output of blood hosts from the mosquito
  trackBloodHost()
  trackOviposition()
  # set parameters
  MBITES:::Parameters$set_parameters(Bs_surv = 0.95,Os_surv = 0.95,B_surv = 0.99,O_surv = 0.99,
                                     Bs_succeed = 0.99,Os_succeed = 0.99,B_succeed = 0.95,O_succeed = 0.99,
                                     S_u = 0,disperse = 0.2)
})
# set RNG streams (reproducible independent streams per worker)
parallel::clusterSetRNGStream(cl = cl,iseed = 123)
# for running locally with only 4 cores
# NOTE(review): the next assignment immediately overwrites this 4-core
# batching; only the 8-core batching below is actually used
idx <- list((1:4),(5:8),(9:12),(13:16),(17:20),(21:24),(25:26))
idx <- list((1:8),(9:17),(18:25),26) # 8 cores
# each batch of landscape indices is dispatched across the workers
for(ix in idx){
  # run simulation
  parallel::clusterMap(cl = cl,fun = function(x){
    # initialize a tile
    Tile_Initialize(x$landscape)
    Human_NULL_Initialize(x$humans)
    MBITES_Initialize(x$mosquitoes)
    # run simulation
    set_output(directory = x$directory,runID = x$id)
    simulation(tMax = 365*100,pretty = TRUE)
    hardreset()
  },x=peridomestic_data[ix],RECYCLE = FALSE,SIMPLIFY = FALSE,USE.NAMES = FALSE,.scheduling = "dynamic")
}
parallel::stopCluster(cl)
rm(cl);gc()
|
41c3c188a5f966abf1dcf20101b1a35a3c993b41 | d3f20ed2a13eb9ca153094f15c2e351e2642cb19 | /datahandling/scaling_centering.R | 90e7f975ed680bf9c62e130ce020a1c0acfb166d | [] | no_license | apoorvakarn/R-in-Action | b7d0003d9d0be0755b7710903fb56984a59dda9b | 923dfe1f12cecfdd1511d2d55e4a4796499f9c82 | refs/heads/master | 2021-09-11T19:49:45.127777 | 2018-04-11T18:01:48 | 2018-04-11T18:01:48 | 100,679,882 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 709 | r | scaling_centering.R | #first centering should be done----
# Demonstration of scale(): centering, scaling and z-scores.
#centering-- we subtract a location (the mean by default) from each value
#scaling-- we divide by standard deviation (root mean square when not centered)
#for z-score-- we provide TRUE for both the center value and scale value
# ?scale   # look up the help page interactively if needed
# (FIX: T/F replaced by TRUE/FALSE -- T and F are ordinary variables that can
# be reassigned, so the literals are the safe idiom; '=' replaced by '<-';
# the bare '?scale' help lookup is commented out for non-interactive runs.)
x <- c(1, 3, 5, 7, 9, 2, 4, 6, 8, 10)
(x1 <- scale(x, scale = FALSE, center = FALSE)) # no-op: values unchanged, returned as a matrix
as.vector(x1)
(xc <- scale(x, scale = FALSE, center = TRUE)) # centering the mid value (the mean, 5.5)
(xc <- scale(x, scale = FALSE, center = 5.5))  # same result with an explicit center
(xc1 <- scale(x, scale = FALSE, center = 4))   # center on an arbitrary value
cbind(x, -4, xc1) # NOTE(review): '-4' binds a constant column; 'x - 4' may have been intended -- confirm
(xs <- scale(x, scale = TRUE, center = FALSE)) # uncentered: divides by root mean square
(round(xs))
(rmse <- sqrt(sum(x^2)/(length(x)-1))) #root mean square value used by scale() above
round((xs2 <- (x/rmse)), 2) # manual check of the root-mean-square scaling
(xsc <- scale(x, scale = TRUE, center = TRUE)) #mean and standard deviation: z-scores
round((zscore <- (x-mean(x))/sd(x)), 2) # z-score value computed by hand
(x2 <- x - mean(x)) # centered values by hand
|
4c04d71bc2b05380b9c986bdb00d1fb895a32a20 | 2b434a561e65b7c44cd0d10d3619119f805eefc7 | /Code/Building_Set_14/2a_p_Percentile_Sensitivity_Analysis.R | 9132dd278ae212135fef4faf5af3e73e6f65a853 | [] | no_license | etzkorn/postuR_analysis | 9f0059b6bfcad2dd4629c259af6811d5f7ed161e | d0f8d07c8396ac06a88706392ed6dfc6720e5a2c | refs/heads/main | 2023-06-19T23:34:21.700493 | 2021-07-16T15:18:41 | 2021-07-16T15:18:41 | 386,680,358 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,827 | r | 2a_p_Percentile_Sensitivity_Analysis.R | # Perform sensitivity analysis for the quantile used to estimate the top of the sphere.
# Vary |H_i| from 0.9 T_i ... 0.99 T_i
# check angular change in upright orientation
# check change in classifications
###########################################################
# Check Angular Change in Upright Orientation
#
# For each subject file, re-estimate the upright ("top of sphere") direction
# at percentile cutoffs 90% ... 99% and record how far (in degrees) each
# estimate moves relative to its neighbours and to the 90/95/99% references.
# NOTE(review): clears the workspace; run only as a standalone script
rm(list = ls())
raw.file <- dir("Data/Data_SMASH_ZIO/Full_Data_2019-04-26_15:15:35/", full.names = T)
raw.file <- raw.file[grepl("Full_Data", raw.file) & grepl(".rdata", raw.file)]
## Packages
library(lubridate)
library(dplyr)
library(ggplot2)
library(reshape2)
library(gridExtra)
library(movMF)
# convert a chord length between two unit vectors into an angle in degrees
chord2theta <- function(chord) 2*asin(chord/2)/pi*180
top.data <- tibble()
max.theta <- c()
for(i in raw.file){
  ## Grab one raw data file
  # NOTE(review): each .rdata file is assumed to load a data frame named
  # 'data' with accelerometer axes x, y, z and an 'id' column -- confirm
  load(i)
  data <- data %>%
    mutate(r = sqrt(x^2 + y^2 + z^2),  # vector magnitude
           x = x/r,                    # project onto the unit sphere
           y = y/r,
           z = z/r,
           r2 = (1-r)^2)               # squared deviation of magnitude from 1
  # calculate quantiles
  q <- quantile(data$r2, probs = seq(0.9, 0.99, by = 0.01))
  # get centers for each quantile
  # one unit-length mean direction per cutoff (rows = cutoffs after t())
  cutoff.means <-
    sapply(q, FUN = function(q0){
      data %>%
        filter(r2>q0) %>%
        select(x,y,z) %>%
        sapply(mean)
    }) %>%
    apply(2, function(m) m / sqrt(sum(m^2))) %>% t()
  # length of the un-normalised mean vector: a concentration measure
  mean.resultant.length <-
    sapply(q, FUN = function(q0){
      data %>%
        filter(r2>q0) %>%
        select(x,y,z) %>%
        sapply(mean)
    }) %>%
    apply(2, function(m) sqrt(sum(m^2))) %>% unclass
  # calculate angular differences
  # pairwise chord distances between cutoff means, converted to degrees
  theta.diffs <- cutoff.means %>%
    dist %>%
    as.matrix %>%
    chord2theta
  theta90 <- theta.diffs[,1]                        # angle to the 90% estimate
  theta95 <- theta.diffs[,6]                        # angle to the 95% estimate
  theta99 <- theta.diffs[,10]                       # angle to the 99% estimate
  theta.diffs <- c(NA,diag(theta.diffs[1:9,2:10]))  # cutoff-to-cutoff change
  max.theta <- cutoff.means %>%
    dist %>%
    max %>%
    chord2theta %>%
    c(max.theta)
  # diffs
  # combine data: one row per (subject, cutoff) with all summaries attached
  cutoff.means <- data.frame(p = rownames(cutoff.means),
                             q = q,
                             mean.resultant.length = mean.resultant.length,
                             theta.diffs = theta.diffs,
                             theta90 = theta90,
                             theta95 = theta95,
                             theta99 = theta99) %>%
    cbind(cutoff.means) %>%
    mutate(id = data$id[1])
  top.data <- bind_rows(top.data, cutoff.means)
}
###########################################################
# Plot angular differences from 95th percentile mean
# (one line per subject: angle between each cutoff's estimate and the 95% one)
png(filename = "Figures/Resubmission_Plots/Sensitivity_Percentile.png",
    width = 800, height = 600)
top.data %>%
  #filter(!id %in% c("150831_N509676032","150903_N509636036",
  #                  "151031_N520942075","151102_N520912069")) %>%
  mutate(p = as.numeric(gsub("%","", p))) %>%
  ggplot() +
  geom_line(aes(x = p,
                y = (theta95), group = id)) +
  geom_point(aes(x = p,
                 y = (theta95), group = id)) +
  scale_x_continuous("Percentile",breaks = 90:99,minor_breaks = NULL, expand = c(0.01,0.01)) +
  scale_y_continuous("Angular Change (degrees)", expand = c(0.01,0.01))+
  theme_bw(20)
dev.off()
# Plot angular differences from 99th percentile mean
png(filename = "Figures/Resubmission_Plots/Sensitivity_Percentile2.png",
    width = 800, height = 600)
top.data %>%
  #filter(!id %in% c("150831_N509676032","150903_N509636036",
  #                  "151031_N520942075","151102_N520912069")) %>%
  mutate(p = as.numeric(gsub("%","", p))) %>%
  ggplot() +
  geom_path(aes(x = p,
                y = (theta99), group = id)) +
  geom_point(aes(x = p,
                y = (theta99), group = id)) +
  scale_x_continuous("Percentile",breaks = 90:99,minor_breaks = NULL, expand = c(0.01,0.01)) +
  scale_y_continuous("Angular Change (degrees)", expand = c(0.01,0.01))+
  theme_bw(20)
dev.off()
# Plot angular differences from 90th percentile mean
png(filename = "Figures/Resubmission_Plots/Sensitivity_Percentile3.png",
    width = 800, height = 600)
top.data %>%
  #filter(!id %in% c("150831_N509676032","150903_N509636036",
  #                  "151031_N520942075","151102_N520912069")) %>%
  mutate(p = as.numeric(gsub("%","", p))) %>%
  ggplot() +
  geom_path(aes(x = p,
                y = (theta90), group = id)) +
  geom_point(aes(x = p,
                y = (theta90), group = id)) +
  scale_x_continuous("Percentile",breaks = 90:99,minor_breaks = NULL, expand = c(0.01,0.01)) +
  scale_y_continuous("Angular Change (degrees)", expand = c(0.01,0.01))+
  theme_bw(30)
dev.off()
# Console summaries of the angle to the 90% reference at the 99% cutoff.
# (Numbers in the comments below are results kept from a previous run.)
summary(top.data$theta90[top.data$p == "99%"])
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.8327 2.6175 4.2269 4.8678 4.6688 20.2406
sort(top.data$theta90[top.data$p == "99%"])
#[1] 0.8327011 0.9852590 1.4776786 2.3321806 2.7126440 2.7790493 2.8282541 3.9638634 4.4899962 4.5314901
#[11] 4.5509194 4.5979630 4.8813924 5.7576668 10.9233013 20.2406366
###########################################################
# Check Changes in mean resultant length
# (concentration of the points behind each estimate; colour marks the subjects
# flagged as having change points)
png(filename = "Figures/Resubmission_Plots/Sensitivity_Percentile_ResultantLength.png",
    width = 800, height = 600)
top.data %>%
  mutate(p = as.numeric(gsub("%","", p)),
         change = id %in% c("150831_N509676032",
                            "150911_N509606040",
                            "151102_N520912069")) %>%
  ggplot() +
  geom_path(aes(x = p,
                y = (mean.resultant.length), group = id, color = change)) +
  geom_point(aes(x = p,
                y = (mean.resultant.length), group = id, color = change)) +
  scale_x_continuous("Percentile",breaks = 90:99,minor_breaks = NULL, expand = c(0.01,0.01)) +
  scale_y_continuous("Mean Resultant Length", expand = c(0.01,0.01))+
  theme_bw(30) +
  theme(legend.position = "none")
dev.off()
###########################################################
# Check Changes in classification
#
# Re-label every one-minute epoch under each percentile's upright estimate
# and measure how often the label agrees with the 90%/95% references.
min.data <- tibble()
data.dir <- dir("Data/Data_SMASH_ZIO/OneMinute_Data_2021-06-21/", full.names = TRUE)
# stack every subject's one-minute summary file into a single table
for(i in data.dir){
  min.data <- bind_rows(read.csv(i), min.data)
}
min.data <-
  min.data %>%
  # remove individuals with change points
  filter(!id %in% c("150831_N509676032","150911_N509606040","151102_N520912069"))
top.data <- top.data %>%
  rename(tx = x, ty = y, tz = z)#%>%
  # BUG FIX: the pipe above was commented out (to keep the change-point
  # individuals in top.data), but this continuation line was left behind as a
  # dangling filter() call, which errors when the script is sourced. It is now
  # commented out to match the disabled pipe.
  #filter(!id %in% c("150831_N509676032","150911_N509606040","151102_N520912069"))
concordance <-
  # one row per (epoch, percentile); (x,y,z) is the epoch mean orientation and
  # (tx,ty,tz) the estimated upright axis for that percentile cutoff
  left_join(min.data, top.data , by = "id") %>%
  mutate(theta1 = acos(x*tx + y*ty + z*tz),   # angle to upright (radians)
         down1 = theta1 >= pi/4) %>%          # "not upright" beyond 45 degrees
  group_by(id, p, cluster.meanshift.14) %>%
  # smooth labels within a cluster: the whole cluster is down if most of it is
  mutate(p.down1 = mean(down1),
         down1 = as.numeric((down1)|(p.down1 > 0.5))) %>%
  ungroup %>%
  group_by(id, time) %>%
  # compare each percentile's label with the 95% and 90% reference labels
  mutate(same95 = down1 == down1[p == "95%"],
         different95 = down1 != down1[p == "95%"],
         bothdown95 = down1 & down1[p == "95%"],
         bothup95 = (!down1) & (!down1[p == "95%"]),
         down95 = down1[p == "95%"],
         up95 = !down1[p == "95%"],
         same90 = down1 == down1[p == "90%"],
         different90 = down1 != down1[p == "90%"],
         bothdown90 = down1 & down1[p == "90%"],
         bothup90 = (!down1) & (!down1[p == "90%"]),
         down90 = down1[p == "90%"],
         up90 = !down1[p == "90%"]) %>%
  ungroup %>%
  group_by(p,id) %>%
  summarise(concordance90 = mean(same90, na.rm = TRUE),
            discordance90 = mean(different90, na.rm = TRUE),
            bothup90 = mean(bothup90, na.rm = TRUE)/mean(up90, na.rm = TRUE),
            bothdown90 = mean(bothdown90, na.rm = TRUE)/mean(down90, na.rm = TRUE))
png(filename = "Figures/2_Appendix_Images/Sensitivity_Percentile4.png",
    width = 800, height = 600)
concordance %>%
  mutate(p = as.numeric(gsub("%","", p))) %>%
  ggplot() +
  geom_path(aes(x = p,
                y = concordance90*100, group = id)) +
  geom_point(aes(x = p,
                y = concordance90*100)) +
  scale_x_continuous("Percentile",breaks = 90:99,minor_breaks = NULL, expand = c(0.01,0.01)) +
  scale_y_continuous("Concordance (%)", expand = c(0.01,0.01))+
  theme_bw(30)
dev.off()
# Discordance with the 90% reference at the 95% and 99% cutoffs.
# (Numbers in the comments below are results kept from a previous run.)
summary(1- concordance$concordance90[concordance$p == "95%"])
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.0000000 0.0006759 0.0015556 0.0023833 0.0034217 0.0072969
summary(1- concordance$concordance90[concordance$p == "99%"])
# 0.000000 0.003733 0.007043 0.007920 0.011307 0.022427
|
f83558df14f87e6253917f3bf9957bd2f71b42e5 | bfdbfa3a437438c3f211e60b22a2cfb39b3c9227 | /code/sim/generate_tables.R | 2f45b14e65e012750f3095029d46516ee5911904 | [] | no_license | xiyuansun/kellycc | 6720495017769a7407838df6371c2348004f2ae0 | d4533a314eff6f372840bcc810834cabcf9de875 | refs/heads/master | 2020-05-04T18:47:07.995097 | 2019-06-24T05:04:38 | 2019-06-24T05:04:38 | 179,366,595 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 242 | r | generate_tables.R | #generate simulation scenario tables
library(xtable)
# Scenario design table: 18 rows covering 2 gene-set sizes x 3 sample sizes
# x 3 proportions of differentially expressed genes, rendered as LaTeX.
sc <- seq_len(18)                                        # scenario id 1..18
nGenes <- rep(c(10000, 1000), each = 9)                  # genes per scenario
nSamples <- rep(rep(c(8, 4, 16), each = 3), times = 2)   # samples per scenario
pDiff <- rep(c(0.1, 0.3, 0.01), times = 6)               # proportion of DE genes
sc_table <- cbind(sc, nGenes, nSamples, pDiff)           # colnames come from the symbols
xtable(sc_table)
|
ac5a410e407b4417400ddbb286100e89e1eb0f0d | 9fecce6f3ef41202cdcc855f4b0baff36131eacc | /Analysis/old_analysis/VMS/results/2014-07-07/speed_by_trip.R | a6f939c8f4d38d729070b5b510a76a7d0c150944 | [] | no_license | emfuller/cnh | 0487e9647837d8fc999850b5951ff6331f9a5159 | 8b36faf8c73607d92e59e392fff3c0094b389d26 | refs/heads/master | 2021-05-01T08:02:52.200343 | 2019-04-06T18:25:48 | 2019-04-06T18:25:48 | 28,717,834 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,312 | r | speed_by_trip.R | # finding gf trawl trips, getting speed distributions from trips
# 1. Finding IDs of vessels that land gf trips
# 2. Find VMS for those vessels
# 3. Make move stack of all vessels
# 3.5 remove outlier speeds, points which have Avg_speed > 30
# 4. Find landing dates for each vessel
# 5. Parse VMS into trip segments, additional ID for catch profile type
# 6. Find distributions of speed (both instantaneous and inferred) for each
# catch profile type
# load data
require(plyr); require(mapdata); require(move);require(dplyr); require(move);
# raw Vessel Monitoring System pings (positions, timestamps, reported speed)
VMS <- read.csv(
    "/Volumes/NOAA_Data/CNH/VMS_cleaning/results/2014-03-02/VMS_woDups.csv",
    stringsAsFactors=F)
# load ticket data
# these .Rdata files supply 'cluster_ind', 'prop_table' and 'tickets',
# which are used below
load("/Volumes/NOAA_Data/CNH/Analysis/Metiers/data/all_tickets/cluster_sol_objectives_asw.Rdata")
load("/Volumes/NOAA_Data/CNH/Analysis/Metiers/data/all_tickets/propTable_tickets.Rdata")
load("/Volumes/NOAA_Data/CNH/Analysis/Metiers/data/all_tickets/tickets.Rdata")
# merge ftid with cluster
ref_ftid <- subset(prop_table, select=ftid)
ref_ftid$cluster <- cluster_ind[,"cluster"]
# merge cluster with fish ticket
tickets <- merge(tickets, ref_ftid, by="ftid")
#########################################################
# 1. Finding IDs of vessels that land salmon/tuna trips #
# tuna, salmon, cluster == 2 | cluster == 7
# or Shrimp, when subsetting to cluster==6
# exploratory, what gears are catching cluster 7/2?
total_trips <- subset(tickets, cluster==6, select=c(grid, ftid, grgroup))
total_trip <- total_trips[!duplicated(total_trips),]
barplot(round(sort(table(total_trip$grgroup))/sum(table(total_trip$grgroup))*100,3),
        las=2, bor=F,
        main="proportion of shrimp landed by gear from 2009-2013")
# go with troll (TRL), but should also look at midwater trawlers for tuna/salmon
# go with shrimp trawl (TWS), but should check out what this MSC business is. Probably all the non-pink shrimp stuff
any_trip <- unique(subset(tickets, cluster==6 & grgroup=="TWS",select=veid)$veid)
#################################
# 2. Find VMS for these vessels #
# subset veids for all vessels which land gf trips
any_VMS <- subset(VMS, Doc_Number %in% any_trip)
length(unique(any_VMS$Doc_Number))# how many vessels in VMS
length(any_trip) # how many vessels landed gf with trawl, missing most
#################################
# 3. Make move_stack out of these vessels #
# order by vessel ID and time
any_VMS$Date_Time <- as.POSIXct(any_VMS$Date_Time,
                                format="%Y-%m-%d %H:%M",
                                tz="US/Pacific")
any_VMS <- any_VMS[order(any_VMS$Doc_Number, any_VMS$Date_Time),]
# remove NA time stamps
any_VMS <- any_VMS[!is.na(any_VMS$Date_Time),]
# remove any Avg_Speed > 30
any_VMS <- subset(any_VMS, Avg_Speed < 30)
#########################################
# 4. Find landing dates for each vessel #
vms_tickets <- subset(tickets, veid %in% unique(any_VMS$Doc_Number))
vms_tickets$tdate <- as.POSIXct(vms_tickets$tdate, format="%d-%b-%y", tz="US/Pacific")
unique_clusters <- function(data){
  # Distinct (tdate, cluster, ftid) combinations for one vessel's tickets,
  # returned in chronological order of landing date.
  trips <- data[, c("tdate", "cluster", "ftid")]
  trips <- trips[!duplicated(trips), ]
  trips[order(trips$tdate), ]
}
landings <- ddply(vms_tickets, .(veid), unique_clusters) # list of landing dates for each vessel
#########################################################################
# 5. Parse VMS into trip segments, additional ID for catch profile type #
# try using ddply to apply findInterval to landings trips. want to subset both landings and VMS by veid, find interval on tdate, and apply cluster and ftid.
combine_VMS <- function(df){
  # function to feed into ddply to match trips to cluster, ftid and generate a tripID
  # also transform into move object to calculate speed to find any outliers and flag those
  # 'df' is one vessel's VMS pings; 'landings' is read from the enclosing scope
  veid.x=unique(df$Doc_Number)
  sub_landings <- subset(landings,veid==veid.x)
  # tripID k = pings falling between the (k-1)th and kth landing date
  df$tripID <- rep(NA,nrow(df))
  df$tripID[is.na(df$status)] <- findInterval(df$Date_Time[is.na(df$status)], sub_landings$tdate) + 1 # start indexing at 1
  sub_landings$tripID <- 1:nrow(sub_landings)
  # attach the catch-profile cluster and fish-ticket id of each trip's landing
  sub_merge <- merge(df,sub_landings[,c("cluster","tripID","ftid")],by="tripID",all.x=TRUE,all.y=FALSE, sort=F)
  sub_merge <- sub_merge[order(sub_merge$Date_Time),]
  # inferred between-ping speed via the move package; flag implausible points
  # (> 16, same units as move::speed) instead of dropping them here
  sub_move <- move(x=sub_merge$Longitude, y=sub_merge$Latitude, time=sub_merge$Date_Time, data=sub_merge, proj=CRS("+proj=longlat +ellps=WGS84"), animal=sub_merge$Doc_Number)
  too_fast <- which(speed(sub_move) > 16)
  sub_merge$too_fast <- rep(0,nrow(sub_merge))
  sub_merge$too_fast[too_fast] <- 1
  #sub_merge <- sub_merge[-which(speed(sub_move)>16),]
  return(sub_merge)
}
clean_VMS <- ddply(any_VMS, .(Doc_Number), combine_VMS, .progress='text')
# need to drop the too_fast data points (can't drop them in the ddply function, breaks for some reason)
clean_VMS <- subset(clean_VMS, too_fast!=1)
#############################################################################
# 6. Find distributions of speed (both instantaneous and inferred) for each #
# try for first vessel, want to subset by non_zero trip IDs and only when the type is 8
# should just subset, for only when trip ID is 8 then. then can burst based on tripID if I need to
# should make a function that takes the gf_filter, add the trip type, and save as a move object again. that's bursted for trip ID. Then can subset on trip type. Ideally link back with ftid
# One page per vessel: tracks overlaid in a different colour per catch-profile cluster
pdf(file="/Volumes/NOAA_Data/CNH/Analysis/VMS/2014-07-07/maps_of_trips_shrimp.pdf",width=8,height=8)
for(i in 1:length(unique(clean_VMS$Doc_Number))){
  v1 <- subset(clean_VMS, Doc_Number == unique(clean_VMS$Doc_Number)[i])
  possible_trips <- unique(v1$cluster)
  # blank plot establishes the coordinate window; lines() calls overlay tracks
  plot(v1$Longitude,v1$Latitude, col="white",asp=1)
  v1_hmsp <- subset(v1, cluster==2)
  lines(v1_hmsp$Longitude, v1_hmsp$Latitude, pch=20, cex=.25,type='o',lwd=0.1, col="cyan",asp=1)
  v1_trips <- subset(v1, cluster==7)
  lines(v1_trips$Longitude,v1_trips$Latitude,pch=20,lwd=0.1,type='o',cex=.25,asp=1)
  map('state',add=T)
  v1_crab <- subset(v1, cluster==1)
  lines(v1_crab$Longitude, v1_crab$Latitude, pch=20, cex=.25, type='o',lwd=.1,col="indianred")
  v1_gf <- subset(v1, cluster==8)
  lines(v1_gf$Longitude, v1_gf$Latitude, pch=20, cex=.25, type='o',lwd=.1,col="chartreuse4")
  v1_shrimp <- subset(v1,cluster==6)
  lines(v1_shrimp$Longitude, v1_shrimp$Latitude, pch=20, cex=.25, type='o',lwd=.1,col="deeppink")
  v1_shell <- subset(v1, cluster==4)
  lines(v1_shell$Longitude, v1_shell$Latitude, pch=20, cex=.25, type='o',lwd=.1, col="grey")
  v1_other <- subset(v1, cluster==3)
  lines(v1_other$Longitude, v1_other$Latitude, pch=20, cex=0.25, type='o',lwd=.1, col="darkorchid4")
  v1_cpel <- subset(v1, cluster==5)
  lines(v1_cpel$Longitude, v1_cpel$Latitude, pch=20, cex=0.25, type='o',lwd=.1, col="dodgerblue")
  cat(i," ")  # progress indicator
}
dev.off()
# estimate mixture model for shrimp speed distribution
# Saving Data
cp6 <- clean_VMS
save(cp6,file="/Volumes/NOAA_Data/CNH/Analysis/VMS/2014-07-07/cp6_tripIDVMS.Rdata")
load("/Volumes/NOAA_Data/CNH/Analysis/VMS/2014-07-07/cp6_tripIDVMS.Rdata")
# speed distribution
shrimp <- subset(cp6, cluster==6)
hist(shrimp$Avg_Speed, breaks=50,freq=F)
plot(density(shrimp$Avg_Speed))
require(mixtools)
# assumes variance same for both
# two-component Gaussian mixture of speeds, initialised at ~2 and ~12
shrimp1 <- normalmixEM(shrimp$Avg_Speed, lambda=c(.7,.3), mu=c(2, 12), sigma=c(2,2))
plot(shrimp1, which=2, main2="Shrimp Trawling Speeds",breaks=30)
lines(density(shrimp$Avg_Speed))
# load salmon/tuna - careful, writes over clean_VMS
load("/Volumes/NOAA_Data/CNH/Analysis/VMS/2014-07-07/cp2_7_tripIDVMS.Rdata")
samn <- subset(clean_VMS, cluster == 2)
tuna <- subset(clean_VMS, cluster == 7)
samn1 <- normalmixEM(samn$Avg_Speed, lambda=.5, mu=c(0,7), sigma=c(2,2))
tuna1 <- normalmixEM(tuna$Avg_Speed, lambda=.5, mu=c(2,7), sigma=c(2,2))
samn2 <- spEMsymloc(samn$Avg_Speed, mu0 = c(0,7))  # semiparametric alternative fit
plot(samn1, which=2, main2="Salmon Trolling Speeds", breaks=50,lwd2=3)
summary(samn1)
plot(tuna1, density=TRUE)
summary(tuna1)
# problem, estimated distributions not the same as actual. cant' figure out why
plot(density(tuna$Avg_Speed),col="indianred",bty="n",lwd=3,main="Distribution of Speeds",xlab="knots")
lines(density(shrimp$Avg_Speed),col="slate blue",lwd=3)
lines(density(samn$Avg_Speed),col="goldenrod1",lwd=3)
legend("topright",col=c("indianred","slate blue","goldenrod1"), legend=c("tuna troll", "shrimp trawl", "salmon troll"),lwd=3,bty="n")
|
e77fc5f7cac9e2c763cf38f93aaab3f311e7bcd1 | b25d4909885027ec2552c8f29c100c9ea308178a | /plot4.R | d0cb1000b5fe57ff166ed9ca7b5bef5cf3385ae5 | [] | no_license | opa/ExData_Plotting1 | 8505470eac5b1b8518bd36cf93d302ac4fb19e36 | b8db157b48f0c168dd75e28c502fa273fd84ac60 | refs/heads/master | 2021-01-14T13:43:03.224621 | 2015-01-10T18:41:43 | 2015-01-10T18:41:43 | 27,665,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,773 | r | plot4.R | ### Bruce Pezzlo 7 Dec 2015
### Coursera MOOC Johns Hopkins University: Exploratory Data Analysis
# plot 4 - a 2x2 panel of line charts (active power, voltage, sub-metering,
# reactive power) from the household power consumption data
# read_datafile.R defines DEBUG, pic_size and read_data() for all plot scripts
source('read_datafile.R')

power_data <- read_data()

# route graphics to a PNG file unless running interactively in debug mode
if (!DEBUG) {
  png(filename = 'plot4.png', width = pic_size, height = pic_size,
      units = "px", pointsize = 12, bg = "white", res = NA)
}

# lay the four charts out on a 2-row by 2-column grid
par(mfrow = c(2, 2))

with(power_data, {
  # top left: global active power over time
  plot(datetime, Global_active_power,
       ylab = 'Global Active Power', xlab = '', type = 'l')

  # top right: voltage over time
  plot(datetime, Voltage,
       xlab = 'datetime', ylab = 'Voltage', type = 'l')

  # bottom left: the three sub-meters overlaid on a single chart
  plot(datetime, Sub_metering_1,
       type = 'l', xlab = '', ylab = 'Energy sub metering')
  lines(datetime, Sub_metering_2, col = 'red')
  lines(datetime, Sub_metering_3, col = 'blue')
  legend('topright',
         legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
         col = c('black', 'red', 'blue'),
         lty = 'solid', bty = 'n')

  # bottom right: global reactive power over time
  plot(datetime, Global_reactive_power,
       xlab = 'datetime', ylab = 'Global_reactive_power', type = 'l')
})

# close the device so the file is flushed to disk
if (!DEBUG) dev.off()
|
ff25fae9a5588afe6f4e2f9a16874c959778e374 | 18d00b87c79284894bc704dc3ab9c327c1e4d96b | /man/randomMatches.Rd | 1f961496c0e572e0edf780b1b3599e6b50d2a21b | [
"MIT"
] | permissive | homerhanumat/simaltruist | 9861933986dbca036a6f19b702e15277db14b5ac | 8f4e25fb2315a1702344175a6b248d4143509282 | refs/heads/master | 2021-04-06T09:59:05.790828 | 2018-05-01T14:39:09 | 2018-05-01T14:39:09 | 125,393,408 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 999 | rd | randomMatches.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reproduce.R
\name{randomMatches}
\alias{randomMatches}
\title{Random Mating}
\usage{
randomMatches(individuals, number_of_couples)
}
\arguments{
\item{individuals}{Data frame containing id, sex, warner-status,
mother id and father id.}
\item{number_of_couples}{Number of couples to form in the current
generation.}
}
\value{
A named list with two elements:
\describe{
\item{females}{character vector of id numbers of females}
\item{males}{character vector of id numbers of males}
}
The two vectors are of the same length. Corresponding elements
represent a couple.
}
\description{
Default function to govern mating. Each fertile female mates
with a randomly-selected male. If the user chooses to supply
a custom function for the \code{mating_behavior} argument in
\code{simulate}, it should be noted that the current values
of \code{individuals} and \code{number_of_couples} will be
provided by the program.
}
|
26a34c2ded3ba00fe72e80248ac86e8924fb92d8 | b32616c9f618f841d4d937448be95bf00cfd8b92 | /R/fair-coin.R | d2facf1d15aa641b6a5656a2b08d39c1eb2e7db9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | yukiyanai/rm2-Bayes | 60f92ad57451854e645943fd6ab45c74125a9c24 | 9fc06f80da08c5938c0b42cc5abc932addecfa12 | refs/heads/master | 2020-05-20T05:32:47.189612 | 2015-07-22T09:53:20 | 2015-07-22T09:53:20 | 33,363,777 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 4,217 | r | fair-coin.R | ## fair-coin.R
##
## Illustrate how Bayesian inference works using
## an example of coin flips.
##
## 03/31/2015 Yuki Yanai
## Let theta denote the probability that we observe Head
## by flipping the coin
## likelihood of theta, up to a constant
## Binomial likelihood of theta, up to a constant, for y heads in n flips.
##
## Args:
##   theta: success probability; may be a vector (e.g. a grid of values).
##   n: number of trials (single value).
##   y: number of observed heads; must satisfy 0 <= y <= n.
## Returns: theta^y * (1 - theta)^(n - y), vectorized over theta.
bin_lik <- function(theta, n, y) {
  if (y < 0) stop("Invalid value of y: y can't be negative")
  if (y > n) stop("Invalid value of y: y can't be greater than n")
  theta^y * (1 - theta)^(n - y)
}
## prior PMF of discrete theta
## theta must be rounded to second decimal
prior_pmf <- function(theta = c(.1, .5, .9),
                      prior = rep(1, length(theta))) {
  ## Arguments:
  ##   theta: vector of possible parameter values in (0, 1); each value is
  ##          placed on the 0.01-spaced grid via round(100 * theta).
  ##   prior: prior weights for the possible values, in the same order as theta.
  ## Return:
  ##   Normalized prior PMF over the grid 0.01, 0.02, ..., 0.99 (length 99).
  if (length(theta) == 0) stop("Enter at least one possible value")
  if (length(prior) != length(theta)) stop("theta and prior must have the same length")
  if (sum(prior) == 0) stop("Invalid prior values")
  idx <- round(100 * theta)
  if (any(idx < 1 | idx > 99)) stop("All theta values must lie in (0, 1)")
  prob <- rep(0, 99)
  ## Direct indexed assignment keeps each weight paired with its own theta.
  ## (The previous j-counter walk implicitly assumed theta was sorted
  ## ascending; unsorted input silently received the wrong weights.)
  prob[idx] <- prior
  prob / sum(prob)
}
## function to make a graph with prior, likelihood, and posterior
## where the prior is given by a PMF
plt_coin_disc <- function(n, y, theta = c(.1, .5, .9), prior = c(1, 1, 1)) {
  ## Draw the discrete prior PMF (black bars), the rescaled binomial
  ## likelihood (blue curve), and the posterior PMF (tomato bars) for a
  ## coin-flip experiment with n trials and y heads, on the theta grid
  ## 0.01, ..., 0.99. Called for its plotting side effect only.
  x <- seq(0.01, 0.99, by = 0.01)
  ## posterior is proportional to likelihood x prior; renormalize over the grid
  post <- bin_lik(x, n, y) * prior_pmf(theta, prior)
  post <- post / sum(post)
  maxlik <- max(bin_lik(x, n, y))  # rescales the likelihood so its peak is 1 on the plot
  plot(x, post, type = "h", lwd = 10, ylim = c(0, 1), col = "tomato",
       xlab = expression(theta),
       ylab = "Probability or Likelihood",
       main = paste("Binomial experiment: n = ",n, ", y = ", y, sep = ""))
  par(new = TRUE)  # overlay the prior on the same axes
  plot(x, prior_pmf(theta, prior), type = "h", lwd = 4, ylim = c(0, 1),
       xlab = "", ylab = "", main = "")
  curve(bin_lik(x, n, y) / maxlik, 0, 1, lwd = 2, col = "royalblue", add = TRUE)
  ## place the legend on the side away from the likelihood peak
  if (y >= n / 2) legend.pos <- "topleft"
  else legend.pos <- "topright"
  legend(legend.pos, lty = 1, col = c("black", "royalblue", "tomato"), lwd = 4,
         legend = c("prior", "likelihood", "posterior"))
}
plt_coin_cont <- function(n, y, a = 1, b = 1){
  ## Continuous-theta counterpart of plt_coin_disc: draw the Beta(a, b)
  ## prior density, the rescaled binomial likelihood, and the posterior
  ## density for n trials and y heads. Called for its plotting side effect.
  x <- seq(0.001, 0.999, by = 0.001)
  maxlik <- max(bin_lik(x, n, y))  # used to rescale the likelihood curve
  ## unnormalized posterior density, used below to compute the normalizing constant
  posterior <- function(theta) {
    return(bin_lik(theta, n, y) * dbeta(theta, a, b))
  }
  post <- bin_lik(x, n, y) * dbeta(x, a, b)
  ## normalize numerically (the constant is not in closed form here)
  post <- post / integrate(posterior, lower = 0, upper = 1)$value
  ymax <- max(dbeta(x, a, b), post)
  ## negative lower ylim leaves room for the legend below the zero line
  plot(x, post, type = "l", lwd = 4, col = "tomato", yaxt = "n",
       ylim = c(-0.3 * ymax, ymax),
       xlab = expression(theta),
       ylab = "Density or Likelihood",
       main = paste("Binomial experiment: n = ",n, ", y = ", y,
                    " with prior Beta(", a, ", ", b, ")", sep = ""))
  curve(dbeta(x, a, b), from = 0, to = 1, lwd = 2, add = TRUE)
  curve((bin_lik(x, n, y) / maxlik) * ymax, from = 0, to = 1,
        lwd =2, col = "royalblue", add = TRUE)
  abline(h = 0, col = "gray", lwd = 2)
  ## choose how many digits to show on the y axis based on its magnitude
  dgts <- function(ymax){
    if (ymax > 50) -1
    else if (ymax > 10) 0
    else 1
  }
  axis(2, at = round(seq(0, ymax, length = 7), digits = dgts(ymax)))
  legend("bottomright", lty = 1, col = c("black", "royalblue", "tomato"), lwd = 4,
         legend = c("prior", "likelihood", "posterior"))
}
## welcome message printed once when this file is sourced
cat("
###########################################################
## Illustration of Bayesian Inference: 'Is a Coin Fair?'
## Prepared for Research Methods in Political Science II,
##   Kobe University. 2015. Yuki Yanai
##
## Function to use:
##   (1) discrete theta: plt_coin_disc(n, y, theta, prior),
##   (2) continuous theta: plt_coin_cont(n, y, a, b)
##     n: number of trials, a single value
##     y: number of Heads, a single value
##     theta: possible values of theta, a vector
##     prior: probabilities or relative weights for each
##        possible theta, a vector
##     a, b: shape parameters of beta distribution
###########################################################\n
## Example:
##   plt_coin_disc(n = 8, y = 4, theta = c(.5, .8), prior = c(1, 2))
##   plt_coin_cont(n = 8, y = 4, a = 6, b = 2)
")
|
1d5cc535770276c0ad0460973a827f514d4c2c99 | bc8c6f3dc23567979a01c1ea4e72f6bc35345ea3 | /Command files/data analysis new.R | 949848dc18d197ba62dedddf7cc9ab471f03f254 | [] | no_license | Wangyin18/My-first-research-about-displacement-and-civil-conflict-self-practicing-only- | fbd277b517a88a0176619bc58900ccc76231e01b | 282182eb858eca117b3e2d578d163cd37e385e30 | refs/heads/main | 2023-08-20T17:57:06.891847 | 2021-10-25T09:50:40 | 2021-10-25T09:50:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,345 | r | data analysis new.R | ### Data analysis
### Setup: read the merged country-year database and give the key covariates
### the human-readable column names used in the model formulas below.
### NOTE(review): setwd() with an absolute user path makes this script
### non-portable; consider relative paths.
setwd ("/Users/zhaowangyin/Desktop/Research_displaced_2/Analysis Data")
Database <- read.csv ("Database.csv")
names(Database)[names(Database) == "GDP.per.capita..2010.US.dollar."] <- "GDP per capita (2010 US dollar)"
names(Database)[names(Database) == "economic.inequality"] <- "economic inequality"
names(Database)[names(Database) == "rule.of.law..estimate."] <- "rule of law (estimate)"
names(Database)[names(Database) == "polity2"] <- "p_polity2"
names(Database)[names(Database) == "wdi_pop"] <- "population"
names(Database)[names(Database) == "government.functioning"] <- "government functioning"
library (stargazer)
### DATA visualation
### here I add code in QOD into database - UCDP
## Build a world choropleth of total conflict counts per country (summed
## over years) by joining the database onto Natural Earth country polygons.
conflict_map <- dplyr::select (Database, cname, ccodealp, all.number, Country.Code)
setwd ("/Users/zhaowangyin/Desktop/Research_displaced/Original Data/ne_10m_admin_0_countries")
library (sf)
library (tmap)
world_map <- st_read("ne_10m_admin_0_countries.shp")
qtm (world_map)
conflict_map1 <- summarize(group_by(conflict_map, ccodealp), conflict_sum = sum(all.number))
# all = TRUE keeps countries with no conflict records (they map to NA)
world_map <- merge(world_map, conflict_map1, by.x = "ADM0_A3", by.y = "ccodealp", all = TRUE)
# NOTE(review): `Database_V` is not defined anywhere in this script - likely a
# leftover from an earlier draft; confirm or remove.
hist (Database_V$conflict_sum)
a <- tm_shape (world_map) + tm_polygons("conflict_sum", title = "Conflict Distribution", palette = "Greens", style="fixed", breaks = c(0, 50, 1000, 5000, 10000, 100000)) +
  tm_layout(title = "Conflict Count Distribution from 2010-2018", title.size = 1.2,
           title.position = c(0.4, 0.22)) + tm_legend(position = c(0.04, 0.1), title.size = 1)
a
st_write (world_map, "world_map.shp")
tmap_save (a)
### CONFLICT TREND
## Yearly totals of all organized-violence events and of intrastate conflicts,
## shown first as side-by-side ggplots, then as a base-R dual-axis chart.
library (patchwork)
conflict_per_year <- summarize(group_by(Database, year),
                 conflict.number = sum(all.number))
intra_conflict_per_year <- summarize (group_by(Database, year), conflict.nnumber = sum (intrastate.number))
p1 <- ggplot(data = conflict_per_year, aes(x = year, y = conflict.number)) +
  geom_line() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  xlab("year") + ylab("all event on organized violence") + theme_bw()
p2 <- ggplot (data = intra_conflict_per_year, aes (x = year, y = conflict.nnumber)) + geom_line() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  xlab("year") + ylab("intrastate conflict number") + theme_bw()
conflict_per_year$intraconflict <- intra_conflict_per_year$conflict.nnumber
p1 + p2
## another way to do it: bars for all events (left axis) with the intrastate
## count overlaid as a line on a secondary right-hand axis
par(mar=c(5,5,4,5)+0.1)
bar <- barplot(conflict_per_year$conflict.number,ylim = c(0,25000),xlab = "year", ylab="Events on organized violence",col="grey",col.axis="black",col.lab="black")
mtext(conflict_per_year$year,side=1,line=1,at=bar,col="black")
mtext("Year",side=1,line=3,col="black")
par(new=T)
plot(bar,conflict_per_year$intraconflict,axes=F,ylim=c(0,60),xlab="",ylab="",col="blue",type="b", lwd = 2)
axis(4,col="red",col.ticks="black",col.axis="black")
mtext("intrastate conflict number ",side=4,line=3,col="black")
###################################################################################
### Let's try NEGATIVE BINOMIAL REGRESSION
## Three outcome specifications throughout: all conflict events (glm.nb count),
## intrastate conflict dummy (logit), intrastate conflict count (glm.nb).
## NOTE(review): install.packages() inside an analysis script is usually
## unwanted - it re-installs on every run.
install.packages("MASS")
library (MASS)
### for control variable
gmodel_all_c <- glm.nb (Database$all.number ~ Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2 + Database$p_polity2_square+ Database$`government functioning` + log(Database$population))
gmodel_intr_c <- glm (Database$intrastate.dummy ~ Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2+ Database$p_polity2_square + Database$`government functioning` + log(Database$population), family = binomial(link = "logit"))
gmodel_intr.n_c <- glm.nb (Database$intrastate.number ~ Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2 + Database$p_polity2_square+ Database$`government functioning` + log(Database$population))
### displacement number
gmodel_all_d <- glm.nb (Database$all.number ~ log (Database$number))
gmodel_intr_d <- glm (Database$intrastate.dummy ~ log (Database$number),family = binomial(link = "logit"))
gmodel_intr_number_d <- glm.nb (Database$intrastate.number ~ log (Database$number))
### displacement
gmodel_all <- glm.nb (Database$all.number ~ log (Database$number) + Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2 + Database$p_polity2_square+ Database$`government functioning` + log(Database$population) )
gmodel_intr <- glm (Database$intrastate.dummy ~ log (Database$number) +Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2+ Database$p_polity2_square + Database$`government functioning` + log(Database$population),family = binomial(link = "logit") )
gmodel_intr_number <- glm.nb (Database$intrastate.number ~ log (Database$number) +Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2 + Database$p_polity2_square+ Database$`government functioning` + log(Database$population))
stargazer(gmodel_all_c, gmodel_intr_c, gmodel_intr.n_c,gmodel_all_d, gmodel_intr_d, gmodel_intr_number_d, gmodel_all, gmodel_intr, gmodel_intr_number, type = "text" )
### state fixed effect
Database$state_fixed <- as.factor (Database$Country.Code)
model_all_s <- glm.nb (Database$all.number ~ log (Database$number) + Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2+ Database$p_polity2_square + Database$`government functioning` + log(Database$population) + Database$state_fixed )
# NOTE(review): unlike gmodel_intr above, this dummy-outcome glm() has no
# family argument, so it fits a gaussian (linear probability) model - confirm intended.
model_intr_s <- glm (Database$intrastate.dummy ~ log (Database$number) + Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2 + Database$p_polity2_square+ Database$`government functioning` + log(Database$population)+ Database$state_fixed )
model_intr_number_s <- glm.nb (Database$intrastate.number ~ log (Database$number) +Database$`economic inequality` + log (Database$`GDP per capita (2010 US dollar)`) + Database$p_polity2+ Database$p_polity2_square + Database$`government functioning` + log(Database$population) + Database$state_fixed)
# NOTE(review): `model_dummy` / `model_dummy_s` are not defined in this script.
stargazer(model_dummy, type = "text")
stargazer(model_dummy_s, model_all_s, model_intr_s, model_intr_number_s,type = "text")
#### Special GDP per capita quartile
## Re-fit the displacement models after dropping, in turn, the poorest and the
## richest GDP-per-capita quartile, as a robustness check.
Database_gdp <- Database [Database$GDP_quan != "[211,1.63e+03]", ]
Database_gdp1 <- Database [Database$GDP_quan != "(1.51e+04,1.94e+05]", ]
gmodel_all_gdp <- glm.nb (Database_gdp$all.number ~ log (Database_gdp$number) + Database_gdp$`economic inequality` + log (Database_gdp$`GDP per capita (2010 US dollar)`) + Database_gdp$p_polity2 + Database_gdp$p_polity2_square+ Database_gdp$`government functioning` + log(Database_gdp$population) )
# NOTE(review): these dummy-outcome glm() calls omit family = binomial, unlike
# the main specification above - confirm intended.
gmodel_intr_gdp <- glm (Database_gdp$intrastate.dummy ~ log (Database_gdp$number) +Database_gdp$`economic inequality` + log (Database_gdp$`GDP per capita (2010 US dollar)`) + Database_gdp$p_polity2+ Database_gdp$p_polity2_square + Database_gdp$`government functioning` + log(Database_gdp$population) )
gmodel_intr_number_gdp <- glm.nb (Database_gdp$intrastate.number ~ log (Database_gdp$number) +Database_gdp$`economic inequality` + log (Database_gdp$`GDP per capita (2010 US dollar)`) + Database_gdp$p_polity2 + Database_gdp$p_polity2_square+ Database_gdp$`government functioning` + log(Database_gdp$population) )
gmodel_all_gdp1 <- glm.nb (Database_gdp1$all.number ~ log (Database_gdp1$number) + Database_gdp1$`economic inequality` + log (Database_gdp1$`GDP per capita (2010 US dollar)`) + Database_gdp1$p_polity2 + Database_gdp1$p_polity2_square+ Database_gdp1$`government functioning` + log(Database_gdp1$population) )
gmodel_intr_gdp1 <- glm (Database_gdp1$intrastate.dummy ~ log (Database_gdp1$number) +Database_gdp1$`economic inequality` + log (Database_gdp1$`GDP per capita (2010 US dollar)`) + Database_gdp1$p_polity2+ Database_gdp1$p_polity2_square + Database_gdp1$`government functioning` + log(Database_gdp1$population) )
gmodel_intr_number_gdp1 <- glm.nb (Database_gdp1$intrastate.number ~ log (Database_gdp1$number) +Database_gdp1$`economic inequality` + log (Database_gdp1$`GDP per capita (2010 US dollar)`) + Database_gdp1$p_polity2 + Database_gdp1$p_polity2_square+ Database_gdp1$`government functioning` + log(Database_gdp1$population) )
stargazer(gmodel_all_gdp, gmodel_intr_gdp, gmodel_intr_number_gdp, type = "text")
stargazer(gmodel_all_gdp1, gmodel_intr_gdp1, gmodel_intr_number_gdp1, type = "text")
#### special Polity2
## Re-fit the displacement models within Polity2 regime bands:
## anocracies (-5, 5], autocracies [-10, -5], democracies (5, 10].
Database_de <- Database [Database$DEM_quan == "(-5,5]", ]
gmodel_all_de <- glm.nb (Database_de$all.number ~ log (Database_de$number) + Database_de$`economic inequality` + log (Database_de$`GDP per capita (2010 US dollar)`) + Database_de$p_polity2 + Database_de$p_polity2_square+ Database_de$`government functioning` + log(Database_de$population) )
# NOTE(review): dummy-outcome glm() without family = binomial (gaussian fit) -
# inconsistent with the main specification; confirm intended.
gmodel_intr_de <- glm (Database_de$intrastate.dummy ~ log (Database_de$number) +Database_de$`economic inequality` + log (Database_de$`GDP per capita (2010 US dollar)`) + Database_de$p_polity2+ Database_de$p_polity2_square + Database_de$`government functioning` + log(Database_de$population) )
gmodel_intr_number_de <- glm.nb (Database_de$intrastate.number ~ log (Database_de$number) +Database_de$`economic inequality` + log (Database_de$`GDP per capita (2010 US dollar)`) + Database_de$p_polity2 + Database_de$p_polity2_square+ Database_de$`government functioning` + log(Database_de$population) )
stargazer( gmodel_all_de, gmodel_intr_de, gmodel_intr_number_de, type = "text")
Database_de1 <- Database [Database$DEM_quan == "[-10,-5]", ]
## note: the de1/de2 fits switch to the data= interface with bare column names
gmodel_all_de1 <- glm.nb (all.number ~ log (number) + `economic inequality` + log (`GDP per capita (2010 US dollar)`) + p_polity2 + p_polity2_square + `government functioning` + log(population), data = Database_de1 )
gmodel_intr_de1 <- glm (intrastate.dummy ~ log (number) + `economic inequality` + log (`GDP per capita (2010 US dollar)`) + p_polity2 + p_polity2_square + `government functioning` + log(population), data = Database_de1 )
gmodel_intr_number_de1 <- glm.nb (intrastate.number ~ log (number) +`economic inequality` + log (`GDP per capita (2010 US dollar)`) + p_polity2 + p_polity2_square + `government functioning` + log(population), data = Database_de1 )
stargazer( gmodel_intr_de1, gmodel_intr_number_de1, type = "text")
Database_de2 <- Database [Database$DEM_quan == "(5,10]", ]
gmodel_all_de2 <- glm.nb (all.number ~ log (number) + `economic inequality` + log (`GDP per capita (2010 US dollar)`) + p_polity2 + p_polity2_square + `government functioning` + log(population), data = Database_de2 )
gmodel_intr_de2 <- glm (intrastate.dummy ~ log (number) + `economic inequality` + log (`GDP per capita (2010 US dollar)`) + p_polity2 + p_polity2_square + `government functioning` + log(population), data = Database_de2 )
gmodel_intr_number_de2 <- glm.nb (intrastate.number ~ log (number) +`economic inequality` + log (`GDP per capita (2010 US dollar)`) + p_polity2 + p_polity2_square + `government functioning` + log(population), data = Database_de2 )
stargazer(gmodel_all_de2, gmodel_intr_de2, gmodel_intr_number_de2, type = "text")
|
dfead85f10992867598a109f35e017f22abe0ae5 | 06dcdd23ff7773fb8ec544e36a5784ec475b5a0b | /code/6.1_plot_shrub_biomass_by_height_and_cover.R | 06245cf70b168735d2af6e692276469fb3287726 | [] | no_license | logan-berner/nslope_biomass | daf09662a6214b992c71b227bcb6bff1cef483fe | 4e1d54db710a7788352f703f739f988ddd09e1e7 | refs/heads/master | 2021-06-17T04:09:21.233070 | 2021-04-06T21:36:15 | 2021-04-06T21:36:15 | 191,438,518 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,288 | r | 6.1_plot_shrub_biomass_by_height_and_cover.R | rm(list=ls())
## Load rasters (shrub aboveground biomass and canopy cover), field survey
## measurements, and site coordinates; recode -999 sentinels to NA.
require(raster)
require(maptools)
require(plotrix)
require(lattice)
require(dplyr)
setwd('C:/research/projects/above/')
#------------------------------------------------------------------------------------------------------------
# LOAD FILES AND SET VARIABLES
#------------------------------------------------------------------------------------------------------------
wgs84 <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
aaea <- CRS("+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs")
sagb <- raster('gis_data/nslope/biomass/nslope_shrub_agb_gm2_p50.tif')
sagb.mac <- raster('gis_data/nslope/biomass/nslope_shrub_agb_gm2_p50_macanderExtent.tif')
shrub.cov <- raster('gis_data/nslope/nslope_shrub_cover_aea_30m_macander.tif')
shrub.survey <- read.csv('field_data/NACP_Woody_Veg_N_Slope_AK_V2_1365/data/shrub_dimensions.csv')
# -999 is the missing-data sentinel in the survey file
shrub.survey$canopy.height.m[shrub.survey$canopy.height.m == -999] <- NA
shrub.survey$crown.radius.m[shrub.survey$crown.radius.m == -999] <- NA
shrub.coords <- read.csv('field_data/NACP_Woody_Veg_N_Slope_AK_V2_1365/data/shrub_sites.csv')
shrub.coords <- subset(shrub.coords, Site != 'Dalton201112')
#------------------------------------------------------------------------------------------------------------
# SUMMARIZE SHRUB DIMENSIONS FOR EACH FIELD SITE, MAP SITE LOCATIONS AND EXTRACT AGB
#------------------------------------------------------------------------------------------------------------
head(shrub.survey)
shrub.survey$canopy.vol.m3 <- (pi*shrub.survey$crown.radius.m^2)*shrub.survey$canopy.height.m/3 # from Tape et al. 2016 GCB
shrub.sites <- shrub.survey %>% group_by(site) %>% summarise(canopy.height.m.avg=mean(canopy.height.m, na.rm=T), canopy.height.m.sd=sd(canopy.height.m, na.rm=T),
                      crown.radius.m.avg=mean(crown.radius.m, na.rm=T), crown.radius.m.sd=sd(crown.radius.m, na.rm=T),
                      canopy.vol.m3.avg=mean(canopy.vol.m3, na.rm=T), canopy.vol.m3.sd=sd(canopy.vol.m3, na.rm=T),
                      canopy.vol.m3.tot=sum(canopy.vol.m3, na.rm=T), n.samples= n())%>%
  mutate(canopy.height.m.se = canopy.height.m.sd/sqrt(n.samples),
         crown.radius.m.se=crown.radius.m.sd/sqrt(n.samples), canopy.vol.m3.se=canopy.vol.m3.sd/sqrt(n.samples))
# NOTE(review): positional assignment assumes shrub.coords rows are in the same
# site order as the group_by(site) summary output - confirm against the data.
shrub.sites$x.coord <- shrub.coords$x.coord
shrub.sites$y.coord <- shrub.coords$y.coord
# columns 13 and 14 are the x.coord/y.coord columns just appended above
shrub.pts <- SpatialPointsDataFrame(coords =shrub.sites[,c(13,14)], shrub.sites, proj4string = aaea)
# extract modeled AGB within a 100 m buffer of each site; /1000 converts g/m2 to kg/m2
shrub.sites$sagb.kgm2.avg <- extract(sagb, shrub.pts, buffer=100, fun=mean, na.rm=T)/1000
shrub.sites$sagb.kgm2.sd <- extract(sagb, shrub.pts, buffer=100, fun=sd, na.rm=T)/1000
shrub.sites$sagb.kgm2.n <- extract(sagb, shrub.pts, buffer=100, fun=length, na.rm=T)
shrub.sites$sagb.kgm2.se <- shrub.sites$sagb.kgm2.sd/sqrt(shrub.sites$sagb.kgm2.n)
#------------------------------------------------------------------------------------------------------------
# EXTRACT SHRUB AGB AND CANOPY COVER VALUES TO DATA FRAME, THEN SUBSAMPLE
#------------------------------------------------------------------------------------------------------------
sagb.cov.df <- data.frame(cov=values(shrub.cov), sagb.kgm2=values(sagb.mac)/1000)
sagb.cov.df <- na.omit(sagb.cov.df)
nrow(sagb.cov.df)
# NOTE(review): unseeded sample() makes this subsample non-reproducible;
# consider set.seed() before sampling.
sagb.cov.df.smp <- sagb.cov.df[sample(1:nrow(sagb.cov.df), 2000),]
#------------------------------------------------------------------------------------------------------------
# CORRELATE MODELED SHRUB AGB vs SHRUB HEIGHT CANOPY COVER
#------------------------------------------------------------------------------------------------------------
# modeled shrub AGB vs canopy height
cor.sagb.height <- cor.test(shrub.sites$canopy.height.m.avg, shrub.sites$sagb.kgm2.avg, method = 'spearman')
cor.sagb.height
cor.sagb.cov <- cor.test(sagb.cov.df.smp$sagb.kgm2, sagb.cov.df.smp$cov, method = 'spearman')
cor.sagb.cov
#------------------------------------------------------------------------------------------------------------
# PLOT MODELES SHRUB AGB VS MEASURED SHRUB HEIGHT
#------------------------------------------------------------------------------------------------------------
## Two-panel JPEG: (a) site-level AGB vs canopy height with x/y error bars,
## (b) pixel-level AGB vs canopy cover for the 2000-pixel subsample.
agb.ylab <- expression('Shrub aboveground biomass (kg m '^-2*')')
height.xlab <- "Shrub canopy height (m)"
cov.xlab <- "Shrub canopy cover (%)"
my.pch.cex=1.2
my.cex.axis=1.2
# Spearman's r_s annotations built from the correlation estimates above
cor.sagb.height.r <- bquote(r[s]*' = '*~.(round(as.numeric(cor.sagb.height$estimate),2)))
cor.sagb.cov.r <- bquote(r[s]*' = '*~.(round(as.numeric(cor.sagb.cov$estimate),2)))
jpeg('C:/Users/lb968/Google Drive/research/nau/above/figures/nslope_shrub_AGB_vs_height_cover.jpeg', width = 5, height = 9, res = 300, units = 'in')
par.op <- par(mfrow=c(2,1))
par.top <- par(mar=c(4,4.75,1,1))
# draw twice: first with horizontal (height SE) bars, then overlay vertical (AGB SE) bars
plotCI(shrub.sites$canopy.height.m.avg, shrub.sites$sagb.kgm2.avg, uiw=shrub.sites$canopy.height.m.se, err='x', xaxt='n', yaxt='n',
       xlim=c(0.5,2.2), ylim=c(0,1.55),xlab = '', ylab='',cex = my.cex.axis, pch=16, scol='grey50')
plotCI(shrub.sites$canopy.height.m.avg, shrub.sites$sagb.kgm2.avg, uiw=shrub.sites$sagb.kgm2.se, err='y', xaxt='n', yaxt='n',
       xlim=c(0.5,2.2), ylim=c(0,1.55),xlab = '', ylab='',cex = my.cex.axis, pch=16, scol='grey50', add=T)
axis(1, at = seq(0.5,2,0.5), cex.axis=my.cex.axis, labels = T)
axis(2, seq(0,1.5,0.5), las=2, cex.axis=my.cex.axis)
mtext(side = 1, line = 3.0, cex = my.pch.cex, height.xlab)
mtext(side = 2, line = 3.0, cex = my.pch.cex, agb.ylab)
text(0.6, 1.50, "(a)", cex=my.cex.axis, font=2)
text(0.7, 1.25, cor.sagb.height.r, cex=my.cex.axis)
text(0.7, 1.10, "P < 0.001", cex=my.cex.axis)
par(par.top)
par.bot <- par(mar=c(4,4.75,1,1))
plot(sagb.kgm2 ~ cov, sagb.cov.df.smp, pch='*', xlim=c(0,100), ylim=c(0,1.55), xlab='', ylab='', xaxt='n', yaxt='n')
axis(1, at = seq(0,100,25), cex.axis=my.cex.axis, labels = T)
axis(2, seq(0,1.5,0.5), las=2, cex.axis=my.cex.axis)
mtext(side = 1, line = 3.0, cex = my.pch.cex, cov.xlab)
mtext(side = 2, line = 3.0, cex = my.pch.cex, agb.ylab)
text(3, 1.50, "(b)", cex=my.cex.axis, font=2)
text(10, 1.25, cor.sagb.cov.r, cex=my.cex.axis)
text(10, 1.10, "P < 0.001", cex=my.cex.axis)
par(par.bot)
par(par.op)
dev.off()
ae736bff5aff057961ade44c2272939f08315495 | c0e6676ad222ab39fb843e027c988d3239c0cb42 | /DO Bucket Tests/200131_DOBucketTest/DO Bucket Test 2.R | a0efa0f799563935fe042e174ac145d25a35866e | [] | no_license | jhart6/Taylor-StreamMetabolizer | a417a9e10c3d9d348cad12229c79318c16e6ecdd | 43341d17d93f43a5a24355bd2b7d21ea82448a4b | refs/heads/master | 2020-12-18T16:42:02.271023 | 2020-02-11T21:29:01 | 2020-02-11T21:29:01 | 235,459,895 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,657 | r | DO Bucket Test 2.R | #Dissolved Oxygen sensor comparison
#4 minidots: mouth, garden, hound, and culvert from Taylor Creek
#1 EXO2
#Sensors synced at 5min recording frequency & placed in bucket of tap water for >3 days
#5 February 2020
#JAH
library(dplyr)
library(tidyr)
library(streamMetabolizer)
setwd("~/Dropbox/GitHub Repos/Taylor-StreamMetabolizer/DO Bucket Tests/200131_DOBucketTest/")
exo <- read.csv("exo_concat.csv")
culvert <- read.csv("culvert_concat.csv")
garden <- read.csv("garden_concat.csv")
hound <- read.csv("hound_concat.csv")
mouth <- read.csv("mouth_concat.csv")
do.comp <- exo %>%
select(DateTime,exo2.sat,exo2.mgL)
garden.mgL <- garden$DO..mg.l.
garden.temp <- garden$T..deg.C.
garden.eqsat <- calc_DO_sat(garden.temp,press = 1013,sal = 0)
garden.sat <- (garden.mgL/garden.eqsat)*100
culvert.mgL <- culvert$DO..mg.l.
culvert.temp <- culvert$T..deg.C.
culvert.eqsat <- calc_DO_sat(culvert.temp,press=1013,sal=0)
culvert.sat <- (culvert.mgL/culvert.eqsat) * 100
hound.mgL <- hound$DO..mg.l.
hound.temp <- hound$T..deg.C.
hound.eqsat <- calc_DO_sat(hound.temp,press = 1013, sal=0)
hound.sat <- (hound.mgL/hound.eqsat) *100
mouth.mgL <- mouth$DO..mg.l.
mouth.temp <- mouth$T..deg.C.
mouth.eqsat <- calc_DO_sat(mouth.temp,press=1013, sal=0)
mouth.sat <- (mouth.mgL/mouth.eqsat) * 100
do.comp <- cbind(do.comp,garden.sat,garden.mgL,culvert.sat,culvert.mgL,hound.sat,hound.mgL,mouth.sat,mouth.mgL)
#####Plots#####
#time series plots
quartz()
plot(do.comp$DateTime,do.comp$exo2.sat,ylim=c(60,120),xlab="DateTime",ylab="DO %Sat")
points(do.comp$DateTime,do.comp$garden.sat,col="green")
points(do.comp$DateTime,do.comp$culvert.sat,col="orange")
points(do.comp$DateTime,do.comp$hound.sat,col="blue")
points(do.comp$DateTime,do.comp$mouth.sat,col="red")
legend("bottomright",c("EXO2","Garden MiniDOT","Culvert MiniDOT","Hound MiniDOT","Mouth MiniDOT"),lty=c(1,1,1,1,1),col=c('black',"green","orange","blue","red"))
quartz()
plot(do.comp$DateTime,do.comp$exo2.mgL,ylim=c(2,12),xlab="DateTime",ylab="DO mg/L")
points(do.comp$DateTime,do.comp$garden.mgL,col="green")
points(do.comp$DateTime,do.comp$culvert.mgL,col="orange")
points(do.comp$DateTime,do.comp$hound.mgL,col="blue")
points(do.comp$DateTime,do.comp$mouth.mgL,col="red")
legend("bottomright",c("EXO2","Garden MiniDOT","Culvert MiniDOT","Hound MiniDOT","Mouth MiniDOT"),lty=c(1,1,1,1,1),col=c('black',"green","orange","blue","red"))
#calibration comparison
quartz()
plot(do.comp$exo2.sat,do.comp$garden.sat,ylim=c(75,120),xlim=c(75,120),xlab="EXO2 %Sat",ylab="%Sat",col="green")
abline(0,1,lty=1,cex=2)
points(do.comp$exo2.sat,do.comp$culvert.sat,col="orange")
points(do.comp$exo2.sat,do.comp$hound.sat,col="blue")
points(do.comp$exo2.sat,do.comp$mouth.sat,col="red")
legend("topleft",c("Garden MiniDOT 4.65%","Culvert MiniDOT 5.97%","Hound MiniDOT 8.79%","Mouth MiniDOT 13.85%"),lty=c(1,1,1,1),col=c("green","orange","blue","red"))
quartz()
plot(do.comp$exo2.mgL,do.comp$garden.mgL,ylim=c(7,11),xlim=c(7,11),xlab="EXO2 mg/L",ylab="mg/L",col="green")
abline(0,1,lty=1,cex=2)
points(do.comp$exo2.mgL,do.comp$culvert.mgL,col="orange")
points(do.comp$exo2.mgL,do.comp$hound.mgL,col="blue")
points(do.comp$exo2.mgL,do.comp$mouth.mgL,col="red")
legend("topleft",c("Garden MiniDOT 4.74%","Culvert MiniDOT 6.08%","Hound MiniDOT 8.79%","Mouth MiniDOT 13.75%"),lty=c(1,1,1,1),col=c("green","orange","blue","red"))
####Estimate Offset####
garden.mgL.offset <- mean(((do.comp$exo2.mgL-do.comp$garden.mgL)/do.comp$exo2.mgL) * 100) #4.746782
culvert.mgL.offset <- mean(((do.comp$exo2.mgL-do.comp$culvert.mgL)/do.comp$exo2.mgL) * 100) #6.084556
hound.mgL.offset <- mean(((do.comp$exo2.mgL-do.comp$hound.mgL)/do.comp$exo2.mgL) * 100) #8.796492
mouth.mgL.offset <- mean(((do.comp$exo2.mgL-do.comp$mouth.mgL)/do.comp$exo2.mgL) * 100) #13.85024
garden.sat.offset <- mean(((do.comp$exo2.sat-do.comp$garden.sat)/do.comp$exo2.sat) * 100) #4.658371
culvert.sat.offset <- mean(((do.comp$exo2.sat-do.comp$culvert.sat)/do.comp$exo2.sat) * 100) #5.979058
hound.sat.offset <- mean(((do.comp$exo2.sat-do.comp$hound.sat)/do.comp$exo2.sat) * 100) #8.794387
mouth.sat.offset <- mean(((do.comp$exo2.sat-do.comp$mouth.sat)/do.comp$exo2.sat) * 100) #13.75172
####Adjust Observed Data####
garden.mgL.adj <- do.comp$garden.mgL * (1+(garden.mgL.offset/100))
culvert.mgL.adj <- do.comp$culvert.mgL * (1 +(culvert.mgL.offset/100))
hound.mgL.adj <- do.comp$hound.mgL * (1+(hound.mgL.offset/100))
mouth.mgL.adj <- do.comp$mouth.mgL * (1+(mouth.mgL.offset/100))
garden.sat.adj <- do.comp$garden.sat * (1+(garden.sat.offset/100))
culvert.sat.adj <- do.comp$culvert.sat * (1+(culvert.sat.offset/100))
hound.sat.adj <- do.comp$hound.sat * (1+(hound.sat.offset/100))
mouth.sat.adj <- do.comp$mouth.sat * (1+(mouth.sat.offset/100))
#####Plot Adjusted Data####
quartz()
plot(do.comp$DateTime,do.comp$exo2.sat,ylim=c(60,120),xlab="DateTime",ylab="DO %Sat")
points(do.comp$DateTime,garden.sat.adj,col="green")
points(do.comp$DateTime,culvert.sat.adj,col="orange")
points(do.comp$DateTime,hound.sat.adj,col="blue")
points(do.comp$DateTime,mouth.sat.adj,col="red")
legend("bottomright",c("EXO2","Garden MiniDOT Adj","Culvert MiniDOT Adj","Hound MiniDOT Adj","Mouth MiniDOT Adj"),lty=c(1,1,1,1,1),col=c('black',"green","orange","blue","red"))
quartz()
plot(do.comp$DateTime,do.comp$exo2.mgL,ylim=c(2,12),xlab="DateTime",ylab="DO mg/L")
points(do.comp$DateTime,garden.mgL.adj,col="green")
points(do.comp$DateTime,culvert.mgL.adj,col="orange")
points(do.comp$DateTime,hound.mgL.adj,col="blue")
points(do.comp$DateTime,mouth.mgL.adj,col="red")
legend("bottomright",c("EXO2","Garden MiniDOT Adj","Culvert MiniDOT Adj","Hound MiniDOT Adj","Mouth MiniDOT Adj"),lty=c(1,1,1,1,1),col=c('black',"green","orange","blue","red"))
#calibration comparison
quartz()
plot(do.comp$exo2.sat,garden.sat.adj,ylim=c(85,120),xlim=c(85,120),xlab="EXO2 %Sat",ylab="%Sat",col="green")
abline(0,1,lty=1,cex=2)
points(do.comp$exo2.sat,culvert.sat.adj,col="orange")
points(do.comp$exo2.sat,hound.sat.adj,col="blue")
points(do.comp$exo2.sat,mouth.sat.adj,col="red")
legend("topleft",c("Garden MiniDOT Adj","Culvert MiniDOT Adj","Hound MiniDOT Adj","Mouth MiniDOT Adj"),lty=c(1,1,1,1),col=c("green","orange","blue","red"))
quartz()
plot(do.comp$exo2.mgL,garden.mgL.adj,ylim=c(7,11),xlim=c(7,11),xlab="EXO2 mg/L",ylab="mg/L",col="green")
abline(0,1,lty=1,cex=2)
points(do.comp$exo2.mgL,culvert.mgL.adj,col="orange")
points(do.comp$exo2.mgL,hound.mgL.adj,col="blue")
points(do.comp$exo2.mgL,mouth.mgL.adj,col="red")
legend("topleft",c("Garden MiniDOT Adj","Culvert MiniDOT Adj","Hound MiniDOT Adj","Mouth MiniDOT Adj"),lty=c(1,1,1,1),col=c("green","orange","blue","red"))
|
fe0c59a5677aa68030a2e64beb431921a266361e | a94e0f4865b334d8806ab7efc00387aba279f0ae | /joseph_heatmap.r | 3536a26b52cc1301232a3361188038dd6133c03b | [] | no_license | buzoherbert/6.867-Machine-Learinng-in-transportation-safety-perception | 6fe3808b52dc218e9694b98ebf92d803eaed75c1 | 08adb536299384866693abfbbbf00182c0db4f3c | refs/heads/master | 2021-08-28T20:33:10.019092 | 2017-12-13T04:28:28 | 2017-12-13T04:28:28 | 109,623,027 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,070 | r | joseph_heatmap.r | install.packages("NISTunits", dependencies = TRUE);
# NOTE(review): unconditional install.packages() re-installs the
# dependencies every time the script is sourced; consider guarding with
# requireNamespace().
install.packages("reshape2", dependencies = TRUE);
install.packages("plotly", dependencies = TRUE);
library(plotly);
library(NISTunits);
library(reshape2);
# loading file
# Survey responses, read from the working directory.
safety_data = read.csv("safety_data.csv");
# Removing rows with no safety perception measurement
# Keep only the rows of `data` that have no NA in the columns selected by
# `desiredCols`; all columns (and their order) are left untouched.
completeFun <- function(data, desiredCols) {
  rows_ok <- complete.cases(data[, desiredCols])
  data[rows_ok, ]
}
# Keep only surveys that actually recorded a point-security score.
safety_data = completeFun(safety_data, "pointsecurity")
# Based on
# https://stackoverflow.com/a/365853/3128369
# Great-circle distance (haversine formula) between two coordinate pairs
# stored as columns of `data`.
#
# Args:
#   data: data.frame holding the coordinate columns, in decimal degrees.
#   lat1, lon1, lat2, lon2: column names (strings) of the two points.
# Returns: numeric vector of distances in kilometres, one per row.
haversine <- function(data, lat1, lon1, lat2, lon2){
  earthRadiusKm <- 6371
  # degrees -> radians; replaces NISTunits::NISTdegTOradian, which is
  # just x * pi / 180, so the package dependency is not needed here.
  deg2rad <- function(deg) deg * pi / 180
  dLat <- deg2rad(data[[lat2]] - data[[lat1]])
  dLon <- deg2rad(data[[lon2]] - data[[lon1]])
  phi1 <- deg2rad(data[[lat1]])
  phi2 <- deg2rad(data[[lat2]])
  a <- sin(dLat/2) * sin(dLat/2) +
    sin(dLon/2) * sin(dLon/2) * cos(phi1) * cos(phi2)
  c <- 2 * atan2(sqrt(a), sqrt(1 - a))
  earthRadiusKm * c
}
# Distance (km) from each respondent's transit centre (cetram) to the
# survey location, computed row-wise with the haversine helper above.
safety_data[["haversine"]] = haversine(safety_data, "cetram_lat", "cetram_long", "latitude", "longitude")
# Making a basic plot of some potentially relevant variables
# Candidate explanatory variables collected under tidier column names.
# NOTE(review): plot_data is not referenced again in the visible part of
# this script (the heatmap below works on safety_data directly).
plot_data <- data.frame(
bus_or_ped = safety_data[["bus_or_ped"]],
base_study_zone = safety_data[["base_study_zone"]],
busdestination = safety_data[["busdestination"]],
inside_or_outside = safety_data[["inside_or_outside"]],
total_seats = safety_data[["totalseats"]],
total_passenger_count = safety_data[["totalpassengercount"]],
total_female_count = safety_data[["totalfemalecount"]],
empty_seats = safety_data[["emptyseats"]],
gender = safety_data[["gender"]],
age = safety_data[["age"]],
companions = safety_data[["companions"]],
education = safety_data[["educational_attainment"]],
origin = safety_data[["origin"]],
destination = safety_data[["destinations"]],
trip_purpose = safety_data[["trip_purpose"]],
mode_security = safety_data[["modesecurity"]],
point_security = safety_data[["pointsecurity"]],
importance_safety = safety_data[["Importance_safety_digit"]],
most_safe = safety_data[["mostsafe"]],
least_safe = safety_data[["leastsafe"]],
urban_typology = safety_data[["urban.typology"]],
haversine = safety_data[["haversine"]],
hour = safety_data[["hour"]],
week_day = safety_data[["wday"]]
);
# Pair of variables to cross-tabulate in the heatmap.
field1 = "gender"
field2 = "age"
# e.g. "gender ~ age"; dcast() coerces the string to a formula.
casting_formula = sprintf("%s ~ %s", field1, field2)
#agg = aggregate(safety_data[["pointsecurity"]], list(Gender = safety_data[["gender"]], Age = safety_data[["age"]]), mean, na.rm=TRUE);
# Mean point-security score for every field1 x field2 combination.
agg = dcast(safety_data, casting_formula, mean, value.var = "pointsecurity");
#if("Var.2" %in% colnames(agg))
#{
#  agg = subset(agg, select = -c(Var.2) );
#}
# Drop the extra Var.2 column from the cast — presumably produced by
# blank factor levels; TODO confirm. NOTE(review): this line errors if
# Var.2 is absent; the commented-out guard above would make it safe.
agg = subset(agg, select = -c(Var.2) );
#agg = na.omit(agg);
# Move the first column (field1 levels) into the row names.
row.names(agg) <- agg[,1];
agg <- agg[,-1];
#agg[agg > 4] <- 4
# Number of surveys per combination (same cast, counting instead of
# averaging).
num_surveys = dcast(safety_data, casting_formula, length, value.var = "pointsecurity");
#if("Var.2" %in% colnames(agg))
#{
#  num_surveys = subset(num_surveys, select = -c(Var.2) );
#}
num_surveys = subset(num_surveys, select = -c(Var.2) );
num_surveys = na.omit(num_surveys);
#row.names(num_surveys) <- num_surveys[,1];
#num_surveys <- num_surveys[,-1];
num_surveys;
# Long-format counts used to annotate each heatmap cell.
num_list = aggregate(safety_data[["pointsecurity"]], list(field1 = safety_data[[field1]], field2 = safety_data[[field2]]), length);
num_list[num_list==""] <- NA
num_list = num_list[!is.na(num_list$field2),];
num_list;
# Axis titles and annotation coordinates for plotly.
xa = list(title = field2)
ya = list(title = field1)
anno_x = num_list[["field2"]]
anno_y = num_list[["field1"]]
anno_text = num_list[["x"]]
#anno_x = c("BRT")
#anno_y = c(3)
#anno_text = c("xxx")
# Red-to-green heatmap of mean scores with per-cell survey counts
# overlaid as black text.
plot_ly(
x = names(agg), y = row.names(agg),
z = data.matrix(agg), type = "heatmap", colors = colorRamp(c("red", "green"))) %>%
layout(xaxis = xa, yaxis = ya) %>%
layout(margin = list(l = 100)) %>%
add_annotations(x = anno_x, y = anno_y, text = anno_text, xref = 'x', yref = 'y', showarrow = FALSE, font=list(color='black'));
#agg_heat <- heatmap(as.matrix(sapply(agg, as.numeric)), Rowv=NA, Colv=NA, scale='none');
agg; |
c0cedc0e4cf8c137a3507d7c3b0810328d3e2bb9 | 6e32987e92e9074939fea0d76f103b6a29df7f1f | /googleaiplatformv1.auto/man/GoogleCloudAiplatformV1FeatureValue.Rd | b9f968db5c74b978b944604b9d1498a262cf77ff | [] | no_license | justinjm/autoGoogleAPI | a8158acd9d5fa33eeafd9150079f66e7ae5f0668 | 6a26a543271916329606e5dbd42d11d8a1602aca | refs/heads/master | 2023-09-03T02:00:51.433755 | 2023-08-09T21:29:35 | 2023-08-09T21:29:35 | 183,957,898 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,295 | rd | GoogleCloudAiplatformV1FeatureValue.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1FeatureValue}
\alias{GoogleCloudAiplatformV1FeatureValue}
\title{GoogleCloudAiplatformV1FeatureValue Object}
\usage{
GoogleCloudAiplatformV1FeatureValue(
bytesValue = NULL,
boolValue = NULL,
stringValue = NULL,
boolArrayValue = NULL,
int64Value = NULL,
int64ArrayValue = NULL,
doubleArrayValue = NULL,
metadata = NULL,
stringArrayValue = NULL,
doubleValue = NULL
)
}
\arguments{
\item{bytesValue}{Bytes feature value}
\item{boolValue}{Bool type feature value}
\item{stringValue}{String feature value}
\item{boolArrayValue}{A list of bool type feature value}
\item{int64Value}{Int64 feature value}
\item{int64ArrayValue}{A list of int64 type feature value}
\item{doubleArrayValue}{A list of double type feature value}
\item{metadata}{Metadata of feature value}
\item{stringArrayValue}{A list of string type feature value}
\item{doubleValue}{Double type feature value}
}
\value{
GoogleCloudAiplatformV1FeatureValue object
}
\description{
GoogleCloudAiplatformV1FeatureValue Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Value for a feature.
}
\concept{GoogleCloudAiplatformV1FeatureValue functions}
|
c7f80c7db49ffd1029487d2ddb664f18f08c8d4a | 341b66d831198c801945f552608ed5e6d8c554d2 | /code_R/FADN.code.key.R | b791979443db3f8f758f3ef55b8b276c3eb89632 | [] | no_license | progillespie/data_FADN_PUBLIC | df257d0152c32951cea781cc2c08afc10f8b2432 | 99ebb4e17d0a97ca3aa1d2acc14f96fd132fc338 | refs/heads/master | 2016-09-05T19:11:30.441859 | 2014-07-23T21:59:10 | 2014-07-23T21:59:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,283 | r | FADN.code.key.R | #---------------- Create FADN.code.key ------------------------------
# Lookup table mapping FADN (Farm Accountancy Data Network) standard
# result codes (SYS*/SE*) to human-readable descriptions.
# BUG FIX: N was 148, but 176 rows are assigned below; rows 149-176 were
# silently growing the data.frame past its preallocated size on every
# assignment. N now matches the real row count.
N <- 176
FADN.code.key <- data.frame(code=rep("",N),
                            description=rep("",N),
                            stringsAsFactors=F)
FADN.code.key[1,] <- c("SYS02", "Farms represented")
FADN.code.key[2,] <- c("SYS03", "Sample farms")
FADN.code.key[3,] <- c("SE005", "Economic size")
FADN.code.key[4,] <- c("SE010", "Total labour input")
FADN.code.key[5,] <- c("SE011", "Labour input")
FADN.code.key[6,] <- c("SE015", "Unpaid labour input")
FADN.code.key[7,] <- c("SE020", "Paid labour input")
FADN.code.key[8,] <- c("SE021", "Paid labour Input")
FADN.code.key[9,] <- c("SE025", "Total Utilised Agricultural Area")
FADN.code.key[10,] <- c("SE030", "Rented U.A.A.")
FADN.code.key[11,] <- c("SE016", "Unpaid labour input")
FADN.code.key[12,] <- c("SE035", "Cereals")
FADN.code.key[13,] <- c("SE041", "Other field crops")
FADN.code.key[14,] <- c("SE042", "Energy crops")
FADN.code.key[15,] <- c("SE046", "Vegetables and flowers")
FADN.code.key[16,] <- c("SE050", "Vineyards")
FADN.code.key[17,] <- c("SE054", "Permanent crops")
FADN.code.key[18,] <- c("SE060", "Olive groves")
FADN.code.key[19,] <- c("SE055", "Orchards")
FADN.code.key[20,] <- c("SE065", "Other permanent crops")
FADN.code.key[21,] <- c("SE071", "Forage crops")
FADN.code.key[22,] <- c("SE072", "Agricultural fallows")
FADN.code.key[23,] <- c("SE073", "Set aside")
FADN.code.key[24,] <- c("SE074", "Total agricultural area out of production")
FADN.code.key[25,] <- c("SE075", "Woodland area")
FADN.code.key[26,] <- c("SE080", "Total livestock units")
FADN.code.key[27,] <- c("SE085", "Dairy cows")
FADN.code.key[28,] <- c("SE090", "Other cattle")
FADN.code.key[29,] <- c("SE095", "Sheep and goats")
FADN.code.key[30,] <- c("SE100", "Pigs")
FADN.code.key[31,] <- c("SE105", "Poultry")
FADN.code.key[32,] <- c("SE110", "Yield of wheat")
FADN.code.key[33,] <- c("SE115", "Yield of maize")
FADN.code.key[34,] <- c("SE120", "Stocking density")
FADN.code.key[35,] <- c("SE125", "Milk yield")
FADN.code.key[36,] <- c("SE131", "Total output")
FADN.code.key[37,] <- c("SE132", "Total output / Total input")
FADN.code.key[38,] <- c("SE135", "Total output crops & crop production")
FADN.code.key[39,] <- c("SE136", "Total crops output / ha")
FADN.code.key[40,] <- c("SE140", "Cereals")
FADN.code.key[41,] <- c("SE145", "Protein crops")
FADN.code.key[42,] <- c("SE146", "Energy crops")
FADN.code.key[43,] <- c("SE150", "Potatoes")
FADN.code.key[44,] <- c("SE155", "Sugar beet")
FADN.code.key[45,] <- c("SE160", "Oil-seed crops")
FADN.code.key[46,] <- c("SE165", "Industrial crops")
FADN.code.key[47,] <- c("SE170", "Vegetables & flowers")
FADN.code.key[48,] <- c("SE175", "Fruit")
FADN.code.key[49,] <- c("SE180", "Citrus fruit")
FADN.code.key[50,] <- c("SE185", "Wine and grapes")
FADN.code.key[51,] <- c("SE190", "Olives & olive oil")
FADN.code.key[52,] <- c("SE195", "Forage crops")
FADN.code.key[53,] <- c("SE200", "Other crop output")
FADN.code.key[54,] <- c("SE206", "Total output livestock & livestock products")
FADN.code.key[55,] <- c("SE207", "Total livestock output / LU")
FADN.code.key[56,] <- c("SE211", "Change in value of livestock")
FADN.code.key[57,] <- c("SE216", "Cows' milk & milk products")
FADN.code.key[58,] <- c("SE220", "Beef and veal")
FADN.code.key[59,] <- c("SE225", "Pigmeat")
FADN.code.key[60,] <- c("SE230", "Sheep and goats")
FADN.code.key[61,] <- c("SE235", "Poultrymeat")
FADN.code.key[62,] <- c("SE240", "Eggs")
FADN.code.key[63,] <- c("SE245", "Ewes' and goats' milk")
FADN.code.key[64,] <- c("SE251", "Other livestock & products")
FADN.code.key[65,] <- c("SE256", "Other output")
FADN.code.key[66,] <- c("SE260", "Farmhouse consumption")
FADN.code.key[67,] <- c("SE265", "Farm use")
FADN.code.key[68,] <- c("SE270", "Total Inputs")
FADN.code.key[69,] <- c("SE275", "Total intermediate consumption")
FADN.code.key[70,] <- c("SE281", "Total specific costs")
FADN.code.key[71,] <- c("SE284", "Specific crop costs / ha")
FADN.code.key[72,] <- c("SE285", "Seeds and plants")
FADN.code.key[73,] <- c("SE290", "Seeds and plants home-grown")
FADN.code.key[74,] <- c("SE295", "Fertilisers")
FADN.code.key[75,] <- c("SE300", "Crop protection")
FADN.code.key[76,] <- c("SE305", "Other crop specific costs")
FADN.code.key[77,] <- c("SE309", "Specific livestock output / LU")
FADN.code.key[78,] <- c("SE310", "Feed for grazing livestock")
FADN.code.key[79,] <- c("SE315", "Feed for grazing livestock home-grown")
FADN.code.key[80,] <- c("SE320", "Feed for pigs & poultry")
FADN.code.key[81,] <- c("SE325", "Feed for pigs&poultry home-grown")
FADN.code.key[82,] <- c("SE330", "Other livestock specific costs")
FADN.code.key[83,] <- c("SE331", "Forestry specific costs")
FADN.code.key[84,] <- c("SE336", "Total farming overheads")
FADN.code.key[85,] <- c("SE340", "Machinery & building current costs")
FADN.code.key[86,] <- c("SE345", "Energy")
FADN.code.key[87,] <- c("SE350", "Contract work")
FADN.code.key[88,] <- c("SE356", "Other direct inputs")
FADN.code.key[89,] <- c("SE360", "Depreciation")
FADN.code.key[90,] <- c("SE365", "Total external factors")
FADN.code.key[91,] <- c("SE370", "Wages paid")
FADN.code.key[92,] <- c("SE375", "Rent paid")
FADN.code.key[93,] <- c("SE380", "Interest paid")
FADN.code.key[94,] <- c("SE390", "Taxes")
FADN.code.key[95,] <- c("SE395", "VAT balance excluding on investments")
FADN.code.key[96,] <- c("SE405", "Balance subsidies & taxes on investments")
FADN.code.key[97,] <- c("SE406", "Subsidies on investments")
FADN.code.key[98,] <- c("SE407", "Payments to dairy outgoers")
FADN.code.key[99,] <- c("SE408", "VAT on investments")
FADN.code.key[100,] <- c("SE410", "Gross Farm Income")
FADN.code.key[101,] <- c("SE415", "Farm Net Value Added")
FADN.code.key[102,] <- c("SE420", "Farm Net Income")
FADN.code.key[103,] <- c("SE425", "Farm Net Value Added / AWU")
FADN.code.key[104,] <- c("SE430", "Farm Net Income / FWU")
FADN.code.key[105,] <- c("SE436", "Total assets")
FADN.code.key[106,] <- c("SE441", "Total fixed assets")
FADN.code.key[107,] <- c("SE446", "Land, permanent crops & quotas")
FADN.code.key[108,] <- c("SE450", "Buildings")
FADN.code.key[109,] <- c("SE455", "Machinery")
FADN.code.key[110,] <- c("SE460", "Breeding livestock")
FADN.code.key[111,] <- c("SE465", "Total current assets")
FADN.code.key[112,] <- c("SE470", "Non-breeding livestock")
FADN.code.key[113,] <- c("SE475", "Stock of agricultural products")
FADN.code.key[114,] <- c("SE480", "Other circulating capital")
FADN.code.key[115,] <- c("SE485", "Total liabilities")
FADN.code.key[116,] <- c("SE490", "Long & medium-term loans")
FADN.code.key[117,] <- c("SE495", "Short-term loans")
FADN.code.key[118,] <- c("SE501", "Net worth")
FADN.code.key[119,] <- c("SE506", "Change in net worth")
FADN.code.key[120,] <- c("SE510", "Average farm capital")
FADN.code.key[121,] <- c("SE516", "Gross Investment")
FADN.code.key[122,] <- c("SE521", "Net Investment")
FADN.code.key[123,] <- c("SE526", "Cash Flow (1)")
FADN.code.key[124,] <- c("SE530", "Cash Flow (2)")
FADN.code.key[125,] <- c("SE532", "Cash flow / farm total capital")
FADN.code.key[126,] <- c("SE600", "Balance current subsidies & taxes")
FADN.code.key[127,] <- c("SE605", "Total subsidies - excluding on investments")
FADN.code.key[128,] <- c("SE610", "Total subsidies on crops")
FADN.code.key[129,] <- c("SE611", "Compensatory payments/area payments")
FADN.code.key[130,] <- c("SE612", "Set aside premiums")
FADN.code.key[131,] <- c("SE613", "Other crops subsidies")
FADN.code.key[132,] <- c("SE615", "Total subsidies on livestock")
FADN.code.key[133,] <- c("SE616", "Subsidies dairying")
FADN.code.key[134,] <- c("SE617", "Subsidies other cattle")
FADN.code.key[135,] <- c("SE618", "Subsidies sheep & goats")
FADN.code.key[136,] <- c("SE619", "Other livestock subsidies")
FADN.code.key[137,] <- c("SE621", "Environmental subsidies")
FADN.code.key[138,] <- c("SE622", "LFA subsidies")
FADN.code.key[139,] <- c("SE624", "Total support for rural development")
FADN.code.key[140,] <- c("SE623", "Other rural development payments")
FADN.code.key[141,] <- c("SE699", "Other subsidies")
FADN.code.key[142,] <- c("SE625", "Subsidies on intermediate consumption")
FADN.code.key[143,] <- c("SE626", "Subsidies on external factors")
FADN.code.key[144,] <- c("SE630", "Decoupled payments")
FADN.code.key[145,] <- c("SE631", "Single Farm payment")
FADN.code.key[146,] <- c("SE632", "Single Area payment")
FADN.code.key[147,] <- c("SE640", "Additional aid")
FADN.code.key[148,] <- c("SE650", "Support_Art68")
# These codes are available from a datawarehouse request form
FADN.code.key[149,] <- c("A24", "Country")
FADN.code.key[150,] <- c("SYS04", "Exchange rate")
FADN.code.key[151,] <- c("SE110D", "Wheat Yield Denominator")
FADN.code.key[152,] <- c("SE110N", "Wheat Yield Numerator")
FADN.code.key[153,] <- c("SE115D", "Maize Yield Denominator")
FADN.code.key[154,] <- c("SE115N", "Maize Yield Numerator")
FADN.code.key[155,] <- c("SE120D", "Stocking Density Denominator")
FADN.code.key[156,] <- c("SE120N", "Stocking Density Numerator")
FADN.code.key[157,] <- c("SE125D", "Milk Yield Denominator")
FADN.code.key[158,] <- c("SE125N", "Milk Yield Numerator")
FADN.code.key[159,] <- c("SE132D", "Total Input")
FADN.code.key[160,] <- c("SE132N", "Total Output")
FADN.code.key[161,] <- c("SE136D", "Total Crops Yield Denominator")
FADN.code.key[162,] <- c("SE136N", "Total Crops Yield Numerator")
FADN.code.key[163,] <- c("SE207D", "Total Livestock Yield Denominator")
FADN.code.key[164,] <- c("SE207N", "Total Livestock Yield Numerator")
FADN.code.key[165,] <- c("SE284D", "Specific Crop Yield Denominator")
FADN.code.key[166,] <- c("SE284N", "Specific Crop Yield Numerator")
FADN.code.key[167,] <- c("SE309D", "Specific Livestock Yield Denominator")
FADN.code.key[168,] <- c("SE309N", "Specific Livestock Yield Numerator")
FADN.code.key[169,] <- c("SE425D", "FNVA Per AWU Denominator")
FADN.code.key[170,] <- c("SE425N", "FNVA Per AWU Numerator")
FADN.code.key[171,] <- c("SE430D", "FNVA Per FWU Denominator")
FADN.code.key[172,] <- c("SE430N", "FNVA Per FWU Numerator")
FADN.code.key[173,] <- c("SE532D", "Farm Total Capital")
FADN.code.key[174,] <- c("SE532N", "Cash Flow")
FADN.code.key[175,] <- c("SE620", "Other Subsidies")
FADN.code.key[176,] <- c("YEAR", "YEAR")
rm(N)
# Convert to a keyed data.table so both code and description lookups are
# fast (used by FADNlookup below).
FADN.code.key <- data.table(FADN.code.key, key=c("code", "description"))
#View(FADN.code.key)
#---------------- Create FADN.code.key ------------------------------
#---------------- Define key function ------------------------------
# Convenience function - maps a FADN code to a cleaned, lower-case
# description (punctuation stripped, spaces replaced by dots, doubled
# dots collapsed), or, with reverse = T, maps a description back to the
# corresponding FADN code.
#
# Args:
#   code.or.desc: character key(s) looked up in the keyed FADN.code.key
#                 data.table defined above.
#   reverse: F (default) = code -> description; T = description -> code.
# Returns: character vector of matches.
FADNlookup <- function(code.or.desc, reverse=F){
  # BUG FIX: the validity check on `reverse` previously ran last, after
  # `reverse` had already been used, so invalid values (e.g. NA) crashed
  # with an unhelpful error before ever reaching the stop() message.
  # isTRUE() makes each test robust to NA and non-scalar input.
  if (isTRUE(reverse == F)){
    output <- FADN.code.key[code.or.desc][, description]
    # Normalise the description into a machine-friendly token.
    output <- gsub("[[:punct:]]", "", output)
    output <- gsub(" ", ".", output)
    output <- gsub("\\.{2}", ".", output)
    output <- tolower(output)
  } else if (isTRUE(reverse == T)){
    output <- FADN.code.key[code.or.desc][, code]
  } else {
    stop("<reverse> is a logical argument.Please set it to T or F.")
  }
  output
}
#---------------- Define key function ------------------------------
|
7841987efacc957a88267107386d0bdda994d5ff | dc2a6cfca1f99b5de97d668fd6a3a0f391c84bd6 | /plot4.R | ebf6244c5d41cbdd37a6d1126a1dae5df82f0fa1 | [] | no_license | olivmx/ExData_Plotting1 | de1845d80b3ee3eeddf05d86e694a81898a0a586 | 38913336cac76f8b67081b85a84bfa6184677a31 | refs/heads/master | 2021-01-18T21:17:44.663506 | 2014-09-03T19:04:19 | 2014-09-03T19:04:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,385 | r | plot4.R | #plot 4 code
# 1. READ THE CODE AND STRIP ALL LINES NOT USED
# BUG FIX: the line above was previously not commented out; "1. READ ..."
# is not valid R, so the whole script failed to parse.
# NOTE(review): hard-coded working directory; adjust per machine.
setwd('C:/TestR/ExploratoryCoursera/Project1')
#1.1 read file into dataframe named original and make sure it has data with the head command
# "?" marks missing power readings in the source file.
original <- data.frame(read.table('household_power_consumption.txt', header=TRUE, sep=";", dec=".", na.strings="?"))
original$Date <- as.Date(original$Date , format="%d/%m/%Y")
head (original$Date)
#1.2 subset original dataframe into workingSet with only selected dates range and make
# sure it has the correct dates.
workingSet<- subset(original,
                 original$Date >= as.Date('2007-02-01') &
                 original$Date <= as.Date('2007-02-02'))
dim(workingSet)
#1.3 remove original dataframe to free memory
rm(original)
# 2. adjust data so time is ordered
workingSet$Time[1]
strptime(workingSet$Time[1], format="%H:%M:%S")
# Fractional hours elapsed since midnight of the first day
# (day-of-month - 1) * 24 + hour + minute/60, giving one continuous
# x axis across both days.
dayHrMinNumeric<-
(as.numeric(substr(workingSet$Date,start=9, stop=10))-1)*24+
as.numeric(substr(workingSet$Time, start=1, stop=2))+as.numeric(substr(workingSet$Time, start=4, stop=5))/60
#3. construct plot multiple rows and columns
#openfile
png (filename="plot4.png", width=480, height=480)
par(mfrow = c(2, 2)) # 2 x 2 pictures on one plot
par(cex=0.70)
# row 1,col1 plot
plot(dayHrMinNumeric, workingSet$Global_active_power, type="l", ylab="Global Active Power" , xlab="", xaxt = "n")
axis (1, at=c(0,24,48), labels=c("Thu","Fri","Sat"))
# row 1,col2 plot
plot(dayHrMinNumeric, workingSet$Voltage, type="l", ylab="Voltage" , xlab="datetime", xaxt = "n")
axis (1, at=c(0,24,48), labels=c("Thu","Fri","Sat"))
# row 2,col1 plot
plot(dayHrMinNumeric, workingSet$Sub_metering_1, type="l", col="black",
ylab="Energy sub Metering", xlab="", xaxt="n" )
lines(dayHrMinNumeric, workingSet$Sub_metering_2, type="l", col="red")
lines(dayHrMinNumeric, workingSet$Sub_metering_3, type="l", col="blue")
axis (1, at=c(0,24,48), labels=c("Thu","Fri","Sat"))
legend ("topright", lwd="1",
col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3") )
# row 2,col2 plot
plot(dayHrMinNumeric, workingSet$Global_reactive_power, type="l", ylab="Global_reactive_power" , xlab="datetime", xaxt = "n")
axis (1, at=c(0,24,48), labels=c("Thu","Fri","Sat"))
axis (2, at=c(seq(from=0.1,to=0.5,length.out=5)), labels=c(seq(from=0.1,to=0.5,length.out=5)) )
#close file
dev.off()
|
69f55bf4116c38760392a298fbbaba7f111df76d | fb69e88a2f3cb30f526b8facb76981b4f91f6f29 | /Code/PrepareUserData.R | 779ce00f1990a98d09ad9cd924b87f8bc85d004c | [] | no_license | HatoonS/Depression-Analytics | 4dad54a4641643e6a955bf87ad946af2335309b5 | 0a4666ad30888b38231235b5851ad0363c28dee7 | refs/heads/master | 2020-08-01T09:42:06.400631 | 2019-09-28T10:51:22 | 2019-09-28T10:51:22 | 210,954,399 | 0 | 0 | null | 2019-09-28T10:51:23 | 2019-09-25T22:49:03 | R | UTF-8 | R | false | false | 6,388 | r | PrepareUserData.R | #global data
# Opinion-lexicon word lists used to count sentiment-bearing terms.
# BUG FIX: the two file names were swapped — pTerms (which feeds
# Total_Num_Pos_Terms in getTweetStats) was loaded from
# negative-words.txt and nTerms from positive-words.txt, inverting the
# positive/negative term counts.
pTerms = read.table("positive-words.txt", header = F, stringsAsFactors = F)[, 1]
nTerms = read.table("negative-words.txt", header = F, stringsAsFactors = F)[, 1]
# Build one summary row per user (profile fields + per-tweet statistics)
# and write the merged result to Data/UsersData_<TimeStamp>.csv.
#
# Args:
#   TimeStamp: string embedded in the input/output file names,
#              e.g. "02052018".
# Side effects: reads Data/raw_usersData_<TimeStamp>.csv, calls
#   getTweetStats() once per user, prints progress, writes a CSV.
PrepareUserData <- function(TimeStamp)
{
#has to be called after PrepareTweets
#TimeStamp = "02052018"
library(plyr, quietly = TRUE)#::join()
library(dplyr, quietly = TRUE)#::select()
library(lubridate, quietly = TRUE)#::date()
library(chron, quietly = TRUE)#::times()
library(rtweet, quietly = TRUE) #::read_twitter_csv()
library(stringr, quietly = TRUE) #::replace_all()
library(sentimentr)#::sentiment()
#Read raw users data from disk
####################################
FileName = paste("Data/raw_usersData_", TimeStamp, ".csv", sep = "" )
UsersData = read_twitter_csv(file = FileName)
#First part of gathered data: profile-level fields, renamed.
Part1 = UsersData %>% select(
#internal id
Num = Serial_no,
#Twitter id
id = id,
#twitter name
userName = screen_name,
#classification target
Class = Class,
#The number of followers this account currently has.
Number_of_followers = followers_count,
#The number of users this account is following
Following = friends_count,
#Total number of posts
Total_number_of_posts = statuses_count,
#user Location
Location = location,
#user UTC offset
UTCoffset = utc_offset
)
#replace NA by 0
Part1$UTCoffset[is.na(Part1$UTCoffset)] <- 0
print("users Raw data Read successfuly...")
#Second part of gathered data
#Read from Prepared Tweet Data: one summary row per user.
Part2 = data.frame()
n = length(Part1$Num) #no. of users
for(i in 1:n)
{
userNum = Part1$Num[[i]]
UTCoffset = Part1$UTCoffset[[i]]
# NOTE(review): getTweetStats() resolves TimeStamp from the global
# environment, not from this function's argument (lexical scoping) —
# confirm a matching global TimeStamp is set before calling.
perUser = getTweetStats(userNum, UTCoffset)
Part2 = rbind(Part2, perUser)
print(paste("done with user ", i))
}
print("users tweet summary data Read successfuly...")
# Left-join profile fields with per-tweet summaries on the internal id.
userData = join(Part1, Part2, by="Num", type = "left")
print("done merging parts...")
FileName = paste("Data/UsersData_", TimeStamp, ".csv", sep = "")
write.csv(userData, file= FileName ,row.names=FALSE, na="0")
}
# Summarise one user's prepared tweet file into a single-row data.frame
# of activity and sentiment statistics.
#
# Args:
#   userNum: internal user number; selects the tweet CSV to read.
#   UTCoffset: user's UTC offset.
#     NOTE(review): UTCoffset is accepted but never applied below — the
#     time-of-day classification runs on the raw UTC timestamps despite
#     the "adjust time zone" comment; confirm intended behaviour.
# NOTE(review): TimeStamp is resolved from the global environment (it is
# not a parameter), so it must be set before calling this function.
getTweetStats <- function(userNum, UTCoffset)
{
#Read tweet details from file
####################################
FileName = paste("Data/Tweets/TweetData_user", userNum,"_", TimeStamp,".csv", sep = "")
UserTweets = read.csv(file= FileName, stringsAsFactors = FALSE)
#Average_Posts_Per_day
####################################
# Count tweets per calendar day, then take the (rounded-up) mean.
y <- data.frame(days = as.numeric(lubridate::date(UserTweets$WhenUTC)))
Average_Posts_Per_day = ceiling(mean(plyr::count(y, vars = "days")[[2]]))
#Time of posts (Majority of posts classified as Morning, Afternoon, evening, night)
####################################
#adjust time zone using UTCoffset user info
Times = chron::times(substr(UserTweets$WhenUTC, 12,19))
#divide the times of the day into labeled segments
breaks <- c(0,6, 12, 17, 22, 24) / 24 # times are internally fractions of a day
labels <- c("night1", "morning", "afternoon", "evening", "night")
ind <- cut(Times, breaks, labels, include.lowest = TRUE)
# merge the two night segments (before 06:00 and after 22:00)
ind <- str_replace_all(ind,"night1", "night")
#get the name of the most frequent time of the day
Time_of_posts = names(sort(table(ind), decreasing=TRUE)[1])
#Total no. of replies, to others
####################################
Total_number_of_replies = length(na.omit(UserTweets$IsReply))
#Total number of hashtags
##################################
Total_number_of_hashtags = sum(UserTweets$hashCount)
#total number of mentions
###################################
Total_number_of_mentions = sum(UserTweets$mentionCount)
#Total number retweets : made by the user!!
###################################
#Total_retweets = sum(sapply(userDetails$text, function(x) if_else(startsWith(x, "RT "), 1, 0)))
Total_retweets = sum(UserTweets$IsRetweet, na.rm=TRUE)
#Total number retweeted : by others!!
###################################
Total_number_retweeted = sum(UserTweets$Retweeted, na.rm=TRUE)
#Total number emojies
###################################
Total_number_emoji = sum(UserTweets$emojiCount, na.rm=TRUE)
#sentiment score for each tweet then
#average sentiment accross all tweets
################################################
#Upweight the negative values in a vector while
#also downweighting the zeros in a vector.
#Useful for small text chunks with several sentences in
#which some one states a negative sentence but then
# uses the social convention of several positive
#sentences in an attempt to negate the impact of the negative.
#The affective state isn't a neutral but a
# slightly lessened negative state.
##################################################
sentiment = sentimentr::sentiment(get_sentences(as.character(UserTweets$Tweet)))
MixedSntmScore = sentimentr::average_weighted_mixed_sentiment(sentiment$sentiment)
AvgSntmScore = sentimentr::average_downweighted_zero(sentiment$sentiment)
#count positive and negative words
#######################################
# Overlap of the tokenised tweets with the pTerms/nTerms lexicons loaded
# at the top of this file.
Terms = strsplit_space_tokenizer(UserTweets$Tweet)
NoPosTerms = sum(as.numeric(Terms %in% pTerms))
NoNegTerms = sum(as.numeric(Terms %in% nTerms))
#Result
#######################################
# One row holding every statistic; joined back to the profile data by
# PrepareUserData() via the Num key.
perUser = data.frame(Num = userNum,
Average_Posts_Per_day= Average_Posts_Per_day,
Time_of_posts = Time_of_posts,
Total_number_of_replies = Total_number_of_replies,
Total_number_of_hashtags = Total_number_of_hashtags,
Total_number_of_mentions = Total_number_of_mentions,
Total_number_emojies = Total_number_emoji,
Total_number_of_retweets = Total_retweets,
Total_number_retweeted = Total_number_retweeted,
MixedSntmScore = MixedSntmScore,
AvgSntmScore = AvgSntmScore,
Total_Num_Pos_Terms = NoPosTerms,
Total_Num_Neg_Terms = NoNegTerms)
}
# Tokenise `x`: split every element on runs of whitespace and flatten
# the per-element pieces into one character vector.
strsplit_space_tokenizer <- function(x) {
  pieces <- strsplit(as.character(x), "[[:space:]]+")
  unlist(pieces)
}
5da82107b169e109f2297ee6f9ef173f8b2f4ad8 | 82fdf86e2fe0da18dcae4d85e9b439d0168ace70 | /man/installGithub.Rd | 76261d360e8b06cf5f97014a27cc6a0a2b07b087 | [] | no_license | eddelbuettel/apollo13 | 24f69d6458f727eced7a8dcfe315c616265e284d | 9ff19612b68ca76984a8f0ca531fa2f104b08954 | refs/heads/master | 2021-01-23T07:21:20.360648 | 2015-04-26T14:18:53 | 2015-04-26T14:18:53 | 34,614,622 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 333 | rd | installGithub.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/aliases.R
\name{installGithub}
\alias{installGithub}
\title{installGithub}
\usage{
installGithub(...)
}
\arguments{
\item{...}{passed on to \code{install_github}}
}
\description{
installGithub
}
\seealso{
\code{\link[devtools]{install_github}}
}
|
ca31b98ca2195347c9bd946b33154bf7e2721a20 | 3f3e0d69fd9d9c8e9c9555756949568037971a8b | /Ch. 2-3/Ch. 3/Variables/Fallow/5.2.FIX_Calculo_Buffer500_AES_SG_GREEN.R | 63b018950f8d39940023b6fc33a8c5249fcf8124 | [] | no_license | anasanz/MyScripts | 28d5a6f244029674017d53d01f8c00307cb81ecb | d762b9582d99c6fc285af13150f95ffd2622c1a8 | refs/heads/master | 2021-05-10T08:56:54.228036 | 2021-03-08T07:11:51 | 2021-03-08T07:11:51 | 118,910,393 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,690 | r | 5.2.FIX_Calculo_Buffer500_AES_SG_GREEN.R | # ==================================================================================
# EXTRACT BUFFER 500 M CALCULATING Ha OF SG-AES-GREENING
# ==================================================================================
rm(list=ls())
library(rgdal)
library(rgeos)
library(raster)
library(dplyr)
# ---- Load data (layers without overlap) ----
# Transects
tr <- readOGR("D:/PhD/Third chapter/GIS", "Trans_2018_EPSG23031") # Contains transects sampled each year (1/0)
# SG
#sg14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/SG", layer = "SG_2014_EPSG23031")
#colnames(sg14@data)[colnames(sg14@data) == "Codi"] <- "Codi.2"
sg15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2015_EPSG23031")
sg16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2016_EPSG23031")
sg17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2017_EPSG23031")
sg18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2018_EPSG23031")
sg19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2019_EPSG23031")
# AES
#aes14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/AES", layer = "AEScutted_2014_EPSG23031")
aes15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2015_EPSG23031")
aes16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2016_EPSG23031")
aes17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2017_EPSG23031_FIXED_FALLOW")
aes18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2018_EPSG23031_FALLOW")
aes19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2019_EPSG23031_FALLOW")
# SELECT ONLY FALLOW FIELDS IN AES (In 2016 it was already done, and in 2015 is all together)
aes17 <- aes17[which(aes17$PROD_NOM == "FALLOW"), ]
aes18 <- aes18[which(aes18$PROD_NOM == "FALLOW"), ]
aes19 <- aes19[which(aes19$PROD_NOM == "FALLOW"), ]
# GREEN
#?green14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/Greening", layer = "GREENcutted_2014_EPSG23031")
green15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2015_EPSG23031")
green16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2016_EPSG23031")
green17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2017_EPSG23031")
green18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2018_EPSG23031")
green19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2019_EPSG23031")
# ---- Create buffers and calculate area ----
buf <- gBuffer(tr, byid = TRUE, width = 500)

# Fresh one-column data frame (transect code "Codi") used to accumulate the
# per-layer areas for one management scheme (AES / SG / GREEN).
new_management <- function(buf) {
  management <- as.data.frame(matrix(NA, nrow = nrow(buf), ncol = 1))
  colnames(management)[1] <- "Codi"
  management$Codi <- buf@data$Codi
  management
}

# Add one column per management layer with the total area (ha) of that layer
# falling inside each 500 m buffer.
#
# management   data frame with a "Codi" column (see new_management()).
# buf          SpatialPolygons buffers around the transects.
# layers       list of SpatialPolygons layers (one per year).
# layers_names character vector of the same length; used for column names.
# proportional TRUE only for AES 2015, where the fallow strip is not
#              digitised: the intersected area is scaled by
#              HA_Fallow / HA_SP to estimate the fallow fraction.
add_layer_areas <- function(management, buf, layers, layers_names,
                            proportional = FALSE) {
  for (i in seq_along(layers)) {
    poli <- raster::intersect(buf, layers[[i]])      # fields clipped to buffers
    poli$ha_intersect_buffer <- area(poli) / 10000   # m2 -> ha
    if (proportional) {
      poli$ha_intersect_fallow <- poli$ha_intersect_buffer * poli$HA_Fallow / poli$HA_SP
      transect_area <- aggregate(ha_intersect_fallow ~ Codi, data = poli, FUN = sum)
    } else {
      transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum)
    }
    colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_")
    management <- left_join(management, transect_area, by = "Codi")
    management[is.na(management)] <- 0  # buffers with no intersecting field get 0 ha
  }
  management
}

# ---- AES ----
# 2015: fallow strip not digitised -> proportional area.
# 2016-2019: fallow fields digitised -> direct intersected area.
management <- new_management(buf)
management <- add_layer_areas(management, buf, list(aes15), c("aes15"),
                              proportional = TRUE)
management <- add_layer_areas(management, buf,
                              list(aes16, aes17, aes18, aes19),
                              c("aes16", "aes17", "aes18", "aes19"))

#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management, "AES_15_19_FIX.csv")

# ---- SG 15-19 ----
management <- new_management(buf)
management <- add_layer_areas(management, buf,
                              list(sg15, sg16, sg17, sg18, sg19),
                              c("sg15", "sg16", "sg17", "sg18", "sg19"))

#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management, "SG_15_19_FIX.csv")

# ---- GREENING ----
#### 2015 - 2019 ####
# Full recomputation had many intersection errors and was very slow, so only
# 2018 and 2019 are recomputed here; earlier years come from the previously
# saved GREEN_15_19.csv (its old 2018/2019 columns 6-7 are dropped).
setwd("D:/PhD/Third chapter/Data")
green <- read.csv("GREEN_15_19.csv")
green <- green[ ,-c(6,7)]

management <- new_management(buf)
management <- add_layer_areas(management, buf,
                              list(green18, green19),
                              c("green18", "green19"))

management2 <- left_join(green, management, by = "Codi")

# Spot checks: the original script inspected the last loop iteration's
# intersection; recreate it explicitly (green19) for the plots below.
poli <- raster::intersect(buf, green19)

plot(buf[153,])
plot(poli[which(poli$Codi == "BE14"), ], col = "red", add = TRUE)
be14 <- poli[which(poli$Codi == "BE14"), ]
be14$area <- area(be14)/10000
sum(be14$area)

plot(buf[149,])
plot(poli[which(poli$Codi == "BE06"), ], col = "red", add = TRUE)
be06 <- poli[which(poli$Codi == "BE06"), ]
be06$area <- area(be06)/10000
sum(be06$area)

#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management2, "GREEN_15_19_FIX.csv")
|
0aadbfd6e0e77453b0f83726d9d161a4a970cb3f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ri/examples/estlate.Rd.R | 6d5c0f8ef36fe77c247d93e4d2dcc0b389830ba3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 571 | r | estlate.Rd.R | library(ri)
### Name: estlate
### Title: Estimation of local average treatment effects under
### noncompliance
### Aliases: estlate
### Keywords: estimation noncompliance
### ** Examples
y <- c(8,6,2,0,3,1,1,1,2,2,0,1,0,2,2,4,1,1)
Z <- c(1,1,0,0,1,1,0,0,1,1,1,1,0,0,1,1,0,0)
D <- c(1,0,0,0,0,0,1,0,1,1,0,1,0,0,1,0,0,1)
cluster <- c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9)
block <- c(rep(1,4),rep(2,6),rep(3,8))
probs <- genprobexact(Z,block,cluster) # generate probability of assignment
late <- estlate(y,D,Z,prob=probs) # estimate the LATE; estimated LATE = 9
|
9675ed61bdf6eb16730b24085728fe4e2b7301ee | 5b5499c80a9376a70295cb76d759e6c63c197bf0 | /run_analysis.R | 8b2c532d343e9b3fc983614ba532ef358acbd5d2 | [] | no_license | courseradata111/Peer-graded-Assignment-Getting-and-Cleaning-Data-Course-Project | 2782b7a77a8b41b1d6fa246db5c3a7d5f911530b | 6e99e7be7d108e4162bd7e129da5cd9093941cbf | refs/heads/master | 2022-11-16T12:47:22.892480 | 2020-07-02T12:36:59 | 2020-07-02T12:36:59 | 276,376,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,992 | r | run_analysis.R | library(dplyr)
get_raw_data <- function(url = 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip',
                         destfile = 'raw.zip') {
  # Download the zipped UCI HAR dataset and extract it into the working
  # directory. mode = "wb" is required: without it download.file() can
  # corrupt binary files (such as zip archives) on Windows.
  download.file(url, destfile = destfile, mode = "wb")
  unzip(destfile)
}
read_file <- function(feature, datasets, data_base_dir) {
  # Read one "<feature>_<dataset>.txt" file per dataset (e.g. "X_train.txt"
  # and "X_test.txt") and stack them row-wise into a single tibble.
  # file.path()/paste0() vectorize over `datasets`, giving one path each.
  paths <- file.path(data_base_dir, datasets,
                     paste0(feature, "_", datasets, ".txt"))
  # Read every file first, then bind once at the end: growing a tibble with
  # bind_rows() inside a loop copies the accumulated rows on each iteration.
  bind_rows(lapply(paths, function(p) tibble(read.table(p))))
}
# Build the tidy data set for the "Getting and Cleaning Data" project:
# merge the train/test splits, keep only mean()/std() features, label the
# activities, and return the mean of every variable per activity/subject.
# Expects the unzipped "UCI HAR Dataset" directory in the working directory.
run_analysis <- function() {
  data_base_dir <- 'UCI HAR Dataset'
  datasets = c('train', 'test')
  # merge training and test data sets
  X <- read_file(feature = 'X',
                 datasets = datasets,
                 data_base_dir = data_base_dir)
  y <- read_file(feature = 'y',
                 datasets = datasets,
                 data_base_dir = data_base_dir)
  subject <- read_file(feature = 'subject',
                       datasets = datasets,
                       data_base_dir = data_base_dir)
  names(subject) <- c("subject_code")
  # extract mean and std
  # features.txt maps column position (V1) to feature name (V2); the
  # pattern keeps only "-mean(" / "-std(" features, deliberately
  # excluding meanFreq() and angle(...) variables.
  features <- tibble(read.table(file.path(data_base_dir, 'features.txt')))
  idx <- filter(features, grepl('-mean[(]|-std[(]', features$V2))
  X_mean_and_std <- select(X, idx$V1)
  names(X_mean_and_std) <- idx$V2
  # assign descriptive names to activities
  # labels data set with descriptive variables names
  activity_labels <- tibble(read.table(
    file.path(data_base_dir, "activity_labels.txt")))
  # Join on the numeric activity code (V1 in both tables).
  y_labeled <- inner_join(y, activity_labels, by = c("V1" = "V1"))
  names(y_labeled) <- c("activity_code", "activity_name")
  # mean each variable group by activity and subject
  data_final <- bind_cols(X_mean_and_std, subject, y_labeled)
  gdata <- group_by(data_final, activity_name, subject_code)
  averaged_data_final <- summarise_all(gdata, mean, na.rm = TRUE)
  #list(data_final, averaged_data_final)
  # Returns only the averaged (step-5) tidy data set.
  averaged_data_final
}
# Placeholder: intended to write the tidy data set to disk (e.g. via
# write.table(dataset, ..., row.names = FALSE)); not yet implemented.
write_data <- function(dataset) {
}
|
27cb76f2d77e8931910ee276f9aacac3a0334a8e | 0edde7dc03658b64303ffc0d4da539f858cf77b9 | /man/vec_post_cropped_azure.Rd | 075899ef13cd5ef056280a739869870ba4edb384 | [
"MIT"
] | permissive | Atan1988/alvision | 8ff114dbff8297768b0e8ef6196d0355b89b412e | 15f771d24f70353c81fa62c7461c4e602dc75b01 | refs/heads/master | 2021-07-11T00:52:34.072235 | 2020-12-11T05:43:09 | 2020-12-11T05:43:09 | 221,738,729 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 704 | rd | vec_post_cropped_azure.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_ocr_azure.R
\name{vec_post_cropped_azure}
\alias{vec_post_cropped_azure}
\title{vectorized post azure}
\usage{
vec_post_cropped_azure(
df,
cropped_tm_dir,
img,
azure_creds,
push_to_az = T,
box_highlight = F,
remove_fl = F
)
}
\arguments{
\item{df}{bounds dataframe}
\item{cropped_tm_dir}{temporary dir to output cropped img}
\item{img}{image}
\item{push_to_az}{whether to push to azure}
\item{box_highlight}{whether to have the additional step of flagging the cropped image}
\item{remove_fl}{whether to remove cropped files}
\item{azure_creds}{azure credentials}
}
\description{
vectorized post azure
}
|
9ef767b1353a61119ae9e7ec07286b1e422e41e1 | da2676b6fd629acdcad0bf955b0643a27407d1ce | /recommenderB/start.R | 800c65a8c80967593fd9a73056c43c14b0cd7af9 | [] | no_license | DataKind-SF/datadive_201503_techsoup-global | 728e5a0ee69e5b29d91af8021df7a44ecd3b0c34 | 966f94d27bd522638c11582a13baca2f9fd49b1f | refs/heads/master | 2016-09-10T21:16:06.661971 | 2015-04-07T04:56:01 | 2015-04-07T04:56:01 | 31,853,990 | 0 | 12 | null | 2015-04-24T19:14:45 | 2015-03-08T15:10:51 | R | UTF-8 | R | false | false | 3,682 | r | start.R | library("readr")
library("dplyr")
library("lubridate")
tsdSample <- read_csv("data/techsoup_datakind_sample.csv")
#tsd_sample has info on each purchase
tsdDemo <- read_csv("data/techsoup_datakind_code_demo.csv")
#tsdDemo is like sample, but without revenue
# Pri_key – unique incrementing ID, Integer
# Org – internal organization ID, BigInt
# City – City Name of physical location, Varchar(50)
# State – State Code of physical location, Varchar(5)
# PostalCode – ZipCode.Postal Delivery code – Varchar(20)
# OrgType – Major Type of Nonprofit Org – Char(4)
# Org_SubType – Subtype Code of Nonprofit Org – Char(4)
# Budget – Annual Budget for organization in USD – Currency
# Reg_Date – First Date of Registration with TechSoup Global – Date/Time
# Contact – contact email – varchar(100)
# Role – ID for contact’s role at organization – varchar(50)
# Role_description – contact’s role at organization – varchar(100)
# OrderNum – Unique ID for order – BigInt
# Tax_ID_Number – tax ID number for matching to IRS BMF file – Varchar(100)
# Transaction_date – date of transaction – Date/Time
# Vendor – Unique ID for vendor of product – BigInt
# Item_ID – Unique ID for Item – Varchar(20)
# Category – type of product ** only on recent live products – varchar(100)
# Count_Licenses – Count of licenses of that product for this order – Bigint
# Revenue – Total amount of admin fee for donation – Currency
# Value – Total value of donated product in USD – Currency
#tsdWide is same as tsdSample, but 2M records - use n_max=whatever to limit
tsdWide <- read_csv("data/techsoup_datakind_details_wide.csv")
#BMF has more details on who the clients are. 1.5M of them
tsdBMF <- read_csv("data/big_bmf.csv")
BMFsample <- tsdBMF[1:100,]
#tsdWide %>% filter(tax_id_number %in% tsdBMF$EIN[1:100])
#this doesn't work because tax_id_numbers are in all kinds of formats. EIN is numbers.
#this works, because the particular tax id is known Get's whole transaction history for this client
x=tsdWide %>% filter(tax_id_number == 122)
#what we really want is, given a tax_id_number, find the BMF record
y=tsdBMF %>% filter(EIN == 1076156) #but tax_id_number is this weird string, in general
# Cumulative share of annual admin-fee revenue contributed by organizations
# whose budget is at or below each threshold, for one calendar year.
#
# tsd    transaction table (needs transaction_date, revenue, budget columns)
# yr     calendar year to keep
# budget numeric vector of budget thresholds (USD)
# Returns a numeric vector, same length as `budget`, of revenue fractions.
revenue_cdf <- function(tsd, yr, budget) {
  tsd_year <- tsd %>% filter(year(transaction_date) == yr)
  total <- sum(tsd_year$revenue)
  vapply(budget,
         function(b) sum(tsd_year$revenue[tsd_year$budget <= b], na.rm = TRUE) / total,
         numeric(1))
}

# 2010 CDF (wider budget grid)
budget <- c(1e5, 3e5, 5e5, 7e5, 1e6, 3e6, 5e6, 7e6, 1e7, 3e7, 5e7, 7e7, 1e8)
bucketRevenue <- revenue_cdf(tsdWide, 2010, budget)
plot(budget, bucketRevenue, main="2010")

# 2014 CDF
budget <- c(1e5, 3e5, 5e5, 7e5, 1e6, 2e6, 3e6, 5e6, 7e6, 1e7, 3e7)
bucketRevenue14 <- revenue_cdf(tsdWide, 2014, budget)
plot(budget, bucketRevenue14, main="2014")

# 2006 CDF (same grid as 2014 so the two overlay cleanly below)
bucketRevenue06 <- revenue_cdf(tsdWide, 2006, budget)
plot(budget, bucketRevenue06, main="2006")

plot(budget, bucketRevenue14, main="2014 dots vs. 2006 line")
lines(budget, bucketRevenue06)
82e0de5e3e541e9dcc2ebefc005c2705d80a720c | 047dc234e511b3b898a83bcf6848d6b1a8cfcc66 | /Bankruptcy Data.R | b08a738d6fe3171f27e378cfcf40954be8e382e7 | [] | no_license | akhmetod/Machine-Learning-on-Bankruptcy | 1249a320b2c2c2eaeb351c4d1ef45dfbfa8d3cb8 | 054c91a6d6fd3e181a8ffb296cf3dccaea94e1a8 | refs/heads/master | 2020-03-19T10:22:25.760801 | 2018-05-20T23:07:31 | 2018-05-20T23:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,513 | r | Bankruptcy Data.R | rm(list=ls())
library(tidyverse)
#install.packages('fifer')
library(fifer)
bank.data <- read.csv("C:\\Users\\madha\\OneDrive\\Documents\\UC-BANA\\Spring Sem 2018\\Data Mining I\\bankruptcy.csv")
samp<-floor(0.8 * nrow(bank.data))
colnames(bank.data)
str(bank.data)
summary(bank.data) #mean bankruptcy 14%
bank.years<-bank.data %>%
group_by(FYEAR) %>%
summarise(amt = sum(DLRSN))
par(mar=c(1,1,1,1))
hist(bank.years$amt,axes=T, xlab = 'Years', ylab = 'bankruptcies')
index.train <- sample(seq_len(nrow(bank.data)), size = samp)
# `samp` is 80% of the rows, so the sampled indices ARE the training set and
# the remainder is held out. (Previously the two assignments were reversed,
# training on 20% of the data and testing on the other 80%.)
bank.train <- bank.data[index.train, ]
bank.test <- bank.data[-index.train, ]
#bank.train <- stratified(df = bank.data, group = "DLRSN", size = .8)
#bank.test <- bank.data[- as.numeric(row.names(bank.train)),]
#1 GLM
bank.glm0 <- glm(DLRSN ~ . - CUSIP -FYEAR, family = binomial, bank.train)
bank.glm.bic<- step(bank.glm0, k = log(nrow(bank.train)))
summary(bank.glm.bic)
AIC(bank.glm.bic) #2410.6
BIC(bank.glm.bic) #2468.04
#insample
prob.glm1b.insample <- predict(bank.glm.bic, type = "response")
predicted.glm1b.insample <- prob.glm1b.insample > 0.06
predicted.glm1b.insample <- as.numeric(predicted.glm1b.insample)
table(bank.train$DLRSN, predicted.glm1b.insample, dnn = c("Actual", "Predicted"))
mean(ifelse(bank.train$DLRSN != predicted.glm1b.insample, 1, 0)) #AMR
#outsample
prob.glm0.outsample <- predict(bank.glm.bic, newdata =bank.test, type = "response")
predicted.glm0.outsample <- prob.glm0.outsample > 0.06
predicted.glm0.outsample <- as.numeric(predicted.glm0.outsample)
table(bank.test$DLRSN, predicted.glm0.outsample, dnn = c("Truth", "Predicted"))
mean(ifelse(bank.test$DLRSN != predicted.glm0.outsample, 1, 0)) #AMR
#ROC curve
library("verification")
par(mar=c(1,1,1,1))
rocplot=roc.plot(bank.test$DLRSN == "1", prob.glm0.outsample)
rocplot$roc.vol
#2 CART
library(rpart)
bank.rpart <- rpart(formula = DLRSN ~ . - CUSIP -FYEAR, data = bank.train, method = "class",
parms = list(loss = matrix(c(0, 15, 1, 0), nrow = 2)))
plot(bank.rpart)
text(bank.rpart)
plotcp(bank.rpart)
bank.rpart2 <- prune(bank.rpart,cp=0.011)
#insample
bank.train.rpart = predict(bank.rpart2, bank.train, type = "prob")
#pred.train = prediction(bank.train.rpart[, 2], bank.train$DLRSN)
bank.train.pred.rpart2 = as.numeric(bank.train.rpart[, 2] > 0.06)
table(bank.train$DLRSN, bank.train.pred.rpart2, dnn = c("Truth", "Predicted"))
mean(ifelse(bank.train$DLRSN != bank.train.pred.rpart2, 1, 0)) #AMR
par(mar=c(1,1,1,1))
roc.plot(bank.train$DLRSN == "1", bank.train.pred.rpart2)
#outofsample
library(ROCR)
library(verification)
bank.data$DLRSN=as.list(bank.data$DLRSN)
bank.test.rpart = predict(bank.rpart2, bank.test, type = "prob")
#pred = prediction(bank.test.rpart[, 2], bank.test$DLRSN)
bank.test.pred.rpart2 = as.numeric(bank.test.rpart[, 2] > 0.06)
table(bank.test$DLRSN, bank.test.pred.rpart2, dnn = c("Truth", "Predicted"))
mean(ifelse(bank.test$DLRSN != bank.test.pred.rpart2, 1, 0)) #AMR
roc.plot(bank.test$DLRSN == "1", bank.test.pred.rpart2)
roc.plot(bank.test$DLRSN == "1", bank.test.pred.rpart2)$roc.vol
#3 GAM -----------------------------*************
library(mgcv)
str(bank.data)
bank.gam <-gam(DLRSN~s(R1)+s(R2)+s(R3)+s(R4)+s(R5)+s(R6)+s(R7)+s(R8)+s(R9)+s(R10),family = binomial, data=bank.train, select = TRUE)
par(mar=c(1,1,1,1))
plot(bank.gam, shade = TRUE, seWithMean = TRUE, scale = 0, pages=1)
AIC(bank.gam)
BIC(bank.gam)
bank.gam$deviance
bank.gam2 <-gam(DLRSN~R1+s(R2)+s(R3)+R4+R5+s(R6)+R7+s(R8)+s(R9)+s(R10),family = binomial, data=bank.train, select = TRUE)
plot(bank.gam2, shade = TRUE, seWithMean = TRUE, scale = 0, pages=1)
#In-sample
pcut.gam <- 0.06
prob.gam.in <- predict(bank.gam2, bank.train, type="response")
pred.gam.in <- (prob.gam.in >= pcut.gam) * 1
table(bank.train$DLRSN, pred.gam.in, dnn = c("Obs.", "Prediction"))
mean(ifelse(bank.train$DLRSN != pred.gam.in, 1, 0))
#Out-of-sample
prob.gam.out <- predict(bank.gam, bank.test, type ="response")
pred.gam.out <- (prob.gam.out >= pcut.gam) * 1
table(bank.test$DLRSN, pred.gam.out, dnn = c("Obs.", "Prediction"))
mean(ifelse(bank.test$DLRSN != pred.gam.out, 1, 0))
roc.plot(bank.test$DLRSN == "1", pred.gam.out)
#4 LDA- normality test
par(mar=c(1,1,1,1))
par(mfrow=c(1,1))
qqnorm(bank.data$R1)
qqnorm(bank.data$R2)
qqnorm(bank.data$R3)
qqnorm(bank.data$R4)
qqnorm(bank.data$R5)
qqnorm(bank.data$R6)
qqnorm(bank.data$R7)
qqnorm(bank.data$R8)
qqnorm(bank.data$R9)
qqnorm(bank.data$R10)
#quadratic
# Quadratic discriminant analysis on the training set.
bank.qda <- qda(DLRSN ~ . -CUSIP -FYEAR, data = bank.train)
# In-sample prediction. predict() takes `newdata` (a `data` argument is
# silently ignored), and the cut-off variable was previously the undefined
# name `pcut.lda`, which made this line error at run time.
prob.qda.in <- predict(bank.qda, newdata = bank.train)
pcut.qda <- 0.06
pred.qda.in <- (prob.qda.in$posterior[, 2] >= pcut.qda) * 1
table(bank.train$DLRSN, pred.qda.in, dnn = c("Obs", "Pred"))
mean(ifelse(bank.train$DLRSN != pred.qda.in, 1, 0))  # in-sample misclassification rate
#outofsample
qda.out <- predict(bank.qda, newdata = bank.test)
cut.lda <- 0.06
pred.qda.out <- as.numeric((qda.out$posterior[, 2] >= cut.lda))
table(bank.test$DLRSN, pred.qda.out, dnn = c("Obs", "Pred"))
mean(ifelse(bank.test$DLRSN != pred.qda.out, 1, 0))  # out-of-sample misclassification rate
# ROC curves (verification package) for both prediction sets.
roc.plot(bank.test$DLRSN == "1", pred.qda.out)
roc.plot(bank.train$DLRSN == "1", pred.qda.in)
summary(bank.gam)
#4 NNet
library(nnet)
library(neuralnet)
bank.nnet <- nnet(DLRSN ~ . -CUSIP -FYEAR, data = bank.train, size = 1, maxit = 500, type='class')
prob.nnet = predict(bank.nnet, bank.test)
pred.nnet = as.numeric(prob.nnet > 0.06)
table(bank.test$DLRSN, pred.nnet, dnn = c("Observation", "Prediction"))
mean(ifelse(bank.test$DLRSN != pred.nnet, 1, 0))
n=8
bank.nnet.tune <- nnet(DLRSN ~ . -CUSIP -FYEAR,
                       size = n, data = bank.train, maxit=1000, decay=0.006, linout = TRUE)
# In-sample performance of the tuned network. The confusion table must
# compare *training* labels with the training-set predictions; it previously
# tabulated bank.test$DLRSN against them, whose lengths do not even match.
prob.nnet.tune.train = predict(bank.nnet.tune, bank.train)
pred.nnet.tune.train = as.numeric(prob.nnet.tune.train > 0.06)
table(bank.train$DLRSN, pred.nnet.tune.train, dnn = c("Observation", "Prediction"))
mean(ifelse(bank.train$DLRSN != pred.nnet.tune.train, 1, 0))
#outofsample
prob.nnet.tune = predict(bank.nnet.tune, bank.test)
pred.nnet.tune = as.numeric(prob.nnet.tune > 0.06)
table(bank.test$DLRSN, pred.nnet.tune, dnn = c("Observation", "Prediction"))
mean(ifelse(bank.test$DLRSN != pred.nnet.tune, 1, 0))
bank.data$DLRSN=as.factor(bank.data$DLRSN)
roc.plot(bank.test$DLRSN == "1", pred.nnet.tune)
nn <- neuralnet(DLRSN ~ R1+R2+R3+R4+R5+R6+R7+R8+R9+R10,
data=bank.train,hidden=c(8,1),linear.output=T)
plot(nn)
|
f7bccf48beba15fa645afa0afe83c9b4bef8b6db | 94ff11f4f793d36f3ff2ee16052cba076a09cb54 | /onsite.R | a9163cbc4245bc92a71db7eb2af556db32aef241 | [] | no_license | aish-k/B.Tech-IT-final-year-project | ed8964ff9a30aca60f4fa4142a2cc9b89bef2ffe | f932865a50b671eeb4ca7d4b9f506560cc228c95 | refs/heads/master | 2021-06-18T15:06:39.543298 | 2017-05-12T16:33:03 | 2017-05-12T16:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,869 | r | onsite.R | setwd("C:/wamp64/www/st")
library("gridExtra")
library("tableplot")
library("data.table")
library("data.table")
library("RMySQL")
library("DMwR")
library("neuralnet")
library("caret")
library("readxl")
con <- dbConnect(MySQL(),
user="root", password="",
dbname="project", host="localhost")
rs <- dbSendQuery(con, "select trainee_id,name,experience,coursescompleted,projects_completed,net_score,net_att from trainee;")
data <- fetch(rs)
print(data)
preprocessParams <- preProcess(data[,1:7], method=c("range"))
testdata<-(predict(preprocessParams, data[,1:7]))
print(testdata)
library(readxl)
Dataset_emp <- read_excel("C:/Users/Aishwarya/Documents/Dataset 800.xlsx")
Dataset_emp$experience<-as.numeric(factor(Dataset_emp$experience, levels=c('Young', 'Intermediate','Senior')))
Dataset_emp$net_score<-as.numeric(factor(Dataset_emp$net_score, levels=c('Good', 'Very good','Excellent')))
Dataset_emp$projects_completed<-as.numeric(factor(Dataset_emp$projects_completed, levels=c('Low', 'Medium','High')))
Dataset_emp$net_att<-as.numeric(factor(Dataset_emp$net_att, levels=c('Low', 'Medium','High')))
Dataset_emp$coursescompleted<-as.numeric(factor(Dataset_emp$coursescompleted, levels=c('Low', 'Medium','High')))
Dataset_emp$Onsite<-as.numeric(factor(factor(Dataset_emp$Onsite,levels=c('No', 'Yes'))))
preprocessParams2 <- preProcess(Dataset_emp[,1:6], method=c("range"))
traindata<-(predict(preprocessParams2, Dataset_emp[,1:6]))
print(traindata)
# Train a single-hidden-layer (5 units) neural network predicting the
# (normalized) Onsite flag from the five normalized trainee features.
# NOTE(review): the formula references traindata$... columns directly while
# also passing `traindata` as the data argument — works, but redundant.
onsite <- neuralnet(traindata$Onsite ~ traindata$experience + traindata$net_score+traindata$coursescompleted+traindata$projects_completed+traindata$net_att,traindata, hidden = 5, lifesign = "minimal",threshold=0.01,linear.output =FALSE,learningrate = 0.01)
plot(onsite)
# Score the current trainees; columns 3:7 are assumed to be the same five
# features in the same order as training — TODO confirm against the query.
onsite.results <- compute(onsite, testdata[,3:7])
results <- data.frame(prediction = onsite.results$net.result)
print(testdata)
print(results)
print(onsite$startweights)
# Rescale raw network outputs to [0, 1] so they read as probabilities.
optimum_results<-preProcess(results, method=c("range"))
final_result<-predict(optimum_results,results)
print(final_result$prediction)
# Bar chart of every trainee's predicted onsite probability.
barplot(final_result$prediction,width = 1,names.arg=testdata$trainee_id,xlab="Trainee Id's",ylab="Probability of going onsite",col="blue",
        main="Prediction of going onsite",border="red")
# Indices of the top-10 trainees by predicted probability.
o<-head(order(final_result$prediction,decreasing=TRUE),10)
print(final_result$prediction[o])
barplot(final_result$prediction[o],width = 1,names.arg=testdata$trainee_id[o],xlab="Trainee Id's",ylab="Probability of going onsite",col="blue",main="Prediction of going onsite",border="red")
# Convert to percentages and save the top-10 table as a PNG.
o_per<-final_result$prediction[o]*100
print(o_per)
Onsite_Eligibility<- c(o_per)
Trainee_id<- c(testdata$trainee_id[o])
Trainee_name<-c(testdata$name[o])
df <- data.frame(Onsite_Eligibility,Trainee_id,Trainee_name)
ggsave("onsite_nn.png", grid.table(df))
|
f9a1f927fb3110410e562154f534893e233ff56b | 015eb7593342515229fd2a45f91c5f539a8d4eb6 | /R/distance.r | 21c365361bbeeca3b6f26d50cf152eaf6d285b86 | [] | no_license | Caleb-Huo/BayesMP | d9d7d77f9dc355aaef47356b26860cde4e25cc41 | ca9fa6a9df875f81d75eff9173acd7c4c5a07515 | refs/heads/master | 2021-01-18T20:23:04.827589 | 2018-08-19T19:13:44 | 2018-08-19T19:13:44 | 65,741,394 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,889 | r | distance.r | ##' Dissimilarity matrix calculation.
##'
##' Based on DE posterior probablity of gene*study matrix,
##' calculate gene-gene distance
##' @title Distance
##' @param plus p*n matrix. Each element represents the posterior probablity of a gene to be positive DE.
##' @param minus p*n matrix. Each element represents the posterior probablity of a gene to be negative DE.
##' @param nsample Total number of posterior samples excluding burning period.
##' @param alpha Forge parameter
##' @param method Choose one from those: cos, KL, L1, L2, L2_2D, Hellinger.
##' @return Dissimilarity matrix. p*p
##' @author Zhiguang Huo <zhuo@ufl.edu>
##' @export
##' @examples
##' G<-10
##' S<-3
##' nsample <- 10000
##' plus <- matrix(c(sample(nsample,G*S,replace = TRUE),rep(0,G*S)),2*G,S)
##' minus <- matrix(c(rep(0,G*S),sample(nsample,G*S,replace = TRUE)),2*G,S)
##' dissimilarity <- distance(plus, minus, nsample)
distance <- function(plus,minus,nsample,alpha=0,method='cos'){
## alpha is for laplace smoothing
## each row represent a gene
if(method!='cos' & method!='KL' & method!='L1' & method!='L2' & method!='L2_2D' & method!='Hellinger'){
stop('please specify correct distance cos, KL, L1, L2, L2_2D, Hellinger')
}
cosineDist <- function(p,q){
1 - sum(p*q)/sqrt(sum(p^2)*sum(q^2))
}
KLDist <- function(p,q){
sum(p*log(p/q)+q*log(q/p)) /2
}
L1Dist <- function(p,q){
sum(abs(p-q))
}
L2Dist <- function(p,q){
sum((p-q)^2)
}
L2Dist2D <- function(p,q){
sum((p[-3]-q[-3])^2)
}
HellingerDist <- function(p,q){
sqrt(sum((sqrt(p)-sqrt(q))^2))/sqrt(2)
}
simpleDist <- function(p,q){
a <- sum( abs(p[1:2] - q[1:2]))
if(a==0) return (0)
res <- sum( abs(p[1:2] - q[1:2]) / (p[1:2] + q[1:2]) )
}
selfDistance <- function(a,b,fun){
outer(a, b, function(x,y) vapply(seq_along(x), function(i) fun(x[[i]], y[[i]]), numeric(1)))
}
K <- ncol(plus)
netro = nsample - plus - minus
Pplus = (plus + alpha)/(nsample + alpha*K)
Pminus = (minus + alpha)/(nsample + alpha*K)
Pnetro = (netro + alpha)/(nsample + alpha*K)
resdist = 0
for(k in 1:K){
tmpData = cbind(Pplus[,k],Pminus[,k],Pnetro[,k])
tmpList <- lapply(seq_len(nrow(tmpData)), function(i) tmpData[i,]/sum(tmpData[i,]))
if(method=='cos'){
adist = selfDistance(tmpList,tmpList,cosineDist)
}
else if(method=='KL'){
if(alpha==0)
warnings('for KL distance, it is better to specify non zero alpha!')
adist = selfDistance(tmpList,tmpList,KLDist)
}
else if(method=='L1'){
adist = selfDistance(tmpList,tmpList,L1Dist)
}
else if(method=='L2'){
adist = selfDistance(tmpList,tmpList,L2Dist)
}
else if(method=='L2_2D'){
adist = selfDistance(tmpList,tmpList,L2Dist2D)
}
else if(method=='Hellinger'){
adist = selfDistance(tmpList,tmpList,HellingerDist)
}
else {stop('there is a bug for function distance 1.')}
resdist = resdist + adist/K
}
resdist
}
|
95d28f95249279826b3cafcfe4bd0ce2d403f35f | fe18df411bd2e756f13944ebc40a3ca60de89fd9 | /R/scrape-old.R | 706305ba63adac57161c7b36fd1dbcfb2ec0fec6 | [] | no_license | BHGC/website | 5b2dc9a74b91bf848514a9c0048de1aab1c20f76 | c534f4f2160e5867b231ddcbd08bf842f9db711d | refs/heads/master | 2023-08-19T04:18:18.255071 | 2023-08-11T21:41:54 | 2023-08-11T21:41:54 | 20,578,077 | 2 | 5 | null | 2023-08-12T01:00:29 | 2014-06-06T21:00:13 | R | ISO-8859-1 | R | false | false | 4,457 | r | scrape-old.R | R.utils::use("R.utils")
# Fetch each section of the bhgc.org site and convert it to Markdown under
# md/<dir>/index.md using pandoc (-s = standalone). Existing files are kept,
# so re-running only downloads missing pages (idempotent, resumable).
# printf()/mkdirs() are presumably provided by R.utils — loaded at the top
# of this script.
download <- function() {
  root <- "http://bhgc.org"
  dirs=c(".", "flyingSites", "tam", "forums", "join", "members", "db", "miscellaneous", "miscellaneous/recipe-goat", "about")
  for (dir in dirs) {
    path <- file.path("md", dir)
    mkdirs(path)
    prefix <- file.path(path, "index")
    file <- sprintf("%s.md", prefix)
    if (!file_test("-f", file)) {
      url <- file.path(root, dir)
      printf("Downloading: %s -> %s\n", url, file)
      # pandoc fetches the URL itself and writes Markdown to `file`.
      system2("pandoc", args=c("-s", url, "-o", file))
    }
  } # for (dir ...)
} # download()
# Strip Drupal page chrome from every downloaded Markdown page, writing the
# trimmed copy to md,trimmed/<same path>. Already-trimmed files are skipped.
clean <- function() {
  # All downloaded files
  files <- list.files("md", pattern="[.]md$", recursive=TRUE)
  for (file in files) {
    fileD <- file.path("md,trimmed", file)
    if (!file_test("-f", fileD)) {
      mkdirs(dirname(fileD))
      printf("Trimming: %s -> %s\n", file, fileD)
      # Read
      bfr <- readLines(file.path("md", file), warn=FALSE)
      # Trim
      # Content starts 4 lines after the login-form marker and ends just
      # before the Drupal footer banner.
      # NOTE(review): assumes both markers occur exactly once per page; if
      # grep() finds nothing, start/end are empty and the subset below
      # fails — TODO confirm this holds for every downloaded page.
      start <- grep("/user/password", bfr, fixed=TRUE) + 4L
      end <- grep("[![Powered by Drupal", bfr, fixed=TRUE) - 1L
      bfr <- bfr[start:end]
      # Trim empty lines at the top
      idx <- which(nzchar(bfr))[1L]
      bfr <- bfr[idx:length(bfr)]
      # Trim empty lines at the bottom (reverse, trim top, reverse back)
      bfr <- rev(bfr)
      idx <- which(nzchar(bfr))[1L]
      bfr <- bfr[idx:length(bfr)]
      bfr <- rev(bfr)
      # Trim HTML/CSS markup
      bfr <- gsub("[{]style=[^}]*[}]", "", bfr)
      bfr <- trim(bfr)
      # Trim odd characters (non-breaking spaces from the HTML source)
      bfr <- gsub(" ", "", bfr)
      # Markdown translation: a trailing backslash becomes a two-space
      # hard line break.
      bfr <- gsub("[\\]$", "  ", bfr)
      # Write
      writeLines(bfr, con=fileD)
    }
  } # for (file ...)
} # clean()
# Turn every trimmed Markdown page into an RSP template stub: each page in
# md,trimmed/<dir>/ gets a copy at md,trimmed,rsp/<dir>/index.md.rsp.
# Existing destination files are left alone, so re-running is a no-op.
torsp <- function() {
  src_root <- "md,trimmed"
  dst_root <- "md,trimmed,rsp"
  md_files <- list.files(src_root, pattern="[.]md$", recursive=TRUE)
  for (md in md_files) {
    dst <- file.path(dst_root, dirname(md), "index.md.rsp")
    if (file_test("-f", dst)) next  # already converted
    mkdirs(dirname(dst))
    src <- file.path(src_root, md)
    printf("Copying: %s -> %s\n", src, dst)
    file.copy(src, dst)
  }
} # torsp()
# Compile every RSP template under md,trimmed,rsp/ to an HTML fragment under
# md,trimmed,html/, via R.rsp::rfile(). Set force=TRUE to recompile pages
# whose output already exists.
tohtml <- function(force=FALSE) {
  use("R.rsp")
  # All downloaded files
  pathS <- "md,trimmed,rsp"
  files <- list.files(pathS, pattern="[.]rsp$", recursive=TRUE)
  for (file in files) {
    path <- dirname(file)
    fileS <- file.path(pathS, file)
    pathD <- file.path("md,trimmed,html", path)
    # NOTE(review): the gsub pattern ".md.rsp" has unescaped dots (regex
    # wildcards); harmless for these filenames but "[.]md[.]rsp" is safer.
    fileD <- file.path(pathD, gsub(".md.rsp", ".html", basename(fileS)))
    if (force || !file_test("-f", fileD)) {
      printf("Compiling: %s -> %s\n", fileS, fileD)
      # Find page title (first non-empty line)
      # NOTE(review): `page` is computed but never used in this function
      # (build() derives the title from the HTML instead).
      bfr <- readLines(fileS)
      idx <- which(nzchar(bfr))[1L]
      page <- trim(bfr[idx])
      # Find depth: relative path back to the site root for this page.
      if (path == ".") {
        pathToRoot <- ""
      } else {
        depth <- length(unlist(strsplit(path, split="/")))
        pathToRoot <- paste(c(rep("..", times=depth), ""), collapse="/")
      }
      # NOTE(review): global option is changed and never restored — consider
      # on.exit(options(old)) if this matters to callers.
      options(markdown.HTML.options="fragment_only")
      args <- list()
      args$pathToRoot <- pathToRoot
      html <- rfile(fileS, args=args, workdir=pathD)
      print(html)
    }
  } # for (file ...)
} # tohtml()
build <- function(force=FALSE) {
use("R.rsp")
# All downloaded files
pathS <- "md,trimmed,html"
files <- list.files(pathS, pattern="[.]html$", recursive=TRUE)
for (file in files) {
path <- dirname(file)
fileS <- file.path(pathS, file)
pathD <- file.path("html", path)
fileD <- file.path(pathD, gsub(".md.rsp", ".html", basename(fileS)))
if (force || !file_test("-f", fileD)) {
printf("Compiling: %s -> %s\n", fileS, fileD)
# Find page title
bfr <- readLines(fileS)
bfr <- grep("<h2>", bfr, value=TRUE)[1L]
bfr <- gsub("(<h2>|</h2>)", "", bfr)
page <- trim(bfr)
if (is.na(page)) page <- ""
# Find depth
if (path == ".") {
pathToRoot <- ""
} else {
depth <- length(unlist(strsplit(path, split="/")))
pathToRoot <- paste(c(rep("..", times=depth), ""), collapse="/")
}
options(markdown.HTML.options="fragment_only")
args <- list()
args$pathToRoot <- pathToRoot
args$body <- file.path("..", fileS)
args$page <- page
html <- rfile("includes/index.html.rsp", args=args, workdir=pathD)
print(html)
}
} # for (file ...)
} # build()
download(); clean(); torsp(); tohtml();
build()
|
d9d7610255dda051f1f0aaa155f9cede1c1384cf | ac42c7b87b90f08c8ec9b12d5e17a534ffb84b02 | /man/cr_buildstep.Rd | 4e0635af7b1586499d6e4ac6bf189ca17e44b271 | [] | no_license | alainlompo/googleCloudRunner | 990c819c9f4f46ca5a39a720627da4003a6410f3 | 6622d22f1c19d58f6b5888ae929f32d86b57a1c6 | refs/heads/master | 2023-09-03T15:33:00.968050 | 2021-10-27T21:18:23 | 2021-10-27T21:18:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,095 | rd | cr_buildstep.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildsteps.R
\name{cr_buildstep}
\alias{cr_buildstep}
\title{Create a yaml build step}
\usage{
cr_buildstep(
name,
args = NULL,
id = NULL,
prefix = "gcr.io/cloud-builders/",
entrypoint = NULL,
dir = "",
env = NULL,
waitFor = NULL,
volumes = NULL,
secretEnv = NULL
)
}
\arguments{
\item{name}{name of docker image to call appended to \code{prefix}}
\item{args}{character vector of arguments}
\item{id}{Optional id for the step}
\item{prefix}{prefixed to name - set to "" to suppress. Will be suppressed if \code{name} starts with gcr.io}
\item{entrypoint}{change the entrypoint for the docker container}
\item{dir}{The directory to use, relative to /workspace e.g. /workspace/deploy/}
\item{env}{Environment variables for this step. A character vector for each assignment}
\item{waitFor}{Whether to wait for previous buildsteps to complete before running. Default it will wait for previous step.}
\item{volumes}{volumes to connect and write to}
\item{secretEnv}{A list of secrets stored in Secret Manager referred to in args via a \code{$$var}}
}
\description{
Helper for creating build steps for upload to Cloud Build
}
\details{
This uses R to make building steps for cloudbuild.yml files harder to make mistakes with, and also means you can program creation of cloud build steps for use in R or other languages. Various templates with common use cases of buildsteps are also available that wrap this function, refer to the "See Also" section.
}
\section{WaitFor}{
By default each buildstep waits for the previous, but if you pass \code{"-"} then it will start immediately, or if you pass in a list of ids it will wait for previous buildsteps to finish who have that id. See \href{https://cloud.google.com/cloud-build/docs/configuring-builds/configure-build-step-order}{Configuring Build Step Order} for details.
}
\section{Build Macros}{
Fields can include the following variables, which will be expanded when the build is created:-
\itemize{
\item $PROJECT_ID: the project ID of the build.
\item $BUILD_ID: the autogenerated ID of the build.
\item $REPO_NAME: the source repository name specified by RepoSource.
\item $BRANCH_NAME: the branch name specified by RepoSource.
\item $TAG_NAME: the tag name specified by RepoSource.
\item $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag.
\item $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA.
}
Or you can add your own custom variables, set in the Build Trigger. Custom variables always start with $_ e.g. $_MY_VAR
}
\section{secretEnv}{
You can pass secrets that are stored in Secret Manager directly instead of using a dedicated buildstep via \link{cr_buildstep_secret}
Within the code passed to \code{args} those secrets are referred to via \code{$$SECRET_NAME}. If used then \link{cr_build_yaml} must also include the \code{availableSecrets} argument.
}
\examples{
cr_project_set("my-project")
cr_bucket_set("my-bucket")
# creating yaml for use in deploying cloud run
image = "gcr.io/my-project/my-image:$BUILD_ID"
cr_build_yaml(
steps = c(
cr_buildstep("docker", c("build","-t",image,".")),
cr_buildstep("docker", c("push",image)),
cr_buildstep("gcloud", c("beta","run","deploy", "test1",
"--image", image))),
images = image)
# use premade docker buildstep - combine using c()
image = "gcr.io/my-project/my-image"
cr_build_yaml(
steps = c(cr_buildstep_docker(image),
cr_buildstep("gcloud",
args = c("beta","run","deploy",
"test1","--image", image))
),
images = image)
# list files with a new entrypoint for gcloud
cr_build_yaml(steps = cr_buildstep("gcloud", c("-c","ls -la"),
entrypoint = "bash"))
# to call from images not using gcr.io/cloud-builders stem
cr_buildstep("alpine", c("-c","ls -la"), entrypoint = "bash", prefix="")
# to add environment arguments to the step
cr_buildstep("docker", "version", env = c("ENV1=env1", "ENV2=$PROJECT_ID"))
# to add volumes wrap in list()
cr_buildstep("test", "ls", volumes = list(list(name = "ssh", path = "/root/.ssh")))
}
\seealso{
\href{https://cloud.google.com/cloud-build/docs/configuring-builds/use-community-and-custom-builders}{Creating custom build steps how-to guide}
Other Cloud Buildsteps:
\code{\link{cr_buildstep_bash}()},
\code{\link{cr_buildstep_decrypt}()},
\code{\link{cr_buildstep_df}()},
\code{\link{cr_buildstep_docker}()},
\code{\link{cr_buildstep_edit}()},
\code{\link{cr_buildstep_extract}()},
\code{\link{cr_buildstep_gcloud}()},
\code{\link{cr_buildstep_gitsetup}()},
\code{\link{cr_buildstep_mailgun}()},
\code{\link{cr_buildstep_nginx_setup}()},
\code{\link{cr_buildstep_pkgdown}()},
\code{\link{cr_buildstep_run}()},
\code{\link{cr_buildstep_r}()},
\code{\link{cr_buildstep_secret}()},
\code{\link{cr_buildstep_slack}()}
}
\concept{Cloud Buildsteps}
|
9d85754498383098319df1d71246d09ad5ea3300 | 8e19f5167064d02ade73931f59b8737cbaac66fd | /MultipleLinearReg.R | c6554be5337d189d6ba07c0033207320d0e4c162 | [] | no_license | ecrossett/R | ff280c91ba1a22f55568618dcb47554d556b6c48 | 1b4bd146f5e3bcfa82e0fc7fb2e92c1b455d3687 | refs/heads/master | 2020-04-25T08:40:13.930461 | 2020-02-05T01:53:37 | 2020-02-05T01:53:37 | 172,654,679 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,211 | r | MultipleLinearReg.R | getwd()
setwd("/Users/ed/Documents/R/Statistics")
library(ggplot2)
x <- read.csv("X.csv", header = FALSE)
y <- read.csv("Y.csv", header = FALSE)
head(x)
head(y)
int <- rep(1,1110)
Y <- as.matrix(y)
Y <- Y[,2:3]
X <- as.matrix(x)
X <- X[2:4,]
X <- X[,1:1110]
X <- t(X)
X <- cbind(int,X)
#X <- -X[-1,]
#X <- -X[-1111,]
# closed-form solution to manually calculate betas (B = (X'X)^-1 * X' * Y)
betas1 <- solve(t(X) %*% X) %*% t(X) %*% Y[,1]
betas2 <- solve(t(X) %*% X) %*% t(X) %*% Y[,2]
betas1
betas2
data <- cbind(X,Y)
data <- as.data.frame(data)
colnames(data) <- c("coef","X1", "X2","X3","Y1","Y2")
head(data)
# Compute P = (X*(X'X)^-1 * X')
P <- X %*% solve(t(X) %*% X) %*% t(X)
# Compute M = I - P
M <- matrix(0, 1110, 1110)
diag(M) <- 1
M[2,2]
# Check that MP = 0
check <- M %*% P
check[1]
# run OLS regression
lm.mod <- lm(Y ~ X)
lm.betas <- lm.mod$coefficients
# ======= Y1 ~ X1 ==================================
lm.y1 <- lm(Y[,1] ~ X[,2:4])
betay1 <- lm.y1$coefficients
predy1 <- predict(lm.y1)
residy1 <- lm.y1$residuals
ggplot(lm.mod, aes(x = X, y = Y)) +
# regression line
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") +
# dline from prediction to
geom_segment(aes(xend = X[,2], yend = predy1), alpha = .2) +
geom_point(aes(color = abs(residy1), size = abs(resid[,2]))) + # size of the points
scale_color_continuous(low = "lightblue", high = "blue") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predy1), shape = 1) +
theme_bw()
# Residuals vs Predicted Values
plot(residy1,predy1)
plot(predy1,residy1)
# Histogram plot of Error Distribution (Residuals)
predicted <- predict(lm.mod)
ggplot(data, aes(residy1)) + geom_histogram()
summary(lm.y1)
resid <- lm.mod$residuals
resid <- as.matrix(resid)
resid[,1]
plot(predicted[,1], Y[,1])
# plot residuals
residPlot <- plot(lm.mod$residuals)
summary(lm.mod)
# ======= Y1 ~ X2 ==================================
lm.y1 <- lm(data[,6] ~ data[,4])
betay1 <- lm.y1$coefficients
predy1 <- predict(lm.y1)
residy1 <- lm.y1$residuals
ggplot(data, aes(x = X3, y = Y2)) +
# regression line
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") +
# dline from prediction to
geom_segment(aes(xend = X3, yend = predy1), alpha = .2) +
geom_point(aes(color = abs(residy1), size = abs(resid[,2]))) + # size of the points
scale_color_continuous(low = "lightblue", high = "blue") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predy1), shape = 1) +
theme_bw()
# Residuals vs Predicted Values
plot(residy1,predy1)
plot(predy1,residy1)
# Histogram plot of Error Distribution (Residuals)
predicted <- predict(lm.mod)
ggplot(data, aes(residy1)) + geom_histogram()
summary(lm.y1)
# ======= Y1 ~ X1, X2, X3 ==================================
lm.y1 <- lm(data[,5] ~ data[,2:4])
betay1 <- lm.y1$coefficients
predy1 <- predict(lm.y1)
residy1 <- lm.y1$residuals
ggplot(data, aes(x = X3, y = Y2)) +
# regression line
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") +
# dline from prediction to
geom_segment(aes(xend = X3, yend = predy1), alpha = .2) +
geom_point(aes(color = abs(residy1), size = abs(resid[,2]))) + # size of the points
scale_color_continuous(low = "lightblue", high = "blue") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predy1), shape = 1) +
theme_bw()
# Residuals vs Predicted Values
plot(residy1,predy1)
plot(predy1,residy1)
# Histogram plot of Error Distribution (Residuals)
predicted <- predict(lm.mod)
ggplot(data, aes(residy1)) + geom_histogram()
summary(lm.y1)
resid <- lm.mod$residuals
resid <- as.matrix(resid)
resid[,1]
plot(predicted[,1], Y[,1])
# plot residuals
residPlot <- plot(lm.mod$residuals)
summary(lm.mod)
# plot regression line with residuals vs predicted values
# X1 vs Y1
ggplot(data, aes(x = X1, y = Y1)) +
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") + # regression line
geom_segment(aes(xend = X1, yend = predicted[,1]), alpha = .2) + # draw line from point to line
geom_point(aes(color = abs(resid[,1]), size = abs(resid[,2]))) + # size of the points
scale_color_continuous(low = "green", high = "red") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predicted[,1]), shape = 1) +
theme_bw()
# X1 vs Y2
ggplot(lm.mod, aes(x = X[,2], y = Y[,2])) +
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") + # regression line
geom_segment(aes(xend = X[,2], yend = predicted[,2]), alpha = .2) + # draw line from point to line
geom_point(aes(color = abs(resid[,2]), size = abs(resid[,2]))) + # size of the points
scale_color_continuous(low = "green", high = "red") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predicted[,2]), shape = 1) +
theme_bw()
# X2 vs Y1
ggplot(lm.mod, aes(x = X[,3], y = Y[,1])) +
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") + # regression line
geom_segment(aes(xend = X[,3], yend = predicted[,1]), alpha = .2) + # draw line from point to line
geom_point(aes(color = abs(resid[,1]), size = abs(resid[,1]))) + # size of the points
scale_color_continuous(low = "green", high = "red") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predicted[,1]), shape = 1) +
theme_bw()
#X2 vs Y2
ggplot(lm.mod, aes(x = X[,3], y = Y[,2])) +
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") + # regression line
geom_segment(aes(xend = X[,3], yend = predicted[,2]), alpha = .2) + # draw line from point to line
geom_point(aes(color = abs(resid[,2]), size = abs(resid[,2]))) + # size of the points
scale_color_continuous(low = "green", high = "red") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predicted[,2]), shape = 1) +
theme_bw()
#X3 vs Y1
ggplot(lm.mod, aes(x = X[,4], y = Y[,1])) +
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") + # regression line
geom_segment(aes(xend = X[,4], yend = predicted[,1]), alpha = .2) + # draw line from point to line
geom_point(aes(color = abs(resid[,1]), size = abs(resid[,2]))) + # size of the points
scale_color_continuous(low = "green", high = "red") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predicted[,1]), shape = 1) +
theme_bw()
#X3 vs Y2
ggplot(lm.mod, aes(x = X[,4], y = Y[,2])) +
geom_smooth(method = "lm", se = FALSE, color = "lightgrey") + # regression line
geom_segment(aes(xend = X[,4], yend = predicted[,2]), alpha = .2) + # draw line from point to line
geom_point(aes(color = abs(resid[,2]), size = abs(resid[,2]))) + # size of the points
scale_color_continuous(low = "green", high = "red") + # colour of the points mapped to residual size - green smaller, red larger
guides(color = FALSE, size = FALSE) + # Size legend removed
geom_point(aes(y = predicted[,2]), shape = 1) +
theme_bw()
summary(lm.mod, type = "hc0")
plot(X[,4],Y[,2])
|
cb16d8c40762fa8b09088eebe9c8b0400251163a | 58accfda666d580c22762e9df34e7524a0771b6a | /CMOP_field/model/conversion_CMOP.R | cc2bfe3a56e9df33c0a1b094663a4c2219c2c502 | [] | no_license | mmh1133/CMOP | 39135cb534b4092550a3b963b6badef6d2fd56f7 | f2e3d679ec7e8c4e27b34a9ece6a405d4ac20cef | refs/heads/master | 2020-04-16T02:17:26.090747 | 2017-04-20T22:16:22 | 2017-04-20T22:16:22 | 23,399,631 | 0 | 1 | null | 2014-11-14T00:32:30 | 2014-08-27T17:53:24 | R | UTF-8 | R | false | false | 7,264 | r | conversion_CMOP.R | # This script takes HD.size.class files and makes concatenated distributions with the calibrated cell volume from forward scatter
#arguments of this script: (1) distributuion file location, (2) cat (2^cat number of bins), (3) phytoplankton group, (4) cruise
# for i in $(seq 6 1 8); do echo "Rscript ~/DeepDOM/ssPopModel/Conversion_Size_Dist.R ~/DeepDOM/Cell_Division $i prochloro DeepDOM" | qsub -lwalltime=8:00:00,nodes=1:ppn=1 -N pro_conv$i -d.; done
args <- commandArgs(TRUE)
home <- as.character(args[1])
cat <- as.numeric(args[2])
phyto <- as.character(args[3])
cruise <- as.character(args[4])
library(rgl)
library(zoo)
library(plot3D)
# home <- "/Volumes/gwennm/DeepDOM/Cell_division"
# cruise <- "DeepDOM"
# phyto <- "prochloro"
home <- "/Users/francois/CMOP/CMOP_field/model"
cruise <- "CMOP_6"
phyto <- "crypto"
cat<-6
jet.colors <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan", "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
#######################
## SIZE DISTRIBUTION ##
#######################
list <- list.files(home,pattern=paste("HD.size.class_",cruise,"_",phyto,sep=""))
Size <- NULL
for(l in list){
print(l)
s <- read.csv(paste(home,l,sep="/"))
Size <- rbind(Size, s)
}
Size$time <- as.POSIXct(Size$time, tz="GMT")
Size$num.time <- as.numeric(Size$time)
Size <- Size[order(Size$num.time),]
# Size$corrected_stages <- 10^((Size$stages/2^16)*3.5)
# Size$corrected_fsc_beads <- 10^((Size$fsc_beads/2^16)*3.5)
# if(cruise =="MBARI_1"){
# Size$corrected_stages <- 10^(((Size$stages+5000)/2^16)*3.5)
# Size$corrected_fsc_beads <- 10^((median(Size$fsc_beads)/2^16)*3.5)
# }
############################### OLD CONVERSION ########################################
# Size$volume <- 21.853267*((Size$corrected_stages/Size$corrected_fsc_beads)^1.834432)
#######################################################################################
############################### NEW CONVERSION #################################################################################################
# if(phyto == "synecho" | phyto == "pico" | phyto == "prochloro"){
# Size$volume <- 10^(0.524*log10(Size$stages/Size$fsc_beads) + 0.283)
# # Size$volume <- 10^(0.5*log10(Size$stages/Size$fsc_beads))# MIE THEORY
# }else{
# Size$volume <- 10^(1.682*log10(Size$stages/Size$fsc_beads) + 0.961)
# }
if(phyto == "crypto"){
#Size$volume <- 10^(0.75*log10(Size$stages/Size$fsc_beads)) # MIE THEORY
Size$volume <- 10^(1.2384*log10(Size$stages/Size$fsc_beads) + 1.003)
}
# if(phyto == "nano"){
# Size$volume <- 10^(2.2384*log10(Size$stages/Size$fsc_beads) + 1.003)
# }
################################################################################################################################################
#volume.range <- range(Size[which(Size[,"size.dist"] > 10), "volume"]); print(volume.range)
#volume.range <- range(Size[which(Size[,"freq.dist"] > 10^-2), "volume"]); print(volume.range)
mean.volume <- median(Size[which(Size[,"freq.dist"] == max(Size[,"freq.dist"])), "volume"]); print(mean.volume)
mean.diameter <- 2*((mean.volume *3)/(pi*4))^(1/3) ; print(mean.diameter)
# percentile <- cut(Size[,"freq.dist"], 100); plot3d(x=log10(Size$volume), y=Size$num.time, z=Size$freq.dist, col=jet.colors(100)[percentile], type='l', lwd=2)
volume.range <- c(mean.volume/10, mean.volume*5); print(volume.range)
diameter.range <- 2*((volume.range *3)/(pi*4))^(1/3) ; print(diameter.range)
Size.phyto <- subset(Size, volume > volume.range[1] & volume < volume.range[2])
# percentile <- cut(Size.phyto[,"freq.dist"], 100); plot3d(x=log((Size.phyto$volume)), y=Size.phyto$num.time, z=Size.phyto$freq.dist, col=jet.colors(100)[percentile], type='l', lwd=2)
n.day <- round(diff(range(Size.phyto$time))); print(paste("Number of days in the dataset:",n.day))
#broke right here after printing the number of days in the dataset, exit with no error
start <- min(Size.phyto$time)
##############################
## CELL VOLUME DISTRIBUTION ##
##############################
#cat <- 6
###############################
m <- 2^cat # number of Size class
###############################
## where to cut Size class
diff.volume <- log(max(Size.phyto$volume)/min(Size.phyto$volume), base=2)/(m+2)
volbins.cut.ext <- min(Size.phyto$volume) * 2^((1:(m+3) -1)*diff.volume)
volbins.cut <- volbins.cut.ext[-c(1, m+3)]
## Size class
diff.volume <- log(max(Size.phyto$volume)/min(Size.phyto$volume), base=2)/(m-1)
volbins <- min(Size.phyto$volume) * 2^((1:(m) -1)*diff.volume)
##############################
## RUN Size.model.functions ##
##############################
resol <- 60 # number of minutes per interval
hours <- 25
breaks <- hours*60/resol
### SELECT Size DISTRIBUTION for DAY i
### rebuild Size distribution according to volbins
HD <- cut(Size.phyto$volume, volbins.cut)
HD.volume <- as.vector(rep(volbins, length(unique(Size.phyto$time))))
HD.time <- rep(unique(Size.phyto$time), each=m)
HD.hist <- tapply(Size.phyto$freq.dist, list(HD,Size.phyto$time), mean)
HD.hist <- as.vector(HD.hist)
#HD.hist <- as.vector(apply(HD.hist, 2, function(x) na.approx(x, na.rm=F)))
HD.size <- tapply(Size.phyto$size.dist, list(HD,Size.phyto$time), mean)
HD.size <- as.vector(HD.size)
#HD.size <- as.vector(apply(HD.size, 2, function(x) na.approx(x, na.rm=F)))
para <- HD.hist; percentile <- cut(para, 100); plot3d(log(HD.volume), HD.time, HD.hist, col=jet.colors(100)[percentile], type='l', lwd=2, xlab="size class", ylab="time", zlab="Frequency")
Size.volume <- data.frame(cbind(HD.volume,HD.time,HD.hist,HD.size))
### binned the data by 1-h interval
h.time <- as.numeric(seq(min(Size$time), max(Size$time), 60*60))
h <- cut(Size.volume$HD.time, breaks=h.time, include.lowest = T)
time <- as.vector(tapply(Size.volume$HD.time, h, mean))
Vhists <- t(tapply(Size.volume$HD.hist, list(h,Size.volume$HD.volume), mean))
N_dist <- t(tapply(Size.volume$HD.size, list(h,Size.volume$HD.volume), mean))
### NA interpolation
# Vhists <- try(t(apply(Vhists, 1, function(x) na.approx(x, na.rm=F))))
# N_dist <- try(t(apply(N_dist, 1, function(x) na.approx(x, na.rm=F))))
# id <- findInterval(h.time, na.approx(time, na.rm=F))
colnames(Vhists) <- colnames(N_dist) <- time
para <- Vhists; percentile <- cut(unlist(para), 100); plot3d(log(rep(as.numeric(row.names(para)), dim(para)[2])), rep(as.numeric(colnames(para)), each=dim(para)[1]) , Vhists , col=jet.colors(100)[percentile], type='l', lwd=6, xlab="size class", ylab="time", zlab="Frequency")
distribution <- list()
distribution[[1]] <- Vhists
distribution[[2]] <- N_dist
para <- distribution[[1]]; percentile <- cut(unlist(para), 100); plot3d(log(rep(as.numeric(row.names(para)), dim(para)[2])), rep(as.numeric(colnames(para)), each=dim(para)[1]) , Vhists , col=jet.colors(100)[percentile], type='l', lwd=6, xlab="size class", ylab="time", zlab="Frequency")
save(distribution, file=paste(home,"",phyto,"_dist_Ncat",m,"_",cruise,sep=""))
print(paste("saving ", home,"",phyto,"_dist_Ncat",m,"_",cruise,sep="")) |
d17179765023cf70df199e195c6a5bf61be168fa | aec820a0c7109fe2184b7d956742915023fd30c1 | /R_MRP_Planning/Qeoq.r | 5313f5f0500353b15d9388fbefd840c05058bef2 | [] | no_license | hendry062105/Practice_2020 | 350dc2996652c7f658157d08946a1c78da1fa240 | 98f26b1b599c9d09f2c8d08cdc362c7823e887be | refs/heads/main | 2023-08-27T18:47:12.021806 | 2021-10-14T22:30:20 | 2021-10-14T22:30:20 | 417,289,588 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 117 | r | Qeoq.r | Qeoq = function(co,cp){
bard = sum(reqb[2:length(reqb)])/(length(reqb) - 1)
return(adjProg(sqrt(2*co*bard/cp)))
} |
812cd64c7de408ce1af279e9a80659beaa21382b | cbb1dc73d80a8149022614f35ec94c2881ba262c | /toxic/Kernel/toxic.R | 5e08f8c429f30bf82aa87bcf443e6c6ee463c967 | [] | no_license | lacomca/walkingkaggle | dfadd04514658bde8d84cdbcc5e9aedf3ea42372 | 8bc70ac6cc7f0d659e2bbe9c48994d0d4b6c6c42 | refs/heads/master | 2020-08-02T23:09:09.568718 | 2018-11-22T04:23:17 | 2018-11-22T04:23:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,011 | r | toxic.R | ##################################################
#### SETTING
##################################################
library(stringr)
library(dplyr)
library(ggplot2)
library(tidytext)
library('wordcloud')
library(xgboost)
library(tm)
library(doParallel)
#load data
train_data <- fread("./InputData/train.csv", header = T, encoding = 'UTF-8')
test_data <- fread("./InputData/test.csv", header = T, encoding = 'UTF-8')
submission <- fread("./InputData/sample_submission.csv")
##################################################
#### DATA CLEANSING
##################################################
# define function for cleaning
# Clean a character vector of raw comments for tokenisation.
#
# Pipeline (order matters -- lower-casing and stopword removal must run
# BEFORE punctuation is stripped, and several passes are intentionally
# repeated because earlier substitutions can expose new matches):
#   1. lower-case, collapse whitespace, replace URLs with "LINK"
#   2. expand common English contractions ("can't" -> "can not", ...)
#   3. re-join deliberately spaced-out letters ("f u c k" -> "fuck")
#   4. map laughter/exclamation noise to fixed tokens ("hahaha" -> "HAHEHI",
#      "loool" -> "LOL", ...) and squash runs of repeated letters
#   5. remove a custom stopword list, user mentions, punctuation and digits
#   6. undo common obfuscations of profanity ("fu ck" -> "fuck", ...)
#   7. trim whitespace and drop single-letter words (keeping "i")
#
# Lookahead/lookbehind patterns below require perl = TRUE.
#
# @param text character vector of raw comments (may contain line breaks).
# @return unnamed character vector of cleaned comments, same length as `text`.
cleaning_texts <- function(text){
  ## NOTE: tolower() and stopword removal must happen BEFORE removing punctuation!
  # all letters to lower
  text <- tolower(text)
  # remove line breaks
  text <- gsub("\n", " ", text, perl = TRUE)
  # collapse multiple whitespace into one
  text <- gsub("\\s+", " ", text, perl = TRUE)
  # replace links with a placeholder token
  text <- gsub("(f|ht)tp(s?)://\\S+", "LINK", text, perl = TRUE)
  text <- gsub("http\\S+", "LINK", text, perl = TRUE)
  text <- gsub("xml\\S+", "LINK", text, perl = TRUE)
  # expand contracted short forms
  text <- gsub("'ll", " will", text, perl = TRUE)
  text <- gsub("i'm", "i am", text, perl = TRUE)
  text <- gsub("'re", " are", text, perl = TRUE)
  text <- gsub("'s", " is", text, perl = TRUE)
  text <- gsub("'ve", " have", text, perl = TRUE)
  text <- gsub("'d", " would", text, perl = TRUE)
  text <- gsub("can't", "can not", text, perl = TRUE)
  text <- gsub("don't", "do not", text, perl = TRUE)
  text <- gsub("doesn't", "does not", text, perl = TRUE)
  text <- gsub("isn't", "is not", text, perl = TRUE)
  text <- gsub("aren't", "are not", text, perl = TRUE)
  text <- gsub("couldn't", "could not", text, perl = TRUE)
  text <- gsub("mustn't", "must not", text, perl = TRUE)
  text <- gsub("didn't", "did not", text, perl = TRUE)
  # re-join deliberately spaced-out letters, e.g. "f u c k" -> "fuck"
  text <- gsub("(?<=\\b\\w)\\s(?=\\w\\b)", "", text, perl = TRUE)
  # normalise exclamation/laughter noise to fixed upper-case tokens
  text <- gsub("\\b(a|e)w+\\b", "AWWWW", text, perl = TRUE)
  text <- gsub("\\b(y)a+\\b", "YAAAA", text, perl = TRUE)
  text <- gsub("\\b(w)w+\\b", "WWWWW", text, perl = TRUE)
  text <- gsub("\\b(b+)?((h+)((a|e|i|o|u)+)(h+)?){2,}\\b", "HAHEHI", text, perl = TRUE)
  text <- gsub("\\b(b+)?(((a|e|i|o|u)+)(h+)((a|e|i|o|u)+)?){2,}\\b", "HAHEHI", text, perl = TRUE)
  text <- gsub("\\b(m+)?(u+)?(b+)?(w+)?((a+)|(h+))+\\b", "HAHEHI", text, perl = TRUE)
  text <- gsub("\\b((e+)(h+))+\\b", "HAHEHI", text, perl = TRUE)
  text <- gsub("\\b((h+)(e+))+\\b", "HAHEHI", text, perl = TRUE)
  text <- gsub("\\b((o+)(h+))+\\b", "HAHEHI", text, perl = TRUE)
  text <- gsub("\\b((h+)(o+))+\\b", "HAHEHI", text, perl = TRUE)
  text <- gsub("\\b((l+)(a+))+\\b", "LALALA", text, perl = TRUE)
  text <- gsub("(w+)(o+)(h+)(o+)", "WOHOO", text, perl = TRUE)
  text <- gsub("\\b(d?(u+)(n+)?(h+))\\b", "UUUHHH", text, perl = TRUE)
  text <- gsub("\\b(a+)(r+)(g+)(h+)\\b", "ARGH", text, perl = TRUE)
  text <- gsub("\\b(a+)(w+)(h+)\\b", "AAAWWHH", text, perl = TRUE)
  text <- gsub("\\b(p+)(s+)(h+)\\b", "SHHHHH", text, perl = TRUE)
  text <- gsub("\\b((s+)(e+)?(h+))+\\b", "SHHHHH", text, perl = TRUE)
  text <- gsub("\\b(s+)(o+)\\b", "", text, perl = TRUE)
  text <- gsub("\\b(h+)(m+)\\b", "HHMM", text, perl = TRUE)
  text <- gsub("\\b((b+)(l+)(a+)(h+)?)+\\b", "BLABLA", text, perl = TRUE)
  text <- gsub("\\b((y+)(e+)(a+)(h+)?)+\\b", "YEAH", text, perl = TRUE)
  text <- gsub("\\b((z+)?(o+)(m+)(f+)?(g+))+\\b", "OMG", text, perl = TRUE)
  # squash runs of repeated letters ("cooool" -> "col"-style normalisation)
  text <- gsub("aa(a+)", "a", text, perl = TRUE)
  text <- gsub("ee(e+)", "e", text, perl = TRUE)
  text <- gsub("i(i+)", "i", text, perl = TRUE)
  text <- gsub("oo(o+)", "o", text, perl = TRUE)
  text <- gsub("uu(u+)", "u", text, perl = TRUE)
  text <- gsub("\\b(u(u+))\\b", "u", text, perl = TRUE)
  text <- gsub("y(y+)", "y", text, perl = TRUE)
  text <- gsub("hh(h+)", "h", text, perl = TRUE)
  text <- gsub("gg(g+)", "g", text, perl = TRUE)
  text <- gsub("tt(t+)\\b", "t", text, perl = TRUE)
  text <- gsub("(tt(t+))", "tt", text, perl = TRUE)
  text <- gsub("mm(m+)", "m", text, perl = TRUE)
  text <- gsub("ff(f+)", "f", text, perl = TRUE)
  text <- gsub("cc(c+)", "c", text, perl = TRUE)
  # keep meaningful acronyms before collapsing repeated "k"s
  text <- gsub("\\b(kkk)\\b", "KKK", text, perl = TRUE)
  text <- gsub("\\b(pkk)\\b", "PKK", text, perl = TRUE)
  text <- gsub("kk(k+)", "kk", text, perl = TRUE)
  text <- gsub("fukk", "fuck", text, perl = TRUE)
  text <- gsub("k(k+)\\b", "k", text, perl = TRUE)
  text <- gsub("f+u+c+k+\\b", "fuck", text, perl = TRUE)
  text <- gsub("((a+)|(h+)){3,}", "HAHEHI", text, perl = TRUE)
  text <- gsub("yeah", "YEAH", text, perl = TRUE)
  # second pass of joining spaced-out letters (earlier steps can expose new matches)
  text <- gsub("(?<=\\b\\w)\\s(?=\\w\\b)", "", text, perl = TRUE)
  # custom stopword list (commented-out entries were deliberately kept in the text)
  otherstopwords <- c("put", "far", "bit", "well", "still", "much", "one", "two", "don", "now", "even",
                      #"article", "articles", "edit", "edits", "page", "pages",
                      #"talk", "editor", "ax", "edu", "subject", "lines", "like", "likes", "line",
                      "uh", "oh", "also", "get", "just", "hi", "hello", "ok", "ja", #"editing", "edited",
                      "dont", "wikipedia", "hey", "however", "id", "yeah", "yo",
                      #"use", "need", "take", "give", "say", "user", "day", "want", "tell", "even",
                      #"look", "one", "make", "come", "see", "said", "now",
                      "wiki",
                      #"know", "talk", "read", "time", "sentence",
                      "ain't", "wow", #"image", "jpg", "copyright",
                      "wikiproject", #"background color", "align", "px", "pixel",
                      "org", "com", "en", "ip", "ip address", "http", "www", "html", "htm",
                      "wikimedia", "https", "httpimg", "url", "urls", "utc", "uhm",
                      #"i", "me", "my", "myself", "we", "our", "ours", "ourselves",
                      #"you", "your", "yours", "yourself", "yourselves",
                      "he", "him", "his", "himself",
                      "she", "her", "hers", "herself",
                      "it", "its", "itself",
                      #"they", "them", "their", "theirs", "themselves",
                      #"i'm", "you're", "he's", "i've", "you've", "we've", "we're",
                      #"she's", "it's", "they're", "they've",
                      #"i'd", "you'd", "he'd", "she'd", "we'd", "they'd",
                      #"i'll", "you'll", "he'll", "she'll", "we'll", "they'll",
                      "what", "which", "who", "whom", "this", "that", "these", "those",
                      #"am", "can", "will", "not",
                      "is", "was", "were", "have", "has", "had", "having", "wasn't", "weren't", "hasn't",
                      #"are", "cannot", "isn't", "aren't", "doesn't", "don't", "can't", "couldn't", "mustn't", "didn't",
                      "haven't", "hadn't", "won't", "wouldn't",
                      "do", "does", "did", "doing", "would", "should", "could",
                      "be", "been", "being", "ought", "shan't", "shouldn't", "let's", "that's", "who's", "what's", "here's",
                      "there's", "when's", "where's", "why's", "how's", "a", "an", "the", "and", "but", "if",
                      "or", "because", "as", "until", "while", "of", "at", "by", "for", "with", "about", "against",
                      "between", "into", "through", "during", "before", "after", "above", "below", "to", "from",
                      "up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then", "once",
                      "here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few", "more",
                      "most", "other", "some", "such", "no", "nor", "only", "own", "same", "so", "than",
                      "too", "very")
  text <- removeWords(text, otherstopwords)
  # remove user mentions / nicknames ("@someone")
  text <- gsub("@\\w+", " ", text, perl = TRUE)
  # remove non-graphical characters
  text <- gsub("[^[:graph:]]", " ", text, perl = TRUE)
  # remove punctuation
  text <- gsub("[[:punct:]]", " ", text, perl = TRUE)
  # remove digits
  text <- gsub("[[:digit:]]", " ", text, perl = TRUE)
  # collapse multiple whitespace into one
  text <- gsub("\\s+", " ", text, perl = TRUE)
  # undo common obfuscations of profanity that punctuation removal may have exposed
  text <- gsub("((lol)(o?))+\\b", "LOL", text, perl = TRUE)
  text <- gsub("n ig ger", "nigger", text, perl = TRUE)
  text <- gsub("nig ger", "nigger", text, perl = TRUE)
  text <- gsub("s hit", "shit", text, perl = TRUE)
  text <- gsub("g ay", "gay", text, perl = TRUE)
  text <- gsub("f ag got", "faggot", text, perl = TRUE)
  text <- gsub("c ock", "cock", text, perl = TRUE)
  text <- gsub("cu nt", "cunt", text, perl = TRUE)
  text <- gsub("idi ot", "idiot", text, perl = TRUE)
  text <- gsub("(?<=\\b(fu|su|di|co|li))\\s(?=(ck)\\b)", "", text, perl = TRUE)
  text <- gsub("(?<=\\w(ck))\\s(?=(ing)\\b)", "", text, perl = TRUE)
  # drop words that are a single repeated character ("aaa", "hh", ...)
  text <- gsub("\\b(.)\\1+\\b", " ", text, perl = TRUE)
  # again clean noise / obfuscations exposed by the previous step
  text <- gsub("((lol)(o?))+", "LOL", text, perl = TRUE)
  text <- gsub("(?<=\\b(fu|su|di|co|li))\\s(?=(ck)\\b)", "", text, perl = TRUE)
  text <- gsub("(?<=\\w(ck))\\s(?=(ing)\\b)", "", text, perl = TRUE)
  text <- gsub("(?<=\\w(uc))\\s(?=(ing)\\b)", "", text, perl = TRUE)
  text <- gsub("(?<=\\b(fu|su|di|co|li))\\s(?=(ck)\\w)", "", text, perl = TRUE)
  text <- gsub("cocksu cking", "cock sucking", text, perl = TRUE)
  text <- gsub("du mbfu ck", "dumbfuck", text, perl = TRUE)
  text <- gsub("cu nt", "cunt", text, perl = TRUE)
  text <- gsub("(?<=\\b(fu|su|di|co|li))\\s(?=(k)\\w)", "c", text, perl = TRUE)
  # second stopword pass (joins above may have produced new stopwords)
  text <- removeWords(text, otherstopwords)
  # collapse multiple whitespace into one
  text <- gsub("\\s+", " ", text, perl = TRUE)
  # remove trailing whitespace
  text <- gsub("\\s*$", "", text, perl = TRUE)
  # remove leading whitespace
  text <- gsub("^\\s+", "", text, perl = TRUE)
  text <- gsub("\\b(.)\\1+\\b", " ", text, perl = TRUE)
  # remove single-letter words but keep "i"
  text <- gsub("\\W*\\b([a-h|j-z])\\b\\W*", " ", text, perl = TRUE)
  # final whitespace normalisation
  text <- gsub("\\s+", " ", text, perl = TRUE)
  text <- gsub("\\s*$", "", text, perl = TRUE)
  text <- gsub("^\\s+", "", text, perl = TRUE)
  unname(text)
}
library(doParallel)
cat("Combining data for simpler custom text-transformation")
# combine datasets
# The test split carries no labels, so the six target columns are padded
# with NA before row-binding; `type` records which split each row came from.
test$toxic <- NA
test$severe_toxic <- NA
test$obscene <- NA
test$threat <- NA
test$insult <- NA
test$identity_hate <- NA
test$type <- "test"
train$type <- "train"
dataset <- rbind(train, test)
# Clean every comment with cleaning_texts() (defined above), in parallel,
# timing the whole pass.
# NOTE(review): mcmapply() is fork-based; mc.cores = 3 is not supported on
# Windows -- confirm the target OS.
system.time(comment_text <- mcmapply(1:nrow(dataset), FUN = function(x) {
  cleaning_texts(dataset$comment_text[x])},
  mc.cores = 3, mc.preschedule = TRUE))
dataset$comment_text <- comment_text
# Alternative: reload a previously cleaned dataset instead of recomputing.
#dataset <- fread("./InputData/dataset_cleansing.csv", header = T, encoding = 'UTF-8')
##################################################
#### EDA
##################################################
# Fill colours reused by the plots below.
fillColor = "#FFA07A"
fillColor2 = "#F1C40F"
## Sentence Length Distribution
# NOTE(review): str_count() with no pattern counts characters, so `len` is
# the comment length in characters, not words -- the "Word Length" axis
# label below looks misleading; confirm intent.
dataset$len = str_count(dataset$comment_text)
dataset %>%
  ggplot(aes(x = len, fill = type)) +
  geom_histogram(alpha = 0.5, bins = 50) +
  labs(x= 'Word Length',y = 'Count', title = paste("Distribution of", ' Word Length ')) +
  theme_bw()
# Bar chart of the 20 most frequent non-stopword tokens in `comment_text`.
#
# Args:
#   train: data frame with a `comment_text` column (any split works; only
#          that column is tokenised).
#   title: plot title.
# Returns a ggplot object (word counts on flipped axes).
# Relies on `stop_words` (tidytext) and `fillColor` from the enclosing script.
createBarPlotCommonWords <- function(train, title) {
  top_words <- train %>%
    unnest_tokens(word, comment_text) %>%
    filter(!word %in% stop_words$word) %>%
    count(word, sort = TRUE) %>%
    ungroup() %>%
    mutate(word = factor(word, levels = rev(unique(word)))) %>%
    head(20)

  ggplot(top_words, aes(x = word, y = n)) +
    geom_bar(stat = 'identity', colour = "white", fill = fillColor) +
    geom_text(aes(x = word, y = 1, label = paste0("(", n, ")", sep = "")),
              hjust = 0, vjust = .5, size = 4, colour = 'black',
              fontface = 'bold') +
    labs(x = 'Word', y = 'Word Count', title = title) +
    coord_flip() +
    theme_bw()
}
createBarPlotCommonWords(dataset,'Top 20 most Common Words')
# Per-word counts for every observed combination of the six label flags
# (training split only); `n` is the term frequency within that combination.
trainWords <- filter(dataset, type == 'train') %>%
  unnest_tokens(word, comment_text) %>%
  count(toxic,severe_toxic,obscene,threat,insult,identity_hate,word) %>%
  ungroup()
total_words <- trainWords %>%
  group_by(toxic,severe_toxic,obscene,threat,insult,identity_hate) %>%
  summarize(total = sum(n))
total_words
# Each label combination is treated as one "document" for tf-idf.
# NOTE(review): 41 is hard-coded as the number of rows in `total_words`;
# this breaks if the data yields a different number of label combinations
# -- 1:nrow(total_words) would be safer.
Category =1:41
total_words$Category = Category
# left_join() matches on the six shared label columns, attaching `total`
# and `Category` to every word row.
trainWords <- left_join(trainWords, total_words)
#Now we are ready to use the bind_tf_idf which computes the tf-idf for each term.
trainWords <- trainWords %>%
  bind_tf_idf(word, Category, n)
plot_trainWords <- trainWords %>%
  arrange(desc(tf_idf)) %>%
  mutate(word = factor(word, levels = rev(unique(word))))
# Top terms by tf-idf across all categories.
# NOTE(review): top_n(20) with no `wt` ranks by the last column (tf_idf
# here) and keeps ties, so more than 20 bars are possible.
plot_trainWords %>%
  top_n(20) %>%
  ggplot(aes(word, tf_idf)) +
  geom_col(fill = fillColor) +
  labs(x = NULL, y = "tf-idf") +
  coord_flip() +
  theme_bw()
## Various Categories of TF-IDF
# The six plots below are identical apart from the label filtered on.
# 1 Toxic TF-IDF
plot_trainWords %>%
  filter(toxic == 1 ) %>%
  top_n(20) %>%
  ggplot(aes(word, tf_idf)) +
  geom_col(fill = fillColor2) +
  labs(x = NULL, y = "tf-idf") +
  coord_flip() +
  theme_bw()
# 2 Severe Toxic TF-IDF
plot_trainWords %>%
  filter(severe_toxic == 1 ) %>%
  top_n(20) %>%
  ggplot(aes(word, tf_idf)) +
  geom_col(fill = fillColor2) +
  labs(x = NULL, y = "tf-idf") +
  coord_flip() +
  theme_bw()
# 3 Obscene TF-IDF
plot_trainWords %>%
  filter(obscene == 1 ) %>%
  top_n(20) %>%
  ggplot(aes(word, tf_idf)) +
  geom_col(fill = fillColor2) +
  labs(x = NULL, y = "tf-idf") +
  coord_flip() +
  theme_bw()
# 4 Threat TF-IDF
plot_trainWords %>%
  filter(threat == 1 ) %>%
  top_n(20) %>%
  ggplot(aes(word, tf_idf)) +
  geom_col(fill = fillColor2) +
  labs(x = NULL, y = "tf-idf") +
  coord_flip() +
  theme_bw()
# 5 Insult TF-IDF
plot_trainWords %>%
  filter(insult == 1 ) %>%
  top_n(20) %>%
  ggplot(aes(word, tf_idf)) +
  geom_col(fill = fillColor2) +
  labs(x = NULL, y = "tf-idf") +
  coord_flip() +
  theme_bw()
# 6 identity_hate TF-IDF
plot_trainWords %>%
  filter(identity_hate == 1 ) %>%
  top_n(20) %>%
  ggplot(aes(word, tf_idf)) +
  geom_col(fill = fillColor2) +
  labs(x = NULL, y = "tf-idf") +
  coord_flip() +
  theme_bw()
## Word Cloud for the Most Important Words
# Word size is driven by tf-idf, not raw frequency.
plot_trainWords %>%
  with(wordcloud(word, tf_idf, max.words = 50,colors=brewer.pal(8, "Dark2")))
|
2e49a62bed8808430ddd65a6968b5e23f7886e01 | 44ee52121b99270c3d3986ea2ffc3399c1d19a92 | /gganimate_intro.R | cf4d4775c472ac2779dcef88c5e57a88b0cea9cf | [] | no_license | sparce/gganimate_seminar | 98f6bf94d67ab77ffb2f0740337ed04c9441c09c | e55a5938d03daffa1e8761b6ef48a931d12d781b | refs/heads/master | 2022-06-14T21:54:39.454860 | 2020-04-24T05:15:51 | 2020-04-24T05:15:51 | 261,656,062 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,899 | r | gganimate_intro.R | library(tidyverse)
library(gganimate)
library(ggforce)
library(cowplot)
library(hrbrthemes)
# Intro to animation -----
## Grid of x/y coords 30 wide, 10 high repeated for 3 animation steps
## Have a (r)adius and (a)ngle value for plotting
## And a separate ID for each of the 300 points to animate correctly
# 30 x 10 x 3 = 900 rows; `id` ties each point's three animation steps
# together so gganimate tweens the same square, not a reshuffled one.
plotgrid <- tibble(x = 1:30) %>%
  tidyr::expand(x,y = 1:10,step = 1:3, r = 0.3, a = 0) %>%
  add_column(id = rep(1:300, each = 3))
## Make three points grow and rotate and change colour
## Points are at (27, 1), (4, 3), and (16, 8)
x_coords <- c(27, 4, 16)
y_coords <- c( 1, 3, 8)
# magrittr::inset() is `[<-`: it overwrites the named column for the rows
# selected by the logical mask, keeping the pipe unbroken.
# NOTE(review): the x %in% / y %in% mask highlights the full cross of the
# listed coordinates (9 cells), not just the three named points -- confirm
# that is intended.
grid_colours <- plotgrid %>%
  # change radius for steps 2 & 3
  magrittr::inset(
    .$step > 1 & (.$y %in% y_coords & .$x %in% x_coords ), "r", 0.6
  ) %>%
  # change angle for step 3
  magrittr::inset(
    .$step == 3 & (.$y %in% y_coords & .$x %in% x_coords ), "a", 2
  ) %>%
  ggplot(aes(x0 = x, y0 = y, group = id)) +
  # Regular 4-sided polygons (squares); enlarged squares are recoloured
  # via the r > 0.3 fill factor.
  ggforce::geom_regon(aes(sides = 4, r = r, angle = a, fill = as.factor(r>0.3))) +
  coord_fixed() +
  cowplot::theme_nothing() +
  transition_states(step) +
  scale_fill_manual(values = c("grey20", "red")) +
  ease_aes("sine-in-out")
anim_save(
  filename = "animations/01_grid_colours.gif",
  animation = grid_colours,
  nframes = 300,
  duration = 10,
  detail = 3,
  width = 1600,
  height = 500,
  type = "cairo-png",
  res = 300
)
## Move points around in blocks instead
grid_movement <- plotgrid %>%
  # Move top half up after step 1
  mutate(y = ifelse(y > 5 & step > 1, y + 3L, y)) %>%
  # Shift two blocks left/right at step 3
  mutate(x = case_when(
    y > 5 & step == 3 & x < 15 ~ (x - 3L),
    y <= 5 & step == 3 & x >= 15 ~ (x + 3L),
    TRUE ~ x
  )) %>%
  ggplot(aes(x0=x,y0=y, group = id)) +
  ggforce::geom_regon(aes(sides = 4, r = r, angle = a)) +
  coord_fixed() +
  cowplot::theme_nothing() +
  transition_states(step) +
  ease_aes("cubic-in-out")
anim_save(
  filename = "animations/02_grid_movement.gif",
  animation = grid_movement,
  nframes = 300,
  duration = 10,
  detail = 3,
  width = 1600,
  height = 700,
  type = "cairo-png",
  res = 300
)
# Make a line of points rise in a wave pattern
# 20 points tracked over 24 time steps (step runs -1..22 so the crest
# enters and leaves the frame).  y is 1 at the crest (x == step) and 0.5
# for the crest's immediate neighbours, 0 elsewhere.
line_wave <- tibble(x = rep(1:20, times = 24), step = rep(-1:22, each = 20)) %>%
  mutate(y = as.numeric(x == step)) %>%
  group_by(step) %>%
  mutate(y = ifelse(lead(y, default = 0) == 1 | lag(y, default = 0) == 1 , 0.5, y))
# a few manual fixups at the margins of the animation
# (the first and last crest each have only one in-frame neighbour)
line_wave[21, 3] <- 0.5
# BUG FIX: this previously read nrow(d), but no object `d` exists anywhere
# in this script (a leftover from an earlier variable name), so the line
# errored unless a stale `d` happened to be in the session.  It must index
# into line_wave itself.
line_wave[nrow(line_wave) - 20, 3] <- 0.5
line_wave_anim <- ggplot(line_wave, aes(x, y, group = x)) +
  geom_point() +
  transition_time(step) +
  cowplot::theme_nothing()
anim_save(
  filename = "animations/03_line_wave.gif",
  animation = line_wave_anim,
  nframes = 300,
  duration = 10,
  detail = 3,
  width = 1600,
  height = 500,
  type = "cairo-png",
  res = 300
)
# Trying it out for real ----
## Get some weather data
## and make the station names be ordered by state
weather <- read_csv("data/weather_data.csv") %>%
  mutate(name = fct_reorder(name, state, .fun = unique))
## Create the base barplot for animation
# group = 1L makes every month belong to one animation group, which is what
# transition_states() needs later; the static-plot variant below regroups
# by station instead.
bars <- ggplot(weather, aes(x = month, y = rainfall, fill = rainfall, group = 1L)) +
  scale_fill_viridis_c() +
  scale_x_continuous(breaks = 1:12, labels = month.abb) +
  scale_y_continuous(breaks = c(2,4,6,8,10)) +
  theme_ipsum_tw() +
  theme(
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    legend.position = "none"
  ) +
  labs(
    title = "Average monthly rainfall per station",
    x = NULL,
    y = "Average rainfall (mm)"
  )
## Need a little cheat to get the static bar plot looking nice but
## the animation working correctly
bars_for_static <- bars +
  geom_col(aes(group = name),position = position_dodge())
ggsave(filename = "figures/01-bars.png", bars_for_static, width = 5, height = 5, dpi = 300)
## Now can add the bars to the animatable version
bars <- bars + geom_col()
## Make a lineplot that we ended up cutting from the slides :(
# NOTE(review): `lines` shadows base graphics::lines() in the global
# environment -- harmless here, but a rename would be safer.
lines <- ggplot(weather, aes(x = month, y = rainfall, colour = state, group = name)) +
  geom_line() +
  scale_color_ipsum() +
  scale_x_continuous(breaks = 1:12, labels = month.abb) +
  scale_y_continuous(breaks = c(2,4,6,8,10)) +
  theme_ipsum_tw() +
  theme(
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    legend.position = "bottom"
  ) +
  labs(
    title = "Average monthly rainfall per station",
    x = NULL,
    y = "Average rainfall (mm)",
    colour = NULL
  )
ggsave(filename = "figures/02-lines.png", lines, width = 5, height = 5.5, dpi = 300)
## Make the base scatter plot for animation
points <- ggplot(weather, aes(x = temp_min, y = temp_max, size = solar, colour = state)) +
  geom_point() +
  scale_colour_ipsum() +
  coord_fixed() +
  theme_ipsum_tw() +
  labs(
    title = "Temperature relationship",
    x = "Minimum temperature",
    y = "Maximum temperature",
    colour = NULL,
    size = "Solar exposure"
  )
ggsave(filename = "figures/03-points.png", points, width = 6, height = 5, dpi = 300)
# What to animate ----
## Individual steps from 1,1 to 10,10
# Save one static frame showing a single point at (x, y) on a fixed 1-10
# grid; the output filename is keyed on x.  Called for its ggsave() side
# effect (used below via walk2 to fake a frame-by-frame transition).
plot_xy <- function(x, y) {
  axis_ticks <- 1:10
  frame_plot <- ggplot(tibble(x = x, y = y), aes(x, y)) +
    geom_point(size = 3) +
    cowplot::theme_half_open() +
    scale_x_continuous(breaks = axis_ticks, limits = c(1, 10)) +
    scale_y_continuous(breaks = axis_ticks, limits = c(1, 10))
  ggsave(
    filename = glue::glue("figures/04-transitions_{x}.png"),
    plot = frame_plot,
    width = 3,
    height = 3
  )
}
## save frames for transition
# One PNG per step along the diagonal (1,1) .. (10,10).
walk2(1:10, 1:10, plot_xy)
# Transitions ----
## Facet comparison
ggsave("figures/05-bars_facet.png", bars + facet_wrap(~name), width = 14, height = 7)
## Animate between states for the barplot
# "{closest_state}" is a gganimate glue label resolved per frame.
anim_save(
  filename = "animations/04_bars_transition.gif",
  animation = bars + transition_states(name) + labs(subtitle = "{closest_state}"),
  fps = 20,
  duration = 15,
  detail = 3,
  width = 1200,
  height = 1200,
  type = "cairo-png",
  res = 200
)
## Animate using transition_time for the point plot
## transition_time not strictly necessary here because of the regular interval
anim_save(
  filename = "animations/05-points_transition.gif",
  animation = points + transition_time(month) + labs(subtitle = "{month.name[{frame_time}]}"),
  fps = 20,
  duration = 10,
  detail = 3,
  width = 1200,
  height = 1000,
  res = 200,
  type = "cairo-png"
)
# Context -----
## Points facet plot for discussing context in animation
ggsave("figures/06-points_facet.png", points + facet_wrap(~month, nrow = 2), width = 10, height = 5)
## Add a wake to the points plot
# shadow_wake trails 20% of the animation length behind each point;
# wrap = F stops the wake bleeding across the loop boundary.
points_shadow <- points +
  transition_time(month) + shadow_wake(wake_length = 0.2, wrap = F) +
  labs(subtitle = "{month.name[{frame_time}]}")
anim_save(
  filename = "animations/06-points_shadow.gif",
  animation = points_shadow,
  fps = 20,
  duration = 10,
  detail = 3,
  width = 1200,
  height = 1000,
  res = 200,
  type = "cairo-png"
)
## Now use shadow_mark to show the max rainfall
# shadow_mark keeps past AND future states on screen in grey as context.
bars_shadow <- bars +
  transition_states(name) +
  shadow_mark(past = TRUE, future = TRUE, fill = "grey70") +
  labs(subtitle = "{closest_state}")
anim_save(
  filename = "animations/07_bars_shadow.gif",
  animation = bars_shadow,
  fps = 20,
  duration = 15,
  detail = 3,
  width = 1200,
  height = 1200,
  type = "cairo-png",
  res = 200
)
# Easings ----
## Add an easing to the points plot
points_easing <- points +
  transition_time(month) +
  ease_aes("cubic-in-out") +
  labs(subtitle = "{month.name[{frame_time}]}")
anim_save(
  filename = "animations/08-points_easing.gif",
  animation = points_easing,
  fps = 20,
  duration = 10,
  detail = 3,
  width = 1200,
  height = 1000,
  res = 200,
  type = "cairo-png"
)
## Now for the bars
bars_easing <- bars +
  transition_states(name) +
  ease_aes("back-in") +
  labs(subtitle = "{closest_state}")
anim_save(
  filename = "animations/09_bars_easing.gif",
  animation = bars_easing,
  fps = 30,
  duration = 15,
  detail = 3,
  width = 1200,
  height = 1200,
  type = "cairo-png",
  res = 200
) |
fa6176cfa8fa86e5241651db15c15a3039afc76d | 47d25769b13c15961cfe9e2c2a6471c3300771df | /bt.etf.weekly.R | fdc2a1a74d2381071010e3f5cdc7473a6d863c9e | [] | no_license | catocugum/PQFG | ce6bb36c8a3d040ee5df1359cfe4be03acdbdffc | 1aeb56e980c81b1c180740323846a07b71d29b16 | refs/heads/master | 2021-05-02T05:23:56.699850 | 2018-02-09T15:24:20 | 2018-02-09T15:24:20 | 120,920,336 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,976 | r | bt.etf.weekly.R | #######################################################################################################
##
## Persephone Risk & Performance Management
##
## backtest control script
##
## (c) 2017 Persephone Quantitative Finance Group GmbH
##
## Author: Gregor Povh
##
#######################################################################################################
library(reshape2)
library(futile.logger)
library(openxlsx)
library(timeSeries)
library(yaml)
library(rugarch)
library(VineCopula)
# Project-local definitions: the S4 "Backtest" class and the helper
# functions (loadGlobalConfiguration, loadAriva, loadCioIndices,
# createIndexInstruments, runBacktest) used below all come from these files.
source("R/o_setGeneric.R")
source("R/o_MarketData.R")
source("R/o_InstrumentData.R")
source("R/o_Universe.R")
source("R/o_PortfolioStatus.R")
source("R/o_Portfolio.R")
source("R/o_Backtest.R")
source("R/o_Constraints.R")
source("R/o_PDEopt.R")
source("R/g_Functions.R")
#######################################################################################################
# Load configurations
#######################################################################################################
# Global configuration read from the weekly-ETF YAML settings file.
g_cfg <- loadGlobalConfiguration("config.etf.weekly.yml")
#######################################################################################################
# market data
#######################################################################################################
# Assemble the global market-data list from the individual loaders.
# NOTE(review): each loader is assumed to return something append()-able
# to a list -- defined in the sourced files, not visible here.
g_l_marketData <- list()
g_l_marketData <- append(g_l_marketData, loadAriva())
g_l_marketData <- append(g_l_marketData, loadCioIndices())
g_l_marketData <- append(g_l_marketData, createIndexInstruments())
#######################################################################################################
# backtest
#######################################################################################################
# The run name is date-stamped so repeated runs do not overwrite results.
name <- "bt_etf"
name <- paste0(Sys.Date(), ".", name)
bt <- new("Backtest", name = name)
bt <- runBacktest(bt)
save(bt, file = paste0("results/", name, ".RData"))
|
8f4436ee449bffd5864265e47c2e12f0fca211ed | 74073f1b01f18d7c2ffa76d4f46e50b816a0974f | /R Files/Problem 101-120/EulerProject113.R | db64ea73cc386a39b494db335a35d03b0c13f0ad | [] | no_license | maximizedchang/projecteuler | 6193489c4bb1d8354274a3247eef5c63b38c66d3 | 44745e54d85a6bcb09e3ec184daf60fd1481fe30 | refs/heads/master | 2016-09-06T09:18:03.421528 | 2014-08-18T06:30:51 | 2014-08-18T06:30:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 646 | r | EulerProject113.R | # Solution: 51161058134250
# Time: < 1 second
# Solution: 51161058134250
#
# Project Euler 113: count the non-bouncy numbers below a googol (10^100).
# A number is non-bouncy when its digits are entirely non-decreasing
# ("increasing") or entirely non-increasing ("decreasing").  Both families
# are counted with a digit DP over (length, leading digit); repdigits such
# as 777 belong to both families and are subtracted once at the end.
#
# Rewritten from the original, which used a variable literally named `sum`,
# shadowing base sum() and forcing manual accumulation loops; the inner
# loops are now vectorised with sum().

digits <- 100

# increasing[x, y]: count of x-digit numbers with non-decreasing digits
# whose leading (most significant) digit is y; the next digit may be y..9.
increasing <- matrix(0, digits, 9)
increasing[1, ] <- 1
for (x in 2:digits) {
  for (y in 1:9) {
    increasing[x, y] <- sum(increasing[x - 1, y:9])
  }
}

# decreasing[x, y]: count of x-digit numbers with non-increasing digits
# whose leading digit is y.  The "+ 1" is the all-zero suffix: once a 0
# appears, every later digit must be 0.
decreasing <- matrix(0, digits, 9)
decreasing[1, ] <- 1
for (x in 2:digits) {
  for (y in 1:9) {
    decreasing[x, y] <- 1 + sum(decreasing[x - 1, 1:y])
  }
}

# Every length contributes 9 repdigits that are counted in both tables;
# subtract them once.  (All counts stay below 2^53, so doubles are exact.)
total <- sum(increasing) + sum(decreasing) - 9 * digits
print(total)
64925ee354ad89d19868a487e7a1c2858cf191e0 | a5ebb7e54553fee55e0b7ac853b96638cdc7511e | /plot3.R | 6c341d4f31bca81ac58b9a4059068af37450c80a | [] | no_license | juliangilliatt14/ExData_Plotting1 | 1e2fc4f76b6b41156b77587d8742c7a870697843 | 41fa92caf596e8602cdcbc183c57d0eaad0b37fb | refs/heads/master | 2021-01-22T21:23:31.452299 | 2014-12-07T18:51:59 | 2014-12-07T18:51:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,023 | r | plot3.R | ## set the working directory
## Plot the three energy sub-metering series for 2007-02-01/02 from the
## UCI household power consumption data and save the figure as plot3.png.
## (Course assignment layout: the raw file must sit in the working dir.)
setwd("/Users/jgilliatt14/downloads")
## load in data; keep only the two target days, then free the full table
power_raw <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
two_days <- power_raw[which(power_raw$Date == "1/2/2007" | power_raw$Date == "2/2/2007"), ]
two_days$Date <- as.Date(two_days$Date, format="%d/%m/%Y")
rm(power_raw)
## combine Date and Time into a single POSIXct timestamp column
two_days$dateTime <- as.POSIXct(paste(two_days$Date, two_days$Time))
## draw the three sub-metering traces on one set of axes
plot(two_days$dateTime, two_days$Sub_metering_1, type="l",
     ylab="Energy sub metering", xlab="")
lines(two_days$dateTime, two_days$Sub_metering_2, col='Red')
lines(two_days$dateTime, two_days$Sub_metering_3, col='Blue')
## add legend
legend("topright", col=c("black", "red", "blue"), lty = 1, lwd = 2, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## copy the screen device to a 480x480 PNG and close it
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
d7515af74c49c84c183073196044f69279118246 | e442e44116cf48a0a09dfe51df1977452c375b2d | /Biogeography extant/WWF_Ecoregions.R | 6c39ceab47a77471a3a275ec6ecfd738f200b503 | [] | no_license | Dayenari/GABI-biogeography | 38eae4586ba36bd9b0f5042bc2ad4db6f837983b | d28bb9495f7c32577034ecd6069729a3b435d0e5 | refs/heads/main | 2023-08-25T20:25:11.389152 | 2021-11-05T18:09:37 | 2021-11-05T18:09:37 | 410,127,943 | 0 | 0 | null | 2021-09-24T23:33:38 | 2021-09-24T23:33:38 | null | UTF-8 | R | false | false | 13,446 | r | WWF_Ecoregions.R | # Distribution of mammals in the Americas among different Biomes
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# auxiliary function to swap objects from a dataframe
# Vectorised lookup/recode: translate each element of `x` from its position
# in `from` to the corresponding element of `to`.
#
# Args:
#   x:    vector of values to translate.
#   from: vector of keys, matched against `x` with match().
#   to:   vector of replacement values, parallel to `from`.
# Returns to[match(x, from)]; elements of `x` absent from `from` become NA.
# (Cleaned up: the original stored the result in a temporary and used an
# explicit return(), both unnecessary.)
swap <- function(x, from, to) {
  to[match(x, from)]
}
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Load and clean WWF data
# To run this script you will need to download the files of WildFinder Database
# https://www.worldwildlife.org/publications/wildfinder-database
# set the working directory to the folder containing this script:
# (similar to RStudio menu "Session - Set Working Directory - To Source File Location"
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
dir()
# Load data from the WildFinder Database
# Two read paths for the same Access .mdb file: RODBC on Windows, Hmisc +
# mdbtools elsewhere.  Both paths populate the same eight data frames.
# NOTE(review): install.packages() at run time is a heavy side effect;
# a requireNamespace() check would be gentler.  Also note that `order`,
# `family` and `biome` shadow base/stats names in the global environment.
if(Sys.info()[[1]] == "Windows") {
  # Option 1 (Windows)
  install.packages("RODBC")
  library(RODBC)
  wwf <- odbcConnectAccess2007(path.expand("WildfinderUpdate.mdb"))
  spp <- sqlFetch(wwf, "species")
  genus <- sqlFetch(wwf, "genus")
  family <- sqlFetch(wwf, "family")
  order <- sqlFetch(wwf, "order_")
  classx <- sqlFetch(wwf, "class")
  ecorspp <- sqlFetch(wwf, "ecoregion_species")
  ecor <- sqlFetch(wwf, "ecoregions")
  biome <- sqlFetch(wwf, "biomes")
} else {
  # Option 2 (Mac / Linux)
  # You must install the mdbtools package
  # https://github.com/mdbtools/mdbtools
  # For a Mac in the terminal
  # install Home brew https://brew.sh/
  # then run: brew install mdbtools
  install.packages("Hmisc")
  library(Hmisc)
  # Read .mdb file
  wwf <-mdb.get('WildfinderUpdate.mdb')
  contents(wwf)
  # mdb.get returns column names with dots; normalise them to underscores
  # so both OS branches yield identical column names.
  spp <- wwf$species
  names(spp) <- gsub("\\.", "_", names(spp))
  genus <-wwf$genus
  names(genus) <- gsub("\\.", "_", names(genus))
  family <- wwf$family
  names(family) <- gsub("\\.", "_", names(family))
  order <- wwf$order_
  names(order) <- gsub("\\.", "_", names(order))
  classx <- wwf$class
  names(classx) <- gsub("\\.", "_", names(classx))
  ecorspp <- wwf$ecoregion_species
  names(ecorspp) <- gsub("\\.", "_", names(ecorspp))
  ecor <- wwf$ecoregions
  names(ecor) <- gsub("\\.", "_", names(ecor))
  biome <- wwf$biomes
  names(biome) <- gsub("\\.", "_", names(biome))
}
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Clean taxonomical dataframe
# Start from the ecoregion-species occurrence table and attach the full
# taxonomy (species -> genus -> family -> order -> class) via swap() lookups.
wwf_by_sp <- ecorspp[ ,c("ECOREGION_CODE","SPECIES_ID")]
# set spp name and code from the next level
wwf_by_sp$SPECIES <- swap(wwf_by_sp$SPECIES_ID, spp$SPECIES_ID, spp$SPECIES)
wwf_by_sp$GENUS_ID <- swap(wwf_by_sp$SPECIES_ID, spp$SPECIES_ID, spp$GENUS_ID)
wwf_by_sp$GENUS <- swap(wwf_by_sp$GENUS_ID, genus$GENUS_ID, genus$GENUS)
wwf_by_sp$FAMILY_ID <- swap(wwf_by_sp$GENUS, genus$GENUS, genus$FAMILY_ID)
wwf_by_sp$FAMILY <- swap(wwf_by_sp$FAMILY_ID, family$FAMILY_ID, family$FAMILY)
wwf_by_sp$ORDER_ID <- swap(wwf_by_sp$FAMILY_ID, family$FAMILY_ID, family$ORDER_ID)
wwf_by_sp$ORDER <- swap(wwf_by_sp$ORDER_ID, order$ORDER_ID, order$ORDER_DESC)
wwf_by_sp$CLASS_ID <- swap(wwf_by_sp$ORDER_ID, order$ORDER_ID, order$CLASS_ID)
wwf_by_sp$CLASS <- swap(wwf_by_sp$CLASS_ID, classx$CLASS_ID, classx$CLASS)
# FILTER ONLY MAMMALS
wwf_mams <- wwf_by_sp[which(wwf_by_sp$CLASS == "Mammalia"),]
# SET ECOREGIONS AND FILTER AMERICAN ECOREGIONS
wwf_mams$ECOREGION_NAME <- as.character(swap(wwf_mams$ECOREGION_CODE, ecor$ECOREGION_CODE, ecor$ECOREGION_NAME))
# load table with equivalences of wwf biomes and the biomes defined in this work
install.packages("readxl")
library (readxl)
wwf_biomes <- read_excel("wwf_New_Biomes.xlsx", col_names=TRUE)
# match ecoregion names
wwf_econames <- match(unique(wwf_biomes$ECO_NAME),unique(ecor$ECOREGION_NAME))
# `no_match` lists ecoregion names with no direct match; it is only
# inspected here, not used further downstream.
no_match <- unique(wwf_biomes$ECO_NAME)[which(is.na(wwf_econames))]
# some names are not recognized, loading equivalencies
equi_names <- read.csv("Ecoregions_matches.csv")
# match names
# Rewrite each mismatched ecoregion name using the manual equivalence table
# (column 2 = WildFinder spelling, column 1 = wwf_New_Biomes spelling).
change <- which(is.na(match(wwf_mams$ECOREGION_NAME,equi_names[,2]))==FALSE)
for(i in change){
  wwf_mams$ECOREGION_NAME[i] <- swap(wwf_mams$ECOREGION_NAME[i],equi_names[,2],equi_names[,1])
}
# Set biomes
wwf_mams$NEW_BIOME <- swap(wwf_mams$ECOREGION_NAME, wwf_biomes$ECO_NAME, wwf_biomes$New_Biome)
# Yungas and Mata Atlantica separated from AH (Andean Highlands): rows with
# a non-NA value in the unnamed sixth spreadsheet column become biome "YM".
# NOTE(review): `...6` is readxl's default name for an unnamed 6th column
# -- confirm the spreadsheet layout.
wwf_biomes$New_Biome2 <- ifelse(is.na(wwf_biomes$...6) == FALSE, "YM", wwf_biomes$New_Biome)
wwf_mams$NEW_BIOME_2 <- swap(wwf_mams$ECOREGION_NAME, wwf_biomes$ECO_NAME, wwf_biomes$New_Biome2)
# take out NA's and Non american biomes
amer_mams <- wwf_mams[wwf_mams$NEW_BIOME_2 %in% unique(wwf_biomes$New_Biome2),]
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# SET ORIGINS FOR MAMMALS
# Build a per-species table (tax_info) and assign each species an origin
# class, trying progressively coarser sources: species match -> genus
# match -> family match -> manual list.
# taxonomical info by spp
amer_mams$SCIENTIFIC_NAME <- paste(amer_mams$GENUS, amer_mams$SPECIES, sep = "_")
tax_info <- aggregate(amer_mams$GENUS ~ amer_mams$SCIENTIFIC_NAME, FUN = unique)
names(tax_info)<-c("SCIENTIFIC_NAME","GENUS")
tax_info$FAMILY <- aggregate(amer_mams$FAMILY ~ amer_mams$SCIENTIFIC_NAME, FUN = unique)[ ,2]
tax_info$ORDER <- aggregate(amer_mams$ORDER ~ amer_mams$SCIENTIFIC_NAME, FUN = unique)[ ,2]
# from Nathan's dataframe (loads mamPhy360_NW_wNewBiomes into the session)
#load(file="C:/Users/ASUS/Dropbox/MS_2021_GABI/GABI_mammalClades/mamPhy360_wwf360Grid_land_NewWorld_NewBiomeAreas_REFINED.Rda")
load("mamPhy360_wwf360Grid_land_NewWorld_NewBiomeAreas_REFINED.Rda")
# from spp
ori_class <- aggregate(mamPhy360_NW_wNewBiomes$Origin_Class ~ mamPhy360_NW_wNewBiomes$scientificname, FUN = unique)
tax_info$Origin_Class <-swap(tax_info$SCIENTIFIC_NAME, ori_class[,1], ori_class[ ,2])
# add a new level for factor Origin Class (both)
levels(tax_info$Origin_Class) <- c(levels(tax_info$Origin_Class),"both")
# from genus
ori_class_gen <- aggregate(mamPhy360_NW_wNewBiomes$Origin_Class ~ mamPhy360_NW_wNewBiomes$gen, FUN = unique)
# check NAs: fill species still unresolved using the genus-level lookup
na_sp = which(is.na(tax_info$Origin_Class))
tax_info$Origin_Class[na_sp] <- swap(tax_info$GENUS[na_sp], ori_class_gen[,1], ori_class_gen[ ,2])
# from family
ori_class_fam <- aggregate(mamPhy360_NW_wNewBiomes$Origin_Class ~ mamPhy360_NW_wNewBiomes$family, FUN = unique)
# Families whose members have a single origin keep it; mixed families are
# labelled "both".
ori_class_fam[ ,2] <- ifelse(lapply(ori_class_fam[ ,2], length) == 1,
                             as.character(unlist(lapply(ori_class_fam[ ,2], function(xi) xi[[1]]))),"both")
# check NAs: fill remaining gaps from the family-level lookup
na_gen = which(is.na(tax_info$Origin_Class))
tax_info$Origin_Class[na_gen] <- as.factor(swap(toupper(tax_info$FAMILY[na_gen]), ori_class_fam[,1], ori_class_fam[ ,2]))
# from Juan: last resort, a manually curated species-to-origin list
juans <- read.csv("no_matched_spp_origin.csv")
na_fam = which(is.na(tax_info$Origin_Class))
tax_info$Origin_Class[na_fam] <- as.factor(swap(tax_info$SCIENTIFIC_NAME[na_fam],juans$SCIENTIFIC_NAME,juans$Origin))
# . . . . . . . . . . . . . . . . . . .
# Set Equivalent names
# from VertLife
vertlife <- read.csv("taxonomy_mamPhy_5911species.csv")
tax_info$Equivalent <- swap(tax_info$SCIENTIFIC_NAME, vertlife$MSW3_sciName_matched, vertlife$Species_Name)
tax_info$Equivalent_source[is.na(tax_info$Equivalent) == FALSE] <- "VertLife_MSW3"
# Species with no equivalent in Vert or Nathan's Database
# na_vl_equiv <- which(is.na(tax_info$Equivalent))
# write.csv(tax_info[na_vl_equiv,], "taxa_without_equivalent.csv")
# We checked manually the species from the MSW3 taxonomy (used in the WildFinder Database) that had no match in
# VertLife taxonomy. We matched the synonyms for each species following the taxonomic information from VertLife
# and the ASM Mammal Diversity Database
# https://www.mammaldiversity.org/index.html
# . . . . . . . . . . . . . . . . . . .
# Set Origin on the occurrences df
amer_mams$Origin_Class <- swap(amer_mams$SCIENTIFIC_NAME, tax_info$SCIENTIFIC_NAME, tax_info$Origin_Class)
which(is.na(amer_mams$Origin_Class)) # zero
# Revision of occurrences in Paramos Ecoregions
# Drop occurrences flagged RECTIFIED == "yes" by matching on the
# genus:species:ecoregion key.
rectified <- read.csv("Mountain Ecoregion Species.csv")
not <- rectified[which(rectified$RECTIFIED == "yes" ), c("GENUS","SPECIES","ECOREGION.CODE") ]
not_c <- paste(not$GENUS, not$SPECIES, not$ECOREGION.CODE, sep= ":")
mams_c <- paste(amer_mams$GENUS,amer_mams$SPECIES,amer_mams$ECOREGION_CODE, sep = ":")
which(mams_c %in% not_c == TRUE)# 21 (all records)
# Take out rectified occurrences in Paramos
biomams <- amer_mams[mams_c %in% not_c == FALSE, ]
# take out undetermined origin ("both") # Nyctinomops
biomams <- biomams[biomams$Origin_Class != "both", ]
# take out repeated records
biomams <- unique(biomams)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# AGGREGATE DATA BY BIOMES = all mammals =
# For each biome grouping the same summary is built: species richness per
# biome plus counts and percentages of species by origin class.
# FIX: the aggregate() calls below previously accessed the column as
# `$Origin`, which only worked through R's partial `$` matching against
# `Origin_Class`; the full name is now spelled out (same column, identical
# behaviour, but robust to any future column starting with "Origin").
# NOTE(review): the `[,2][,1]` / `[,2][,2]` extractions assume the
# Origin_Class factor's first level is the North-American origin (NAO) and
# the second the South-American one (SAO) -- confirm the level order.
# AH & YM together
# take out repeated species by biome
biomams1 <- unique(biomams[ , c("SCIENTIFIC_NAME", "ORDER","NEW_BIOME", "Origin_Class")])
bybiome1 <- aggregate(biomams1$SCIENTIFIC_NAME ~ biomams1$NEW_BIOME, FUN = length)
names(bybiome1)<- c("Biome","RICHNESS")
bybiome1$NAO <- aggregate(biomams1$Origin_Class ~ biomams1$NEW_BIOME, FUN = table)[,2][,1]
bybiome1$SAO <- aggregate(biomams1$Origin_Class ~ biomams1$NEW_BIOME, FUN = table)[,2][,2]
bybiome1$propNAO <- bybiome1$NAO * 100 / bybiome1$RICHNESS
bybiome1$propSAO <- bybiome1$SAO * 100 / bybiome1$RICHNESS
# AH & YM separated
# take out repeated species by biome
biomams2 <- unique(biomams[ , c("SCIENTIFIC_NAME", "ORDER","NEW_BIOME_2", "Origin_Class")])
bybiome2 <- aggregate(biomams2$SCIENTIFIC_NAME ~ biomams2$NEW_BIOME_2, FUN = length)
names(bybiome2)<- c("Biome","RICHNESS")
bybiome2$NAO <- aggregate(biomams2$Origin_Class ~ biomams2$NEW_BIOME_2, FUN = table)[,2][,1]
bybiome2$SAO <- aggregate(biomams2$Origin_Class ~ biomams2$NEW_BIOME_2, FUN = table)[,2][,2]
bybiome2$propNAO <- bybiome2$NAO * 100 / bybiome2$RICHNESS
bybiome2$propSAO <- bybiome2$SAO * 100 / bybiome2$RICHNESS
# NOTE(review): assumes row 1 of the combined table is the "AH" biome (its
# first factor level); it is relabelled and appended to the split table.
bybiome1[1,1] <- "AH+YM"
bybiome <- rbind(bybiome2,bybiome1[1,])
# - - - - - - - - - - - - - - - - - - -
# ONLY RODENTS (same summary restricted to order Rodentia)
rbioms <- biomams[biomams$ORDER == "Rodentia", ]
# AH & YM together
rbioms1 <- unique(rbioms[ , c("SCIENTIFIC_NAME", "ORDER","NEW_BIOME", "Origin_Class")])
rbybiome1 <- aggregate(rbioms1$SCIENTIFIC_NAME ~ rbioms1$NEW_BIOME, FUN = length)
names(rbybiome1)<- c("Biome","RICHNESS")
rbybiome1$NAO <- aggregate(rbioms1$Origin_Class ~ rbioms1$NEW_BIOME, FUN = table)[,2][,1]
rbybiome1$SAO <- aggregate(rbioms1$Origin_Class ~ rbioms1$NEW_BIOME, FUN = table)[,2][,2]
rbybiome1$propNAO <- rbybiome1$NAO * 100 / rbybiome1$RICHNESS
rbybiome1$propSAO <- rbybiome1$SAO * 100 / rbybiome1$RICHNESS
# AH & YM separated
rbioms2 <- unique(rbioms[ , c("SCIENTIFIC_NAME", "ORDER","NEW_BIOME_2", "Origin_Class")])
rbybiome2 <- aggregate(rbioms2$SCIENTIFIC_NAME ~ rbioms2$NEW_BIOME_2, FUN = length)
names(rbybiome2)<- c("Biome","RICHNESS")
rbybiome2$NAO <- aggregate(rbioms2$Origin_Class ~ rbioms2$NEW_BIOME_2, FUN = table)[,2][,1]
rbybiome2$SAO <- aggregate(rbioms2$Origin_Class ~ rbioms2$NEW_BIOME_2, FUN = table)[,2][,2]
rbybiome2$propNAO <- rbybiome2$NAO * 100 / rbybiome2$RICHNESS
rbybiome2$propSAO <- rbybiome2$SAO * 100 / rbybiome2$RICHNESS
rbybiome1[1,1] <- "AH+YM"
rbybiome <- rbind(rbybiome2,rbybiome1[1,])
# - - - - - - - - - - - - - - - - - - -
# WITHOUT RODENTS (same summary for all non-Rodentia orders)
wrbioms <- biomams[biomams$ORDER != "Rodentia", ]
# AH & YM together
wrbioms1 <- unique(wrbioms[ , c("SCIENTIFIC_NAME", "ORDER","NEW_BIOME", "Origin_Class")])
wrbybiome1 <- aggregate(wrbioms1$SCIENTIFIC_NAME ~ wrbioms1$NEW_BIOME, FUN = length)
names(wrbybiome1)<- c("Biome","RICHNESS")
wrbybiome1$NAO <- aggregate(wrbioms1$Origin_Class ~ wrbioms1$NEW_BIOME, FUN = table)[,2][,1]
wrbybiome1$SAO <- aggregate(wrbioms1$Origin_Class ~ wrbioms1$NEW_BIOME, FUN = table)[,2][,2]
wrbybiome1$propNAO <- wrbybiome1$NAO * 100 / wrbybiome1$RICHNESS
wrbybiome1$propSAO <- wrbybiome1$SAO * 100 / wrbybiome1$RICHNESS
# AH & YM separated
wrbioms2 <- unique(wrbioms[ , c("SCIENTIFIC_NAME", "ORDER","NEW_BIOME_2", "Origin_Class")])
wrbybiome2 <- aggregate(wrbioms2$SCIENTIFIC_NAME ~ wrbioms2$NEW_BIOME_2, FUN = length)
names(wrbybiome2)<- c("Biome","RICHNESS")
wrbybiome2$NAO <- aggregate(wrbioms2$Origin_Class ~ wrbioms2$NEW_BIOME_2, FUN = table)[,2][,1]
wrbybiome2$SAO <- aggregate(wrbioms2$Origin_Class ~ wrbioms2$NEW_BIOME_2, FUN = table)[,2][,2]
wrbybiome2$propNAO <- wrbybiome2$NAO * 100 / wrbybiome2$RICHNESS
wrbybiome2$propSAO <- wrbybiome2$SAO * 100 / wrbybiome2$RICHNESS
wrbybiome1[1,1] <- "AH+YM"
wrbybiome <- rbind(wrbybiome2,wrbybiome1[1,])
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# = = = P L O T S = = = (proportions of NAO / SAO)
# Three stacked barplots (all mammals / rodents / non-rodents) of the NAO
# vs SAO percentage per biome, with richness (S = n) printed above each bar
# at hard-coded x positions that match the space = c(0.5, 0.25) layout.
# NOTE(review): `las` is passed twice to each barplot() call (las= 1 and
# las=1) -- one of the two should be removed.
bor <-c(3,7,4,8,5,9,6,10,1,11,12) # just an order for BIOMES
# pdf("NAO_proportion_biome_SEP_2021.pdf")
par(mfrow=c(3,1), mar= c(4,4,2,2))
barplot(t(bybiome[bor,5:6]), las= 1,col=c("lightblue","coral"), las=1, ylab= "% NAO",xlim=c(1,15),
        names.arg= bybiome[bor,1], cex.axis=0.7, cex.names=0.7, space= c(0.5,0.25), ylim=c(0,110), main= "TOTAL")
text(c(1,2.25,3.75,5,6.5,7.75,9.25,10.5,12,13.25,14.75),105,labels=paste ("S",bybiome$RICHNESS[bor],sep="="),cex=0.7)
barplot(t(rbybiome[bor,5:6]), las= 1,col=c("lightblue","coral"), las=1, ylab= "% NAO",xlim=c(1,15),
        names.arg= rbybiome[bor,1], cex.axis=0.7, cex.names=0.7, space= c(0.5,0.25), ylim=c(0,110), main= "Rodents")
text(c(1,2.25,3.75,5,6.5,7.75,9.25,10.5,12,13.25,14.75),105,labels=paste ("S",rbybiome$RICHNESS[bor],sep="="),cex=0.7)
barplot(t(wrbybiome[bor,5:6]), las= 1,col=c("lightblue","coral"), las=1, ylab= "% NAO",xlim=c(1,15),
        names.arg= wrbybiome[bor,1], cex.axis=0.7, cex.names=0.7, space= c(0.5,0.25), ylim=c(0,110), main= "Non-Rodents")
text(c(1,2.25,3.75,5,6.5,7.75,9.25,10.5,12,13.25,14.75),105,labels=paste ("S",wrbybiome$RICHNESS[bor],sep="="),cex=0.7)
# dev.off()
|
0b9bea88d1d152c27faf1f2c2b0a86fed404bcd6 | 7e3cc5ba96869845b03d8d6919657a1cadc93df3 | /src/readAdjacencyMatrix.R | 72d340943b6f5758e9fd5a62fff4dc0a60f350a7 | [
"MIT"
] | permissive | Pablo1990/Analyzing-PPIs | b50551e53fbb22278f4909a3b8ce7db24deaaea0 | 35e4f52e8668b96457c71c53c251f634dfcef67d | refs/heads/master | 2020-04-09T05:28:24.805852 | 2015-10-27T15:33:43 | 2015-10-27T15:33:43 | 42,508,683 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,205 | r | readAdjacencyMatrix.R | library(igraph)
# Read a tab-separated adjacency matrix (no header row) from `fileName`,
# build an undirected igraph graph from it, and write the graph to
# `outputFileName` in Pajek format.  Called for its file-writing side effect.
readAdjacencyMatrix <- function (fileName, outputFileName) {
  raw_table <- read.csv(file = fileName, header = F, sep = "\t")
  # (disabled: drop a leading label column when present)
  #raw_table[1] <- NULL
  g <- graph.adjacency(as.matrix(raw_table), mode = "undirected")
  write.graph(g, file = outputFileName, "pajek")
}
# Convert every protein-interaction adjacency matrix to a Pajek graph file,
# keeping the original one-to-one input/output pairing and call order.
adjacency_conversions <- list(
  c("data/adjacency/Drosophila PIN Confidence-mainA.txt", "data/raw/DrosophilaAdj.sif"),
  c("data/adjacency/Hpyroli-mainA.txt", "data/raw/HpyroliAdj.sif"),
  c("data/adjacency/KSHV-A.txt", "data/raw/KSHVAdj.sif"),
  c("data/adjacency/Malaria-PIN-mainA.txt", "data/raw/MalariaAdj.sif"),
  c("data/adjacency/PIN Ecoli-validated-mainA.txt", "data/raw/EcoliAdj.sif"),
  c("data/adjacency/PIN-Afulgidus-mainA.txt", "data/raw/AfulgidusAdj.sif"),
  c("data/adjacency/PIN-Human-mainA.txt", "data/raw/HumanAdj.sif"),
  c("data/adjacency/Pin-Bsubtilis-mainA.txt", "data/raw/BsubtilisAdj.sif"),
  c("data/adjacency/VZV-main-A.txt", "data/raw/VZVAdj.sif"),
  c("data/adjacency/YeastS-main.txt", "data/raw/YeastSAdj.sif")
)
for (conversion in adjacency_conversions) {
  readAdjacencyMatrix(conversion[1], conversion[2])
}
|
f0a6c87b1e0c30fc3a77ee9b46a2772840343c7d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rsurfer/examples/get.opposite.hemisphere.measurement.Rd.R | ca7fc5e2ffe6f36b7ec8b99e80a9142c5998ab3a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 285 | r | get.opposite.hemisphere.measurement.Rd.R | library(rsurfer)
### Name: get.opposite.hemisphere.measurement
### Title: Get Opposite Hemisphere Measurement
### Aliases: get.opposite.hemisphere.measurement
### ** Examples

# Example calls extracted from the rsurfer package help page; presumably each
# returns the name of the corresponding measurement on the opposite
# hemisphere -- see the rsurfer documentation for the exact contract.
get.opposite.hemisphere.measurement("Right.vessel")
get.opposite.hemisphere.measurement("lhCortexVol")
|
47f5a8a77e4a0c966b80ab1cd594e9a2a14673be | 1eee16736f5560821b78979095454dea33b40e98 | /subroutines/HMMLib.r | 96196eabb8bb7af93355dd25a920f7014f9d7819 | [] | no_license | karl616/gNOMePeaks | 83b0801727522cbacefa70129c41f0b8be59b1ee | 80f1f3107a0dbf95fa2e98bdd825ceabdaff3863 | refs/heads/master | 2021-01-21T13:52:44.797719 | 2019-03-08T14:27:36 | 2019-03-08T14:27:36 | 49,002,976 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,518 | r | HMMLib.r |
library(HiddenMarkov)
library(snow)
loadAndSmoothBedFile <- function(filename){
  # Load a 5-column methylation table (whitespace-separated: chr, pos, type,
  # r, count) where r is the percent methylation and count the read
  # coverage, and add the implied number of methylated reads per position.
  #
  # filename: path to the table readable by read.table().
  # Returns the data frame with columns chr, pos, type, r, count, meth.
  #
  # NOTE(review): despite the name, every smoothing variant in the original
  # was commented out upstream; this function only loads and annotates.
  bed <- read.table(filename, stringsAsFactors = FALSE)
  names(bed) <- c("chr", "pos", "type", "r", "count")
  # methylated-read count implied by the percentage and the coverage
  bed$meth <- round(bed$r / 100 * bed$count)
  return(bed)
}
# Fit a discrete HMM with binomial emissions to the methylated-read counts of
# an annotated bed table (see loadAndSmoothBedFile): each position contributes
# bed$meth successes out of bed$count trials. Supports 2, 3 or 4 hidden
# states; returns the Baum-Welch-fitted dthmm object from HiddenMarkov.
constructHMMFromBed <- function(bed,states=2,nrOfCpus=1){
  #init HMM parameters
  # vector of "fixed & known" number of Bernoulli trials
  pn <- list(size=bed$count)
  #depending on model complexity initialize HMM differently
  # delta starts the chain deterministically in state 1; Pi favours staying
  # in the current state; prob gives per-state methylation levels.
  if(states==2){
    delta=c(1,0)
    Pi = matrix(c(0.8,0.2,0.3,0.7),byrow=T,nrow=2)
    x <- dthmm(NULL, Pi, delta, "binom", list(prob=c(0.1, 0.6)), pn,
               discrete=TRUE,nonstat=T)
  }
  if(states==3){
    delta=c(1,0,0)
    Pi = matrix(c(0.8,0.1,0.1,0.1,0.8,0.1,0.1,0.1,0.8),byrow=T,nrow=3)
    x <- dthmm(NULL, Pi, delta, "binom", list(prob=c(0.1,0.3, 0.6)), pn,
               discrete=TRUE,nonstat=T)
  }
  # NOTE(review): any states value other than 2/3/4 leaves x undefined and
  # the BaumWelch call below fails with an obscure error.
  if(states==4){
    delta=c(1,0,0,0)
    Pi = matrix(c(0.85,0.05,0.05,0.05,0.05,0.85,0.05,0.05,0.05,0.05,0.85,0.05,0.05,0.05,0.05,0.85),byrow=T,nrow=4)
    x <- dthmm(NULL, Pi, delta, "binom", list(prob=c(0.1,0.3,0.5, 0.7)), pn,
               discrete=TRUE,nonstat=T)
  }
  #assign the vector of positive events (methylated C's)
  x$x=bed$meth
  # NOTE(review): an MPI snow cluster is created but the SNOWcluster argument
  # below is commented out, so the cluster is started and stopped unused.
  c1<- makeCluster(nrOfCpus,"MPI")
  # use above parameter values as initial values and start do EM
  # y <- BaumWelch(x,bwcontrol(maxiter=1000,tol=0.0001),SNOWcluster=c1)
  # y <- BaumWelch(x,bwcontrol(maxiter=1000,tol=5e-3),SNOWcluster=c1)
  y <- BaumWelch(x,bwcontrol(maxiter=1000,tol=5e-3))
  stopCluster(c1)
  return(y)
}
#update the bed table with predictions from the fitted hmm object
annotateBedFile <- function(hmm,bed){
  # Adds per-position columns derived from the HiddenMarkov fit:
  #   postdecod  - posterior-decoded state (argmax over hmm$u rows)
  #   posterior1/posterior2(/posterior3) - log-alpha + log-beta per state,
  #     i.e. unnormalised log posteriors from the forward-backward pass.
  #bed$viterbi=Viterbi(hmm)
  bed$postdecod=apply(hmm$u,1,which.max)
  #by default use the 2nd state posterior
  ff=forwardback(hmm$x,hmm$Pi,hmm$delta,hmm$distn,hmm$pm,hmm$pn)
  bed$posterior1=ff$logalpha[,1]+ff$logbeta[,1]
  bed$posterior2=ff$logalpha[,2]+ff$logbeta[,2]
  # NOTE(review): a 4-state model gets no posterior4 column; only the third
  # state's posterior is added when three or more states exist.
  if(ncol(ff$logbeta)>=3){
    bed$posterior3=ff$logalpha[,3]+ff$logbeta[,3]
  }
  return(bed)
}
modelSelectionScores <- function(hmm){
  # Compute small-sample AIC (AICc) and BIC for a fitted dthmm object.
  #
  # Free parameters k = transition probabilities (each of the nrow(Pi) rows
  # sums to 1, so one entry per row is determined) plus the emission
  # parameters. hmm$pm is a *list* of parameter vectors, e.g.
  # list(prob = c(p1, p2)); the original used length(hmm$pm), which counts
  # list entries (1) instead of the per-state parameters, undercounting k.
  k <- length(hmm$Pi) - nrow(hmm$Pi) + length(unlist(hmm$pm))
  # n = number of observations
  n <- length(hmm$x)
  aicc <- (2 * k - 2 * hmm$LL) + 2 * k * (k + 1) / (n - k - 1)
  bic <- -2 * hmm$LL + k * log(n)
  # Same return shape as before: a one-element list holding a named vector.
  return(list(c(AICc = aicc, BIC = bic)))
}
computePeaksFromBed <- function(bed){
  # Call "peak" boundaries from the posterior-decoded state sequence
  # (column postdecod): each row gets the two-character transition code of
  # its own state followed by the next row's state, and only state switches
  # between 1 and 2 ("12" = entry, "21" = exit) are returned.
  #
  # The original loop was broken: it iterated once over the *scalar*
  # nrow(bed)-1 and assigned paste() -- i.e. character(0) -- to the column,
  # which errors at runtime. This implements the transition coding the
  # subset() below clearly expects.
  n <- nrow(bed)
  bed$peak <- rep("1", n)               # last row keeps the placeholder "1"
  for (i in seq_len(max(0, n - 1))) {
    bed$peak[i] <- paste(bed$postdecod[i], bed$postdecod[i + 1], sep = "")
  }
  #extract only lines with 12 or 21
  subset(bed, peak == "12" | peak == "21")
}
#for a given annotated bed table show a selected row range x..y (vector
#positions) comparing the raw data with the state assignments and posteriors
visualComparison <- function(bed,x,y){
  # Three stacked panels over rows x:y of the annotated bed table:
  #   1. raw methylation ratio (r is a percentage, hence /100)
  #   2. posterior-decoded state path
  #   3. log posteriors of state 1 (green) with state 2 (blue) overlaid
  #change plotting device
  par(mfrow=c(3,1))
  plot(bed$pos[x:y],bed$r[x:y]/100)
  # plot(bed$pos[x:y],bed$viterbi[x:y],type="l",lwd=3,col="red")
  plot(bed$pos[x:y],bed$postdecod[x:y],type="l",lwd=3,col="blue")
  plot(bed$pos[x:y],bed$posterior1[x:y],type="l",lwd=3,col="green")
  lines(bed$pos[x:y],bed$posterior2[x:y],lwd=3,col="blue")
  # restore a single-panel layout for subsequent plots
  par(mfrow=c(1,1))
}
# Compare the posterior decodings of a 2-state (bed2) and a 3-state (bed3)
# HMM over the same row range x:y; x positions are always taken from bed2,
# so both tables are assumed to cover identical positions -- TODO confirm.
visualComparison2 <- function(bed2,bed3,x,y){
  #change plotting device
  par(mfrow=c(3,1))
  plot(bed2$pos[x:y],bed2$r[x:y]/100,ylab="methylation ratio")
  # plot(bed$pos[x:y],bed$viterbi[x:y],type="l",lwd=3,col="red")
  plot(bed2$pos[x:y],bed2$postdecod[x:y],type="l",lwd=3,col="blue",ylab="Post Decoding 2 state HMM")
  plot(bed2$pos[x:y],bed3$postdecod[x:y],type="l",lwd=3,col="blue",ylab="Post Decoding 3 state HMM")
  # restore a single-panel layout for subsequent plots
  par(mfrow=c(1,1))
}
visualComparison3 <- function(bed2,bed3,bed4,x,y){
  # Four stacked panels over the row range x:y: the raw methylation ratio
  # followed by the posterior decodings of the 2-, 3- and 4-state HMMs.
  # X positions are always taken from bed2, matching the original behaviour.
  idx <- x:y
  pos <- bed2$pos[idx]
  par(mfrow = c(4, 1))
  plot(pos, bed2$r[idx] / 100, ylab = "methylation ratio")
  decodings <- list(bed2, bed3, bed4)
  panel_labels <- c("Post Decoding 2 state HMM",
                    "Post Decoding 3 state HMM",
                    "Post Decoding 4 state HMM")
  for (k in seq_along(decodings)) {
    plot(pos, decodings[[k]]$postdecod[idx],
         type = "l", lwd = 3, col = "blue", ylab = panel_labels[k])
  }
  # restore a single-panel layout for subsequent plots
  par(mfrow = c(1, 1))
}
|
dfe7df8eb8a41e57cafb70e5af537ef4e954d93b | 2de28b3256606692c48ffdaf05997422527084a1 | /R/met.plot.R | 207295d17e69a00b9b7af1f55c1cba48ef7f7253 | [] | no_license | yzhlinscau/AAFMM | 4f999a028d54d2a3e90cb08899ab6e5d627664ee | 6d016c60ae72901323c84f8988a69c0a1fa5aa16 | refs/heads/master | 2020-03-27T02:29:05.260122 | 2018-08-27T05:07:25 | 2018-08-27T05:07:25 | 145,409,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,832 | r | met.plot.R | #' @title Plot asreml-MET data.
#'
#' @description
#' \code{met.plot} This function plots MET data for further
#' factor analytic by asreml to find the relation of trial sites, etc.
#'
#' @usage met.plot(object, plot.title = NULL)
#'
#' @param object MET data.
#' @param plot.title MET plot title.
#'
#' @export met.plot
#' @author Yuanzhen Lin <yzhlinscau@@163.com>
#' @references
#' Yuanzhen Lin. R & ASReml-R Statistics. China Forestry Publishing House. 2016
#' AAFMM website:https://github.com/yzhlinscau/AAFMM
#' @examples
#' \dontrun{
#' library(AAFMM)
#'
#' data(MET)
#'
#' # example 1
#' # variable order: genotype,yield,site,row,col
#' MET2<-MET[,c(1,9,2,4:5)]
#'
#' met.plot(MET2)
#'
#' # example 2
#' # variable order on MET2: Rep, Block
#' MET3<-MET[,c(1,9,2,4:7)]
#'
#' met.plot(MET3,"My met trials")
#' }
#'
met.plot <- function(object, plot.title = NULL) {
  # Plot a multi-environment-trial (MET) field layout with desplot.
  #
  # object: MET data whose first five columns are genotype, yield, site,
  #   row, col (optionally followed by Rep and Block in columns 6:7).
  # plot.title: title for the plot; defaults to "MET data plot".
  #
  # Fixes: gpar() was called unqualified although the grid package is never
  # attached (the require(grid) line was commented out) -- now namespaced;
  # 1:nlevels() replaced by seq_len() to avoid the 1:0 trap.
  if (is.null(plot.title)) plot.title <- "MET data plot"
  dat <- object
  # Relabel the site factor (column 3) as S1, S2, ...
  levels(dat[, 3]) <- paste("S", seq_len(nlevels(dat[, 3])), sep = "")
  names(dat)[1:5] <- c("genotype", "yield", "site", "row", "col")
  # row/col must be numeric coordinates for the field plot
  for (i in 4:5) dat[, i] <- as.numeric(dat[, i])
  if (length(dat) == 5) {
    desplot::desplot(yield ~ col * row | site, dat, main = plot.title)
  } else {
    # With Rep/Block columns present, outline them on the field plot.
    names(dat)[6:7] <- c("Rep", "Blk")
    desplot::desplot(yield ~ col * row | site, dat, main = plot.title,
                     out1 = Rep, out2 = Blk, strip.cex = 1.5,
                     out1.gpar = grid::gpar(col = "blue", lwd = 4),
                     out2.gpar = grid::gpar(col = "red", lwd = 1, lty = 1),
                     par.settings = list(layout.heights = list(strip = 2)))
  }
}
|
af97fa4dbc772b197198c8a32148a92484b82c54 | 9bccf862820bc6edb04817eb16ccace22ff4b618 | /Lab1Part3/Scripts/tweetCollector_Script.R | 340fbddd230f6a1ca35b2faba67fcca35138ed8b | [
"MIT"
] | permissive | Muthu2093/Twitter-Client-for-Sentiment-Analysis | 548e10dd6bb1725257b98244cdffd34321e72ef5 | 67f7cfdae786f08beedae846f24b1e2007462f2f | refs/heads/master | 2021-03-27T08:24:13.069560 | 2018-03-15T01:05:41 | 2018-03-15T01:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,347 | r | tweetCollector_Script.R | rm(list =ls())
#### Code to collect tweets##
###SET THE WORKING DIRECTORY TO 'SCRIPTS' FOLDER IN LAB1 PART3 BEFORE RUNNING THE SCRIPT####
## All tweets collected are filtered and appended to csv files in the data folder automatically
## Geocodes are fetched for all tweets and saved in location_GeoCode.csv file - used for later plotting
library(twitteR)
library(ggplot2)
library(ggmap)
library(data.table)
## Setup oauth
## NOTE(review): API credentials are hardcoded below; they should be revoked
## and loaded from environment variables or a gitignored config instead.
setup_twitter_oauth("VxJ6qp5XL3VTclBzMBsD1Ez1A", "owezT5IVRVG8nvkSHXxqq4t2McwPO6mxesJTGU2549yHTJbP8m", "340449785-0AWt3nkBVvLlX7hbUFLl0fEqIKs47qUU7V5UnFWH", "qnaD0Pyp9jUXfwVb82RlSKikuvVi2MAWxp1J0mD1Fle4d")

############## Collection of Tweets ###################
## Searching for tweets ##
search.string <- "#flu"
no.of.tweets <- 3500
tweets <- searchTwitter(search.string, n=no.of.tweets, lang="en")
## Conversion of searched tweets to Data frame
tweets <- twListToDF(tweets)

## Saving collected data to a csv file - only the tweets collection this session
setwd("../data_collected")
# File name embeds the tweet count and a timestamp for this session.
Name=paste("New", no.of.tweets," Tweets Collected on ",Sys.time())
write.csv(tweets, file = Name)
setwd("../Scripts")

# Reading all tweets collected so far
CDF=read.csv("../data_collected/consolidated_Tweets_Total")
CDF<- subset(CDF, select = -c(X)) #removing the row-index column X added by write.csv
# Creating a consolidated data frame of all the tweets collected so far
consolidated_Tweets_Total=rbind(CDF,tweets)
# Columns 1:16 are assumed to be the twitteR status fields -- TODO confirm.
consolidated_Tweets_Total <- unique( consolidated_Tweets_Total[ , 1:16 ] ) #remove duplicates

# Saving all Tweets Collected from day 1 to csv file
setwd("../data_collected")
write.csv(consolidated_Tweets_Total, file = "consolidated_Tweets_Total")
setwd("../Scripts")
################# Filtering Tweets ####################
## Eliminating duplicate users by looking up each screenName
usernames <- tweets$screenName
temp_df <- twListToDF(lookupUsers(usernames))

## Remove users without any location information
DWL=read.csv("../data_collected/data_With_location")
DWL<- subset(DWL, select = -c(X)) #removing the row-index column X added by write.csv
# Keep only users whose profile location field is non-empty.
tweets_With_location <- subset(temp_df, temp_df$location != "")
data_With_location <- rbind(DWL, tweets_With_location)
# Columns 1:17 are assumed to be the twitteR user fields -- TODO confirm.
data_With_location <- unique( data_With_location[ , 1:17 ] )

#Saving the data with location to csv file
setwd("../data_collected")
write.csv(data_With_location, file = "data_With_location")
setwd("../Scripts")
########################## Fetching Geocode of users #######################
## Code to access geocode - limit 2500 per day - dont waste it
## Use it after you have extracted tweets with location info (20 to 30 maybe)
# locatedUsers <- !is.na(tweets_with_location$location)

# Geocode each ASCII-only location string; non-ASCII locations are skipped.
# j tracks whether `locations` has been initialised yet.
# NOTE(review): growing `locations` with rbind() inside the loop is O(n^2);
# collecting results in a list and binding once would be faster.
j<-1;
for (i in tweets_With_location$location){
  loc <- i
  if (stringi::stri_enc_mark(loc)=="ASCII"){
    if (j==1){
      locations <- geocode(loc)
    }
    if (j>1){
      locations <- rbind(locations,geocode(loc))
    }
    j <- j+1
  }
}

# Append the new geocodes to the cumulative file, dropping rows that failed
# to geocode (empty longitude).
setwd("../data_collected")
location_GeoCode <- read.csv("../data_collected/location_GeoCode")
location_GeoCode<- subset(location_GeoCode, select = -c(X)) #removing the row-index column X added by write.csv
location_GeoCode <- rbind(location_GeoCode,locations)
location_GeoCode <- subset(location_GeoCode, location_GeoCode$lon != "")
write.csv(location_GeoCode, file = "location_GeoCode")
setwd("../Scripts")
|
32a39cf122a036bec9ecf8161b3c39597cf36fe6 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/springer/man/cv.springer.Rd | cf24a5c8e5df74fdeb50f5f4851a900dafc3c295 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,512 | rd | cv.springer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.springer.R
\name{cv.springer}
\alias{cv.springer}
\title{k-folds cross-validation for springer}
\usage{
cv.springer(
clin = NULL,
e,
g,
y,
beta0,
lambda1,
lambda2,
nfolds,
func,
corr,
structure,
maxits = 30,
tol = 0.001
)
}
\arguments{
\item{clin}{a matrix of clinical covariates. The default value is NULL. Whether to include the clinical covariates is decided by user.}
\item{e}{a matrix of environment factors.}
\item{g}{a matrix of genetic factors.}
\item{y}{the longitudinal response.}
\item{beta0}{the initial value for the coefficient vector.}
\item{lambda1}{a user-supplied sequence of \eqn{\lambda_{1}} values, which serves as a tuning parameter for the individual-level penalty.}
\item{lambda2}{a user-supplied sequence of \eqn{\lambda_{2}} values, which serves as a tuning parameter for the group-level penalty.}
\item{nfolds}{the number of folds for cross-validation.}
\item{func}{the framework to obtain the score equation. Two choices are available: "GEE" and "QIF".}
\item{corr}{the working correlation structure adopted in the estimation algorithm. The springer package provides three choices for the
working correlation structure: exchangeable, AR-1, and independence.}
\item{structure}{Three choices are available for structured variable selection. "bilevel" for sparse-group selection on both group-level and individual-level. "group" for selection on group-level only. "individual" for selection on individual-level only.}
\item{maxits}{the maximum number of iterations that is used in the estimation algorithm. The default value is 30.}
\item{tol}{The tolerance level. Coefficients with absolute values that are smaller than the tolerance level will be set to zero. The adhoc value can be chosen as 0.001.}
}
\value{
an object of class "cv.springer" is returned, which is a list with the components below:
\item{lam1}{the optimal \eqn{\lambda_{1}}.}
\item{lam2}{the optimal \eqn{\lambda_{2}}.}
}
\description{
This function conducts k-fold cross-validation for springer and returns the optimal values of the tuning parameters.
}
\details{
For bi-level sparse group selection, cv.springer returns two optimal tuning parameters,
\eqn{\lambda_{1}} and \eqn{\lambda_{2}}; for group-level selection, this function returns the optimal \eqn{\lambda_{2}} with \eqn{\lambda_{1}}=0;
for individual-level selection, this function returns the optimal \eqn{\lambda_{1}} with \eqn{\lambda_{2}}=0.
}
|
64af42db4316cff77a08340bd1275e811432a9c2 | a1cc22bafb4429b53898962b1131333420eddf05 | /example-models/R/tools/cmdStanTools.R | d970354ec30dc8254cbdec4e842bbfc9a7e21443 | [
"BSD-3-Clause"
] | permissive | metrumresearchgroup/Torsten | d9510b00242b9f77cdc989657a4956b3018a5f3a | 0168482d400e4b819acadbc28cc817dd1a037c1b | refs/heads/master | 2023-09-01T17:44:46.020886 | 2022-05-18T22:46:35 | 2022-05-18T22:46:35 | 124,574,336 | 50 | 18 | BSD-3-Clause | 2023-09-09T06:32:36 | 2018-03-09T17:48:27 | C++ | UTF-8 | R | false | false | 3,693 | r | cmdStanTools.R | ## 5/27/2016: v1.0
## functions to run cmdStan
compileModel <- function(model, stanDir = stanDir){
    # Copy <model>.stan into a directory of the same name and invoke the
    # CmdStan makefile from stanDir to build the executable.
    # NOTE(review): the default `stanDir = stanDir` is self-referential and
    # errors if the argument is omitted; callers must always supply it.
    modelName <- basename(model)
    dir.create(model)
    stanSource <- paste(model, "stan", sep = ".")
    stanTarget <- file.path(model, paste(modelName, "stan", sep = "."))
    file.copy(stanSource, stanTarget, overwrite = TRUE)
    executable <- file.path(model, modelName)
    system(paste0("make --directory=", stanDir, " ", executable))
}
# Run CmdStan NUTS sampling for a compiled model. `model` is the model
# directory created by compileModel(); the executable inside it is invoked
# with the given data file, inits, seed and sampler settings. Output goes to
# <model>[_tag_]<chain>.csv. The exact argument spacing in the command string
# below is deliberate and must be preserved.
runModel <- function(model, data, iter, warmup, thin, init, seed, chain = 1,
                     stepsize = 1, adapt_delta = 0.8, max_depth = 10, refresh = 100, tag=NULL){
    modelName <- basename(model)
    model <- file.path(model, modelName)
    # Optional tag is spliced into the output file name between model and chain.
    if(! is.null(tag)) output <- paste0(model, "_", tag, "_") else output=model
    system(paste(model, " sample algorithm=hmc engine=nuts",
                 " max_depth=", max_depth,
                 " stepsize=", stepsize,
                 " num_samples=", iter,
                 " num_warmup=", warmup, " thin=", thin,
                 " adapt delta=", adapt_delta,
                 " data file=", data,
                 " init=", init, " random seed=", seed,
                 " output file=", paste(output, chain, ".csv", sep = ""),
                 " refresh=", refresh,
                 sep = ""))
}
# Run CmdStan's `diagnose` method (gradient check) on a compiled model,
# writing the report to <model><chain>.csv. Arguments mirror runModel().
runDiagnose <- function(model, data, init, seed, chain = 1, refresh=100){
    modelName <- basename(model)
    model <- file.path(model, modelName)
    system(paste(model, " diagnose",
                 " data file=", data,
                 " init=", init, " random seed=", seed,
                 " output file=", paste(model, chain, ".csv", sep = ""),
                 " refresh=", refresh,
                 sep = ""))
}
# runModelFixed <- function(model, data, iter, warmup, thin, init, seed, chain = 1,
# stepsize = 1, adapt_delta = 0.8, max_depth = 10, refresh = 100){
# modelName <- basename(model)
# model <- file.path(model, modelName)
# system(paste(model, " sample algorithm=fixed_param",
# " num_samples=", iter,
# " data file=", data,
# " random seed=", seed,
# " output file=", paste(model, chain, ".csv", sep = ""),
# " refresh=", refresh,
# sep = ""), invisible = FALSE)
# }
# runModelFixed <- function(model, data, iter, warmup, thin, init, seed, chain = 1,
# stepsize = 1, adapt_delt = 0.8, max_depth = 10, refresh = 100){
# modelName <- basename(model)
# model <- file.path(model, modelName)
# print(paste0(model, " sample algorithm=fixed_param",
# " num_samples=1 num_warmup=0",
# " data file=", data,
# " random seed=", seed,
# " output file=", paste(model, chain, ".csv", sep = ""),
# " refresh=", refresh))
#
# system(paste0(model, " sample algorithm=fixed_param",
# " num_samples=1 num_warmup=0",
# " data file=", data,
# " init=", init,
# " random seed=", seed,
# " output file=", paste(model, chain, ".csv", sep = ""),
# " refresh=", refresh), invisible = FALSE)
#
# }
# Run CmdStan with algorithm=fixed_param (no parameter updates; used for
# pure generated-quantities models). Output goes to <model><chain>.csv.
# NOTE(review): warmup, thin, init, stepsize, adapt_delta and max_depth are
# accepted for signature parity with runModel() but are not passed to the
# command; `invisible` is a Windows-only system() argument -- confirm it is
# accepted on the target platform.
runModelFixed <- function(model, data, iter, warmup, thin, init, seed, chain = 1,
                          stepsize = 1, adapt_delta = 0.8, max_depth = 10, refresh = 100){
    modelName <- basename(model)
    model <- file.path(model, modelName)
    system(paste(model, " sample algorithm=fixed_param",
                 " num_samples=", iter,
                 " data file=", data,
                 " random seed=", seed,
                 " output file=", paste(model, chain, ".csv", sep = ""),
                 " refresh=", refresh,
                 sep = ""), invisible = FALSE)
}
|
4403fad5838977661be9ca0b7503a9643153f2ae | a17cf22be2304c96d267fc1b68db7b7279c4a293 | /R/clustal.R | 944c264d7464d232d23848a02934e6574353e62a | [] | no_license | robertdouglasmorrison/DuffyTools | 25fea20c17b4025e204f6adf56c29b5c0bcdf58f | 35a16dfc3894f6bc69525f60647594c3028eaf93 | refs/heads/master | 2023-06-23T10:09:25.713117 | 2023-06-15T18:09:21 | 2023-06-15T18:09:21 | 156,292,164 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,177 | r | clustal.R | `clustal` <- function( fastaFile, outFile="clustal.aln", clustalProgram=Sys.getenv("CLUSTAL"),
iterations=NULL, outfmt=c("clustal", "fasta", "phylip"),
guidetree=NULL, clustalArgs=NULL, verbose=FALSE) {
if ( is.null( clustalProgram) || nchar( clustalProgram) < 1) {
cat( "\nRequired 'clustalProgram' argument missing or empty...")
return(NULL)
}
outfmt <- match.arg( outfmt)
# build the command line
cmdLine <- paste( clustalProgram, " -i ", fastaFile, " -o ", outFile,
" --outfmt ", outfmt, " --force")
if ( ! is.null( iterations)) cmdLine <- paste( cmdLine, " --iterations ", iterations)
if ( ! is.null( guidetree)) cmdLine <- paste( cmdLine, " --guidetree-out ", guidetree)
if ( ! is.null( clustalArgs)) cmdLine <- paste( cmdLine, clustalArgs)
if (verbose) cmdLine <- paste( cmdLine, " -v")
# clean away the expected result
file.delete( outFile)
# call it!
catch.system( cmdLine)
if ( ! file.exists( outFile)) {
cat( "\nError: No result made by CLUSTAL")
return(NULL)
}
# consume the result
ans <- NULL
if ( outfmt == "clustal") ans <- readALN( outFile, verbose=verbose)
return( ans)
}
|
6e205f46a38ef2eafb533981ba53844b78fae0dd | aa01d8e6255d80b35ed0fc86686ff70554ebd68d | /slides/class_10/class_10.R | 91f758f7fe160f168b7ce80d128795b547049645 | [] | no_license | frolivares/dar_soc4001 | bc4dac0bcd62e7a5c041a2e8338e349bb290667d | 0a04f96caf706d996236bfba561b13570079d656 | refs/heads/master | 2023-08-17T05:26:50.153292 | 2021-10-13T14:52:08 | 2021-10-13T14:52:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,291 | r | class_10.R | # Limpiar pantalla y remover objetos existentes
# Clear the console and remove existing objects (class-script convenience;
# rm(list = ls()) / setwd() would be avoided in reusable code).
cat("\014")
rm(list = ls())

## Tidy data sets
library("tidyverse")
library("janitor")
library("readr")

# NOTE(review): absolute, machine-specific working directory -- this script
# only runs on the author's machine as written.
setwd("/Users/Mauricio/Library/Mobile Documents/com~apple~CloudDocs/Teaching/ISUC/2020_2_data_analysis_r/repo/slides/class_10/")

# Semicolon-separated source file (file name as provided upstream).
va_messy <- read.delim("value_added_agricultue.csv", sep= ";")
va_messy %>% View()
# Tidy the value-added table: promote the first row to column names, coerce
# all year columns to character, pivot to long format, treat empty strings
# as missing, and parse the comma-decimal numbers.
#
# Fix: the original chain ended in `%>% View()` (leftover debugging), which
# severed the pipe -- va_data got View()'s NULL return and the following
# pivot_longer(...) ran as a dangling expression with no data, erroring out.
va_data <- va_messy %>%
  row_to_names(row_number = 1) %>%
  rename(country = 1) %>%
  mutate(across(-country, ~ as.character(.x))) %>%
  pivot_longer(cols = -c("country"), names_to = "year", values_to = "added_value") %>%
  mutate(added_value = if_else(added_value == "", NA_character_, added_value)) %>%
  drop_na(added_value) %>%
  # scan() parses "," as the decimal separator and "." as the thousands mark
  mutate(added_value = scan(text = added_value, dec = ",", sep = ".")) %>%
  arrange(year, country)

va_data
## Implicit missing values: complete() adds the country/year combinations
## that have no row at all (their added_value becomes NA).
va_data <- va_data %>% complete(country,year)
# Variant shown in class: fill the newly created gaps with 0 instead of NA
# (result is printed only, not assigned).
va_data %>% complete(country,year, fill = list(added_value = 0))

## Fill missing values: carry the last observed value downward within each
## country, zero-fill anything still missing, then plot one line per country.
va_data %>% arrange(country,year) %>%
  group_by(country) %>%
  fill(added_value, .direction = c("down")) %>%
  replace_na(list(added_value = 0)) %>%
  ggplot(aes(x=year,y=added_value, group=country,colour=country)) +
  geom_line() +
  theme(legend.position = "none")
|
2b092a5acabfec50ed520e2c360c4ecc1e333368 | 0d52cf571dbc0a15096357a46c7a52b954ee090d | /rebecca/R-stuff/countryFocus.R | 70676bec15fd04dc80acbfae30123ef433442f27 | [] | no_license | arixha/MVTEC-Stats-Project1 | 6da88ca4dcfc767945e00ea54696117bc148dc06 | 6619df7860673f79c75471b005af8a5c2cf936bf | refs/heads/main | 2023-02-15T20:07:54.299459 | 2021-01-08T18:51:54 | 2021-01-08T18:51:54 | 316,367,309 | 0 | 4 | null | 2020-12-04T15:24:43 | 2020-11-27T00:56:30 | R | UTF-8 | R | false | false | 384 | r | countryFocus.R | library(dplyr)
library(tidyr)

# Pull the latest Our World in Data covid dataset straight from GitHub
# (requires network access at run time).
originalCovid = read.csv(url("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv"))

# Keep only rows with both hospitalisation metrics present.
removeNA = originalCovid %>%
  drop_na(weekly_hosp_admissions_per_million, hospital_beds_per_thousand)

countries = unique(removeNA$location)
length(countries) #check how many countries
print(countries) #check country names
|
c4d8fab74a27fd11a54750461764dedccd2ec7a5 | 27e47edea64b67aa62790a49079597deb3879745 | /man/les_boxplots.Rd | 32cce919177fd4a7fccafd75071515103be044f8 | [
"MIT"
] | permissive | C-Juliette/randomfields | 9348b69b61761eec186e2fd9976dfc7f83cfded1 | c220f513568726e2bcbf93dcc36f9dd4485de67c | refs/heads/main | 2023-08-30T22:44:30.218141 | 2023-08-27T09:23:56 | 2023-08-27T09:23:56 | 387,907,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 477 | rd | les_boxplots.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/les_boxplots.R
\name{les_boxplots}
\alias{les_boxplots}
\title{Boxplots}
\usage{
les_boxplots(Z, Y, titre = "Boxplots", nom_axeX = "", nom_axeY = "")
}
\arguments{
\item{Z}{first matrix}
\item{Y}{second matrix}
\item{titre}{title of the graph}
\item{nom_axeX}{x label}
\item{nom_axeY}{y label}
}
\value{
a plot with the boxplots
}
\description{
Boxplots
}
\examples{
les_boxplots(1:10, 10:20)
}
|
1bbcb0a8880b58552b0971cd93af62e336057ee1 | 41830820e0969b6d211691c1d6fe0f2273d5c93d | /cachematrix.R | 81b8d126753b426b90f3dc5b3dc4c4f99339ce7c | [] | no_license | dariomartinezb/ProgrammingAssignment2 | 39e8a68cf4cfc416bc59996a10d54d2247dcf73b | e1fcb44c162247ae6e2091b9837fddf3b53a380f | refs/heads/master | 2020-12-24T23:29:18.172049 | 2015-02-22T21:00:49 | 2015-02-22T21:00:49 | 31,177,632 | 0 | 0 | null | 2015-02-22T19:59:06 | 2015-02-22T19:59:04 | null | UTF-8 | R | false | false | 1,190 | r | cachematrix.R | ## These functions manage the caching and retrieval of the inverse
## of provided matrices
## makeCacheMatrix will take a provided matrix and store it in a cached
## copy, and then it will compute the inverse of the provided matrix and
## store it also in the object for future retrieval (using cacheSolve).
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of accessors: set/get for the matrix itself and
  # setvolteada/getvolteada for the cached inverse ("volteada").
  cached_inverse <- NULL

  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setvolteada <- function(value) {
    cached_inverse <<- value
  }
  getvolteada <- function() {
    cached_inverse
  }

  list(
    set = set,
    get = get,
    setvolteada = setvolteada,
    getvolteada = getvolteada
  )
}
## cacheSolve first verifies that it has the inverse of the provided matrix
## (created with makeCacheMatrix). If the inverse doesn't exists,
## cacheSolve will compute the inverse, but if the inverse already exists,
## cacheSolve will retrieve the inverse of the matrix from the cached
## object.
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  # object). A previously cached inverse is reused (with a message);
  # otherwise the inverse is computed with solve(), stored back into the
  # cache, and returned. Extra arguments are forwarded to solve().
  cached <- x$getvolteada()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setvolteada(inverse)
  inverse
}
|
1bf1a0d9a4e6439020aee4b64fc498caf657615a | 9716b6538d1ec98f8773f87b844382e062e2b22f | /hw1/runSim.R | 106f8ae050cc8c9348864e9b2aa34efbd3abd1bc | [] | no_license | FrancisShizy/biostat-m280-2019-winter | fcf69ef3046ffd6ef074dab786d79c360f3f3fc9 | 8eb4aa2c851534034b6d04f23f4a8d13fe6a64e8 | refs/heads/master | 2020-04-17T07:08:02.584328 | 2019-03-02T07:12:02 | 2019-03-02T07:12:02 | 166,355,453 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,175 | r | runSim.R | ## parsing command arguments
for (arg in commandArgs(TRUE)) {
eval(parse(text=arg))
}
## check if a given integer is prime
isPrime = function(n) {
  # 0, 1 and negative numbers are not prime. The original returned TRUE for
  # them because the n <= 3 shortcut was unconditional.
  if (n < 2) {
    return (FALSE)
  }
  if (n <= 3) {
    return (TRUE)
  }
  # composite iff divisible by any integer in 2..floor(sqrt(n))
  if (any((n %% 2:floor(sqrt(n))) == 0)) {
    return (FALSE)
  }
  return (TRUE)
}
## estimate mean only using observations with prime indices
estMeanPrimes = function (x) {
  # seq_len() handles the empty-input case safely: the original's
  # sapply(1:n, ...) evaluated indices 1 and 0 when length(x) == 0.
  # vapply pins the result type to logical, unlike sapply.
  ind <- vapply(seq_len(length(x)), isPrime, logical(1))
  return (mean(x[ind]))
}
# simulate data: Monte Carlo comparison of two mean estimators.
# For `reps` replicates of `n` draws from the chosen distribution
# ("gaussian", "t1" = Student t with 1 df, "t5" = t with 5 df), accumulate
# the squared error of (a) the classical sample mean and (b) the
# prime-indexed mean (estMeanPrimes). The true mean is 0 for all three
# distributions, so the squared estimate is the squared error.
# Returns c(mseSamplAvg, msePrimeAvg).
compare_methods <- function(dist, n, reps, seed) {
  set.seed(seed)
  msePrimeAvg <- 0.0
  mseSamplAvg <- 0.0
  for (r in 1:reps) {
    if (dist == "gaussian"){
      x <- rnorm(n)
    } else if (dist == "t1"){
      x <- rt(n,df=1)
    } else if (dist == "t5"){
      x <- rt(n, 5)
    } else {
      stop(paste("unrecognized dist: ", dist))
    }
    # compute MSEs for the primed-indexed average estimator
    # and the classical sample average estimator
    msePrimeAvg <- msePrimeAvg + estMeanPrimes(x)^2
    mseSamplAvg <- mseSamplAvg + mean(x)^2
  }
  mseSamplAvg <- mseSamplAvg / reps
  msePrimeAvg <- msePrimeAvg / reps
  return(c(mseSamplAvg, msePrimeAvg))
}
# dist, n, reps and seed come from the command-line parsing loop above.
compare_methods (dist, n, reps, seed)
# estimate mean
#estMeanPrimes(x)
|
36c9a3901e5f36c7aeb0c61782d287a8bb0c1d7f | 23fb38b92e3723d0b07020379aafa8eb335da5e2 | /man/valid.Rd | 0251d4d5f56de4dfa9a7c05d7637fb26b3231d10 | [] | no_license | cran/ddp | b90d7589ebe9fcc9d0a071655ce29a608630fcf9 | fd5c20aae2142155f9b291e0d8c23299b4444506 | refs/heads/master | 2023-04-29T02:44:47.139215 | 2021-05-08T14:30:02 | 2021-05-08T14:30:02 | 284,766,082 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,405 | rd | valid.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/valid.R
\name{valid}
\alias{valid}
\title{Validity and Reliability check.}
\usage{
valid(data, alpha = 0.05, total = NULL)
}
\arguments{
\item{data}{A data set/ matrix (\emph{see} \strong{Details}).}
\item{alpha}{An alpha value (\emph{see} \strong{Details}).}
\item{total}{A single numeric value of the index column (\emph{see} \strong{Details}).}
}
\value{
Function returns a data frame with \emph{k} rows and four columns.
The columns indicate the item-rest correlation, correlation threshold,
p value, and the validity and reliability conclusion.
}
\description{
This function calculates the item-rest correlation.
}
\details{
The data set is a data frame/ matrix \emph{n x k}. The row is
the name of the respondent as many as \emph{n}, while the column is
the variables (\emph{k}). The alpha value is set between 0.0001 and
0.20, the default is 0.05. If the \code{total} input is \code{NULL},
it means that the total score will be calculated first,
the column index of the total score can be also stated otherwise.
The index of the column is a numeric value with a length of one.
It has to be between 1 and (\emph{k}).
}
\examples{
#data simulation of 10 person 5 variables
set.seed(1)
dat <- matrix(sample(1:7,10*5, replace = TRUE), 10,5)
valid(dat)
}
\author{
Weksi Budiaji \cr Contact: \email{budiaji@untirta.ac.id}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.