blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
28288bca64d1a9ec83ad6ecdcf598032bf0cbff8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SensoMineR/examples/compo.cocktail.Rd.R
|
b5c8f0ef99ec04009e9900e12a3e3513f343ed99
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
compo.cocktail.Rd.R
|
## Example script for the SensoMineR `compo.cocktail` dataset
## (appears auto-extracted from the package's Rd examples section).
library(SensoMineR)
### Name: compo.cocktail
### Title: Composition of the cocktails data
### Aliases: compo.cocktail
### Keywords: datasets
### ** Examples
# Load the `cocktail` dataset shipped with SensoMineR into the workspace.
data(cocktail)
|
35de9378cd129f83209e2e80b0ae09f7cfc1b872
|
865f81acbeb5e6014f1c118933e395b64053cd75
|
/aniR/R/red.R
|
706c1da2e94ed279f3d757ca32e6d3ce7acbb9fb
|
[] |
no_license
|
ntyndall/ani
|
a4450c65fb2b6ff90fa71ef32078a66e334f561e
|
abcd5cec32a2388e05aa266040651203c98213c8
|
refs/heads/master
| 2020-03-23T07:38:09.880278
| 2018-10-09T14:01:38
| 2018-10-09T14:01:38
| 141,282,515
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 239
|
r
|
red.R
|
#' Create Redis Connection
#'
#' Opens a connection to a Redis server through the redux package.
#'
#' @param db An integer value defining the database number
#' to store data.
#'
#' @return A redux redis connection
#'
#' @export
red <- function(db = 9) {
  # The connection object is the value of the last expression;
  # no explicit return() needed.
  redux::hiredis(db = db)
}
|
0ba3f27599876ee23cc3622df56469e6d43707bc
|
8efdaed601b9dbcdb0ebafd975f1dcba90ce278a
|
/filterAlignment.R
|
fae35ec3b0ea7224076a1a3ec5e24d98c0aa780c
|
[] |
no_license
|
ValentinaBoP/NeurosporaSpecificTE
|
b531bc1f1dd7a5eaca6959af0aaac2718727c7e5
|
dbce1dc3bcbd5af55d4452c3410f89d42259415f
|
refs/heads/master
| 2022-10-24T15:37:51.983046
| 2020-06-10T12:54:18
| 2020-06-10T12:54:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,656
|
r
|
filterAlignment.R
|
## Filter pairwise alignment "window" files to locate putative empty TE
## insertion sites. For each file: keep hits where the 3' and 5' flanks of
## the SAME element align close to one another, with a minimum alignment
## length, positioned near the flank interface; then write a filtered table
## and a BED file of the candidate sites.
library(data.table)
library(dplyr)

files = list.files(pattern = "window")
for (file in files){
  window = fread(file)
  # filter for the flanks of the same element that align close to one another
  window$ID = sub(pattern = "_flank3", replacement = "_flank5", x = window$V13)
  boo = window$V4 == window$ID
  filter = window[boo,]
  # filter for alignment length
  boo = filter$V5 >= 100
  filter = filter[boo,]
  # filter for position of the alignment: close to the interface
  boo = filter$V9 >= 400 & filter$V18 >= 400
  filter = filter[boo,]
  # save intermediate file with the coordinates of the putative empty sites
  # NOTE(review): "." in `pattern` is a regex wildcard; pass fixed = TRUE if
  # file names could contain other "?window" substrings - confirm inputs.
  filename = sub(pattern = ".window", replacement = "_putativeEmpty", x = file)
  # FIX: spell out TRUE/FALSE instead of the reassignable T/F shorthands.
  write.table(x = filter, file = filename, sep = "\t", quote = FALSE, col.names = FALSE, row.names = FALSE)
  # make BED format of the putative empty sites
  bed = data.frame(chr = character(), start = integer(), end = integer(), name = character(), score = integer(), strand = character())
  if (nrow(filter) > 0) {
    # seq_len() is safe for any row count (1:nrow would misbehave on 0 rows).
    for (j in seq_len(nrow(filter))) {
      chr = filter[j, 1]
      # BED span covers both query and subject coordinates of the hit.
      start = min(filter[j, c(2,3,11,12)])
      end = max(filter[j, c(2,3,11,12)])
      name = sub(pattern = "_flank5", replacement = "", x = filter[j, 4])
      score = filter[j, 5]
      strand = filter[j, 6]
      newLine = data.frame(chr = chr, start = start, end = end, name = name, score = score, strand = strand)
      bed = rbind(bed, newLine)
    }
    # save bed file for putative empty sites
    filename = sub(pattern = ".window", replacement = "_putativeEmpty.bed", x = file)
    write.table(x = bed, file = filename, sep = "\t", quote = FALSE, col.names = FALSE, row.names = FALSE)
  } else {
    print("empty file")
  }
}
|
6817d983cb39c4fa0e73f710cc31d499ffc1e3fb
|
95810d83e5600112be76ea12d344f2ecaacfa414
|
/man/make_request.Rd
|
a6eb9a3721c4bd03f5ea1b88d7a74dc8246000f7
|
[] |
no_license
|
ExtraSpace/openfootballr
|
a8695abc25daf8a1c549cf4103d628aa5b295fc1
|
1d5e1fb2b43a29f9f68fced9a94af003cc53fe80
|
refs/heads/master
| 2020-03-21T04:05:30.463535
| 2018-06-20T22:00:28
| 2018-06-20T22:00:28
| 138,089,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 408
|
rd
|
make_request.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/football.R
\name{make_request}
\alias{make_request}
\title{Make a url request using the header}
\usage{
make_request(url, header)
}
\arguments{
\item{url}{A character string giving the API url to request}
\item{header}{The header to use to request the url}
}
\value{
A response object
}
\description{
Make a url request using the header
}
|
11079f021df702d82e6ce6a4ec993f03ec011d9a
|
b7d1239a7708bcd011f712c4119cf907816fc6df
|
/Midterm/generate-pictures-for-midterm-2-1-2018.R
|
13939ccc6294c6b5a1409b0c3e9560168ac48cc1
|
[] |
no_license
|
wstuetzle/STAT180
|
45513e05576833ca9dd62ca732c9ffd33c6a8ee6
|
5a9ab7ab5cb63ed502181229a4fa58499c396f89
|
refs/heads/master
| 2021-05-13T12:47:00.509865
| 2019-02-20T20:11:20
| 2019-02-20T20:11:20
| 116,683,798
| 1
| 2
| null | 2019-05-29T22:40:01
| 2018-01-08T14:01:23
|
HTML
|
UTF-8
|
R
| false
| false
| 1,221
|
r
|
generate-pictures-for-midterm-2-1-2018.R
|
## Generate pictures for STAT180 midterm (2-1-2018)
## ================================================

## --- Exam figure 1: noisy sine-wave scatter ---------------------------------
n <- 200
x <- 2 * pi * runif(n)
y <- sin(x) + 0.3 * rnorm(n)
jpeg("sin-wave.jpg")
plot(x, y, pch = 20, xlab = "X", ylab = "Y")
dev.off()

## --- Exam figure 2: points rejection-sampled from an annulus ----------------
n <- 1000
count <- 0
outer <- 1
inner <- 0.7
## BUG FIX: preallocate n rows (was hard-coded 500). Roughly 40% of the 1000
## uniform draws land in the annulus, so counts above 500 are possible and
## would have triggered a "subscript out of bounds" error on assignment.
X <- matrix(0, nrow = n, ncol = 2)
for (i in seq_len(n)) {
  x <- runif(2, -1, 1)
  rad <- sqrt(sum(x^2))
  # accept the point if its radius lies within [inner, outer]
  if ((rad <= outer) & (rad >= inner)) {
    count <- count + 1
    X[count, ] <- x
  }
}
jpeg("annulus.jpg")
plot(X[1:count, ], pch = 20, xlab = "X", ylab = "Y")
dev.off()
count  # number of accepted points (autoprints when sourced interactively)

##-----------------------------------------------------------------
## generate pictures for solution
n <- 200
x <- 2 * pi * runif(n)
y <- sin(x) + 0.3 * rnorm(n)
jpeg("new-sin-wave-solution.jpg")
plot(x, y, pch = 20, xlab = "X", ylab = "Y")
lines(sort(x), sin(sort(x)), lwd = 3)  # overlay the true sine curve
dev.off()

n <- 1000
count <- 0
outer <- 1
inner <- 0.7
X <- matrix(0, nrow = n, ncol = 2)  # same buffer-size fix as above
for (i in seq_len(n)) {
  x <- runif(2, -1, 1)
  rad <- sqrt(sum(x^2))
  if ((rad <= outer) & (rad >= inner)) {
    count <- count + 1
    X[count, ] <- x
  }
}
jpeg("new-annulus-solution.jpg")
plot(X[1:count, ], pch = 20, xlab = "X", ylab = "Y")
abline(h = 0, lwd = 3)  # horizontal reference line for the solution figure
dev.off()
count
|
83dd4042d422f35802f9c1ce46c0f23beb2ddb86
|
93847072348289fec5ec39ac260ebd70f1642cb3
|
/app.R
|
2bbc18e6a826cd4c89d0f713de1a7e1cf9a98004
|
[] |
no_license
|
RockfordMankini/R6_Pack_Probabilties
|
c4a3eaa0a57be142e72401b8f82eca3ae08d5000
|
5ad8ef13ea58e4f4ae44c59592e09dcd47ec1bc1
|
refs/heads/master
| 2023-04-03T09:04:56.163953
| 2021-03-29T18:02:58
| 2021-03-29T18:02:58
| 352,737,308
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,802
|
r
|
app.R
|
library(shiny)
library(tidyverse)
library(ggplot2)
library(plotly)

# UI: a sidebar with the player's win rate (slider) and the number of alpha
# packs to simulate (numeric input); the main panel shows two interactive
# plotly histograms produced by the server.
ui <- fluidPage(
  # Application title
  titlePanel("Rainbow Six Alpha Pack Simulator"),
  # Sidebar inputs: win rate in (0.01, 1], pack count 1..100000
  sidebarLayout(
    sidebarPanel(
      sliderInput("winRate",
                  "Player win rate:",
                  min = 0.01,
                  max = 1,
                  value = .5),
      numericInput("packs", label = h3("Amount of Packs"), value = 1000, min=1, max=100000)
    ),
    # Outputs: distribution of games needed per pack, and of the open
    # probability at the moment each pack was earned.
    mainPanel(
      plotlyOutput("gamePlot"),
      plotlyOutput("probPlot")
    )
  )
)
# Server: simulates alpha-pack openings for the chosen win rate / pack count
# and renders the two histograms.
server <- function(input, output) {

  # Simulate earning `packs` alpha packs for a player with win rate `winRate`.
  # Mechanics as modelled here: the open chance starts at 2% and rises by 2%
  # after a win or 1.5% after a loss; a pack-open roll happens only after a
  # win (the `next` skips the roll on a loss). Returns a data.frame with one
  # row per pack: games played and the open probability at the drop.
  simulatePacks <- function(winRate, packs) {
    games <- rep(NA, packs)
    probs <- rep(NA, packs)
    for(i in 1:packs) {
      prob <- .02
      game <- 0
      loopOver <- FALSE
      while (!loopOver) {
        game <- game + 1
        # 1 = win, 2 = loss
        gameResult <- sample(c(1:2), size = 1, prob = c(winRate, 1-winRate))
        if(gameResult == 1) {
          prob <- prob + .02
        } else {
          # Loss: probability still accrues, but no pack roll this game.
          prob <- prob + .015
          next
        }
        # Pack roll (reuses gameResult: 1 = pack opened). A probability at or
        # above 1 opens with certainty.
        if(prob >= 1) {
          gameResult <- 1
        } else {
          gameResult <- sample(c(1:2), size = 1, prob = c(prob, 1-prob))
        }
        if(gameResult == 1) {
          loopOver <- TRUE
          games[i] <- game
          probs[i] <- prob
        }
      }
    }
    data.frame(Ob=1:packs, games=games, prob=probs)
  }

  # Build the two plotly histograms (games per pack; probability at success).
  getStats <- function(df) {
    mean_games <- mean(df$games)
    mean_prob <- mean(df$prob)
    # NOTE(review): the next two medians are computed but discarded.
    median(df$games)
    median(df$prob)
    games_plot <- df %>%
      ggplot(aes(x=games)) +
      geom_histogram(aes(y=..density..), fill="gold", color="black", binwidth = 2) +
      geom_density() +
      theme_minimal() +
      xlab("Games Played") +
      ylab("Probability Density") +
      labs(title = paste("Alpha Packs (", nrow(df), " packs simulated)", sep="")) +
      geom_vline(xintercept = mean_games, color = "red")
    games_plot <- ggplotly(games_plot)
    # NOTE(review): the next two bare expressions have no effect inside a
    # function (values are discarded, not printed).
    games_plot
    length(unique(df$prob))
    probs_plot <- ggplot(df, aes(prob)) +
      geom_histogram(aes(y=..density..), fill="gold", color="black", binwidth = .05) +
      geom_density() +
      theme_minimal() +
      xlab("Probability At Success") +
      ylab("Probability Density") +
      labs(title = paste("Alpha Packs (", nrow(df), " packs simulated)", sep="")) +
      geom_vline(xintercept = mean_prob, color = "red")
    probs_plot <- ggplotly(probs_plot)
    probs_plot
    # Return both plots; [[1]] = games histogram, [[2]] = probability histogram.
    list(games_plot, probs_plot)
  }

  # Re-simulate whenever either input changes.
  # NOTE(review): the bare `input$winRate` is redundant - the dependency is
  # already taken by the simulatePacks() call below.
  data <- reactive({
    input$winRate
    df <- simulatePacks(input$winRate, input$packs)
    getStats(df)
  })

  output$gamePlot <- renderPlotly({
    data()[[1]]
  })
  output$probPlot <- renderPlotly({
    data()[[2]]
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
87f45853083ae04a1e523f76770116e29bd55843
|
b946c489e67a65008de77a6a8da93f4d398e3f33
|
/man/bspl.Rd
|
be875522500f5a96bd9c51f4042cdbd97fa09862
|
[] |
no_license
|
EPauthenet/fda.oce
|
6380852e8998a0b956b46acc044c71717f5e1859
|
38c6b1d2dd6a4645235f7ec8db2dda7caad56c65
|
refs/heads/master
| 2022-12-10T14:37:26.095737
| 2021-02-01T13:14:00
| 2021-02-01T13:14:00
| 138,511,460
| 4
| 3
| null | 2022-12-07T02:45:42
| 2018-06-24T19:58:42
|
R
|
UTF-8
|
R
| false
| true
| 1,560
|
rd
|
bspl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bspl.R
\name{bspl}
\alias{bspl}
\title{B-spline fits on Multivariate Hydrographic Profiles}
\usage{
bspl(Pi, Xi, nbas = 20, fdn = list("Temperature", "Salinity"))
}
\arguments{
\item{Pi}{vector containing the levels}
\item{Xi}{array containing the profiles stored in this order \code{levels} x \code{stations} x \code{variables}}
\item{nbas}{number of Bsplines (coefficients), by default nbas = 20.}
\item{fdn}{a list of the variable names, by default fdn = list('Temperature','Salinity').}
}
\value{
\code{fdobj} : fd objects of the splines construction containing coefficients, basis etc... The coefficients are stored in an array \code{nbasis} x \code{stations} x \code{variables}
}
\description{
This function fits B-splines on multivariate hydrographic profiles and returns a functional data object.
}
\references{
Pauthenet et al. (2017) A linear decomposition of the Southern Ocean thermohaline structure. Journal of Physical Oceanography, http://dx.doi.org/10.1175/JPO-D-16-0083.1
Ramsay, J. O., and B. W. Silverman, 2005: Functional Data Analysis. 2nd Edition Springer, 426 pp., Isbn : 038740080X.
}
\seealso{
\code{\link{fpca}} for functional principal component analysis of T-S profiles, \code{\link{proj}} for computing Principal Components, \code{\link{reco}} for reconstructing profiles with less modes.
}
\author{
Etienne Pauthenet \email{<etienne.pauthenet@gmail.com>}, David Nerini \code{<david.nerini@univ-amu.fr>}, Fabien Roquet \code{<fabien.roquet@gu.se>}
}
|
315867565e4540e04004ccf23f4ebeded6ebd1a4
|
b911d7880552de34e496b43db7672eade7821891
|
/man/plinks.Rd
|
fbafd60e3594fd5aa969c43668511a67e99087bb
|
[] |
no_license
|
cran/glmx
|
5c158017436bbd6211211d7a66da02a909be7dfb
|
d12629f9b7211c0470e640d80ff10db0dfb60052
|
refs/heads/master
| 2023-04-09T15:07:18.390260
| 2023-03-27T07:10:02
| 2023-03-27T07:10:02
| 17,696,446
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,078
|
rd
|
plinks.Rd
|
\name{plinks}
\alias{plinks}
\alias{talpha}
\alias{ao1}
\alias{ao2}
\alias{gj}
\alias{angular}
\alias{foldexp}
\alias{rocke}
\alias{nblogit}
\alias{gosset}
\alias{pregibon}
\alias{loglog}
\concept{parametric link}
\concept{transformation}
\title{Parametric Links for Binomial Generalized Linear Models}
\description{
Various symmetric and asymmetric parametric links for use as
link function for binomial generalized linear models.
}
\usage{
gj(phi, verbose = FALSE)
foldexp(phi, verbose = FALSE)
ao1(phi, verbose = FALSE)
ao2(phi, verbose = FALSE)
talpha(alpha, verbose = FALSE, splineinv = TRUE,
eps = 2 * .Machine$double.eps, maxit = 100)
rocke(shape1, shape2, verbose = FALSE)
gosset(nu, verbose = FALSE)
pregibon(a, b)
nblogit(theta)
angular(verbose = FALSE)
loglog()
}
\arguments{
\item{phi, a, b}{numeric.}
\item{alpha}{numeric. Parameter in \eqn{[0,2]}{[0,2]}.}
\item{shape1, shape2, nu, theta}{numeric. Non-negative parameter.}
\item{splineinv}{logical. Should a (quick and dirty) spline function be used
for computing the inverse link function? Alternatively, a more precise but somewhat
slower Newton algorithm is used.}
\item{eps}{numeric. Desired convergence tolerance for Newton algorithm.}
\item{maxit}{integer. Maximal number of steps for Newton algorithm.}
\item{verbose}{logical. Should warnings about numerical issues be printed?}
}
\details{
  Symmetric and asymmetric families of parametric link functions are available.
Many families contain the logit for some value(s) of their parameter(s).
The symmetric Aranda-Ordaz (1981) transformation
\deqn{y = \frac{2}{\phi}\frac{x^\phi-(1-x)^\phi}{x^\phi+(1-x)^\phi}}{y = \tfrac{2}{\phi}\tfrac{x^\phi-(1-x)^\phi}{x^\phi+(1-x)^\phi}}
and the asymmetric Aranda-Ordaz (1981) transformation
\deqn{y = \log([(1-x)^{-\phi}-1]/\phi)}{y = \log([(1-x)^{-\phi}-1]/\phi)}
both contain the logit for \eqn{\phi = 0}{\phi = 0} and
\eqn{\phi = 1}{\phi = 1} respectively, where the latter also includes the
complementary log-log for \eqn{\phi = 0}{\phi = 0}.
The Pregibon (1980) two parameter family is the link given by
\deqn{y = \frac{x^{a-b}-1}{a-b}-\frac{(1-x)^{a+b}-1}{a+b}.}
For \eqn{a = b = 0} it is the logit. For \eqn{b = 0} it is symmetric and
\eqn{b} controls the skewness; the heaviness of the tails is controlled by
\eqn{a}. The implementation uses the generalized lambda distribution
\code{\link{gl}}.
The Guerrero-Johnson (1982) family
\deqn{y = \frac{1}{\phi}\left(\left[\frac{x}{1-x}\right]^\phi-1\right)}{y = \frac{1}{\phi}\left(\left[\frac{x}{1-x}\right]^\phi-1\right)}
is symmetric and contains the logit for \eqn{\phi = 0}{\phi = 0}.
The Rocke (1993) family of links is, modulo a linear transformation, the
cumulative density function of the Beta distribution. If both parameters are
set to \eqn{0} the logit link is obtained. If both parameters equal
\eqn{0.5} the Rocke link is, modulo a linear transformation, identical to the
angular transformation. Also for \code{shape1} = \code{shape2} \eqn{= 1}, the
identity link is obtained. Note that the family can be used as a one and a two
parameter family.
The folded exponential family (Piepho, 2003) is symmetric and given by
\deqn{y = \left\{\begin{array}{ll}
\frac{\exp(\phi x)-\exp(\phi(1-x))}{2\phi} &(\phi \neq 0) \\
x- \frac{1}{2} &(\phi = 0)
\end{array}\right.}
The \eqn{t_\alpha} family (Doebler, Holling & Boehning, 2011) given by
\deqn{y = \alpha\log(x)-(2-\alpha)\log(1-x)}{y = \alpha\log(x)-(2-\alpha)\log(1-x)}
is asymmetric and contains the logit for \eqn{\phi = 1}{\phi = 1}.
The Gosset family of links is given by the inverse of the cumulative
distribution function of the t-distribution. The degrees of freedom \eqn{\nu}
control the heaviness of the tails and is restricted to values \eqn{>0}. For
\eqn{\nu = 1} the Cauchy link is obtained and for \eqn{\nu \to \infty} the link
converges to the probit. The implementation builds on \code{\link{qf}} and is
reliable for \eqn{\nu \geq 0.2}. Liu (2004) reports that the Gosset link
approximates the logit well for \eqn{\nu = 7}.
Also the (parameterless) angular (arcsine) transformation
\eqn{y = \arcsin(\sqrt{x})}{y = \arcsin(\sqrt{x})} is available as a link
function.
}
\value{
An object of the class \code{link-glm}, see the documentation of \code{\link{make.link}}.
}
\references{
Aranda-Ordaz F (1981). \dQuote{On Two Families of Transformations to Additivity for Binary Response Data.}
\emph{Biometrika}, \bold{68}, 357--363.
Doebler P, Holling H, Boehning D (2012). \dQuote{A Mixed Model Approach to Meta-Analysis of Diagnostic Studies with Binary Test Outcome.}
\emph{Psychological Methods}, \bold{17}(3), 418--436.
Guerrero V, Johnson R (1982). \dQuote{Use of the Box-Cox Transformation with Binary Response Models.}
\emph{Biometrika}, \bold{69}, 309--314.
Koenker R (2006). \dQuote{Parametric Links for Binary Response.}
\emph{R News}, \bold{6}(4), 32--34.
Koenker R, Yoon J (2009). \dQuote{Parametric Links for Binary Choice Models: A Fisherian-Bayesian Colloquy.}
\emph{Journal of Econometrics}, \bold{152}, 120--130.
Liu C (2004). \dQuote{Robit Regression: A Simple Robust Alternative to Logistic and Probit Regression.}
In Gelman A, Meng X-L (Eds.),
\emph{Applied Bayesian Modeling and Causal Inference from Incomplete-Data Perspectives}, Chapter 21,
pp. 227--238. John Wiley & Sons.
Piepho H (2003). The Folded Exponential Transformation for Proportions.
\emph{Journal of the Royal Statistical Society D}, \bold{52}, 575--589.
Pregibon D (1980). \dQuote{Goodness of Link Tests for Generalized Linear Models.}
\emph{Journal of the Royal Statistical Society C}, \bold{29}, 15--23.
Rocke DM (1993). \dQuote{On the Beta Transformation Family.}
\emph{Technometrics}, \bold{35}, 73--81.
}
\seealso{\code{\link{make.link}}, \code{\link{family}}, \code{\link{glmx}}, \code{\link{WECO}}}
\keyword{regression}
|
05f83967763fca826dab30ac9da40e1bba7f3bc7
|
f4c5459aa76bc672d9b0919662f8f279df176578
|
/tests/testthat/test_matrix_like_ops.R
|
22839eb145950e7698849865fdcc9fa75ba76c0c
|
[
"MIT"
] |
permissive
|
wvqusrai/rTorch
|
204f306d91220c1972fb0ab895605470508bb32a
|
0010f675b7c086a03d0ead38d883f752cf159e80
|
refs/heads/main
| 2023-08-31T09:13:07.842617
| 2021-10-23T09:44:19
| 2021-10-23T09:44:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,420
|
r
|
test_matrix_like_ops.R
|
# testthat unit tests for matrix-like tensor operations through the rTorch
# bindings. `torch` and `np` are Python module proxies; tensor_dot(),
# tensor_dim() and skip_if_no_torch() presumably come from helper_utils.R /
# the package - confirm against that file. Everything is skipped when
# PyTorch is unavailable.
library(testthat)
source("helper_utils.R")

skip_if_no_torch()

# matrix like tensor operations ------------------------------------------------
context("matrix like tensor operations")

test_that("Dot product of 2 tensors", {
  # Dot product of 2 tensors: <4,2> . <3,1> = 4*3 + 2*1 = 14
  # direct operation with torch
  r = torch$dot(torch$Tensor(list(4L, 2L)), torch$Tensor(list(3L, 1L)))
  result <- r$item()
  expect_equal(result, 14)
  # using an R function and list
  r <- tensor_dot(torch$Tensor(list(4L, 2L)), torch$Tensor(list(3L, 1L)))
  result <- r$item()
  expect_equal(result, 14)
  # using an R function and vector
  r <- tensor_dot(torch$Tensor(c(4L, 2L)), torch$Tensor(c(3L, 1L)))
  result <- r$item()
  expect_equal(result, 14)
  # same result expected with doubles as with integer literals
  r <- tensor_dot(torch$Tensor(c(4, 2)), torch$Tensor(c(3, 1)))
  result <- r$item()
  expect_equal(result, 14)
})

test_that("Cross product", {
  # loginfo("Cross product")
  m1 = torch$ones(3L, 5L)
  m2 = torch$ones(3L, 5L)
  # Cross product
  # Size 3x5 - the output shape should match the inputs
  r = torch$cross(m1, m2)
  expect_equal(tensor_dim(r), c(3, 5))
})

test_that("multiply tensor by scalar", {
  # loginfo("\n Multiply tensor by scalar")
  tensor = torch$ones(4L, dtype=torch$float64)
  scalar = np$float64(4.321)
  # print(torch$scalar_tensor(scalar))
  prod = torch$mul(tensor, torch$scalar_tensor(scalar))
  # float comparison, hence the explicit tolerance
  expect_equal(prod$numpy(), array(c(4.321, 4.321, 4.321, 4.321)), tolerance = 1e-7)
  # print(class(prod$numpy()))
})
|
c2b81b40b37732809ad5a66bc73d0c9521875995
|
f34a7d3a93744c6f2e2a4669612b0e6ad43e151b
|
/cachematrix.R
|
2c25573e3465ec0932d2e21a97cb10000f3baa4a
|
[] |
no_license
|
PhiPrime/ProgrammingAssignment2
|
55a393df80fe0c75342d29b5c18c0938aa49ff94
|
059bcbf0306f7ef8caeb94ffaaa6d49e457ab382
|
refs/heads/master
| 2021-01-03T07:58:45.356956
| 2020-02-16T22:01:13
| 2020-02-16T22:01:13
| 239,991,205
| 0
| 0
| null | 2020-02-12T10:59:40
| 2020-02-12T10:59:39
| null |
UTF-8
|
R
| false
| false
| 1,081
|
r
|
cachematrix.R
|
## makeCacheMatrix builds a "cache-aware matrix": a list of four closures that
## share the matrix `x` and a lazily computed inverse:
##   set(y)      - replace the stored matrix and invalidate the cached inverse
##   get()       - return the stored matrix
##   setInv(inv) - store a computed inverse in the cache
##   getInv()    - return the cached inverse, or NULL if none has been stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # `<<-` writes into the enclosing environment shared by all closures.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setInv <- function(inv) cached_inverse <<- inv
  getInv <- function() cached_inverse
  list(set = set,
       get = get,
       setInv = setInv,
       getInv = getInv)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. When an inverse is already cached it is returned directly
## (with a message); otherwise the inverse is computed via solve(), written
## back into the cache, and returned. Extra arguments go through to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)  # cache miss: compute the inverse now
  x$setInv(inverse)               # store it for subsequent calls
  inverse
}
|
5d5ee3c22fbec3dde804e2f09ef5c8f5dabe8e50
|
2b86cbfbbbea042945eb923b9e27773758e404ee
|
/scripts/3_analysis_of_results/Probalistic_HD.R
|
e910d6cd0ff97164bc01593c4d5bf9543e8d1e11
|
[] |
no_license
|
avouacr/mobicountR
|
f83b2aa0c79a5d2d92bd7865586e62d2bcf3d8ba
|
61d5fd3834c407d1ba8892ca8d6b6c38b37cdb61
|
refs/heads/master
| 2022-04-08T19:39:43.470718
| 2020-02-21T15:16:28
| 2020-02-21T15:16:28
| 188,848,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,064
|
r
|
Probalistic_HD.R
|
# -----------------------------------------------------------------------------
# --- Voronoi_to_grid with priors from BD Topo
# -----------------------------------------------------------------------------
# Compares tile-level population counts from probabilistic home detection
# (PHD with a BD Topo prior) against RFL reference counts over a 500m grid of
# France, with an optional per-department density comparison.

# Libraries declaration -------------------------------------------------------
# NOTE(review): install.packages() at the top of an analysis script
# re-installs on every run; consider moving these to a one-off setup step.
install.packages("pryr")
install.packages("packages/minior-master.tar.gz", repos = NULL)
library(minior)
library(tidyverse)
library(data.table)
library(sf)
library(leaflet)
library(ggplot2)

# Minio (S3-compatible object store) bucket holding all inputs and outputs.
minio_bucket <- "etgtk6"

# # Count people at tile level and export results to Minio ----------------------
#
# # Import probabilistic home detection results
#
# save_minio_file(file_path = "MobiCount/home_detection/PHD_bdtopo_france_09.csv",
#                 bucket_name = minio_bucket,
#                 ext_path = "~/")
#
# PHD_bdtopo <- fread("~/PHD_bdtopo_france_09.csv",
#                     col.names = c("month", "caller_id", "grid_id", "proba"))
#
# PHD_bdtopo <- PHD_bdtopo[, 'month' := NULL]
#
# PHD_bdtopo[,uniqueN(caller_id)] # 16,6 millions people home detected
#
# # Normalize each caller's probabilities so that they sum to one
#
# PHD <- PHD_bdtopo[, .(proba_norm = proba / sum(proba)), by = .(caller_id)]
# PHD <- PHD[order(caller_id, proba_norm)]
# PHD_grid <- PHD_bdtopo[order(caller_id, proba)]
# PHD <- PHD[, grid_id := PHD_grid[grid_id]]
#
# # Aggregate at tile level
#
# counts <- PHD[, .(n = sum(proba_norm)), by = .(grid_id)]
#
# # Export counts to minio
#
# write_csv(counts, "~/counts_PHD_bdtopo_france_09.csv")
# put_minio_file("~/counts_PHD_bdtopo_france_09.csv",
#                "MobiCount/counts/counts_PHD_bdtopo_france_09.csv",
#                "etgtk6")

# Import inputs ---------------------------------------------------------------

# Counts at tile level from PHD with BD Topo prior
save_minio_file("MobiCount/counts/counts_PHD_bdtopo_france_09.csv",
                minio_bucket,
                ext_path = "~")
counts <- read_csv("~/counts_PHD_bdtopo_france_09.csv")

# France grid (CRS 2154 = Lambert-93, per the st_read calls below)
lapply(files_in_bucket(minio_bucket, "MobiCount/grids/grid_500_france"),
       function(x){
         save_minio_file(x,
                         bucket_name = minio_bucket,
                         ext_path = "~/grid_france")
       })
grid <- st_read("~/grid_france", crs = 2154)
grid$grid_id <- as.character(grid$grid_id)

# Voronoi tesselation
lapply(files_in_bucket(minio_bucket, "MobiCount/shp/voronoi"),
       function(x){
         save_minio_file(x,
                         bucket_name = minio_bucket,
                         ext_path = "~/voronoi")
       })
voronoi <- st_read("~/voronoi", crs = 2154)

# Table of intersections between grid and voronoi tesselation
save_minio_file("MobiCount/grids/grid_inter_voronoi_france.csv",
                minio_bucket,
                ext_path = "~")
table_inter <- read_csv("~/grid_inter_voronoi_france.csv")

# RFL counts at tile level
rfl <- read_csv("~/rfl_counts.csv")

# France departments shapefiles
lapply(files_in_bucket(minio_bucket, "MobiCount/shp/departements_fr"),
       function(x){
         save_minio_file(x,
                         bucket_name = minio_bucket,
                         ext_path = "~/shp_dep")
       })
dep_shp <- st_read("~/shp_dep/departements-20180101.shp") %>%
  st_transform(2154)

# Prepare data for comparison -------------------------------------------------
# One row per grid tile with both counts; missing counts are treated as zero.
df_compar_geo <- grid %>%
  left_join(counts) %>%
  rename(n_bdtopo = n) %>%
  left_join(rfl) %>%
  rename(n_rfl = nbpersm) %>%
  mutate(n_bdtopo = replace_na(n_bdtopo, 0),
         n_rfl = replace_na(n_rfl, 0))

df_plot_rfl <- df_compar_geo %>%
  filter(n_rfl != 0) %>%
  mutate(log_n_rfl = log(n_rfl)) %>%
  select(-n_bdtopo)

df_plot_bdtopo <- df_compar_geo %>%
  filter(n_bdtopo != 0) %>%
  mutate(log_n_bdtopo = log(n_bdtopo)) %>%
  select(-n_rfl)

# Localized analysis ----------------------------------------------------------

# Density comparison of PHD vs RFL (log) counts for one department.
# df_compar_geo: sf table built above; dep_shp: departments sf layer;
# num_dep: INSEE department code. Returns a ggplot density overlay.
compar_dep <- function(df_compar_geo, dep_shp, num_dep) {
  # Filter comparison table for specified department
  dep_shp <- dep_shp %>%
    filter(code_insee == as.character(num_dep))
  vec_inter <- sapply(st_intersects(df_compar_geo, dep_shp), length)
  df_compar_sub <- df_compar_geo[vec_inter == 1,]
  df_compar_sub_ng <- df_compar_sub %>% st_set_geometry(NULL)
  # Compute correlation between PHD with BD Topo and RFL
  # NOTE(review): `cor` is computed but never returned or printed.
  cor <- cor(df_compar_sub_ng$n_bdtopo, df_compar_sub_ng$n_rfl)
  # Compare densities
  df_compar_sub_ng %>%
    gather(source, "n", n_bdtopo:n_rfl) %>%
    filter(n >= 1) %>%
    mutate(n = round(n),
           log_n = log(n)) %>%
    ggplot(aes(x = log_n, colour = source, fill = source)) +
    geom_density(alpha = 0.1)
  #
}

# Compare densities of (log) population counts at France level
# NOTE(review): `df_compar_ng` is never defined in this script - it likely
# should be df_compar_geo %>% st_set_geometry(NULL); confirm before running.
df_compar_ng %>%
  gather(source, "n", n_bdtopo:n_rfl) %>%
  filter(n >= 1) %>%
  mutate(n = round(n),
         #n = replace(n, n==0, 1),
         log_n = log(n)) %>%
  ggplot(aes(x = log_n, colour = source, fill = source)) +
  geom_density(alpha = 0.1) +
  theme_bw()
|
4cceb201243758b5f274eee5aa5facf820f98f28
|
30eb33b9f7963b89c4891738f74f983a1ee0ff68
|
/man/cloud_summary.Rd
|
83e6fc8ff578d8f2ade17b1b55d78f9d18fa04ed
|
[
"MIT"
] |
permissive
|
cderv/revdepcheck
|
385d3650b394720705e8105c50ebd9260255a28c
|
1ba55f7d592091ea529e31be99dbde0d822d6dd1
|
refs/heads/main
| 2023-08-27T15:00:44.742575
| 2021-10-21T20:37:31
| 2021-10-27T13:52:55
| 422,241,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 768
|
rd
|
cloud_summary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloud.R
\name{cloud_summary}
\alias{cloud_summary}
\title{Display revdep results}
\usage{
cloud_summary(job_name = cloud_job(pkg = pkg), pkg = ".")
}
\arguments{
\item{job_name}{The job name, as returned by \code{\link[=cloud_check]{cloud_check()}}.}
\item{pkg}{Path to package.}
}
\description{
Displays nicely formatted results of processed packages run in the cloud.
}
\seealso{
Other cloud:
\code{\link{cloud_broken}()},
\code{\link{cloud_cancel}()},
\code{\link{cloud_check}()},
\code{\link{cloud_details}()},
\code{\link{cloud_fetch_results}()},
\code{\link{cloud_plot}()},
\code{\link{cloud_report}()},
\code{\link{cloud_results}()},
\code{\link{cloud_status}()}
}
\concept{cloud}
|
255d3b0e529bf0809f0023a9580c074cad0c5322
|
09b7b891feae6b40c917cb7bbf506dfb5082f83f
|
/scripts/Figure4.R
|
8b52170841113d321728a272a237070fe68e4e1f
|
[] |
no_license
|
craigbrinkerhoff/2020_headwaterCO2_CT
|
2228946db6fb56b710fd1a50ce8fdca1fb31734f
|
b4207e9c512471aac87e7c3ccadd90011cf0572c
|
refs/heads/main
| 2023-01-01T18:10:10.589343
| 2020-10-26T20:27:37
| 2020-10-26T20:27:37
| 307,469,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,291
|
r
|
Figure4.R
|
#Creator: Craig Brinkerhoff
#Date: Summer 2020
#Description: Create Figure 4
#NOTE: there are many explicit file paths throughout this script that you will need to set manually in order to run
#This script also relies a bunch on external datasets you will need to download and map to yourself. Check the manuscript for where to access and download.
library(tidyverse)
library(cowplot)
library(RColorBrewer)
library(colorspace)
library(grid)
library(gridExtra)

theme_set(theme_cowplot())

# NOTE(review): rm(list = ls()) and setwd() in a script are fragile; kept for
# compatibility with the original workflow, but consider removing them.
rm(list = ls())

#Set working directory with results
setwd('~\\results')

#Load in results
lakes <- read.csv('lakes_results.csv')
sumEvaded_by_order <- read.csv('evasion_by_order_MA.csv')
colnames(sumEvaded_by_order) <- c('StreamOrde', 'MA_evaded')
sa_by_order <- read.csv('wetted_sa_by_order.csv')
sa_by_order <- select(sa_by_order, c('StreamOrde', 'MA'))
sumEvaded_by_order <- left_join(sumEvaded_by_order, sa_by_order, by='StreamOrde')

#Create subplots
#Surface area vs evasion efficiency
bin <- 20
lakesPlot_area <- ggplot(lakes, aes(x=relEvasion*100, fill=factor(lakeAreaQuants))) +
  geom_histogram(size=0.75, binwidth=bin, color='black', position=position_dodge(bin-.4*(bin), preserve = 'total')) +
  scale_fill_brewer(palette='YlGnBu', name=paste0('Surface Area \nQuantiles [km\u00b2]'),
                    labels=c('0-0.001', '0.001-0.002', '0.002-97' ))+
  xlab('CO2 Evasion Efficiency [%]') +
  ylab("") +
  scale_x_continuous(limits=c(-10, 110), breaks = seq(0,110,25)) +
  #scale_y_log10()+
  theme(legend.position = c(0.08, 0.85),
        axis.text=element_text(size=20),
        axis.title=element_text(size=24,face="bold"),
        legend.text = element_text(size=17),
        legend.title = element_text(size=17, face='bold'))+
  ylim(0,4600)

#HRT vs evasion efficiency
lakesPlot_hrt <- ggplot(lakes, aes(x=relEvasion*100, fill=factor(lakeHRTQuants))) +
  geom_histogram(size=0.75, binwidth=bin, color='black', position=position_dodge(bin-.4*(bin), preserve = 'total')) +
  scale_fill_brewer(palette='YlGnBu', name=paste0('Residence Time \nQuantiles [dys]'),
                    labels=c('0-2', '2-7', '7-9,495'))+
  xlab('CO2 Evasion Efficiency [%]') +
  ylab("Lake Count") +
  # scale_y_log10()+
  theme(legend.position = c(0.08, 0.85),
        axis.text=element_text(size=20),
        axis.title=element_text(size=24,face="bold"),
        legend.text = element_text(size=17),
        legend.title = element_text(size=17, face='bold')) +
  scale_x_continuous(limits=c(-10, 110), breaks = seq(0,110,25)) +
  ylim(0,4600)

#total evasion by order
plot2 <- ggplot(sumEvaded_by_order, aes(x=factor(StreamOrde), y=MA_evaded/MA)) +
  geom_point(color='black', fill = '#66c2a5', shape=23, size=10, stroke=2) +
  xlab('Stream Order') +
  scale_y_log10(name = "Normalized Total Evasion \n [mg/L*km\u00b2]") +
  theme(legend.position = c(0.08, 0.85),
        axis.text=element_text(size=20),
        axis.title=element_text(size=24,face="bold"),
        legend.text = element_text(size=17),
        legend.title = element_text(size=17, face='bold'))

#Make figure: top row = two lake histograms, bottom row = evasion-by-order.
# `fig4` avoids shadowing the loaded grid package's namespace.
fig4 <- plot_grid(lakesPlot_hrt, lakesPlot_area, labels="auto", ncol = 2, label_size = 26)
fig4 <- plot_grid(fig4, plot2, ncol=1, labels = c(NA, 'c'), label_size = 26)
# BUG FIX: pass the assembled figure explicitly. ggsave() defaults to
# last_plot(), which never points at a plot_grid() result that was only
# assigned (not printed), so the combined figure was not what got saved.
# NOTE(review): path '~Figures\\' looks malformed - confirm ('~/Figures'?).
ggsave('Fig4.jpg', plot = fig4, path='~Figures\\', width=13, height=12)
|
663577fcce51a04d37380cb9f80d448fd4fb753a
|
252c4d47c1da5053ac911b97a635e4e3ae261186
|
/man/pvssg.Rd
|
2b24efff820272f922a3265fe5a78f60b8266c91
|
[] |
no_license
|
cipriuhq/pepa
|
dd987d68bb530658f3ab15845ae4127e86b2933e
|
3faced8e00e329347db55a7e141cfd9d14617a01
|
refs/heads/master
| 2021-06-18T17:52:20.724587
| 2017-06-26T16:32:50
| 2017-06-26T16:32:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 371
|
rd
|
pvssg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_pvs.R
\docType{data}
\name{pvssg}
\alias{pvssg}
\title{PVS summary global}
\format{A data frame with 163 columns and 6 rows.}
\source{
International Potato Center, potato data.
}
\usage{
pvssg
}
\description{
This data set has summaries (means, standard deviations) for several traits.
}
|
2ba1a1a32b136783e19c925b6471163fe09ab89d
|
d6b5625d77f30dcca110516748f7f076ee15e5c4
|
/R/package-setup.R
|
874be2cddc97f838a48b09c19163c3f23f5c3d6a
|
[
"MIT"
] |
permissive
|
JosiahParry/proj-template-example
|
2829adc95f2cadb558d41ec20d7501fb3cd539b0
|
17e24cd60f0acc7ad55aa9cf6c55af1f25fe8f8f
|
refs/heads/master
| 2020-12-27T22:33:22.950920
| 2020-02-03T23:59:02
| 2020-02-03T23:59:02
| 238,086,952
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
package-setup.R
|
# One-time scaffolding script for a new R package, driven by usethis.
# Each call below writes files into the active package directory (and may
# open them for editing); run interactively from the package root.
library(usethis)
# after creating package get started!
use_mit_license("Pkg Creator")   # adds LICENSE + LICENSE.md, updates DESCRIPTION
use_readme_rmd()                 # creates README.Rmd (knit to README.md)
use_news_md()                    # creates NEWS.md changelog
# - R functions `R/`
use_r("gs4_skeleton")            # creates R/gs4_skeleton.R for the first function
|
02463471400513ff0ec2b53cf4eb458de35a3ca2
|
627a50c426830203637f5ae8215a320c0a0aa4eb
|
/R/plot.GGInetwork.R
|
b30dd8fab6c001d603b66837ead70f729f362039
|
[] |
no_license
|
MathieuEmily/GeneGeneInteR
|
5f46a432d6760856ad79fc6c12cd612f31a4a7ac
|
60b96b60d686a0c290edad4646484027706ee81d
|
refs/heads/master
| 2021-05-24T01:49:18.126390
| 2020-11-27T21:22:07
| 2020-11-27T21:22:07
| 183,239,562
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,748
|
r
|
plot.GGInetwork.R
|
plot.GGInetwork <- function(x,method=c("heatmap","network"), threshold=NULL, col=c("#D6604D", "#104E8B"), colbar.width=0.15, title=NULL, hclust.order=FALSE, use.log=FALSE, NA.col="#D3D3D3", draw.pvals=NULL, draw.names=NULL, interact=FALSE, method.adjust=c("none","holm","hochberg","hommel","Bonferroni","BH","BY","fdr"), genes=seq_len(ncol(x$p.value)), plot.nointer=TRUE, ...){
  # S3 plot method for GGInetwork objects: dispatches the p-value matrix
  # either to the heatmap renderer (GGI.plot) or to the graph renderer
  # (draw.network), forwarding only the arguments each one understands.
  if (!is(x, "GGInetwork")) {
    stop("x should be an object of class GGInetwork.")
  }
  method <- match.arg(method)
  if (method == "heatmap") {
    GGI.plot(GGI = x$p.value, genes = genes, col = col,
             colbar.width = colbar.width, title = title,
             hclust.order = hclust.order, use.log = use.log,
             threshold = threshold, NA.col = NA.col,
             draw.pvals = draw.pvals, draw.names = draw.names,
             interact = interact, method.adjust = method.adjust)
  } else {
    draw.network(GGI = x$p.value, genes = genes,
                 threshold = threshold, plot.nointer = plot.nointer)
  }
}
# Draw a heatmap of a gene-gene interaction p-value matrix.
# The upper triangle is rendered as colored cells; a color bar or a
# discrete legend is added depending on `threshold`; gene names and
# cell p-values are drawn when the matrix is small enough; an optional
# click-to-inspect tooltip loop runs in interactive sessions.
# NOTE(review): the default `interact=!(draw.pvals && draw.names)` would
# evaluate NULL && NULL if relied upon — callers visible in this file
# always pass `interact` explicitly, so the default is never forced here.
GGI.plot <- function(GGI,genes=seq_len(ncol(GGI)),col=c("#D6604D", "#104E8B"), colbar.width=0.15,
                     title=NULL, hclust.order=FALSE, use.log=FALSE,
                     threshold=NULL, NA.col="#D3D3D3",
                     draw.pvals=NULL, draw.names=NULL,
                     interact=!(draw.pvals && draw.names),method.adjust=c("none","holm","hochberg","hommel","Bonferroni","BH","BY","fdr")) {
  # Argument validation: fail fast with explicit messages.
  if(!is.matrix(GGI) && !is.numeric(GGI[1, 1])) {
    stop("GGI argument should be a numeric matrix.")
  } else if (ncol(GGI) != nrow(GGI)) {
    stop("GGI argument should a symmetric matrix.")
  } else if (ncol(GGI) < 3) {
    stop("At least 3 genes must be provided.")
  } else if (!is.character(col)) {
    stop("col argument should be a character vector.")
  } else if (!is.numeric(colbar.width) || colbar.width < 0) {
    stop("colbar.width argument should be a positive numeric")
  } else if (!is.character(title) && !is.null(title)) {
    stop("title argument should be a string.")
  } else if ((!is.logical(draw.pvals) & !is.null(draw.pvals)) | (!is.logical(draw.names) & !is.null(draw.names))) {
    stop("show.pvals & draw.names arguments should be logical.")
  } else if (!is.logical(hclust.order)) {
    stop("hclust.order argument should be logical.")
  } else if (!is.logical(use.log)) {
    stop("use.log argument should be logical")
  } else if (!is.logical(interact)) {
    stop("interact argument should be logical")
  } else if (!is.null(threshold) && is.numeric(threshold) && (threshold > 1 || threshold < 0)) {
    stop("threshold argument can not be a numeric greater than 1 or lesser than 0.")
  } else if (!is.null(threshold) && is.character(threshold) && threshold != "R") {
    stop("threshold argument can not be any other string than 'R'.")
  } else if (!is.character(NA.col)) {
    stop("NA.col argument should be a character.")
  }
  if(is.character(genes) && any(!genes%in%colnames(GGI))){
    stop("Genes and GGI don't match. Please select genes that are named in GGI.")
  }
  # Restrict the matrix to the requested genes (indices or names).
  GGI <- GGI[genes,genes]
  # Defaults for draw.pvals / draw.names depend on matrix size;
  # above the hard caps (15 and 25 genes) even an explicit TRUE is overridden.
  if (is.null(draw.pvals)){
    draw.pvals <- (ncol(GGI) <= 15)
  }
  if (ncol(GGI) > 15){
    draw.pvals <- FALSE
  }
  if (is.null(draw.names)){
    draw.names <- (ncol(GGI) <= 25)
  }
  if (ncol(GGI) > 25){
    draw.names <- FALSE
  }
  # Multiple-testing correction, applied to each triangle separately
  # (both triangles hold the same p-values in a symmetric matrix).
  method.adjust <- match.arg(method.adjust)
  GGI[lower.tri(GGI)] <- p.adjust(GGI[lower.tri(GGI)],method=method.adjust)
  GGI[upper.tri(GGI)] <- p.adjust(GGI[upper.tri(GGI)],method=method.adjust)
  # Discretization cut points used when threshold == "R".
  R.thresh <- c(0.001, 0.01, 0.05, 0.1)
  # If only one color is parsed, white is
  # used to complete the scale
  if (length(col) < 2) {
    col <- c(col, "#FFFFFF")
  }
  # Optional -log10 scale: larger values now mean more significant,
  # so colors and thresholds are flipped accordingly.
  if (use.log){
    GGI <- -log10(GGI)
    diag(GGI) <- min(GGI[row(GGI) != col(GGI)])
    col <- rev(col)
    if (!is.null(threshold)) threshold <- -log10(threshold);
    R.thresh <- -log10(R.thresh)
  }
  col.FUN <- grDevices::colorRampPalette(col)
  # Names checking (generated if none)
  if (is.null(dimnames(GGI))){
    genes.names <- paste("Gene", seq_len(ncol(GGI)), sep=".")
    dimnames(GGI) <- list(genes.names, genes.names)
  }
  # Clustering
  if (hclust.order) {
    GGI.clust <- hclust(as.dist(GGI))
    GGI <- GGI[GGI.clust$order, GGI.clust$order]
  }
  # Calculating plot size
  plot.setup(GGI, colbar.width, draw.names, threshold)
  # Draw color map
  rect.pos <- draw.matrix(GGI, col.FUN, threshold, NA.col, R.thresh, use.log)
  # Draw color legend bar
  leg <- draw.colbar(GGI, col.FUN, colbar.width, threshold, R.thresh, NA.col, use.log)
  if (!is.null(leg)) leg <- leg$rect
  # Draw genes names
  if (draw.names && ncol(GGI) <= 25) {
    draw.genes.names(dimnames(GGI), rect.pos)
  } else if (draw.names) {
    warning("GGI object is too big (26+ genes): genes names were not plotted.
The use of the tooltip functionality is recommanded.")
  }
  # Draw p-values
  if (draw.pvals && ncol(GGI) <= 15) {
    draw.interp(GGI, rect.pos)
  } else if (draw.pvals) {
    warning("GGI object is too big (16+ genes): p-values were not plotted.
The use of the tooltip functionality is recommanded.")
  }
  # Draw title
  if (is.null(title)) {
    title <- "Genes Interactions Matrix Plot"
  }
  title(main=title)
  # Activate tooltip functionality: an event loop driven by locator()
  # that shows cell details on click; a second "miss" click exits.
  if (interact && interactive()) {
    writeLines("Click on a cell to get info on that cell.\nPress Esc. to leave.")
    prime.plot <- recordPlot()
    inter.tooltip <- FALSE
    keep.on <- TRUE
    while(keep.on) {
      coords <- locator(n=1)
      # If user forcefully stop the locator function then
      # break out of the loop.
      if (is.null(coords)) break
      # As the bottom left point is used to identify a square
      # on the plot, coordinates are floored.
      coords <- floor(as.numeric(coords))
      # Plot coordinates are converted back to matrix coordinates.
      coords <- c(row=nrow(GGI) - coords[2], col=coords[1])
      # Check if coordinates are conformant with GGI matrix
      coords.check <- try(GGI[coords['row'], coords['col']], silent=TRUE)
      if (!is(coords.check, 'try-error') && length(coords.check) == 1) {
        # Check if selected point is in upper triangle -diag excluded-
        # (onscreen part of the matrix).
        # It is the case when column index is strictly superior to
        # row index.
        if (coords[2] > coords[1]) {
          inter.tooltip <- TRUE
          clear.tooltip(inter.tooltip, prime.plot)
          draw.tooltip(coords, GGI, leg)
        } else {
          keep.on <- clear.tooltip(inter.tooltip, prime.plot)
          inter.tooltip <- !keep.on
        }
      } else {
        keep.on <- clear.tooltip(inter.tooltip, prime.plot)
        inter.tooltip <- !keep.on
      }
    }
  }
}
# Function that computes the graphic window x and y extreme values.
# No real plotting happens in this function (empty window is opened).
# Layout: the matrix occupies ncol(GGI) x nrow(GGI) units; when no
# threshold is given, extra horizontal room is reserved for the
# gradient color bar and its tick labels; extra vertical room is
# reserved for rotated column names when they will be drawn.
plot.setup <- function(GGI, colbar.width, draw.names, threshold) {
  if(!is.matrix(GGI) && !is.numeric(GGI[1, 1])) {
    stop("GGI argument should be a numeric matrix.")
  } else if (ncol(GGI) != nrow(GGI)) {
    stop("GGI argument should a symmetric matrix.")
  } else if (ncol(GGI) < 3) {
    stop("At least 3 genes must be provided.")
  } else if (!is.numeric(colbar.width)) {
    stop("colbar.width argument should be numeric.")
  } else if (!is.logical(draw.names)) {
    stop("draw.names argument should be TRUE or FALSE.")
  } else if (!is.null(threshold) && is.character(threshold) && threshold != "R") {
    stop("threshold argument can not be any other string than 'R'.")
  }
  # Widths and heights of elements are calculated
  if (is.null(threshold)) {
    # Gradient bar mode: reserve room proportional to the matrix size.
    colorLegend.height <- nrow(GGI)
    colorLegend.width <- max(0.5, (ncol(GGI)/2)*colbar.width)
    matCol.padding <- colorLegend.width * 0.5
    colorLegend.space <- 1
  } else {
    # Discrete legend mode: legend() is placed inside the plot later,
    # so only minimal padding is needed.
    colorLegend.height <- 0
    colorLegend.width <- 0.5
    matCol.padding <- colorLegend.width * 0.5
    colorLegend.space <- 0
  }
  plot.width <- ncol(GGI) + colorLegend.space + colorLegend.width + matCol.padding
  plot.height <- nrow(GGI)
  # First empty plot: establishes a coordinate system so that strwidth()
  # below returns sizes in user units.
  plot(0, xlim=c(2, plot.width), ylim=c(1, plot.height), type="n",
       xaxt="n", yaxt="n", xlab="", ylab="", bty="n")
  # If names are to be plotted and exist, text padding is calculated
  if (draw.names & ncol(GGI) <= 25 & !is.null(colnames(GGI))){
    # 45-degree rotated labels: project their lengths on both axes.
    names.length <- strwidth(colnames(GGI))
    text.vpadding <- ceiling(max(sin(pi/4) * names.length[-ncol(GGI)])) + 0.25
    text.lpadding <- floor(min(seq(2, nrow(GGI)) - 0.25 - names.length[-1]))
    text.rpadding <- ceiling(max(cos(pi/4) * names.length[-ncol(GGI)]))
  } else {
    text.vpadding <- 2
    text.lpadding <- 0
    text.rpadding <- 0
  }
  if (is.null(threshold)) {
    colbar.text.padding <- ceiling(colorLegend.width*0.1 + strwidth("0.75"))
  } else {
    colbar.text.padding <- 0
  }
  # Second (definitive) empty plot with the final extents.
  xlim <- c(text.lpadding, plot.width + colbar.text.padding + text.rpadding)
  ylim <- c(1, plot.height + text.vpadding)
  plot(0, xlim=xlim, ylim=ylim, type="n",
       xaxt="n", yaxt="n", xlab="", ylab="", bty="n")
}
# Function that draw the upper triangle of GGI matrix.
# Cells are colored according to corresponding p-values and
# the value of threshold.
# Invisibly return the coordinates of the bottom left point of each
# square. (to save some computing time later)
# Color assignment modes:
#   threshold NULL      -> continuous gradient over [0,1] (or [0,max] in log scale)
#   threshold numeric   -> binary significant/non-significant coloring
#   threshold "R"       -> 5 classes cut at R.thresh (0.001/0.01/0.05/0.1)
draw.matrix <- function(GGI, col.FUN, threshold, NA.col, R.thresh, use.log = FALSE) {
  if(!is.matrix(GGI) && !is.numeric(GGI[1, 1])) {
    stop("GGI argument should be a numeric matrix.")
  } else if (ncol(GGI) != nrow(GGI)) {
    stop("GGI argument should a symmetric matrix.")
  } else if (ncol(GGI) < 3) {
    stop("At least 3 genes must be provided.")
  } else if (!is.function(col.FUN)) {
    stop("col.FUN argument should be a function resulting from colorRampPalette.")
  } else if (!is.logical(use.log)) {
    stop("use.log argument should be a logical.")
  } else if (!is.numeric(R.thresh) || length(R.thresh) != 4) {
    stop("R.thresh argument should be a numeric vector of length 4.")
  } else if (!is.character(NA.col)) {
    stop("NA.col argument should be a character.")
  }
  # Only the upper triangle is drawn (column-major order).
  rect.data <- GGI[upper.tri(GGI)]
  # Assigning colors depending on display options
  if (is.null(threshold)){
    # If gradient is displayed then probs are turned into a percentage first
    if (use.log) {
      quantiles <- c(0, max(GGI, na.rm=TRUE))
    } else {
      quantiles <- c(0, 1)
    }
    rect.perc <- (rect.data - quantiles[1]) / (diff(quantiles))
  } else if (is.numeric(threshold)) {
    # If a threshold is used
    # In log scale "significant" means >= threshold; in raw scale <=.
    if (use.log) {
      rect.perc <- ifelse(rect.data >= threshold, 0, 1)
    } else {
      rect.perc <- ifelse(rect.data <= threshold, 0, 1)
    }
  } else if (is.character(threshold)) {
    # "R"-style classes: bin the p-values at the four cut points.
    if (use.log){
      rect.perc <- findInterval(rect.data, rev(R.thresh))
    } else {
      rect.perc <- findInterval(rect.data, R.thresh)
    }
    rect.perc <- rect.perc/4
  }
  # Map the [0,1] fraction onto the 200-color palette (index 1..200).
  rect.perc <- floor(rect.perc*200)
  rect.perc[rect.perc == 0] <- 1
  rect.col <- col.FUN(200)[rect.perc]
  # NA values are also disabled
  rect.col[which(is.na(rect.data))] <- NA.col
  # Convert matrix (row, col) indices into plot coordinates:
  # x = column, y = flipped row (matrix row 1 on top).
  rect.pos <- which(upper.tri(GGI), arr.ind = TRUE)
  temp.X <- rect.pos[, 2]
  rect.pos[, 2] <- max(rect.pos[, 1]) - rect.pos[, 1] + 1
  rect.pos[, 1] <- temp.X
  rect(xleft = rect.pos[, 1],
       ybottom = rect.pos[, 2],
       xright = rect.pos[, 1] + 1,
       ytop = rect.pos[, 2] + 1,
       col = rect.col)
  invisible(rect.pos)
}
# Function that draws the gradient indicator.
# The gradient bar is sliced accross the height into a
# large number of smaller rectangles.
# With a NULL threshold a continuous bar with tick labels and an NA
# swatch is drawn and NULL is returned; otherwise a discrete legend()
# is placed bottom-left and its return value (containing $rect) is
# handed back so the tooltip can avoid overlapping it.
draw.colbar <- function(GGI, col.FUN, colbar.width, threshold, R.thresh, NA.col, use.log = FALSE) {
  if(!is.matrix(GGI) && !is.numeric(GGI[1, 1])) {
    stop("GGI argument should be a numeric matrix.")
  } else if (ncol(GGI) != nrow(GGI)) {
    stop("GGI argument should a symmetric matrix.")
  } else if (ncol(GGI) < 3) {
    stop("At least 3 genes must be provided.")
  } else if (!is.function(col.FUN)) {
    stop("col.FUN argument should be a function resulting from colorRampPalette.")
  } else if (!is.logical(use.log)) {
    stop("use.log argument should be a logical.")
  } else if (!is.numeric(R.thresh) || length(R.thresh) != 4) {
    stop("R.thresh argument should be a numeric vector of length 4.")
  } else if (!is.character(NA.col)) {
    stop("NA.col argument should be a character.")
  }
  if (is.null(threshold)){
    # Geometry mirrors plot.setup() so the bar lands in the reserved space.
    colorLegend.height <- nrow(GGI)
    colorLegend.width <- max(0.5, (ncol(GGI)/2)*colbar.width)
    matCol.padding <- colorLegend.width * 0.5
    NA.height <- 0.05 * colorLegend.height
    NA.padding <- 0.5 * matCol.padding
    colbar.start <- NA.height + NA.padding
    # 200 borderless slices build the gradient...
    rect(xleft = rep(ncol(GGI) + 1 + matCol.padding, 200),
         ybottom = seq(1 + colbar.start, ncol(GGI), length=201)[-201],
         xright = rep(ncol(GGI) + 1 + matCol.padding + colorLegend.width, 200),
         ytop = seq(1 + colbar.start, ncol(GGI), length=201)[-1],
         col = col.FUN(200),
         border = NA)
    # ...and one outlined rectangle frames it.
    rect(xleft = ncol(GGI) + 1 + matCol.padding,
         ybottom = 1 + colbar.start,
         xright = ncol(GGI) + 1 + matCol.padding + colorLegend.width,
         ytop = ncol(GGI),
         col = NA,
         border = "black")
    # Tick labels: raw scale uses fixed probability marks; log scale
    # uses the quantiles of the (transformed) data.
    if (use.log) {
      quantiles <- as.numeric(format(quantile(GGI, na.rm=TRUE, names=FALSE), digits=2))
      quantiles.pos <- seq(0, 1, 0.25)
    } else {
      quantiles <- c(0, 0.05, 0.25, 0.5, 0.75, 1)
      quantiles.pos <- quantiles
    }
    segments(x0 = rep(ncol(GGI) + 1 + matCol.padding + colorLegend.width, 6),
             y0 = (ncol(GGI) - 1 -colbar.start) * quantiles.pos + 1 + colbar.start,
             x1 = rep(ncol(GGI) + 1 + matCol.padding + colorLegend.width*1.1 , 6),
             y1 = (ncol(GGI) - 1 -colbar.start) * quantiles.pos + 1 + colbar.start)
    text(x = rep(ncol(GGI) + 1 + matCol.padding + colorLegend.width*1.1 , 6),
         y = (ncol(GGI) - 1 - colbar.start) * quantiles.pos + 1 + colbar.start,
         labels = quantiles,
         pos = 4)
    # NA legend
    rect(xleft = ncol(GGI) + 1 + matCol.padding,
         ybottom = 1,
         xright = ncol(GGI) + 1 + matCol.padding + colorLegend.width,
         ytop = 1 +NA.height,
         col = NA.col,
         border = "black")
    text(x = ncol(GGI) + 1 + matCol.padding + colorLegend.width*1.1,
         y = 1 + 0.5*NA.height,
         labels = "NA",
         pos = 4)
    return(NULL)
  } else if (is.numeric(threshold)) {
    # Binary mode: two swatches (below / above the cutoff) plus NA.
    sign <- c("<", ">")
    leg <- legend("bottomleft", c(paste(sign, round(threshold, 3)), 'NA'),
                  fill = c(col.FUN(200)[c(1, 200)], NA.col)
    )
  } else if (is.character(threshold)) {
    # "R"-style mode: one swatch per significance class plus NA.
    if (!use.log) {
      legends <- c("< 0.001", "< 0.01", "< 0.05", "< 0.1", "> 0.1")
    } else {
      legends <- c("< 1", "> 1", "> 1.3", "> 2", "> 3")
    }
    leg <- legend("bottomleft", c(legends, 'NA'),
                  fill = c(col.FUN(200)[c(1, 50, 100, 150, 200)], NA.col)
    )
  }
  return(leg)
}
# Function that draws genes' names on the plot.
# As the diagonale of the matrix is not drawn, the first
# names is skipped vertically and the last horizontally.
#
# genes.names : list of two character vectors (row names, column names),
#               same length, at most 25 entries.
# rect.pos    : numeric matrix of bottom-left cell coordinates, as
#               returned invisibly by draw.matrix().
draw.genes.names <- function(genes.names, rect.pos) {
  if(!is.list(genes.names) && length(genes.names) != 2) {
    stop("genes.names argument should be a list of length two.")
  } else if (!is.character(genes.names[[1]]) | !is.character(genes.names[[2]])) {
    stop("genes.names argument should be a list of character vectors.")
  } else if (length(genes.names[[1]]) != length(genes.names[[2]])) {
    stop("genes.names[[1]] & genes.names[[2]] should be of same length.")
  } else if (length(genes.names[[1]]) > 25) {
    stop("Can't handle more than 25 names.")
  } else if (!is.matrix(rect.pos) && !is.numeric(rect.pos[1, 1])) {
    stop("rect.pos argument should be a numeric matrix")
  }
  # Shrink labels as the gene count grows: 3 genes -> cex 0.96,
  # down towards 0.67 at 25 genes (1 - 1/(3:25), largest first).
  cex=sort((1 - 1/3:25), decreasing=TRUE)[length(genes.names[[1]]) - 2]
  # Horizontaly (row labels, left of each row; last name skipped)
  # BUG FIX: cex was computed above but never passed to text().
  text(x = sort(unique(rect.pos[, 1])) - 0.25,
       y = sort(unique(rect.pos[, 2]), decreasing = TRUE) + 0.5,
       labels = genes.names[[1]][-length(genes.names[[2]])],
       pos = 2,
       cex = cex)
  # Verticaly (column labels above the matrix, rotated 45 degrees;
  # first name skipped)
  text(x = sort(unique(rect.pos[, 1])) + 0.5*min(c(1, (1/length(genes.names[[1]]) ))),
       y = max(rect.pos[, 2]) + 1 + 0.25,
       labels = genes.names[[2]][-1],
       pos = 4,
       srt = 45,
       cex = cex)
}
# Write the interaction p-values inside the matrix cells.
# A decreasing sequence of cex values is tried until the widest
# formatted p-value fits inside a cell (strwidth < 0.9 user units),
# so labels are as large as possible while staying within squares.
draw.interp <- function(GGI, rect.pos){
  if(!is.matrix(GGI) && !is.numeric(GGI[1, 1])) {
    stop("GGI argument should be a numeric matrix.")
  } else if (ncol(GGI) != nrow(GGI)) {
    stop("GGI argument should a symmetric matrix.")
  } else if (ncol(GGI) < 3) {
    stop("At least 3 genes must be provided.")
  } else if (!is.matrix(rect.pos) && !is.numeric(rect.pos[1, 1])) {
    stop("rect.pos argument should be a numeric matrix")
  }
  # Upper-triangle p-values, formatted in scientific notation.
  cell.labels <- format(GGI[upper.tri(GGI)], digits = 2, scientific = TRUE)
  # Scan 30 candidate sizes from 1 down to 0 and keep the first that fits.
  for (candidate in seq(1, 0, length = 30)) {
    cex <- candidate
    if (max(strwidth(cell.labels, cex = cex)) < 0.9) {
      break
    }
  }
  # Center each label in its cell.
  text(rect.pos[, 1] + 0.5,
       rect.pos[, 2] + 0.5,
       labels = cell.labels,
       cex = cex)
}
# Function that draws the tooltip windows
# A white black-bordered box is first created
# and text is plotted on top of it.
# coords     : named numeric vector c(row=, col=) into GGI.
# GGI        : the (possibly adjusted) p-value matrix being displayed.
# legend.box : $rect component of a legend() call, or NULL when the
#              gradient bar is used; the tooltip is anchored next to it
#              so they do not overlap.
draw.tooltip <- function(coords, GGI, legend.box) {
  if (is.null(legend.box)) {
    # No discrete legend: anchor at the bottom-left of the plot region.
    bottomleft <- par('usr')[c(1, 3)]
  } else {
    # Anchor at the legend's top-right corner.
    bottomleft <- c(legend.box$left + legend.box$w, legend.box$top - legend.box$h)
  }
  tooltip.str <- paste0('Interaction: ',
                        rownames(GGI)[coords[1]], ':',
                        colnames(GGI)[coords[2]],
                        '\np-val: ',
                        format(GGI[coords[1], coords[2]],
                               digits=4))
  # Background box sized to the text, with a small margin.
  rect(xleft = bottomleft[1],
       ybottom = bottomleft[2],
       xright = bottomleft[1] + strwidth(tooltip.str)*1.1,
       ytop = bottomleft[2] + strheight(tooltip.str)*1.5,
       col = "white")
  text(x = bottomleft[1] + strwidth(tooltip.str)*0.05,
       y = mean(c(bottomleft[2], bottomleft[2] + strheight(tooltip.str)*1.5)),
       labels = tooltip.str,
       pos = 4, offset=0)
}
# Clear a previously drawn tooltip, or signal that the interactive
# loop should end.
# inter.tip  : TRUE when a tooltip is currently on screen.
# prime.plot : recorded plot (recordPlot()) to restore underneath.
# Returns TRUE when a tooltip was cleared (keep interacting),
# FALSE when there was nothing to clear (caller stops the loop).
clear.tooltip <- function(inter.tip, prime.plot) {
  if (!inter.tip) {
    # Tooltip already cleared: a second "miss" click ends interaction.
    return(FALSE)
  }
  # Redraw the pristine plot, erasing the tooltip overlay.
  replayPlot(prime.plot)
  TRUE
}
# Render significant gene-gene interactions as an undirected igraph
# network: one vertex per gene, one edge per pair whose (adjusted)
# p-value is below `threshold` (default 0.05). Genes without any
# significant interaction are kept as isolated vertices unless
# plot.nointer = FALSE.
draw.network <- function(GGI,genes=seq_len(ncol(GGI)),threshold=0.05,plot.nointer=TRUE,method.adjust=c("none","holm","hochberg","hommel","Bonferroni","BH","BY","fdr")){
  if(length(genes)<2 || length(genes)>ncol(GGI)){
    stop("Number of genes selected not valid.")
    # } else if(!class(GGI)%in%c("data.frame","matrix")){
  } else if(! (is(GGI,"data.frame") | is(GGI,"matrix"))){
    stop("GGI must be a data.frame.")
  } else if(ncol(GGI)!=nrow(GGI)){
    stop("GGI must be a squared matrix, containing the pValues for each interaction between genes.")
  } else if(! (is(threshold,"numeric") | is(threshold,"integer") | is.null(threshold))){
    stop("Threshold must be a numeric.")
  } else if(is.null(threshold)){
    threshold<-0.05
  } else if(threshold>1 || threshold<0){
    stop("Threshold must be comprised in [0,1].")
  } else if(!is(plot.nointer,"logical")){
    stop("plot.inter must be a boolean.")
  }
  if(is.character(genes)&&any(!genes%in%colnames(GGI))){
    stop("Genes and GGI don't match. Please select genes that are named in GGI.")
  }
  # Restrict to the requested genes, then adjust each triangle's p-values.
  GGI <- GGI[genes,genes]
  method.adjust <- match.arg(method.adjust)
  GGI[lower.tri(GGI)] <- p.adjust(GGI[lower.tri(GGI)],method=method.adjust)
  GGI[upper.tri(GGI)] <- p.adjust(GGI[upper.tri(GGI)],method=method.adjust)
  dim <- ncol(GGI)
  # Lower triangle in column-major order: column i paired with rows
  # (i+1)..dim — from.raw/to.raw below are built in the same order.
  pVal.raw <- GGI[lower.tri(GGI)]
  if(any(is.na(pVal.raw))){
    warning("NAs found in GGI, considered as not significative.")
    pVal.raw[is.na(pVal.raw)]<-1
  }
  # NOTE(review): vectors grown with c() inside the loop; fine for the
  # small gene counts used here, but O(dim^2) copies in general.
  from.raw <- c()
  to.raw <- c()
  for (i in seq_len(dim-1)){
    from.raw <- c(from.raw, rep(colnames(GGI)[i], dim-i))
    to.raw <- c(to.raw, rownames(GGI)[(i+1):dim])
  }
  # Keep only the significant pairs.
  from <- from.raw[pVal.raw<threshold]
  to <- to.raw[pVal.raw<threshold]
  pVal <- pVal.raw[pVal.raw<threshold]
  # Vertex set: all genes, or only genes with at least one edge.
  if(plot.nointer){
    actors <- data.frame(name=levels(as.factor(unique(c(from.raw,to.raw)))))
  } else {
    actors <- data.frame(name=levels(as.factor(unique(c(from,to)))))
  }
  if(length(from)==0){warning("No interactions has been found between the genes selected.")}
  relations <- data.frame(from=from,
                          to=to,
                          pVal=pVal)
  g <- igraph::graph_from_data_frame(relations, directed=FALSE, vertices=actors)
  plot(g, vertex.size=10)
}
|
f56f9e9020bc07bc6c442b0c20fe9c15a40f30ba
|
aa18ff5ac84f719c8719da0de4ae0d8f25791eb5
|
/project/R/MatchingKnowledge.R
|
a7a773477207d3660c236ccf12c4bf6982cac5c3
|
[] |
no_license
|
jleslie17/2017_VR
|
e68ce6517957b54d24e66ae60c3934637c9a66fb
|
4485e4c01eab757c63fe73b5fd70c3271fa37dd5
|
refs/heads/master
| 2021-06-14T06:58:36.570822
| 2017-05-04T19:45:37
| 2017-05-04T19:45:37
| 80,775,532
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,300
|
r
|
MatchingKnowledge.R
|
# Use this script to match attendees based on knowledge, '3.1'
# Loads helper scripts and the registration CSV, then splits the survey
# answer columns into "targets" (what people offer) and "users" (what
# people seek) and one-hot encodes the target answers.
# NOTE(review): AddUnderscores/SpreadResponses/GetMatches/
# GetCompanyMatchesOutput are defined in the sourced files, not here.
source('AddUnderscores.R')
source('SpreadResponses.R')
library(lsa)
source('GetMatches.R')
source('CompanyMatchesOutput.R')
path <- getwd()
datadir <- paste(path, '/data/', sep = '')
#Load and clean data
FileMS2 <- 'Milestone2utf8.csv'
File130916 <- 'SSL_Reg_13.09.16.csv'
File190916 <- 'Registration19Sept.csv'
Data <- read.csv(paste(datadir, File190916, sep = ''),
                 header = T, na.strings = '')
# Drop rows that are entirely NA.
Data <- Data[rowSums(is.na(Data)) != ncol(Data),]
# Columns 21-33 hold the survey answers; 1-6 = offered knowledge
# (targets), 8-13 = sought knowledge (users) — assumes this fixed
# layout of the registration export.
df <- Data[,21:33]
names(df)
targets <- df[,1:6]
users <- df[,8:13]
names(targets)
names(users)
# Put underscores between words, but not between answers
targets <- AddUnderscores(targets)
#Spread responses
tarSpread <- SpreadResponses(targets)
names(tarSpread)
# Remove columns with no predictive power
drops <- c('Other', 'na', 'N/A', '.*please.*select.*', 'NEED.*INFO',
           'NEED_INFO', '---_please_select_---', 'n/a', '-', '.', 'none',
           'X', 'None', 'N/a', 'No_interest', '%', 'n.a', 'tbc', 'n.a.', 'NA')
#drops <- c('Other', 'na', 'N/A', '---_please_select_---')
tarSpread <- tarSpread[, !names(tarSpread) %in% drops]
colSums(tarSpread)
names(tarSpread)
CombineSimilarColumns <- function(df, a, b){
  # Takes entries from column b and puts them into col a, then removes
  # col b. Use this when two columns have slightly different names but
  # tell the same information.
  #
  # df : data.frame of 0/1 indicator columns.
  # a  : name of the column to keep (receives b's 1s).
  # b  : name of the column to fold into `a` and drop.
  # Returns df without column b, always as a data.frame.
  df[[a]][df[[b]] == 1] <- 1
  # drop = FALSE keeps a data.frame even when only one column remains
  # (bug fix: the original silently collapsed to a vector in that case).
  df <- df[, names(df) != b, drop = FALSE]
  return(df)
}
# Clean up tarSpread
names(tarSpread)[names(tarSpread) == 'wireless_connectivity'] <- 'Wireless_connectivity'
# One-hot encode the users' answers the same way as the targets.
users <- AddUnderscores(users)
usersSpread <- SpreadResponses(users)
usersSpread <- usersSpread[, !names(usersSpread) %in% drops]
# Tidy these due to bad formatting:
names(usersSpread)
# Sort columns alphabetically on both sides so they can be aligned.
usersSpread <- usersSpread[,sort(names(usersSpread))]
tarSpread <- tarSpread[,sort(names(tarSpread))]
names(usersSpread)
# Fold duplicated answer columns (typos / trailing underscores) into one.
a <- 'Robotics_and_AI'
b <- 'Robotics_and_AI_'
table(usersSpread[[a]])
table(usersSpread[[b]])
usersSpread <- CombineSimilarColumns(usersSpread, a, b)
table(usersSpread[[a]])
table(usersSpread[[b]])
names(usersSpread)
# usersSpread$Robotics_and_AI[usersSpread$Robotics_and_AI_ == 1] <- 1
# usersSpread <- usersSpread[,names(usersSpread) != 'Robotics_and_AI_']
#africa <- 'Smart_technology_African_cities'
# names(tarSpread)[34] <- africa
# names(usersSpread)[25] <- africa
a <- 'information_security'
b <- 'inforamtion_security'
table(usersSpread[[a]])
table(usersSpread[[b]])
usersSpread <- CombineSimilarColumns(usersSpread, a, b)
table(usersSpread[[a]])
table(usersSpread[[b]])
names(usersSpread)
a <- 'software_development'
b <- 'Software'
table(usersSpread[[a]])
table(usersSpread[[b]])
usersSpread <- CombineSimilarColumns(usersSpread, a, b)
table(usersSpread[[a]])
table(usersSpread[[b]])
names(usersSpread)
# Now remove the columns in users that do not appear in targets
names(tarSpread)
names(usersSpread)
names(usersSpread)[!names(usersSpread) %in% names(tarSpread)]
usersTidy <- usersSpread[,names(usersSpread) %in% names(tarSpread)]
names(tarSpread)
names(tarSpread)[!names(tarSpread) %in% names(usersTidy)]
tarTidy <- tarSpread[,names(tarSpread) %in% names(usersTidy)]
# Reorder the columns to match
# index <- integer()
# for(name in names(tarTidy)){
#   index <- c(index, which(names(usersTidy) == name))
# }
#
# names(usersTidy)[index]
# names(tarTidy)
#
# identical(names(usersTidy)[index], names(tarTidy))
#
# usersTidy <- usersTidy[, index]
# make empty matrix for for values to be added
# Visual sanity check: both matrices should list columns in the same order.
for(i in 1:length(names(usersTidy))){
  print(names(usersTidy)[i])
  print(names(tarTidy)[i])
}
# m[i, j] will hold the (binarized) similarity of user i to target j.
m <- matrix(0, nrow = nrow(usersSpread), ncol = nrow(tarSpread))
musers <- as.matrix(usersTidy)
mtargets <- as.matrix(tarTidy)
distanceToTargets <- function(user, targets = mtargets){
  # Cosine similarity between one user's interest vector and every
  # target profile, binarized to 1 (any overlap) / 0 (none or NA).
  # NOTE(review): defaults to the global `mtargets` built above and
  # relies on lsa::cosine loaded at the top of the script.
  #
  # user    : numeric 0/1 vector of one attendee's interests.
  # targets : matrix of target profiles, one row per attendee.
  # Returns a 0/1 vector with one entry per target row.
  # Preallocate instead of growing the vector inside the loop.
  distances <- numeric(nrow(targets))
  for(i in seq_len(nrow(targets))){
    distances[i] <- cosine(user, targets[i,])
  }
  #This makes the outcome binary:
  distances[is.na(distances)] <- 0
  distances[distances > 0] <- 1
  return(distances)
}
# Fill the user-vs-target similarity matrix row by row.
for(i in 1:nrow(musers)){
  m[i,] <- distanceToTargets(musers[i,])
}
# mSum[i, j] > 1 means the match is reciprocal (both directions hit).
mSum <- m + t(m)
RankDistanceToTargets <- function(user, targets){
  # Calculates cosine distance between one user profile and each of the
  # targets in L. Returns only 25 best matches for the user. NOT the
  # 25 best reciprocal fits!
  # NOTE(review): reads the global matrices `musers` and `mtargets`
  # built earlier in this script, and lsa::cosine.
  tempDF <- data.frame(target = numeric(),
                       distance = numeric())
  # NOTE(review): `distances` below is assigned but never used.
  distances <- numeric()
  for(j in 1:length(targets)){
    tempDF[j,1] <- targets[j]
    tempDF[j,2] <- cosine(musers[user,], mtargets[targets[j],])
  }
  #This gives na the value of 0:
  tempDF$distance[is.na(tempDF$distance)] <- 0
  # Fills in the values up to position 25:
  # (pads with zero rows when fewer than 25 candidates exist)
  for(i in 1:25){
    if(is.na(tempDF$distance[i])){
      tempDF[i,] <- 0
    }
  }
  return(tempDF[order(tempDF$distance, decreasing = T)[1:25],1])
}
# For each attendee keep the 25 best-ranked reciprocal matches
# (0 when there are none).
L <- list()
for(i in 1:nrow(mSum)){
  x <- which(mSum[i,] > 1, arr.ind = T) #finds recipr matches
  y <- x[x != i] #removes self
  if(length(y) == 0){ #sets to 0 if no data
    y <- 0
    L[[i]] <- y
  } else {
    trimmed <- RankDistanceToTargets(i, y)
    L[[i]] <- trimmed
  }
}
# x <- which(mSum[1468,] > 1, arr.ind = T)
# y <- x[x != 1468]
# targets <- y
# user <- 1468
#
# test <- y
# for(i in 1:25){
#   if(is.na(test[i])){
#     test[i] <- 0
#   }
# }
#
# RankDistanceToTargets[1468, y]
# L is a list of matches
# Here there is a list of matches. Now get that into suitable output
# This puts the list of names of matches column-wise. Each column is
# a delgate, with the rows being the matches. Not a great output.
DelegatesToMeet <- GetMatches(L, Data)
# This puts the data into a four-column dataframe, with the list of matches
# as one \n-separated string in the fourth column.
KnowledgeMatchesOutput <- GetCompanyMatchesOutput(Data, DelegatesToMeet)
#write.xlsx(KnowledgeMatchesOutput, 'KnowledgeMatchesTop25_M3.xlsx', row.names = F)
|
91ff2bd716fb372593dde0000104eb2c3eccb260
|
ffcf770ed00b5456cbe0fb23d13e943e8053c594
|
/TPP_FDP.R
|
492fc0e236609f9d8bc3fb55b4c8825454be9140
|
[] |
no_license
|
panshiliuzhi/FDR_Lasso
|
497e58592abac21db17dc9ee8be99f97947e6ea7
|
3d7df8e8f56e4caad2925f7a3e5dccdc293f7c19
|
refs/heads/master
| 2022-04-16T07:45:19.889256
| 2018-02-12T11:30:38
| 2018-02-12T11:30:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,995
|
r
|
TPP_FDP.R
|
library(glmnet)
TPP <- function(model, beta) {
  # True positive proportion: share of the truly nonzero coefficients
  # in `beta` that the fitted glmnet model selects.
  # NOTE(review): coef.glmnet(model)@i gives 0-based sparse row indices;
  # index 0 is the intercept, so variable j appears as index j, which
  # lines up with the 1-based positions returned by which().
  signal_idx <- which(beta > 0)
  selected_idx <- coef.glmnet(model)@i
  n_hits <- length(intersect(signal_idx, selected_idx))
  n_hits / max(length(signal_idx), 1)
}
FDP <- function(model, beta) {
  # False discovery proportion: share of the model's selected
  # coefficients that are truly zero in `beta`.
  # (Dead code removed: the original also computed the count of true
  # coefficients, which FDP never uses.)
  selected <- coef.glmnet(model)@i
  false_hits <- intersect(which(beta == 0), selected)
  length(false_hits) / max(length(selected), 1)
}
TPP_FDP <- function(lambda, X, y, beta) {
  # Fit the lasso at a single penalty `lambda` and return c(TPP, FDP).
  # lambda = 0 would select every coefficient.
  # (Dead code removed: `true_coefs`/`k` were computed but unused —
  # TPP() and FDP() derive them from `beta` themselves.)
  model <- glmnet(X, y, lambda = lambda)
  c(TPP(model, beta), FDP(model, beta))
}
first_false_selection <- function(n, p, beta) {
  # When the first false selection happens
  # Generate data under same gaussian random design setting
  # (noiseless response y = X beta; result is random via X).
  X = matrix( rnorm(n * p), nrow=n, ncol=p)
  y = X%*%beta
  true_coefs <- which(beta > 0)
  k <- length(true_coefs)
  # Walk down the lasso path from a large penalty until the first
  # false discovery appears (FDP > 0).
  lambda <- 70
  tpp_fdp <- TPP_FDP(lambda, X, y, beta)
  tpp <- tpp_fdp[1]
  fdp <- tpp_fdp[2]
  # NOTE(review): lambda decreases by 1 with no lower bound — if no
  # false discovery ever occurs, lambda goes negative; confirm glmnet
  # behavior in that regime.
  while (fdp == 0) {
    lambda <- lambda - 1
    tpp_fdp <- TPP_FDP(lambda, X, y, beta)
    tpp <- tpp_fdp[1]
    fdp <- tpp_fdp[2]
  }
  # Rank of the first false discovery
  c(tpp = tpp, fdp = fdp, lambda = lambda, rank_fdp = 1 + floor(k*tpp))
}
last_true_selection <- function(n, p, beta) {
  # When the last true selection happens:
  # starting from lambda = 0.1 (where all true variables are assumed
  # selected, TPP == 1), increase the penalty by 0.5 until the lasso
  # drops a true variable, and report (tpp, fdp, lambda) at that point.
  # Generate data under same gaussian random design setting.
  # (Dead code removed: `true_coefs`/`k` and the loop counter `i` were
  # computed but never used.)
  X = matrix( rnorm(n * p), nrow=n, ncol=p)
  y = X%*%beta
  lambda <- 0.1
  tpp_fdp <- TPP_FDP(lambda, X, y, beta)
  tpp <- tpp_fdp[1]
  fdp <- tpp_fdp[2]
  while (tpp == 1) {
    lambda <- lambda + 0.5
    tpp_fdp <- TPP_FDP(lambda, X, y, beta)
    tpp <- tpp_fdp[1]
    fdp <- tpp_fdp[2]
  }
  c(tpp = tpp, fdp = fdp, lambda = lambda)
}
|
22a30e805951090bd4b971710bade475ae8e086f
|
3dea25f109bc6fb4fdbe45cdc576e8dfb98c3cfb
|
/scripts/04_deAnalysis.R
|
d3af61958b3b89490faa0f4f849cfce8ebc5f655
|
[] |
no_license
|
alanlorenzetti/ccrescentus_rnaseq_rhle
|
13a406d3aa6517afabbcb272958b72c748359f27
|
e5c103757b4e05dfbb5fee86c971bc991821131b
|
refs/heads/main
| 2023-06-03T05:27:30.631542
| 2021-06-29T18:07:25
| 2021-06-29T18:07:25
| 381,445,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,099
|
r
|
04_deAnalysis.R
|
# alorenzetti 202106
# description ####
# this script will prepare data
# generate the counts, and
# perform differential expression
# analysis
# preparing data ####
# providing info about data
samples = c("NA1_10C", "NA2_10C", "NA3_10C",
"NA1_30C", "NA2_30C", "NA3_30C",
"rhIE_1_10oC_S4", "rhIE_2_10oC_S5", "rhIE_3_10oC_S6",
"rhIE_1_30oC_S1", "rhIE_2_30oC_S2", "rhIE_3_30oC_S3")
reps = rep(c("A", "B", "C"), 4)
strains = c(rep("NA1000", 6), rep("rhlE", 6))
conditions = c(rep(c(rep("10C", 3), rep("30C", 3)), 2))
# info datatable
colData = data.frame(row.names = samples,
replicate = reps,
strain = strains,
condition = conditions)
# loading raw counts ####
# loading raw counts obtained by kallisto
# to manually compute TPMs, follow the instructions on the following
# page: https://haroldpimentel.wordpress.com/2014/05/08/what-the-fpkm-a-review-rna-seq-expression-units/
# reading totalrna counts
totrna = read_tsv("data/tableEstCounts25.tsv")
# formatting deseq2 input object
assay = totrna %>%
dplyr::select(starts_with("NA"),
starts_with("rhIE")) %>%
as.matrix() %>%
round(digits = 0)
colnames(assay) = colnames(assay) %>%
str_replace(., "^NA", "NA1000") %>%
str_replace(., "rhIE_", "rhlE") %>%
str_replace(., "oC", "C") %>%
str_replace(., "_S[0-9]", "") %>%
str_replace(., "([0-9])_", "_\\1_") %>%
str_replace(., "(.*)_(.*)_(.*)", "\\1_\\3_\\2") %>%
str_replace(., "1$", "A") %>%
str_replace(., "2$", "B") %>%
str_replace(., "3$", "C")
assay = assay[,c(1,3,5,2,4,6,7,9,11,8,10,12)]
rownames(colData) = colnames(assay)
totrnaSE = SummarizedExperiment(assay = list(counts=assay),
rowData = totrna[,c(1:3)],
colData = colData)
# giving gene names thinking about compatibility
# issues. I am gonna add some features available
# in the functional categorization object in order
# to make it work in the next script
rownames(totrnaSE) = rowData(totrnaSE) %>%
as_tibble() %>%
dplyr::select(target_id) %>%
mutate(target_id = str_replace(string = target_id,
pattern = "\\|.*$",
replacement = "")) %>%
left_join(x = .,
y = funCat,
by = c("target_id" = "locus_tag")) %>%
unite(rn, c("ID",
"target_id",
"entrezID",
"gene_symbol"),
sep = "|") %>%
dplyr::select(rn) %>%
unlist(use.names = F)
# removing rRNA instances
totrnaSE = totrnaSE[str_detect(string = rownames(totrnaSE),
pattern = "CCNA_R0069|CCNA_R0066",
negate = T),]
# creating DESeq object for entire experiment ####
# to do exploratory analysis and differential expression analysis
# creating deseq2 objects
totrnadds = totrnaSE
totrnadds$group = factor(paste0(totrnadds$strain, "_", totrnadds$condition))
totrnadds = DESeqDataSet(totrnadds, design = ~ group)
# doing rlog transformation for distance and PCA
# before eliminating genes with zero counts
rld = rlog(totrnadds)
rldNonBlind = rlog(totrnadds, blind=F)
# removing genes with zero counts and performing DESeq2 analysis
totrnadds = totrnadds[rowSums(counts(totrnadds)) > 1, ]
totrnadds = DESeq(totrnadds)
#resultsNames(dds)
# setting distance matrix for dds
sampleDists = dist(t(assay(rld)))
sampleDistMatrix = as.matrix(sampleDists)
rownames(sampleDistMatrix) = paste( rld$strain, rld$condition, rld$replicate, sep="_" )
colnames(sampleDistMatrix) = NULL
colors = colorRampPalette(rev(brewer.pal(9, "Reds")) )(255)
# result tables for contrasts
results = list()
results[["rhlE10C_vs_rhlE30C"]] = results(totrnadds, contrast= c("group", "rhlE_10C", "rhlE_30C"), alpha = padjthreshold)
results[["rhlE10C_vs_NA100010C"]] = results(totrnadds, contrast= c("group", "rhlE_10C", "NA1000_10C"), alpha = padjthreshold)
results[["rhlE30C_vs_NA100030C"]] = results(totrnadds, contrast= c("group", "rhlE_30C", "NA1000_30C"), alpha = padjthreshold)
|
80e3c26210c833d9d9b7f861612c4b54648bc270
|
2931c0b42f80075e212b4a28cd0b9b1586eac6af
|
/Tareas/Método_Bisección.R
|
b7c6fc1d16ddc9c6043658c68b6b32592a40eb8a
|
[] |
no_license
|
mgalvis60/Analisis_Numerico_1910
|
f4edfe9091643c362d8366acb224c4e869024ca7
|
d3a8def7b35956c3cf54efe4e9660a09439ef7b1
|
refs/heads/master
| 2020-04-20T04:32:38.040374
| 2019-05-15T12:09:06
| 2019-05-15T12:09:06
| 168,630,617
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 620
|
r
|
Método_Bisección.R
|
# Mateo Galvis Lopez
# Bisection method in R
# Fx: function whose root is sought on the plotted interval.
Fx <- function(x) exp(x) - pi * x

# biseccion: approximate a root of Fx on [a, b] by bisection.
#
# Plots Fx on [-3, 3], labels each iterate on the x-axis, prints the
# progress with cat(), and returns the final midpoint invisibly.
#
# BUG FIX: the original started the iteration at x <- b (an interval
# endpoint) and stored the true midpoint in an unused variable d; with
# Fx(a) and Fx(b) of equal sign the loop collapsed after one iteration
# at x = b, which is not a root.  The iterate now starts at the
# midpoint, as bisection requires.
#
# a, b  : interval endpoints
# tol   : half-interval stopping tolerance (default keeps the original
#         hard-coded 1e-4)
biseccion <- function(a, b, tol = 1e-4) {
  x <- seq(-3, 3, 0.001)
  plot(x, Fx(x), type = "l", col = "blue")
  abline(h = 0, col = "blue")
  x <- (a + b) / 2              # start at the midpoint, not an endpoint
  i <- 0
  error <- abs(a - b) / 2
  while (error > tol) {
    i <- i + 1
    if (Fx(x) == 0) break       # exact root found
    # keep the sub-interval whose endpoints bracket the sign change
    if (Fx(x) * Fx(a) < 0) b <- x else a <- x
    x <- (a + b) / 2
    text(x, 0, i, cex = 0.8, col = "red")
    error <- abs(a - b) / 2
    cat("X=", x, "\tE=", error, "\t\tIteración=", i, "\n")
  }
  invisible(x)
}
biseccion(-3, 3)
|
62bbfe8d18dd8ca63163714f5c8453f60d5748a9
|
b961e5df200a979211606950cf0ccaa36bb5cf7d
|
/myProject/monte_carlo.R
|
40ffdbd02f47aa51b0c78d1c8dd9fee73261df8b
|
[] |
no_license
|
navbharti/Rworkspace
|
27f45a262ade1cccdd742f1e83a24f91a6304df7
|
0415761023a61887e97cdbff55ccfa976651c3b7
|
refs/heads/master
| 2021-01-17T17:54:40.844402
| 2016-08-14T12:44:17
| 2016-08-14T12:44:17
| 62,465,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 922
|
r
|
monte_carlo.R
|
# --- Setup: fetch the mice phenotype data and build the null population
library(rafalib)
library(downloader)
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/mice_pheno.csv"
filename <- "mice_pheno.csv"
# download once; re-runs reuse the cached local copy
if (!file.exists(filename)) download(url,destfile=filename)
library(dplyr)
dat <- read.csv("mice_pheno.csv")
# body weights of all female mice on the control ("chow") diet; this
# vector plays the role of the full population under the null
controlPopulation <- filter(dat,Sex == "F" & Diet == "chow") %>%
  select(Bodyweight) %>% unlist
# Generate one t-statistic under the null hypothesis for sample size n:
# both the "cases" and the "controls" are drawn from the same control
# population, so the resulting statistic reflects pure sampling noise.
ttestgenerator <- function(n) {
  # draws from the chow population in the same order as before, so the
  # result is identical for a given RNG seed
  cases <- sample(controlPopulation, n)
  controls <- sample(controlPopulation, n)
  diff_means <- mean(cases) - mean(controls)
  se <- sqrt(var(cases) / n + var(controls) / n)
  diff_means / se
}
# 1000 null t-statistics for samples of size 10
ttests <- replicate(1000, ttestgenerator(10))
|
7a080ebc48b059d988cc41c95c2e6184d9595793
|
41d8aa2ea38571c2aae84e8581a4820ba6b985bf
|
/test.R
|
ed6291817be1f7d6b6683ab410db003e8dcd8d26
|
[] |
no_license
|
arunmarria/housePricePrediction
|
f0fe9e72cd8534c492aca1230d690eb6eac570d7
|
78b83827191b0859b9f9eee572f8ed59c7773e48
|
refs/heads/master
| 2020-04-10T17:59:46.341974
| 2019-01-02T14:59:35
| 2019-01-02T14:59:35
| 161,190,693
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,448
|
r
|
test.R
|
## the script is for data preparation of actual test data and it involves performing same operations on
## test data as performed on train data
## loading data (assumes stringsAsFactors = FALSE, i.e. R >= 4.0, so the
## character columns come in as character -- TODO confirm R version)
house_test <- read.csv("test.csv")
## Getting glimpse of data
head(house_test)
str(house_test)
## understanding data
## MSSubClass 150 does not occur in the training data, so map it to the
## closest class (160) before converting to a factor.
## (vectorised: replaces the original element-wise loop)
house_test$MSSubClass[house_test$MSSubClass == 150] <- 160
house_test$MSSubClass <- as.factor(house_test$MSSubClass)
# replace NA in Alley column with "No Alleys" (NA means the house has
# no alley access, not a missing measurement)
sum(is.na(house_test$Alley))
table(house_test$Alley)
house_test$Alley <- ifelse(is.na(house_test$Alley), "No Alleys", house_test$Alley)
str(house_test$Alley)
## BUG FIX: the original converted with as.factor() (which sorts the
## levels alphabetically: "Grvl", "No Alleys", "Pave") and then renamed
## them *positionally* to c("Grvl", "Pave", "No Alleys"), silently
## swapping the meaning of "Pave" and "No Alleys".  Setting the level
## order explicitly keeps each label attached to the right values.
house_test$Alley <- factor(house_test$Alley, levels = c("Grvl", "Pave", "No Alleys"))
## --- Replace structural NAs with explicit "absent" levels ------------
## For these columns NA encodes "feature not present" (no basement, no
## garage, ...), so it is replaced with a descriptive label rather than
## imputed.  This collapses the original repeated
## as.character / ifelse(is.na, ...) / as.factor stanzas; the str()
## progress prints were dropped.
## fill_na_factor: replace NA in x with `label` and return a factor.
fill_na_factor <- function(x, label) {
  x <- as.character(x)
  x[is.na(x)] <- label
  as.factor(x)
}
## basement-related columns: NA -> "No basmt"
for (col in c("BsmtQual", "BsmtCond", "BsmtExposure",
              "BsmtFinType1", "BsmtFinType2")) {
  house_test[[col]] <- fill_na_factor(house_test[[col]], "No basmt")
}
## fireplace quality: NA -> "No Fireplace"
house_test$FireplaceQu <- fill_na_factor(house_test$FireplaceQu, "No Fireplace")
## garage-related columns (GarageYrBlt becomes a factor of year labels,
## exactly as in the original): NA -> "No Garage"
for (col in c("GarageType", "GarageFinish", "GarageYrBlt",
              "GarageQual", "GarageCond")) {
  house_test[[col]] <- fill_na_factor(house_test[[col]], "No Garage")
}
## pool, fence and miscellaneous features
house_test$PoolQC <- fill_na_factor(house_test$PoolQC, "No Pool")
house_test$Fence <- fill_na_factor(house_test$Fence, "No Fence")
house_test$MiscFeature <- fill_na_factor(house_test$MiscFeature, "None")
## --- Impute the remaining scattered NAs ------------------------------
## checking NAs in the data set now
library(e1071)
sapply(house_test, function(x) {sum(is.na(x))})
## numeric measurements: impute with the column mean
## (LotFrontage skewness ~0.66, mild enough for mean imputation)
for (col in c("LotFrontage", "MasVnrArea")) {
  x <- house_test[[col]]
  x[is.na(x)] <- mean(x, na.rm = TRUE)
  house_test[[col]] <- x
}
## categorical columns: impute with the majority level observed in the
## data (same hard-coded modes as the original stanzas)
## fill_na_mode: replace NA with the given majority level, return factor.
fill_na_mode <- function(x, majority) {
  x <- as.character(x)
  x[is.na(x)] <- majority
  as.factor(x)
}
majority_levels <- c(MSZoning = "RL", Utilities = "AllPub",
                     Exterior1st = "VinylSd", Exterior2nd = "VinylSd",
                     MasVnrType = "None", KitchenQual = "Gd",
                     Functional = "Typ", SaleType = "WD")
for (col in names(majority_levels)) {
  house_test[[col]] <- fill_na_mode(house_test[[col]], majority_levels[[col]])
}
## basement / garage size and count columns: NA means the feature is
## absent, so the correct value is 0
for (col in c("BsmtFinSF1", "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF",
              "BsmtFullBath", "BsmtHalfBath", "GarageArea", "GarageCars")) {
  x <- house_test[[col]]
  x[is.na(x)] <- 0
  house_test[[col]] <- x
}
## Checking if we still have any NAs on test data
any(is.na(house_test))
## Exploring categorical features
## changing level names for better visualization and exploration
## CAUTION(review): `levels(x) <- c(...)` renames levels *positionally*.
## Every vector below therefore assumes the factor's levels are
## currently in alphabetical order AND that all listed levels are
## present in the test data; a missing level would silently shift the
## labels onto the wrong values.  Verify against table() output.
levels(house_test$MSZoning)<- c("Commercial", "FloatingVill", "Res. high density",
                                "Res. low density", "Res. med density")
levels(house_test$LotShape)<- c("Irregular(slight)", "Irregular(Moder)","Irregular","Regular")
levels(house_test$LandContour)<- c("Banked", "HillSide", "Low", "Level")
levels(house_test$Utilities)<- c("All", "No sewarage")
## only positions 3 and 4 (the FR2 / FR3 codes) are renamed here
levels(house_test$LotConfig)[3:4]<- c("Frontage(2 sides)","Frontage(3 sides)" )
levels(house_test$LandSlope)<- c("Gentle", "Moderate","Severe")
levels(house_test$ExterQual )<- c("Excellent", "Fair","Good","Typical")
levels(house_test$ExterCond)<- c("Excellent","Fair","Good","Poor","Typical")
levels(house_test$BsmtQual )<- c("Excellent","Fair","Good","No basmt","Typical")
## NOTE(review): "No bsmt" below is inconsistent with the "No basmt"
## spelling used elsewhere (label only; no behavioural effect)
levels(house_test$BsmtCond )<- c("Fair","Good","No bsmt","Poor","Typical")
levels(house_test$BsmtExposure)<- c("Average","Good","Minimum","No","No basmt")
## NOTE(review): "No basmnt" is another spelling variant (typo)
levels(house_test$BsmtFinType1)<- c("Avg. LQ","Below avg. LQ", "Good LQ","Low Quality","No basmnt","Avg. RecRoom","Unfinished" )
levels(house_test$BsmtFinType2)<-c("Avg. LQ","Below avg. LQ", "Good LQ","Low Quality","No basmnt","Avg. RecRoom","Unfinished" )
levels(house_test$HeatingQC)<- c("Excellent","Fair","Good","Poor","Typical")
levels(house_test$KitchenQual)<- c("Excellent","Fair","Good","Typical")
levels(house_test$Functional)<- c("MajorDeductn1","MajorDeductn2","MinorDeduc1","MinorDeduc","Moder.Deduction","SeverelyDamaged","Typical")
levels(house_test$FireplaceQu)<- c("Excellent","Fair","Good","No Fireplace","Poor","Typical")
levels(house_test$GarageFinish)<- c("Finished","No Garage","RoughFinished","Unfinished")
## different
levels(house_test$GarageQual)<- c("Fair","Good","No Garage","Poor","Typical")
levels(house_test$GarageCond)<- c("Excellent","Fair","Good","No Garage","Poor","Typical")
levels(house_test$PavedDrive) <- c("No","Partial","Yes")
##diff
levels(house_test$PoolQC)<- c("Excellent","Good","No Pool")
levels(house_test$Fence) <- c("GoodPrivacy","GoodWood","MinimumPrivacy","MinimumWood","NoFence")
## converting columns to factors
house_test$YrSold <- as.factor(house_test$YrSold)
house_test$MoSold <- as.factor(house_test$MoSold)
##house_test$GarageCars <- as.factor(house_test$GarageCars)
##house_test$GarageCars <- as.character(house_test$GarageCars)
## --- Collapse sparse count categories into grouped levels ------------
## Vectorised replacement for the original element-wise loops (which
## relied on R's numeric-to-character coercion inside the loop; lexical
## and numeric comparison agree for the value ranges involved).  The
## interactive table() checks were dropped.
## bin_counts: values <= low become low_label, values >= high become
## high_label, everything in between keeps its numeric value as a
## character level; the result is a factor, as before.
bin_counts <- function(x, low = NULL, low_label = NULL,
                       high = NULL, high_label = NULL) {
  out <- as.character(x)
  if (!is.null(low)) out[x <= low] <- low_label
  if (!is.null(high)) out[x >= high] <- high_label
  as.factor(out)
}
## combine categories for 3 or more garage cars
house_test$GarageCars <- bin_counts(house_test$GarageCars,
                                    high = 3, high_label = "3 or more")
## fireplaces
house_test$Fireplaces <- bin_counts(house_test$Fireplaces,
                                    high = 2, high_label = "2 or more")
## total rooms above grade: tails grouped, middle kept
## (assumes observed values lie in 2..15, as in the original == chains)
house_test$TotRmsAbvGrd <- bin_counts(house_test$TotRmsAbvGrd,
                                      low = 4, low_label = "4 or less",
                                      high = 10, high_label = "10 or more")
## kitchens above grade (observed values 0..3 all fall in a group)
house_test$KitchenAbvGr <- bin_counts(house_test$KitchenAbvGr,
                                      low = 1, low_label = "1 or less",
                                      high = 2, high_label = "2 or more")
## bedrooms above grade
house_test$BedroomAbvGr <- bin_counts(house_test$BedroomAbvGr,
                                      low = 2, low_label = "2 or less",
                                      high = 4, high_label = "4 or more")
## bathroom counts
house_test$HalfBath <- bin_counts(house_test$HalfBath,
                                  high = 1, high_label = "1 or more")
house_test$FullBath <- bin_counts(house_test$FullBath,
                                  low = 1, low_label = "1 or less",
                                  high = 2, high_label = "2 or more")
house_test$BsmtHalfBath <- bin_counts(house_test$BsmtHalfBath,
                                      high = 1, high_label = "1 or more")
house_test$BsmtFullBath <- bin_counts(house_test$BsmtFullBath,
                                      high = 1, high_label = "1 or more")
## --- Bin construction / remodel years and overall ratings ------------
## Vectorised replacement for the original long if-chains; behaviour is
## identical for years inside the bin ranges.  The interactive table()
## checks were dropped.
## bin_years: cut x into the half-open intervals [breaks[i], breaks[i+1])
## labelled by `labels`, returning a factor whose levels (like the
## original as.factor) contain only the bins actually present.
bin_years <- function(x, breaks, labels) {
  binned <- as.character(cut(x, breaks = breaks, labels = labels, right = FALSE))
  as.factor(binned)
}
## remodel year: bins 1950-1955 then 5-year bins up to 2006-2010
remod_breaks <- c(1950, 1956, 1961, 1966, 1971, 1976, 1981,
                  1986, 1991, 1996, 2001, 2006, 2011)
remod_labels <- c("1950-1955", "1956-1960", "1961-1965", "1966-1970",
                  "1971-1975", "1976-1980", "1981-1985", "1986-1990",
                  "1991-1995", "1996-2000", "2001-2005", "2006-2010")
house_test$YearRemodAdd <- bin_years(house_test$YearRemodAdd,
                                     remod_breaks, remod_labels)
## construction year: coarse historical bins (1871-1900 -> "Before 1900")
built_breaks <- c(1871, 1901, 1926, 1951, 1976, 2001, 2006, 2011)
built_labels <- c("Before 1900", "1901-1925", "1926-1950", "1951-1975",
                  "1976-2000", "2001-2005", "2006-2010")
house_test$YearBuilt <- bin_years(house_test$YearBuilt,
                                  built_breaks, built_labels)
## overall condition / quality (1-10 scale): collapse the sparse tails,
## keep the middle ratings 5, 6, 7 as their own levels
collapse_rating <- function(x) {
  out <- as.character(x)
  out[x <= 4] <- "4 or less"
  out[x >= 8] <- "8 or more"
  as.factor(out)
}
house_test$OverallCond <- collapse_rating(house_test$OverallCond)
house_test$OverallQual <- collapse_rating(house_test$OverallQual)
## --- Merge sparse quality levels into combined labels ----------------
## merge_levels: map each listed set of labels onto one combined label.
## Vectorised replacement for the original element-wise loops; groups
## are applied in order and are disjoint from each other's output
## labels, so the result matches the loops exactly.  The interactive
## table() checks were dropped.
merge_levels <- function(x, ...) {
  out <- as.character(x)
  groups <- list(...)
  for (label in names(groups)) {
    out[out %in% groups[[label]]] <- label
  }
  as.factor(out)
}
## pool quality: rename the sparse second level ("Good") in place
levels(house_test$PoolQC)[2] <- "Fair/Good"
## garage condition / quality
house_test$GarageCond <- merge_levels(house_test$GarageCond,
  "Fair/Poor" = c("Fair", "Poor"),
  "Good/Typical" = c("Good", "Typical"))
house_test$GarageQual <- merge_levels(house_test$GarageQual,
  "Fair/Poor" = c("Fair", "Poor"),
  "Good/Typical" = c("Good", "Typical", "Excellent"))
## fence: merge the two wood categories
house_test$Fence <- merge_levels(house_test$Fence,
  "Good/MinimumWood" = c("GoodWood", "MinimumWood"))
## home functionality deductions
house_test$Functional <- merge_levels(house_test$Functional,
  "MajorDeductn" = c("MajorDeductn1", "MajorDeductn2"),
  "MinorDeductn" = c("MinorDeduc1", "MinorDeduc"))
## electrical system: pool the rare fuse/mixed types
house_test$Electrical <- merge_levels(house_test$Electrical,
  "FuseP/F/Mix" = c("FuseP", "Mix", "FuseF"))
## heating quality and heating type
house_test$HeatingQC <- merge_levels(house_test$HeatingQC,
  "Fair/Poor/Typical" = c("Fair", "Poor", "Typical"))
house_test$Heating <- merge_levels(house_test$Heating,
  "Others" = c("Floor", "Grav", "OthW", "Wall"))
## basement finish types
house_test$BsmtFinType1 <- merge_levels(house_test$BsmtFinType1,
  "LowQuality/Unfinished" = c("Unfinished", "Avg. RecRoom", "Low Quality"))
house_test$BsmtFinType2 <- merge_levels(house_test$BsmtFinType2,
  "LowQuality/Unfinished" = c("Unfinished", "Avg. RecRoom", "Low Quality"),
  "Good/Avg/BAvg LQ" = c("Avg. LQ", "Below avg. LQ", "Good LQ"))
## basement quality / condition
house_test$BsmtQual <- merge_levels(house_test$BsmtQual,
  "Fair/Typical" = c("Fair", "Typical"))
house_test$BsmtCond <- merge_levels(house_test$BsmtCond,
  "Fair/Poor" = c("Poor", "Fair"))
## --- Merge sparse exterior / roof / foundation levels ----------------
## merge_levels: map each listed set of labels onto one combined label
## (defined here again so this section is self-contained; the
## definition is identical wherever it appears).
merge_levels <- function(x, ...) {
  out <- as.character(x)
  groups <- list(...)
  for (label in names(groups)) {
    out[out %in% groups[[label]]] <- label
  }
  as.factor(out)
}
## foundation: pool the rare types
house_test$Foundation <- merge_levels(house_test$Foundation,
  "Other" = c("Slab", "Stone", "Wood"))
## exterior quality / condition
house_test$ExterQual <- merge_levels(house_test$ExterQual,
  "Fair/Typical" = c("Fair", "Typical"))
house_test$ExterCond <- merge_levels(house_test$ExterCond,
  "Fair/Typical/Poor" = c("Fair", "Typical", "Poor"))
## roof material and style: everything rare becomes "Other"
house_test$RoofMatl <- merge_levels(house_test$RoofMatl,
  "Other" = c("ClyTile", "Membran", "Metal", "Roll",
              "Tar&Grv", "WdShake", "WdShngl"))
house_test$RoofStyle <- merge_levels(house_test$RoofStyle,
  "Other" = c("Flat", "Gambrel", "Mansard", "Shed"))
## exterior cladding: fold rare types into common ones (same target
## labels as the original loops, including the "CBlock" self-map)
house_test$Exterior1st <- merge_levels(house_test$Exterior1st,
  "CemntBd" = c("ImStucc", "Stone"),
  "CBlock" = c("AsphShn", "BrkComm", "CBlock"))
house_test$Exterior2nd <- merge_levels(house_test$Exterior2nd,
  "CmentBd" = c("ImStucc", "Other"),
  "AsbShng" = c("AsphShn", "Brk Cmn", "CBlock"),
  "Stucco" = c("Stone"))
## miscellaneous features: match the labels available in training data
house_test$MiscFeature <- merge_levels(house_test$MiscFeature,
  "None" = c("Gar2", "TenC"),
  "Shed" = c("Othr"))
##Recombining categories for better prediction
##for MSSubclass 30 and 45 and 180
## 40 and 50
## 190 and 90
##75 and 80 and 85
house_test$MSSubClass <- as.character(house_test$MSSubClass)
for ( i in 1:length(house_test$MSSubClass)){
if(house_test$MSSubClass[i] == "30"||house_test$MSSubClass[i] == "45"
||house_test$MSSubClass[i] == "180")
{
house_test$MSSubClass[i] = "30/45/180"
}
if(house_test$MSSubClass[i] == "75"|| house_test$MSSubClass[i] == "80"
||house_test$MSSubClass[i] == "85" )
{
house_test$MSSubClass[i] = "75/80/85"
}
if(house_test$MSSubClass[i] == "40"||house_test$MSSubClass[i]== "50")
{
house_test$MSSubClass[i] = "40/50"
}
if(house_test$MSSubClass[i] == "190"||house_test$MSSubClass[i]== "90")
{
house_test$MSSubClass[i] = "90/190"
}
}
house_test$MSSubClass <- as.factor(house_test$MSSubClass)
##
## changing MSZoning as per test data labels
house_test$MSZoning <- as.character(house_test$MSZoning)
for ( i in 1:length(house_test$MSZoning)){
if(house_test$MSZoning[i] == "Res. high density"||house_test$MSZoning[i] == "Res. med density" )
{
house_test$MSZoning[i] = "Res. high/med"
}
}
house_test$MSZoning <- as.factor(house_test$MSZoning)
##
## changing LotShape as per test data labels
house_test$LotShape <- as.character(house_test$LotShape)
for ( i in 1:length(house_test$LotShape)){
if(house_test$LotShape[i] == "Irregular(slight)"||house_test$LotShape[i] == "Irregular(Moder)"
|| house_test$LotShape[i] =="Irregular")
{
house_test$LotShape[i] = "Irregular"
}
}
house_test$LotShape <- as.factor(house_test$LotShape)
##
## changing LandContour as per test data labels
house_test$LandContour <- as.character(house_test$LandContour)
for ( i in 1:length(house_test$LandContour)){
if(house_test$LandContour[i] == "HillSide"||house_test$LandContour[i] == "Low")
{
house_test$LandContour[i] = "HillSide/Low"
}
}
house_test$LandContour <- as.factor(house_test$LandContour)
##
## changing LotConfig as per test data labels
house_test$LotConfig <- as.character(house_test$LotConfig)
for ( i in 1:length(house_test$LotConfig)){
if(house_test$LotConfig[i] == "CulDSac"||house_test$LotConfig[i] == "Frontage(3 sides)")
{
house_test$LotConfig[i] = "CulDSac/Frontage3Sides"
}
}
house_test$LotConfig <- as.factor(house_test$LotConfig)
##
## changing LandSlope as per test data labels
house_test$LandSlope <- as.character(house_test$LandSlope)
for ( i in 1:length(house_test$LandSlope)){
if(house_test$LandSlope[i] == "Moderate"||house_test$LandSlope[i] == "Severe")
{
house_test$LandSlope[i] = "Moderate/Severe"
}
}
house_test$LandSlope <- as.factor(house_test$LandSlope)
## changing Condition1 as per test data labels
## The recoded groups are disjoint, so each group is one vectorized
## replacement instead of per-row if/else chains (also NA-safe via %in%).
house_test$Condition1 <- as.character(house_test$Condition1)
house_test$Condition1[house_test$Condition1 %in% c("RRAe", "RRAn", "RRNe", "RRNn")] <- "RRA/RRN"
house_test$Condition1[house_test$Condition1 %in% c("PosA", "PosN")] <- "PosA/PosN"
house_test$Condition1 <- as.factor(house_test$Condition1)
## changing BldgType as per test data labels
house_test$BldgType <- as.character(house_test$BldgType)
house_test$BldgType[house_test$BldgType %in% c("2fmCon", "Duplex")] <- "2fmCon/Duplex"
house_test$BldgType <- as.factor(house_test$BldgType)
## changing HouseStyle as per test data labels
house_test$HouseStyle <- as.character(house_test$HouseStyle)
house_test$HouseStyle[house_test$HouseStyle %in% c("1.5Unf", "2.5Unf", "SFoyer", "SLvl")] <- "Others"
house_test$HouseStyle <- as.factor(house_test$HouseStyle)
## changing Exterior1st as per test data labels
## Vectorized recoding; the groups are disjoint so the order of the
## replacement statements does not matter.
house_test$Exterior1st <- as.character(house_test$Exterior1st)
house_test$Exterior1st[house_test$Exterior1st %in% c("AsbShng", "CBlock")] <- "AsbShng/CBlock"
house_test$Exterior1st[house_test$Exterior1st %in% c("WdShing", "Wd Sdng")] <- "WdShing/Wd Sdng"
house_test$Exterior1st <- as.factor(house_test$Exterior1st)
## changing Exterior2nd as per test data labels
house_test$Exterior2nd <- as.character(house_test$Exterior2nd)
house_test$Exterior2nd[house_test$Exterior2nd %in% c("Stucco", "Plywood")] <- "Stucco/Plywood"
house_test$Exterior2nd[house_test$Exterior2nd %in% c("BrkFace", "CmentBd")] <- "BrkFace/CmentBd"
house_test$Exterior2nd[house_test$Exterior2nd %in% c("WdShing", "Wd Shng", "AsbShng")] <- "WdShing/Wd Sdng/AsbShng"
house_test$Exterior2nd <- as.factor(house_test$Exterior2nd)
## changing MasVnrType as per test data labels
house_test$MasVnrType <- as.character(house_test$MasVnrType)
house_test$MasVnrType[house_test$MasVnrType %in% c("None", "BrkCmn")] <- "None/BrkCmn"
house_test$MasVnrType <- as.factor(house_test$MasVnrType)
## changing ExterCond as per test data labels
## NOTE(review): the literal "Fair/Typical/Poor" looks like an
## already-combined level produced by earlier preprocessing -- confirm it
## matches the labels actually present in house_test$ExterCond.
house_test$ExterCond <- as.character(house_test$ExterCond)
house_test$ExterCond[house_test$ExterCond %in% c("Excellent", "Fair/Typical/Poor")] <- "Others"
house_test$ExterCond <- as.factor(house_test$ExterCond)
|
3276053a53223f06b6c07e39767a377293c8a1ee
|
8aa69c336da13f338e944e586171e8bdf0c3f87a
|
/glmmTMB/man/sigma.glmmTMB.Rd
|
19cf886fa1902174ba512bfc165247b86100e3f4
|
[] |
no_license
|
glmmTMB/glmmTMB
|
4b5612a1cf6ce7567117b3318086fd7b3840e3da
|
0e9d26a02e5e36b74120d8c8a35eae0e0960a73b
|
refs/heads/master
| 2023-08-29T14:39:15.855753
| 2023-08-25T22:28:15
| 2023-08-25T22:28:15
| 40,176,799
| 230
| 63
| null | 2023-09-04T11:10:37
| 2015-08-04T09:53:51
|
HTML
|
UTF-8
|
R
| false
| true
| 3,923
|
rd
|
sigma.glmmTMB.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarCorr.R
\name{sigma.glmmTMB}
\alias{sigma.glmmTMB}
\alias{sigma}
\title{Extract residual standard deviation or dispersion parameter}
\usage{
\method{sigma}{glmmTMB}(object, ...)
}
\arguments{
\item{object}{a \dQuote{glmmTMB} fitted object}
\item{\dots}{(ignored; for method compatibility)}
}
\description{
For Gaussian models, \code{sigma} returns the value of the residual
standard deviation; for other families, it returns the
dispersion parameter, \emph{however it is defined for that
particular family}. See details for each family below.
}
\details{
The value returned varies by family:
\describe{
\item{gaussian}{returns the \emph{maximum likelihood} estimate
of the standard deviation (i.e., smaller than the results of
\code{sigma(lm(...))} by a factor of (n-1)/n)}
\item{nbinom1}{returns a dispersion parameter
(usually denoted \eqn{\alpha}{alpha} as in Hardin and Hilbe (2007)):
such that the variance equals \eqn{\mu(1+\alpha)}{mu(1+alpha)}.}
\item{nbinom2}{returns a dispersion parameter
(usually denoted \eqn{\theta}{theta} or \eqn{k}); in contrast to
most other families, larger \eqn{\theta}{theta} corresponds to a \emph{lower}
variance which is \eqn{\mu(1+\mu/\theta)}{mu(1+mu/theta)}.}
\item{Gamma}{Internally, glmmTMB fits Gamma responses by fitting a mean
and a shape parameter; sigma is estimated as (1/sqrt(shape)),
which will typically be close (but not identical to) that estimated
by \code{stats:::sigma.default}, which uses sqrt(deviance/df.residual)}
\item{beta}{returns the value of \eqn{\phi}{phi},
where the conditional variance is \eqn{\mu(1-\mu)/(1+\phi)}{mu*(1-mu)/(1+phi)}
(i.e., increasing \eqn{\phi}{phi} decreases the variance.)
This parameterization follows Ferrari and Cribari-Neto (2004)
(and the \code{betareg} package):}
\item{betabinomial}{This family uses the same parameterization (governing
the Beta distribution that underlies the binomial probabilities) as \code{beta}.}
\item{genpois}{returns the index of dispersion \eqn{\phi^2}{phi^2},
where the variance is \eqn{\mu\phi^2}{mu*phi^2} (Consul & Famoye 1992)}
\item{compois}{returns the value of \eqn{1/\nu}{1/nu},
When \eqn{\nu=1}{nu=1}, compois is equivalent to the Poisson distribution.
There is no closed form equation for the variance, but
it is approximately underdispersed when \eqn{1/\nu <1}{1/nu <1}
and approximately overdispersed when \eqn{1/\nu >1}{1/nu>1}.
In this implementation, \eqn{\mu}{mu} is exactly the mean (Huang 2017), which
differs from the COMPoissonReg package (Sellers & Lotze 2015).}
\item{tweedie}{returns the value of \eqn{\phi}{phi},
where the variance is \eqn{\phi\mu^p}{phi*mu^p}.
The value of \eqn{p} can be extracted using \code{family_params}
}
}
The most commonly used GLM families
(\code{binomial}, \code{poisson}) have fixed dispersion parameters which are
internally ignored.
}
\references{
\itemize{
\item Consul PC, and Famoye F (1992). "Generalized Poisson regression model. Communications in Statistics: Theory and Methods" 21:89–109.
\item Ferrari SLP, Cribari-Neto F (2004). "Beta Regression for Modelling Rates and Proportions." \emph{J. Appl. Stat.} 31(7), 799-815.
\item Hardin JW & Hilbe JM (2007). "Generalized linear models and extensions." Stata press.
\item Huang A (2017). "Mean-parametrized Conway–Maxwell–Poisson regression models for dispersed counts. " \emph{Statistical Modelling} 17(6), 1-22.
\item Sellers K & Lotze T (2015). "COMPoissonReg: Conway-Maxwell Poisson (COM-Poisson) Regression". R package version 0.3.5. https://CRAN.R-project.org/package=COMPoissonReg
}
}
|
35f33b5263b9d0b94df6302ba13c80ab07de5e3d
|
7658adb5dacf17ffdb3badb94218d81315eca3c6
|
/4. Multivariate/4.3 (Matlab-R) Location estimator/4.3 R/R_Multiv_meanmedian.R
|
070b43f87a0a414f81da78757218e512251d1509
|
[] |
no_license
|
saullopezc/curso-atipicos
|
e186fb07192ca61f340e1befaf0576b969c20e87
|
0c4c5cd01c38b7f344909438396b26123afdefb7
|
refs/heads/master
| 2022-11-20T03:48:06.147674
| 2020-07-25T00:47:59
| 2020-07-25T00:47:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
R_Multiv_meanmedian.R
|
# Compare location estimators (mean, component-wise median, spatial median)
# on bivariate data contaminated with an outlying cluster.
set.seed(3)
library(MASS)
# Install "depth" only when it is missing: unconditionally calling
# install.packages() on every run is slow and requires network access.
if (!requireNamespace("depth", quietly = TRUE)) {
  install.packages("depth")
}
library(depth)
# 80 points around (0,0) plus 20 outliers around (10,0); 100 x 2 matrix.
mu1 <- c(0, 0); mu2 <- c(10, 0); sigma <- matrix(c(1, 0, 0, 1), nc = 2)
X <- rbind(mvrnorm(80, mu1, sigma), mvrnorm(20, mu2, sigma)) # dimension 100x2
plot(X)
# Mean vector (pulled toward the outlying cluster)
m <- c(mean(X[, 1]), mean(X[, 2]))
# Component-wise median
med1 <- c(median(X[, 1]), median(X[, 2]))
# Spatial median
med2 <- med(X, method = "Spatial")
|
0093c0c9aaafd3f1113d270ca73cd341eda1b4c9
|
d265fd0e263981fd3918711b35514e2eb3c095b5
|
/Exam R 20171020.R
|
dfb3e275b49cd0180c96fb2dc60de9fa06bc250d
|
[] |
no_license
|
hodfa840/Rexam
|
b279ae2b9cfbc8797a61cb4defecf806d515e325
|
a2a15ed2a1a5e1a7aa6db99ff3a7bd0e6a240977
|
refs/heads/master
| 2022-02-13T04:12:06.912418
| 2018-11-23T15:14:17
| 2018-11-23T15:14:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,992
|
r
|
Exam R 20171020.R
|
#1.1
rm(list = ls())
#1.2
# Return the elementwise square of a numeric vector.
f <- function(x) {
  x * x
}
#1.3
# file "Read-and-delete_me" contains some information about the package; we should delete it after reading.
# file "NAMESPACE" declares which functions the package exports and imports.
# file "DESCRIPTION" contains introductory information about the package, such as its name, author, and license.
# folder "R" holds the R scripts (.R files) for this package, for example the script for task 1.2.
# folder "man" holds the .Rd files, which document the R scripts. These .Rd files are generated based on the documentation in the R scripts.
#2.1
# Reference-class generator "MGB": a boat that performs simulated home runs.
# Fields:
#   name, speed          - set at construction and validated there
#   status               - "in_service" until a run takes > 10 time units, then "sunk"
#   weight, rate, travel_time - redrawn on every simulated run
#   history              - data frame with one row per simulated run
build_mgb <- setRefClass("MGB",
  fields = list(
    name = "character",
    speed = "numeric",
    status = "character",
    weight = "numeric",
    rate = "numeric",
    travel_time = "numeric",
    history = "data.frame"),
  methods = list(
    # Validate constructor inputs and create an empty run-history table.
    initialize = function(name, speed)
    {
      #Check input
      if(!is.character(name))
        stop("Name should be a character!")
      if (!is.numeric(speed)) {
        stop("Speed should be a numeric!")
      }
      #Create data frame to storage the run history
      history <<- data.frame(matrix(ncol = 5, nrow = 0))
      colnames(history) <<- c("name", "speed", "weight", "travel_time", "status")
      # `<<-` writes to the reference-class fields, not to locals.
      name <<- name
      speed <<- speed
      status <<- "in_service"
    },
    # Simulate one run: draw a random weight, derive the exponential rate
    # from weight and speed, then draw the travel time. A run longer than
    # 10 sinks the boat; a sunk boat only reports itself lost.
    # Returns the (possibly updated) history data frame.
    simulate_home_run = function() {
      # Check input: sunk or in_service
      if (status== "sunk") {
        cat(name)
        cat(" is reported lost")
      }
      else {
        #Calculate
        weight <<- rnorm(1, mean = 40, sd = 2)
        rate <<- 1/(weight*900/speed/250)
        travel_time <<- rexp(1, rate = rate)
        #Check sunk
        if (travel_time >10) {
          status <<- "sunk"
        } #else cat("Success")
        #Insert to history
        history[nrow(history) +1,] <<- list(name,speed, weight, travel_time, status)
      }
      return(history)
    },
    #function that print the history
    report = function() {
      print(history)
    }
  )
)
#2.c
# Build one boat and simulate 100 home runs; `history` ends up holding the
# cumulative run log returned by the final call.
MGB_504 <- build_mgb(name="Hopewell",speed=46)
for (i in 1:100) {
  history <- MGB_504$simulate_home_run()
}
# Function that draws the travel times of the simulated runs as a line plot.
# NOTE(review): this definition masks base::plot(); a more specific name
# (e.g. plot_travel_time) would be safer, but the name is kept so existing
# callers keep working.
plot <- function(history) {
  library(ggplot2)
  # seq_len() is safe for an empty history; the original c(1:nrow(history))
  # produced c(1, 0) when nrow(history) was 0.
  run_index <- seq_len(nrow(history))
  ggplot(data = history, mapping = aes(y = travel_time, x = run_index)) +
    geom_line() +
    labs(title = "Line plot of travel time")
}
#3.1
# Find the maximum value of a numeric vector and the position of its LAST
# occurrence, scanning element by element.
#
# Fix: the accumulator was initialised to 0, so a vector containing only
# negative numbers incorrectly returned maxvalue = 0 and position = 0.
# It now starts from the first element (and rejects empty input).
find_max_value <- function(x){
  #Check input
  if (!is.vector(x))
    stop("Input should be a vector!")
  if (!is.numeric(x))
    stop("Vector should contain number!")
  if (length(x) == 0)
    stop("Vector should not be empty!")
  max <- x[1]
  position <- 1
  for (i in seq_along(x)) {
    # `<=` keeps the original behaviour of reporting the LAST maximum.
    if (max <= x[i]) {
      max <- x[i]
      position <- i
    }
  }
  output <- list("maxvalue"= max, "max_value_position"= position)
  return(output)
}
#3.2
# O(n): one pass over the vector, linear in length(x)
#3.3
# Faster variant of find_max_value: uses the built-in max() and which()
# instead of an explicit loop, still reporting the LAST position of the
# maximum (same tie-breaking as the loop version).
#
# Fix: removes the unused max/position initialisations and the convoluted
# negate/min/negate detour of the original implementation.
find_max_value_2 <- function(x){
  if (!is.vector(x))
    stop("Input should be a vector!")
  if (!is.numeric(x))
    stop("Vector should contain number!")
  max_value <- max(x)
  position <- tail(which(x == max_value), 1)
  output <- list("maxvalue"= max_value, "max_value_position"= position)
  return(output)
}
#3.4
library(testthat)
context("find_max")
test_that("check wrong input", {
  expect_error(a <- find_max_value("example") )
  # Fix: the original called find_max_value2 (no underscore), which does not
  # exist, so the expectation passed for the wrong reason ("could not find
  # function") instead of exercising the input validation.
  expect_error(a <- find_max_value_2("example") )
})
#initial value
x<-c(1,2,3,56,4,56,5)
test1 <- find_max_value(x)
test2 <- find_max_value_2(x)
test_that("return correct value", {
  expect_equal(test1[[1]], 56 )
  expect_equal(test1[[2]], 6 )
  expect_equal(test2[[1]], 56 )
  expect_equal(test2[[2]], 6 )
})
|
855a56dda4600aa53f939475f3e5f867a9c42911
|
4c2f5c46b33ab8cb94df18bceb40365c3a1f9c05
|
/man/grpFactor.Rd
|
8469d3c53a63a15f0e97164f10d5847b92fc091c
|
[] |
no_license
|
htc502/ewrefxn
|
bbd11c85d4a04ac8a0e5289859f57234b918ae18
|
0303fb85b22db08c26028e518992fc9f7547d49a
|
refs/heads/master
| 2020-04-15T23:47:01.806327
| 2020-01-29T17:42:20
| 2020-01-29T17:42:20
| 40,641,809
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 541
|
rd
|
grpFactor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/microarray.R
\name{grpFactor}
\alias{grpFactor}
\title{'combine' multiple factors}
\usage{
grpFactor(...)
}
\arguments{
\item{...}{factors to be passed in}
\item{sep}{the separator used to combine the factor levels, with '_' as the default}
}
\value{
a new factor with levels combined from each individual factor
}
\description{
this function will group factors of the same length into a single 'combined' factor;
the levels of the individual factors will be combined together
}
|
1ae5a9478d7f7c8f4eb7c7c4cd40d2e9706f4995
|
06c91835ef96c077e8549e7948c36dc6775fdd52
|
/man/get_bounds.Rd
|
d3a372d82a87903348d8a1bc9c9a6867912fe5d3
|
[] |
no_license
|
sscogges/controlhet
|
3293a7f0ad949108f90bea6278d6d61e231013dc
|
0150cbb9fd2247baecf7513c7ea00db232442dae
|
refs/heads/master
| 2021-01-20T04:53:51.475552
| 2017-08-25T10:32:35
| 2017-08-25T10:32:35
| 101,393,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 660
|
rd
|
get_bounds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multipleversions_code_110316.R
\name{get_bounds}
\alias{get_bounds}
\title{Find upper and lower bounds for P(Y(1) | D(0) = 0, D(1) = 1) and
P(Y(1) | D(0) = 2, D(1) = 1)}
\usage{
get_bounds(Y0, Y1, D0, D1)
}
\arguments{
\item{Y0}{observed outcomes in Z = 0 arm}
\item{Y1}{observed outcomes in Z = 1 arm}
\item{D0}{observed treatment types in Z = 0 arm}
\item{D1}{observed treatment types in Z = 1 arm}
}
\value{
a 4-element vector of upper and lower bounds estimates
}
\description{
Find upper and lower bounds for P(Y(1) | D(0) = 0, D(1) = 1) and
P(Y(1) | D(0) = 2, D(1) = 1)
}
|
1d3120871a41fb91d3323f26a7f20e48ffd37144
|
8982143e6eff27f3cd338ced46ef545425af1176
|
/iteration/map-multi-args.R
|
27a77c2185271af9976c427dbaf110ae2eb40d91
|
[] |
no_license
|
uroszivanovic/R-for-Data-Science
|
825f493769e3646de15ebced983a4ffa2aee1d45
|
46ccf93a1a3afd958941461b32eea8ae78319267
|
refs/heads/master
| 2022-06-20T18:42:24.850635
| 2020-05-06T16:46:49
| 2020-05-06T16:46:49
| 257,034,087
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 965
|
r
|
map-multi-args.R
|
# Tutorial: iterating over multiple arguments with purrr (map/map2/pmap).
library(tidyverse)
# One rnorm() call per mean in the list; n = 5 is passed through map()'s ...
mu <- list(5, 10, -3)
mu %>%
  map(rnorm, n = 5) %>%
  str()
# Varying both mean and sd: index the two lists in parallel by position.
sigma <- list(1, 5, 10)
seq_along(mu) %>%
  map(~rnorm(5, mu[[.]], sigma[[.]])) %>%
  str()
#map2() - iterates over two vectors in parallel:
map2(mu, sigma, rnorm, n = 5) %>% str()
###like map(), map2() is just a wrapper around a for loop:
#map2 <- function(x, y, f, ...) {
#out <- vector("list", length(x))
#for (i in seq_along(x)) {
#out[[i]] <- f(x[[i]], y[[i]], ...)
#}
#out
#}
#pmap() - iterates over more than two vectors in parallel:
n <- list(1, 3, 5)
# Unnamed list: elements are matched to rnorm() arguments by position.
args1 <- list(n, mu, sigma)
args1 %>%
  pmap(rnorm) %>%
  str()
#it is better to name the arguments, so the code can be much clearer:
args2 <- list(mean = mu, sd = sigma, n = n)
args2 %>%
  pmap(rnorm) %>%
  str()
#storing args in df (the best solution once the code gets complicated):
params <- tribble(
  ~mean, ~sd, ~n,
  5, 1, 1,
  10, 5, 3,
  -3, 10, 5
)
params %>%
  pmap(rnorm)
|
937dbb28c12716328c1187a2632b8da752266750
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ubci/examples/ubci_index.Rd.R
|
05e6b245f83b2bab935648e30d2c3c733cf8d5d1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 271
|
r
|
ubci_index.Rd.R
|
# Extracted Rd example for ubci::ubci_index().
library(ubci)
### Name: ubci_index
### Title: get ubci index
### Aliases: ubci_index
### ** Examples
## No test:
# Full UBMI index series, then the same series windowed by start/end date.
index <- ubci_index(index = "UBMI")
index
ubci_index(index = "UBMI", from="2018-05-15")
ubci_index(index = "UBMI", to="2018-05-15")
## End(No test)
|
68949c73c8b68f3d65f8ddedffac6c57d03140ea
|
02011265ec12c28d36953a558e613b23950969a4
|
/cachematrix.R
|
d724d9d5da669aa1e7fd356f693e9c5f53070d92
|
[] |
no_license
|
sankarshanbaliga/ProgrammingAssignment2
|
3201c5f490f1beeb228bd1d1f6defd198c3a7af0
|
c16fe9312dc8649814bd7236ba4204fe4c7e4647
|
refs/heads/master
| 2021-01-14T10:27:19.157159
| 2014-04-26T15:34:11
| 2014-04-26T15:34:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,040
|
r
|
cachematrix.R
|
## These functions cache the inverse of a matrix in order to reduce
## time spent on time-consuming operations
## makeCacheMatrix:
## Sets the value of the matrix
## Gets the value of the matrix
## Sets the value of the inverse
## Gets the value of the inverse
# Build a matrix wrapper that can cache its inverse.
# Returns a list of accessors: set/get for the matrix itself and
# setinverse/getinverse for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # a new matrix invalidates the cache
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it only when no cached value is available.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached inverse")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
|
63d8ae1df8ea86fe39b1a30d04cf870162d36f66
|
d711652887bcaebd96da64b7a1163dd8f0102c21
|
/parseByBreeding.R
|
e35acaa8c5c905e4a7e859c0b21890fa4586022b
|
[] |
no_license
|
kaiyaprovost/GISProject
|
51d3e14e7d019fb08a0dc9d2bb8e2db6c6f4b277
|
660d1007f3c1de5c3da08acd3317ba29b1927fad
|
refs/heads/master
| 2020-06-14T12:09:11.430868
| 2016-11-29T00:28:28
| 2016-11-29T00:28:28
| 75,026,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,064
|
r
|
parseByBreeding.R
|
library(maptools)
library(rgeos)

path <- "/Users/kprovost/Documents/PhylogeographyReview/AllRangeMaps/All/"
outDir <- "/Users/kprovost/Documents/PhylogeographyReview/AllRangeMaps/onlyBreeding/"
#setwd(path)
file.names <- dir(path, pattern =".shp")
print(file.names[1])
## SKIPPED BECAUSE WEIRD OR NO BREEDING
## Acrosephalus sorghophilis 112
## Calidris ferruginea 1390 -- DID MANUALLY
## Caprimulgus centralasicus 1537
## Hirundo perdita 4363
## Setopagis maculosa 8839
#for (i in 1:10){
# NOTE: the start index 8840 resumes a previous partial run; use
# seq_along(file.names) to process everything from the beginning.
for (i in 8840:length(file.names)){
  print(i)
  # Strip the ".shp" extension to get the species name.
  nameOnly <- substring(file.names[i], 1, nchar(file.names[i]) - 4)
  print(nameOnly)
  stringShp <- paste0(path, file.names[i])
  # Leave the extension off the output path for writeSpatialShape().
  newString <- paste0(outDir, nameOnly, "_ONLYBREED")
  if (file.exists(paste0(newString, ".shp"))) {
    print(">>>exists")
  } else {
    ## import the shapefile
    shp <- readShapeSpatial(stringShp, repair = TRUE)
    print(summary(shp@data$SEASONAL))
    ## Keep only breeding/resident ranges by dropping SEASONAL codes 3-5 in
    ## one vectorized subset (the original removed 3, 4 and 5 in three
    ## sequential passes). %in% treats NA as no-match, so features with a
    ## missing SEASONAL code are retained rather than producing NA rows.
    shp <- shp[!(shp@data$SEASONAL %in% c(3, 4, 5)), ]
    ## export the shapefile
    writeSpatialShape(shp, newString)
  }
}
## NOTES FROM PREV RUN ##
## 516 DID NOT WORK CANNOT FIGURE OUT WHY - DID MANUALLY IN Q
## 3376 IS KNOWN BY ONLY ONE SPECIMEN - EXCLUDED
## Ones with no breeding or resident ranges:
## 109: "Acrocephalus_sorghophilus_22714704", only "3" listed, skipped
## 262: "Aimophila_notosticta_pl", empty?
## 1635 "Caprimulgus_centralasicus_22689909" only 5
## 4610 subscript oob "Hirundo_perdita_22712390" only 5
## 7274 Periporphyrus_erythromelas_pl - empry?
## 9242 setopagis maculosa 5 only
## Other issues:
## 1483: Brian's conflicted copy, skipped
## 10177 - Todiramphus recurvirostris, no dbf file, skipped b/c Asian bird
|
5efecabb9930242e2b46e2f9c7ccb0021eb75b94
|
9bfa780023ac5f601556dbabdcf6adffd104e5fd
|
/Kocom/R_Pkgs/installpkgs2.R
|
0bc47f6b28f5d9a0a25c792ad99fb06eca76f762
|
[] |
no_license
|
ljinsup/Kocom
|
31b048a9d0add4b70653c4fc9807561c07b16ad2
|
7449a868f742e493e9546ba8f8fff454c426d9c5
|
refs/heads/master
| 2021-01-21T13:48:07.466157
| 2016-05-30T11:26:49
| 2016-05-30T11:26:49
| 51,070,316
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
installpkgs2.R
|
# Install local source packages (no repository) in dependency order,
# then load CEMS to confirm the install succeeded.
cat("==========================================================================")
install.packages("RSclient_0.7-2.tar.gz", repos=NULL, type="source")
cat("==========================================================================")
install.packages("CEMS.tar.gz", repos=NULL, type="source")
cat("==========================================================================")
install.packages("shiny_0.13.0.tar.gz", repos=NULL, type="source")
cat("================== R Packages install complete. ==================")
library(CEMS)
|
c0667446deadb3e3cac1978d97d50f03c12e6041
|
de427995e5a749a1c0d6c05cce92e4b771902ad9
|
/R/enumfactor.R
|
eef33851f06ee145a0b91e84137b4baa2b2b1a26
|
[] |
no_license
|
sophof/enumfactor
|
4ee2840ebdcc5a3f9a045d606afbe148bf02762c
|
b984cfa9085c81924fc413b0e6ae0aba9692c010
|
refs/heads/master
| 2020-05-07T15:03:25.743007
| 2019-04-10T17:28:27
| 2019-04-10T17:28:27
| 180,620,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,306
|
r
|
enumfactor.R
|
#' Create an "enumfactor" (a factor whose levels carry integer indices).
#'
#' S3 generic; dispatches to the default, factor, or enumfactor method.
#' @export
enumfactor <- function(x, ...) UseMethod("enumfactor")
#' Default method: coerce the input to an unordered factor, then delegate to
#' the factor method, which attaches the level indices and validates them.
#' @export
enumfactor.default <- function (x = character(),
                                levels,
                                labels = levels,
                                exclude = NA,
                                nmax = NA,
                                indices) {
  plain <- factor(x, levels, labels, exclude, ordered = FALSE, nmax)
  # A missing `indices` argument propagates through to the factor method.
  enumfactor.factor(plain, indices)
}
#' Factor method: tag the factor with the "enumfactor" class, attach one
#' integer index per level, and validate the result.
#'
#' Fix: the original default-index branch tested `length(nlev) == 0`, which
#' is never TRUE (length() of a scalar is always 1), and then used `1:nlev`,
#' which yields c(1, 0) when there are no levels. seq_len() handles the
#' zero-level case correctly, returning integer(0).
#' @export
enumfactor.factor <- function(x, indices){
  class(x) <- "enumfactor"
  nlev <- nlevels(x)
  if (missing(indices)) {
    indices <- seq_len(nlev)
  }
  if (!is.integer(indices)) indices <- as.integer(indices)
  if (length(indices) != nlev) {
    msg <- sprintf("number of indices (%d) is not equal to number of levels (%d)",
                   length(indices),
                   nlev)
    stop(msg)
  }
  indices(x) <- indices
  if (!isTRUE(val <- .valid.enumfactor(x)))
    warning(val)
  return(x)
}
#' Method for objects that are already enumfactors.
#'
#' Re-levelling an existing enumfactor is not supported yet; calling this
#' method always raises an error.
#' @export
enumfactor.enumfactor <- function(x,
                                  levels,
                                  labels = levels,
                                  exclude = NA,
                                  nmax = NA,
                                  indices){
  stop("not yet implemented")
}
|
c84f576cc6e406a1f49fd4143e035ecca0304f0e
|
16e142bfbcc71936637e3a08b8e791d4e6b7b007
|
/man/setttings.Rd
|
bdc12421cd3ca38f05e7dc1985f848f4759c1767
|
[] |
no_license
|
behrica/quanteda
|
c030ad280ac4b2797de3056e799ef1babdd37a82
|
b2daa7485cabe0bdd28ffb69a88283964e74fc26
|
refs/heads/master
| 2020-12-27T15:05:01.930496
| 2014-11-14T23:37:49
| 2014-11-14T23:37:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
rd
|
setttings.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{settingsInitialize}
\alias{settingsInitialize}
\title{\code{settingsInitialize} returns a list of legal settings, set to their default values}
\usage{
settingsInitialize()
}
\description{
\code{settingsInitialize} returns a list of legal settings, set to their default values
}
|
841721d69c3ac1e24354739efb024ab5c328e530
|
1fbac5899858da3a434aacbefd0cbd7b399a58e7
|
/plotting/plot_kobe_timeseries.R
|
5e7153cb9058da8adb0d91cba1e8dbde4461558c
|
[] |
no_license
|
Ovec8hkin/pws-herring-basa
|
f4c3e30871e5ea0f9afb46104c0c930018ea257f
|
caef7da3a6ae71aaf9b23e5e6b54ea1aebbfacaa
|
refs/heads/main
| 2023-04-06T21:51:04.323235
| 2023-01-18T20:37:00
| 2023-01-18T20:37:00
| 488,278,803
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,386
|
r
|
plot_kobe_timeseries.R
|
library(ggplot2)
library(ggdist)
library(tidyverse)
source(paste0(here::here("functions/"), "fun_read_dat.R"))

# Reference points used to scale biomass and exploitation rate
# (B*/F* denominators) -- NOTE(review): confirm units against the model.
b.star <- 40000
f.star <- 0.20
total.sims <- 1
set.seed(1998)
sims <- sample(1:1e4, size=total.sims)
nyr <- 10
control.rules <- c("base")

# Accumulate one tibble per (control rule, simulation) in a list and bind
# once at the end; growing a data frame with rbind() inside the loop is
# O(n^2) and required a throwaway NA seed row in the original.
chunks <- list()
for (c in control.rules) {
  for (s in sims) {
    print(s)
    model.dir <- paste0(here::here("model/"))
    biomass.estimates <- read.biomass.estimates(model.dir)
    exploit.rate.estimates <- read.exploit.rates(model.dir)
    biomass.est.rel <- as_tibble(biomass.estimates / b.star) %>%
      pivot_longer(everything(), "year", values_to = "biomass")
    exploit.est.rel <- as_tibble(exploit.rate.estimates / f.star) %>%
      pivot_longer(everything(), "year", values_to = "exploit")
    d <- biomass.est.rel
    d$exploit <- exploit.est.rel$exploit
    d$sim <- s
    d$cr <- c
    chunks[[length(chunks) + 1]] <- d
  }
}
data <- bind_rows(chunks)
# pivot_longer produced character years; make them numeric for filtering.
data$year <- as.numeric(data$year)

# Median relative biomass and exploitation per year and control rule.
data.df <- data %>% na.omit() %>%
  group_by(year, cr) %>%
  summarise(
    biomass = median(biomass),
    exploit=median(exploit)
  ) %>%
  print(n=10)

# Background quadrants of the Kobe plot, split at B/B* = 1 and F/F* = 1.
rect_dat <- data.frame(panel = c("bottom_left", "top_right",
                                 "bottom_right", "top_left"),
                       x_min = c(-Inf, 1, 1, -Inf),
                       x_max = c(1, Inf, Inf, 1), y_min = c(-Inf,1, -Inf, 1),
                       y_max = c(1, Inf, 1, Inf))

# Kobe phase plot of the projection years (2022 onward).
kobe.plot <- ggplot(data.df[data.df$year >= 2022, ])+
  geom_rect(data=rect_dat, aes(xmin = x_min, ymin = y_min, xmax = x_max, ymax = y_max, fill = panel))+
  geom_point(aes(x=biomass, y=exploit, color=cr), size=3)+
  #geom_label_repel(data=data.df[(as.numeric(data.df$year) %% 5)== 0,], aes(x=biomass, y=exploit, label=year), box.padding = 0.35, point.padding = 0.5, max.overlaps=5)+
  geom_hline(yintercept=1, size=1)+
  geom_vline(xintercept=1, size=1)+
  scale_fill_manual(values = c("orange","limegreen", "#FF4000", "#fcd526"))+
  coord_cartesian(xlim=c(0, 4), ylim=c(0, 2), expand=FALSE)

# Connect successive years with arrows, one trajectory per control rule.
for(c in control.rules){
  kobe.plot <- kobe.plot + geom_segment(data=data.df[data.df$year >= 2022 & data.df$cr==c,], aes(x=biomass, y=exploit, color=cr, xend=c(tail(biomass, n=-1), NA), yend=c(tail(exploit, n=-1), NA), group=cr), arrow=arrow(length=unit(0.5, "cm")))
}

# Total draws used to normalise the outcome counts below.
# NOTE(review): 5200 is presumably years x posterior draws -- confirm.
n.tot <- 5200*total.sims

kobe.plot
kobe.df <- data %>%
mutate(
kobe.color = ifelse(
data$biomass < 1 & data$exploit > 1,
"red",
ifelse(
data$biomass > 1 & data$exploit < 1,
"green",
ifelse(
data$biomass < 1 & data$exploit < 1,
"orange",
"yellow"
)
)
)
) %>%
na.omit() %>%
group_by(year, cr, kobe.color) %>%
summarise(
n=n()
) %>%
mutate(freq=n/n.tot) %>%
mutate(across(kobe.color, factor, levels=c("red", "orange", "yellow", "green")))
ggplot(kobe.df) +
geom_col(aes(x=year, y=freq, fill=kobe.color))+
scale_fill_manual(values=c("red", "orange", "#fedd1f", "limegreen")) +
coord_cartesian(expand=FALSE)+
scale_x_continuous(breaks=as.integer(seq(1980, 2022+nyr+1, length.out=6)))+
labs(x="Year", y="Proportion of Outcomes", title="Kobe Timeseries")+
facet_wrap(~cr, nrow=2)+
theme(
legend.position = "bottom",
panel.spacing.x = unit(0.4, "in")
)
ggsave(paste0(here::here("figures/"), "kobe_timeseries.pdf"), width=6, height=5, dpi=300)
data.df.2 <- data %>% na.omit() %>%
group_by(year, cr) %>%
summarise(
biomass.quants=quantile(biomass, c(0.025, 0.5, 0.975)),
exploit.quants=quantile(exploit, c(0.025, 0.5, 0.975))
) %>%
mutate(perc=c("2.5%", "50%", "97.5%")) %>%
pivot_wider(
names_from = perc,
values_from = c(biomass.quants, exploit.quants)
) %>%
filter(year == max(data$year, na.rm=TRUE)) %>%
print(n=10)
ggplot(data.df.2)+
geom_rect(data=rect_dat, aes(xmin = x_min, ymin = y_min, xmax = x_max, ymax = y_max, fill = panel))+
geom_point(aes(x=`biomass.quants_50%`, y=`exploit.quants_50%`), size=3, color="white")+
geom_errorbar(aes(x=`biomass.quants_50%`, y=`exploit.quants_50%`, ymin=`exploit.quants_2.5%`, ymax=`exploit.quants_97.5%`), width=0.5, size=1, color="white")+
geom_errorbarh(aes(x=`biomass.quants_50%`, y=`exploit.quants_50%`, xmin=`biomass.quants_2.5%`, xmax=`biomass.quants_97.5%`), height=0.1, size=1, color="white")+
gghighlight(use_direct_label = FALSE)+
geom_hline(yintercept=1, size=1)+
geom_vline(xintercept=1, size=1)+
scale_fill_manual(values = c("orange","limegreen", "#FF4000", "#fcd526"))+
coord_cartesian(xlim=c(0, 5), ylim=c(0, 2), expand=FALSE)+
facet_wrap(~cr, nrow=2)+
theme(legend.position = "bottom")
|
8d5acae4a1bf44d125e05fe6dc0375b6c4181ffd
|
894823803baee99ae414f5d74262c8069ff81014
|
/Scripts/redes.R
|
6d553a2742f243cbd03dded980074232974486cb
|
[] |
no_license
|
itsriodejaneiro/capacitacao-jornalistas
|
8df7b5685e9695520e06d4cd98b87fb767dbc262
|
973817fd135f97211b0f3079233c82d41753ade1
|
refs/heads/main
| 2023-06-10T14:12:36.777405
| 2021-06-28T21:31:44
| 2021-06-28T21:31:44
| 380,345,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,630
|
r
|
redes.R
|
# redes.R -- exploratory retweet-network analysis for the hashtag
# #FechadoComBolsonaro2022, cross-referenced with Pegabot bot scores.
rm(list = ls()) # Clear the environment (NOTE(review): avoid rm(list = ls()) in shared scripts)
options(scipen = 999)
# ITS color palette used in the plots.
paleta_its <- c("#A3248B","#54318C","#255C8E","#00A6B7","#E1F1FD", "#8a8a8a")

# 0. Packages -----------------------------------------------------------------
# NOTE(review): install.packages() should be run once, interactively -- not on
# every execution of the script.
install.packages('igraph')
install.packages('ggraph')
install.packages('readr')
install.packages('tidyverse')
library('igraph')
library('ggraph')
library('readr')
library('tidyverse')
library('ggplot2')
library('scales')

# Read the datasets ----------------------------------------------------
# Tweets containing the hashtag; every column is read as character.
base <- read_csv("./capacitacao-jornalistas-main/Dados/all_#FechadoComBolsonaro2022_2021-06-14 00:00:00_1623963801.csv",
                 col_types = cols(.default = "c"))
# Pegabot bot-probability results for the same Twitter handles.
base_pegabot <- read_csv("./capacitacao-jornalistas-main/Dados/Handles-.fechadocombolsonaro2022-1623963801-resultados.csv")

# Flag which profiles are likely 'bots' (probability > 70%)
base_pegabot <- base_pegabot %>%
  mutate(Resultado = cut(x = `Análise Total`,
                         breaks = c(0,.70, Inf),
                         labels = c("Baixa Probabilidade",
                                    "Alta Probabilidade"))) %>%
  select(`Perfil Twitter`, Resultado)

# Build the retweet network --------------------------------------------
## Run each tweet through a regex to extract the author of the original tweet
# Testing the pattern on a sample tweet:
str_match('RT @bolsomito_2: @taoquei1 Vem voto auditavel #FechadoComBolsonaro2022',
          "(RT|via)((?:[[:blank:]:]\\W*@\\w+))")
# Now build a table of RTs, indicating the author of the original tweet
# and the user who retweeted it.
rts <- filter(base, str_detect(tweet, "(RT|via)((?:[[:blank:]:]\\W*@\\w+)+)")) %>% # keep only tweets whose text contains RT or via
  select(tweet, username) %>% # keep the tweet text and the retweeting user
  mutate(fonte = str_match(tweet, "(RT|via)((?:[[:blank:]:]\\W*@\\w+))")[,3]) %>% # 'fonte' column = source (original author) of the tweet
  mutate(fonte = gsub(" ", "", fonte, fixed = TRUE)) %>% # strip whitespace from the fonte column
  mutate(username = paste0('@', username)) %>% # prepend '@' to username (it was missing)
  select(fonte, username)
head(rts) # show the first rows of the table

# And from that table, build the network:
rt_graph <- graph_from_data_frame(rts, directed = TRUE, vertices = NULL)
gsize(rt_graph) # number of edges
gorder(rt_graph) # number of vertices (nodes)
summary(rt_graph)
## igraph summary legend
# D or U: directed or undirected network
# N: named graph - vertices carry a name attribute (ours do)
# W: weighted network - edges carry a weight attribute (ours do not)
# B: bipartite network - vertices carry a type attribute
# followed by the number of vertices and edges

# Exploratory analyses --------------------------------------------------
# 1) What does the degree distribution of this network look like? #####
# Build a table with that information;
# degree_distribution() already performs the calculation.
dist_grau <- data.frame(y = degree_distribution(rt_graph), x = 1:length(degree_distribution(rt_graph)))
# Plot the result
ggplot(dist_grau) +
  geom_segment(aes(x, y, xend=x, yend=0), color="slateblue") +
  scale_y_continuous(expand=c(0,0), trans="sqrt") +
  labs(x = "Grau",
       y = "Densidade (sqrt)",
       title = "Distribuição de grau da rede de RTs") +
  theme_minimal()

# 2) Who are the most central users #####
# In-degree centrality
grau_in <- degree(rt_graph, mode = 'in') # in-degree
grau_in <- data.frame(screen_name = names(grau_in),
                      grau_in = grau_in, row.names = NULL)
top10_in <- grau_in %>%
  arrange(-grau_in) %>%
  top_n(10); top10_in

# Out-degree centrality
grau_out <- degree(rt_graph, mode = 'out') # out-degree
grau_out <- data.frame(screen_name = names(grau_out),
                       grau_out = grau_out, row.names = NULL)
top10_out <- grau_out %>%
  arrange(-grau_out) %>%
  top_n(10); top10_out

# Betweenness centrality
bet <- betweenness(rt_graph, directed = F, weights = NA, normalized = T)
bet <- data.frame(screen_name = names(bet),
                  bet = bet, row.names = NULL)
top10_bet <- bet %>%
  arrange(-bet) %>%
  top_n(10); top10_bet

# 3) How many components does the network have #####
componentes <- components(rt_graph) # or clusters(rt_graph)
componentes[3] # the `no` element: the count of components
# Decompose into components; keep the first one, simplified (no loops/multi-edges).
rt_graph_dg <- decompose.graph(rt_graph)
rt_graph_1 <- igraph::simplify(rt_graph_dg[[1]])

# 4) What volume of interactions involves at least one 'bot' #####
# Filter all the nodes that are bots
nos_bots <- base_pegabot %>%
  filter(Resultado == 'Alta Probabilidade')
# Filter interactions that involve some bot (as fonte or username)
interacoes_bots <- rts %>%
  filter(fonte %in% nos_bots$`Perfil Twitter` |
         username %in% nos_bots$`Perfil Twitter`)
nrow(interacoes_bots) # number of interactions
percent(nrow(interacoes_bots)/nrow(rts)) # percentage of the total

# 5) How many 'bots' interacted with at least one of the most central users #####
# First gather a list of those most central users
usuarios_centrais <- c(top10_in$screen_name,
                       top10_out$screen_name,
                       top10_bet$screen_name) %>%
  unique()
# As in item 4, filter the interactions that involve bots,
# then also filter those that involve the central users.
interacoes_centrais_bots <- rts %>%
  filter(fonte %in% nos_bots$`Perfil Twitter` |
         username %in% nos_bots$`Perfil Twitter`) %>%
  filter(fonte %in% usuarios_centrais |
         username %in% usuarios_centrais)
nrow(interacoes_centrais_bots) # number of interactions
percent(nrow(interacoes_centrais_bots)/nrow(rts)) # percentage of the total

# 6) At least one plot of the network... #####
# The network is large to plot here, so make a few adjustments:
# Adjust node size: minimum = 50, maximum = the value of the highest-degree node
V(rt_graph_1)$node_size <- unname(ifelse(degree(rt_graph_1)[V(rt_graph_1)] > 50, degree(rt_graph_1), 50))
# Adjust the colors: nodes with degree up to 50 in blue, above 50 in pink
V(rt_graph_1)$color <- unname(ifelse(degree(rt_graph_1)[V(rt_graph_1)] > 50, paleta_its[1], paleta_its[3]))
# And plot
ggraph(rt_graph_1, layout = 'stress') +
  geom_edge_link0(edge_colour = paleta_its[6], edge_width = 0.1, edge_alpha = 0.5)+
  geom_node_point(aes(size = V(rt_graph_1)$node_size),
                  color = V(rt_graph_1)$color, alpha = .7) +
  coord_fixed() +
  theme_graph() +
  theme(legend.position="none")
|
f19ebf1826c8b19dccaf295e18da2d16ade3b069
|
f166b8646ba0fe35cf8e4243ce3157620c2ddfe8
|
/annotation_liftOver_hg19tohg38.R
|
f70a7b6f240725ddad5a551221b0533d85e40d30
|
[] |
no_license
|
ding-lab/cptac_methylation
|
0214a5a3921743fd5aa7707782cca22b263cefb8
|
6b3933cac40f8738dbd5bbd760cf714227fe8e95
|
refs/heads/master
| 2022-10-13T06:41:28.318746
| 2021-01-21T21:06:29
| 2021-01-21T21:06:29
| 135,501,473
| 8
| 5
| null | 2022-09-01T16:47:42
| 2018-05-30T21:55:42
|
R
|
UTF-8
|
R
| false
| false
| 3,731
|
r
|
annotation_liftOver_hg19tohg38.R
|
##########################################################################################
### author: Sunantha Sethuraman <s.sethuraman@wustl.edu>
### usage: Rscript annotation_liftOver_hg19tohg38.R
### created: 08/07/2018
### description: Lifts over the annotation file for methylation data from hg19 to hg38
#########################################################################################
## ----Load libraries---------------------------------------------------------------------
library(rtracklayer)
library(GenomicRanges)
library(IlluminaHumanMethylationEPICanno.ilm10b2.hg19)
library(IlluminaHumanMethylationEPICmanifest)
## ----Fetch chain information------------------------------------------------------------
# UCSC hg19->hg38 liftOver chain; the chain file must be in the working directory.
ch <- import.chain("hg19ToHg38.over.chain")
###----Define a new function--------------------------------------------------------------
# Flatten a GRanges object into a plain data frame with columns
# chromosome / starts / ends / Locus / strands (Locus = range names).
grtodf <- function(gr) {
  cols <- list(
    chromosome = seqnames(gr),
    starts     = start(gr),
    ends       = end(gr),
    Locus      = names(gr),
    strands    = strand(gr)
  )
  do.call(data.frame, c(cols, list(stringsAsFactors = FALSE)))
}
## ----Read annotation and identify columns that need lift over---------------------------
# Full EPIC array annotation (hg19); probe IDs become an explicit Locus column.
annEPIC= as.data.frame(getAnnotation(IlluminaHumanMethylationEPICanno.ilm10b2.hg19))
annEPIC$Locus <- rownames(annEPIC)
# Annotation columns that carry no genomic coordinates, so they pass through
# the lift over unchanged and are merged back in at the end.
no_coord <- c("Name", "Probe_rs", "Probe_maf", "CpG_rs", "CpG_maf", "SBE_rs", "SBE_maf",
              "Relation_to_Island", "UCSC_RefGene_Group", "DMR", "X450k_Enhancer",
              "Regulatory_Feature_Group", "GencodeBasicV12_NAME", "GencodeBasicV12_Accession",
              "GencodeBasicV12_Group", "GencodeCompV12_NAME", "GencodeCompV12_Accession",
              "GencodeCompV12_Group", "DNase_Hypersensitivity_Evidence_Count", "OpenChromatin_Evidence_Count",
              "TFBS_Evidence_Count", "Methyl27_Loci", "Methyl450_Loci", "Random_Loci")
annEPIC_nocoord <- annEPIC[,c("Locus", no_coord)]
## ----Process all coordinate containing columns------------------------------------------
# Process the first few columns
# Lift the primary probe coordinates (chr/pos/strand); each probe is a
# 1-bp range, so Start == End == pos.
primary <- annEPIC[,c("Locus", "chr", "pos", "pos", "strand")]
colnames(primary) <- c("Locus", "Chromosome", "Start", "End", "Strand")
temp <- makeGRangesFromDataFrame(primary)
names(temp) <- primary$Locus
genome(temp) <- "hg19"
seqlevelsStyle(temp) <- "UCSC"
# unlist() flattens the GRangesList from liftOver(); probes that fail to
# lift are dropped here.
temp <- unlist(liftOver(temp, ch))
temp <- grtodf(temp)
primary <- temp[,c("chromosome", "starts", "Locus", "strands")]
colnames(primary) <- c("chr", "pos", "Locus", "strand")
# Process other columns with coordinate information
# These annotation columns embed coordinates as "<prefix>:<start>-<end>" strings.
coord_cols <- c("Islands_Name", "Phantom4_Enhancers", "Phantom5_Enhancers", "HMM_Island",
                "Regulatory_Feature_Name", "DNase_Hypersensitivity_NAME", "OpenChromatin_NAME", "TFBS_NAME")
# Lift one embedded-coordinate column: parse "<chr>:<start>-<end>", lift the
# ranges to hg38, and rebuild the string per Locus. Returns a 2-column data
# frame (Locus + the lifted column) for merging.
liftcols <- function(column) {
  temp <- annEPIC[,c("Locus", "chr", column, "strand")]
  # Chromosome prefix before the ":", keyed by Locus; empty entries dropped.
  prefix <- gsub(":.*$", "", temp[,column])
  names(prefix) <- temp[,"Locus"]
  prefix <- prefix[prefix != ""]
  # Strip the prefix, then split the remainder into Start/End.
  temp[,column] <- gsub("^.*:", "", temp[,column])
  temp$Start <- gsub("-.*$", "", temp[,column])
  temp$End <- gsub("^.*-", "", temp[,column])
  temp <- temp[temp$Start != "",]
  temp <- makeGRangesFromDataFrame(temp)
  # NOTE(review): assumes the prefix != "" filter and the Start != "" filter
  # retain exactly the same rows so names(temp) aligns -- TODO confirm.
  names(temp) <- names(prefix)
  genome(temp) <- "hg19"
  seqlevelsStyle(temp) <- "UCSC"
  temp <- unlist(liftOver(temp, ch))
  temp <- grtodf(temp)
  # Re-align prefixes to the loci that survived the lift over.
  prefix <- prefix[temp$Locus]
  temp$column <- paste0(prefix, ":", temp$starts, "-", temp$ends)
  temp <- temp[,c("Locus", "column")]
  colnames(temp)[2] <- column
  return(temp)
}
# Lift every embedded-coordinate column (list of 2-column data frames).
lifted <- lapply(coord_cols, liftcols)
## ----Merge dataframes progressively-----------------------------------------------------
# Left joins on Locus keep every probe whose primary coordinate lifted over;
# unlifted values in the extra columns become NA.
ann <- merge(primary, annEPIC_nocoord, by = "Locus", all.x=TRUE)
for(i in 1:length(lifted)) {
  ann <- merge(ann, lifted[[i]], by = "Locus", all.x=TRUE)
}
write.table(ann, "annotation_liftedOver_hg38.txt", row.names=F, sep="\t", quote=F)
|
a4d9bf4a13def3d635a1ee5ef1f16bd732cb6684
|
fe803e1beb20030c97b7e8ccde2ba58af2e58173
|
/bayesian-data-analysis/stan/chapter3/timeseries.r
|
6b03f28ec08d3951ff14c7a4f38a8b02b0aed8c3
|
[] |
no_license
|
yc3356/courses
|
0c9c1d9fa063a0419fee2c8b664bf768492f9d4e
|
a1902818b5d4b2f78a9880774295144374f84eac
|
refs/heads/master
| 2022-08-01T10:18:29.076698
| 2020-05-13T06:59:46
| 2020-05-13T06:59:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 729
|
r
|
timeseries.r
|
# timeseries.r -- fit a per-series linear time trend to 1000 simulated
# series, inspect the distribution of slopes, then fit a 3-component
# mixture model to the slopes with Stan.
series <- matrix(scan("Series1000.txt"), nrow=1000, ncol=135, byrow=TRUE)
T <- 135   # series length (NOTE(review): `T` shadows the TRUE shorthand)
N <- 1000  # number of series

# Spaghetti plot of all series.
par(mar=c(3,3,2,0), tck=-.01, mgp=c(1.5,.5,0))
plot(c(1,T), range(series), bty="l", type="n", xlab="Time", ylab="series")
for (n in 1:N){
  lines(1:T, series[n,], lwd=.5)
}

# Per-series OLS slope (x100) and its standard error (x100), preallocated.
slope <- rep(NA, N)
se <- rep(NA, N)
for (n in 1:N){
  data <- series[n,]
  time <- 1:T
  fit <- lm(data ~ time)
  slope[n] <- 100*coef(fit)[2]
  # BUG FIX: se.coef() belongs to the 'arm' package, which is never loaded
  # here; extract the slope's standard error from summary() instead (base R).
  se[n] <- 100*summary(fit)$coefficients[2, 2]
}

# Slope vs. SE scatter and a histogram of the slopes (bin width 0.1).
plot(slope, se, bty="l", xlab="Slope", ylab="SE", pch=20)
hist(slope, xlab="Slope", breaks=seq(floor(10*min(slope)), ceiling(10*max(slope)))/10)

# fit a mixture model with fixed component means mu
# NOTE(review): stan() requires library(rstan), which is not loaded in this
# script -- confirm it is attached by the calling environment.
y <- slope
K <- 3
mu <- c(0,-1,1)
data <- list(y=y, K=K, mu=mu)
fit <- stan("timeseries.stan", data=data)
print(fit)
|
721f9e052f03b33825fea4fa9d909bb3e064dbea
|
0819482cde73b1436a2721b1e61679f34b8e9ebf
|
/section04/exercise4_6.R
|
50fd01dabfc523ade8e148eb55fe33e154e10ccb
|
[] |
no_license
|
YeseniaDS/UT-Austin-SDS383-Statistical-Modeling-II
|
575da6e0498a1c78be902ece62e680b9465fb540
|
d78e200060dad6c7c10801daadee82366e91bb65
|
refs/heads/master
| 2023-06-08T05:12:37.399033
| 2018-04-29T05:49:48
| 2018-04-29T05:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,435
|
r
|
exercise4_6.R
|
# exercise4_6.R -- maximum-likelihood estimation of Gaussian-process
# hyperparameters (squared-exponential kernel) on the Old Faithful data.
# NOTE(review): rm(list = ls()) and setwd() with an absolute path are script
# anti-patterns; prefer a fresh session and project-relative paths.
rm(list = ls())
setwd("D:/2018 UT Austin/R Code/Statistical Modeling II/section04")
library(MASS)
#---------------------------------------------------------------------
# Waiting time as input x; standardized eruption duration as response y.
faithful = read.csv("faithful.csv")
x <- faithful$waiting; y <- scale(faithful$eruptions)
n <- length(x)  # number of observations
#---------------------------------------------------------------------
# define the squared exponential function
# Squared-exponential (RBF) covariance of the noisy observations.
#
# Args:
#   x: numeric vector (or matrix of rows) of input locations.
#   alpha: marginal standard deviation (signal amplitude).
#   l: length-scale.
#   sigma: observation-noise standard deviation added on the diagonal.
# Returns: the covariance matrix K_y = K(x, x) + sigma^2 * I.
sqexp <- function(x, alpha = 1, l = 1, sigma = 0.01) {
  kernel <- alpha^2 * exp(-0.5 / (l^2) * as.matrix(dist(x, upper = TRUE, diag = TRUE)^2))
  # BUG FIX: previously added sigma^2 * diag(n) using the *global* n; derive
  # the size from the kernel itself so the function is self-contained.
  kernel + sigma^2 * diag(nrow(kernel))
}
#---------------------------------------------------------------------
# define the objective function (negative log-likelihood)
# Negative log marginal likelihood of y under a zero-mean GP with SE kernel.
#
# Args:
#   hypa: numeric vector of hyperparameters c(alpha, l, sigma).
#   x: input locations, forwarded to sqexp().
#   y: observed (column-vector) response.
# Returns: scalar negative log-likelihood (to be minimized by optim()).
loglike <- function(hypa, x, y) {
  # covariance matrix of y
  K <- sqexp(x, alpha = hypa[1], l = hypa[2], sigma = hypa[3])
  # NOTE(review): solve() + determinant() is numerically fragile for
  # ill-conditioned K; a Cholesky factorization would be more stable.
  K.inv <- solve(K)
  # BUG FIX: the 0.5*n*log(2*pi) constant previously read the *global* n;
  # derive the sample size from y so the function is self-contained.
  n.obs <- length(y)
  nll <- as.numeric(0.5*t(y) %*% K.inv %*% y +
                    0.5*determinant(K, logarithm = TRUE)$modulus) + 0.5*n.obs*log(2*pi)
  return(nll)
}
#---------------------------------------------------------------------
# initialize hyperparameters and optimize
# Minimize the negative log-likelihood over (alpha, l, sigma) from the
# starting point (1, 3, 1); the trailing x, y are forwarded through optim's
# `...` to loglike.
# NOTE(review): L-BFGS-B supports box constraints but none are given, so the
# optimizer may step into negative hyperparameters -- consider adding
# lower = c(1e-6, 1e-6, 1e-6).
optimal <- optim(par = c(1, 3, 1), fn = loglike,
                 gr = NULL, method ="L-BFGS-B", x, y)
print(optimal)
#---------------------------------------------------------------------
# plot the fit
# see updated R script exercise4_5
|
b9e5ad3eedb439997a3a0297929105166c9e3746
|
f0a9139920d61bdb5b814dfe6e315de0e045292f
|
/Monthly counts.R
|
1d8e8008d7b66c0537c0a638b08a635ef920ca82
|
[] |
no_license
|
mzkaramat/BillPrecitor
|
bae5dd0a3a7e0361d8f22bba69e6741ab47d0574
|
83e19c5c55628fc6fac0315446c954b95c26ef29
|
refs/heads/master
| 2021-01-20T05:25:19.436769
| 2017-04-30T13:05:42
| 2017-04-30T13:05:42
| 89,777,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,480
|
r
|
Monthly counts.R
|
# Monthly counts.R
# Query monthly introduction/referral action counts for the five busiest
# congressional committees and plot them as an interactive plotly line chart.
library(plotly)  # duplicate library(plotly) call removed
library(RPostgreSQL)  # was require(); library() fails loudly if missing

# SECURITY(review): database credentials are hard-coded in this script; move
# them to environment variables or a config file kept out of version control.
pw <- "G7iFeMJnkukW"

# Open a connection to the postgres database; "con" is used for each query.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, dbname = "congress",
                 host = "192.168.128.193", port = 5431,
                 user = "user04", password = pw)

# Top-5 committees by total IntroReferral actions, counted per year-month.
sql_command<- "SELECT c.name, to_char(a.action_date, 'YYYY-MM') dt, count(*) cnt FROM actions a, committees c WHERE a.committee = c.id AND a.type = 'IntroReferral' AND c.name IN ( SELECT name FROM ( SELECT c.name, count(*) cnt FROM actions a, committees c WHERE a.committee = c.id AND a.type = 'IntroReferral' GROUP BY c.name ORDER BY cnt DESC LIMIT 5) tb) GROUP BY c.name, to_char(a.action_date, 'YYYY-MM') order by dt , cnt desc"
bills <- dbGetQuery(con, sql_command)
bills <- bills[, c(2, 1, 3)]  # reorder columns to dt, name, cnt

# Cross-tabulate counts as month x committee. BUG FIX: xtabs() returns a
# contingency table (an atomic array), so `cr$dt <- ...` on it errored with
# "$ operator is invalid for atomic vectors"; convert to a data frame first,
# then attach the month column.
cr <- as.data.frame.matrix(xtabs(cnt ~ ., bills))
cr$dt <- rownames(cr)

# One line per committee; columns 1-5 are the committee count columns
# (dt was appended last, so the indices are unaffected).
p <- plot_ly(cr, x = ~dt, y = ~cr[,1], name = colnames(cr)[1], type = 'scatter', mode = 'lines') %>%
  add_trace(y = ~cr[,2], name = colnames(cr)[2], mode = 'lines') %>%
  add_trace(y = ~cr[,3], name = colnames(cr)[3], mode = 'lines') %>%
  add_trace(y = ~cr[,4], name = colnames(cr)[4], mode = 'lines') %>%
  add_trace(y = ~cr[,5], name = colnames(cr)[5], mode = 'lines')
p
|
6f8e8b75b61f63c5da92aae4cac63e7c99543f1b
|
e3d3bfacc366e44e695db121189b81d1807df552
|
/man/XLgeneric.Rd
|
5e70cfcabc0de2f97a555ecf76c41ce68b8b650d
|
[] |
no_license
|
cran/table1xls
|
0118cd99362b34714ac7e70f5aab8208f67049f3
|
304833adabec176fc3d8b10c8c33143f2c836809
|
refs/heads/master
| 2021-01-17T07:40:08.188080
| 2017-07-26T21:04:36
| 2017-07-26T21:04:36
| 17,700,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,647
|
rd
|
XLgeneric.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/XLgeneric.r
\name{XLgeneric}
\alias{XLgeneric}
\title{Write generic rectangular data to a spreadsheet}
\usage{
XLgeneric(wb, sheet, dataset, title = NULL, addRownames = FALSE,
rowNames = rownames(dataset), rowTitle = "Name", colNames = NULL,
row1 = 1, col1 = 1, purge = FALSE)
}
\arguments{
\item{wb}{a \code{\link[XLConnect]{workbook-class}} object}
\item{sheet}{numeric or character: a worksheet name (character) or position (numeric) within \code{wb}.}
\item{dataset}{the rectangular structure to be written. Can be a data frame, table, matrix or similar.}
\item{title}{character: an optional overall title to the table. Default (\code{NULL}) is no title.}
\item{addRownames}{logical: should a column of row names be added to the left of the structure? (default \code{FALSE})}
\item{rowNames}{character: vector of row names. Default \code{rownames(dataset)}, but relevant only if \code{addRownames=TRUE}.}
\item{rowTitle}{character: the title to be placed above the row name column (default "Name")}
\item{colNames}{character: vector of column names to replace the original ones. Default \code{NULL}, meaning that the original names are left intact. Note that the title for the row-names column (if \code{addRownames=TRUE}) is \emph{not} considered part of \code{colNames}, and is set separately.}
\item{row1, col1}{numeric: the first row and column occupied by the output.}
\item{purge}{logical: should \code{sheet} be created anew, by first removing the previous copy if it exists? (default \code{FALSE})}
}
\value{
The function returns invisibly, after writing the data into \code{sheet} and saving the file.
}
\description{
Export a generic data frame, matrix or table to a spreadsheet and save the file.
}
\details{
This function is a convenience wrapper for getting practically any rectangular data structure into a spreadsheet, without worrying about conversion or spreadsheet-writing technicalities.
If the structure is not a data frame (or inherited from one), but a table or matrix, the function will convert it into one using \code{\link{as.data.frame.matrix}}, because data frames are what the underlying function \code{\link{writeWorksheet}} can export.
See the \code{\link{XLtwoWay}} help page, for behavior regarding new-sheet creation, overwriting, etc.
}
\examples{
t1<-XLwriteOpen("generic1.xls")
### Just a meaningless matrix; function converts to data.frame and exports.
XLgeneric(t1,"s1",matrix(1:4,nrow=2))
### Now adding row names, title, etc. Note adding the title shifts the table one row down.
XLgeneric(t1,"s1",matrix(1:4,nrow=2),col1=5,addRownames=TRUE,
title="Another Meaningless Table",rowTitle="What?",
rowNames=c("Hey","You!"))
###... and now adding some text
XLaddText(t1,"s1","You can also add text here...",row1=10)
XLaddText(t1,"s1","...or here.",row1=11,col1=8)
XLaddText(t1,"s2",
"Adding text to a new sheet name will create that sheet!"
,row1=2,col1=2)
### A more complicated example, showing how a "flattened" 3-way table might be exported:
carnames=paste(rep(c(4,6,8),each=2),"cylinders",rep(c("automatic","manual"),3))
XLgeneric(t1,'cars',ftable(mtcars$cyl,mtcars$vs,mtcars$am),
addRownames=TRUE,rowNames=carnames,rowTitle="Engine Type",colNames=c("S","V"))
cat("Look for",paste(getwd(),"generic1.xls",sep='/'),"to see the results!\\n")
}
\seealso{
For two-way contingency tables, see \code{\link{XLtwoWay}}.
}
\author{
Assaf P. Oron \code{<assaf.oron.at.seattlechildrens.org>}
}
|
ab4c36772bbf1654d1f16af29c9f36039516ec26
|
d2f14956ce85d338da1fa41cd16a1f5f472f1c88
|
/ui.R
|
2b643331e6609efb639adee293fbdfadefd7fecb
|
[] |
no_license
|
BLKhoo/developing-data-products
|
bf18ff8a94134f598cb153fc589660523af98a33
|
c714672f6aae18812a36c273858ae30cbc78a9b5
|
refs/heads/master
| 2016-09-01T07:46:20.120816
| 2015-09-27T17:18:06
| 2015-09-27T17:18:06
| 43,255,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,314
|
r
|
ui.R
|
# Application to determine the low enough price based on stock CAGR and lowest point in the growth trend line based on linear
# regression and also the log price assume stock price over time would have 2^t growth where t is the time.
# FIX: corrected user-facing text defects only ("response due to
# networklatency", "Is is safer", "DISCLAMER"); layout and inputs unchanged.
library(shiny)

shinyUI(pageWithSidebar(
  headerPanel("What should be a low enough price considering Stock fundamental CAGR?"),
  sidebarPanel(
    h5("Please allow some time for the server to respond due to network latency"),
    textInput('stk',"Update valid Stock code:","IBM"),
    #numericInput('id1', 'offset LM line',-1,min=-10,max=10,step = 1),
    h5(" It is assumed here that stock price reflects fundamental and more importantly the sentiments and market psychology which cause the gyrations.
       The general trend could be considered as the fundamental CAGR which can be positive or negative. It is safer to invest in POSITIVE CAGR stock"),
    h5(" The log plot on the right shows the plot of log price over time with a LM line , considered as the compounded annual growth of the stock,.
       Adjusting the offset to bring the lower black line to touch lowest price points (at least 2 points) would indicate the lowest price point to invest , resulting in higher probability of upside or gain"),
    h5("The red lm line represents the neutral CAGR or fair CAGR. Any price below would provide more discount and the lower the better for long term positive CAGR stock"),
    sliderInput("id1","Enter offset:",value=0,min=0,max=4,step=0.1),
    # dateInput("date", "Date:"),
    submitButton("Submit"),
    h6("DISCLAIMER: Do not base any investment and or trading decision solely on the information provided here.
       We accept no liability if you use the information to form trading or
       investing decisions or to trade real money. You should seek your own investment
       advice before making any decision based on the information from a licensed financial
       professional who will consider your personal objectives and circumstances.
       Note that data source is obtained from yahoo & subject to availability and accuracy of the data as provided")
  ),
  mainPanel(
    h4( "Stock /Date / Value /CAGR"),
    verbatimTextOutput("ostk"),
    plotOutput('stkPlot'),
    plotOutput('stkPlot2')
  )
))
|
a101ff8676d735f391753916c814f56e20ef1b50
|
42b641dd7b2c00d6448c7d7cb0ffedd5ee37124a
|
/scripts/R-scripts/scale_analysis_CT.R
|
1d18ed913c4f4cd6ff3f52d6a81791ecf4a8f894
|
[] |
no_license
|
hurlbertlab/core-transient
|
2f1c8366912f6f4aec80f821585b3e196265af29
|
56b8cbb1f9049f4dd4a46f16b4d3944f6600b71a
|
refs/heads/master
| 2021-01-23T14:46:51.972712
| 2020-06-09T14:35:50
| 2020-06-09T14:35:50
| 17,714,944
| 6
| 1
| null | 2016-01-25T18:32:24
| 2014-03-13T15:28:18
|
R
|
UTF-8
|
R
| false
| false
| 9,014
|
r
|
scale_analysis_CT.R
|
# scale analysis function
# Load libraries:
library(stringr)
library(ggplot2)
library(grid)
library(gridExtra)
library(MASS)
library(dplyr)
library(tidyr)
library(lme4)
library(plyr)
# Source the functions file:
source('scripts/R-scripts/core-transient_functions.R')
getwd()
# Set your working directory to be in the home of the core-transient repository
# e.g., setwd('C:/git/core-transient')
# Min number of time samples required
minNTime = 6
# Min number of species required
minSpRich = 10
# Ultimately, the largest number of spatial and
# temporal subsamples will be chosen to characterize
# an assemblage such that at least this fraction
# of site-years will be represented.
topFractionSites = 0.5
dataformattingtable = read.csv('data_formatting_table.csv', header = T)
datasetIDs = filter(dataformattingtable, spatial_scale_variable == 'Y',
format_flag == 1)$dataset_ID
datasetIDs = datasetIDs[datasetIDs != c(1,317)] #dropped 317 bc ended up only being one spatial grain
summ = read.csv('output/tabular_data/core-transient_summary.csv', header=T)
grainlevels = c()
#function(datasetID, dataDescription) {
for(datasetID in datasetIDs){
print(datasetID)
dataset7 = read.csv(paste('data/formatted_datasets/dataset_', datasetID, '.csv', sep = ''))
dataDescription = subset(read.csv("data_formatting_table.csv"),dataset_ID == datasetID)
spatialgrains = dataDescription$Raw_siteUnit
spatialgrains = as.character(spatialgrains)
spatialgrains = unlist(strsplit(spatialgrains, '_'))
#spatialgrains = spatialgrains[length(spatialgrains):1] #reversing order to be from small to large
#spatialgrains = c(spatialgrains, maxGrain)
spatialgrain = c()
grainLevel = 1
for (sg in spatialgrains) {
spatialgrain = paste(spatialgrain, sg, sep = "_")
if (substr(spatialgrain, 1, 1) == "_") {
sGrain = substring(spatialgrain, 2, nchar(spatialgrain))
} else {
sGrain = spatialgrain
}
print(sGrain)
tGrain = "year"
if (nchar(as.character(dataset7$date[1])) > 4|is.na(dataset7$date[1])){
dataset7$date = as.POSIXct(strptime(as.character(dataset7$date), format = "%Y-%m-%d"))
}
richnessYearsTest = richnessYearSubsetFun(dataset7, spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime,
minSpRich = minSpRich,
dataDescription)
if(class(richnessYearsTest) == "character"){
goodSites = 0
break
}else
goodSites <- unique(richnessYearsTest$analysisSite)
uniqueSites = unique(dataset7$site)
fullGoodSites = c()
for (s in goodSites) {
tmp = as.character(uniqueSites[grepl(paste(s, "_", sep = ""), paste(uniqueSites, "_", sep = ""))])
fullGoodSites = c(fullGoodSites, tmp)
}
dataset8 = subset(dataset7, site %in% fullGoodSites)
if(goodSites == 0){
subsettedData = dataset7
}else{
subsettedData = subsetDataFun(dataset8,
datasetID, spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime, minSpRich = minSpRich,
proportionalThreshold = topFractionSites,
dataDescription)
writePropOccSiteSummary(subsettedData$data, spatialGrainAnalysis = TRUE, grainLevel = grainLevel)}
print(grainLevel)
grainLevel = grainLevel + 1
} # end of spatial grain loop
grainlevels = rbind(grainlevels, c(datasetID, grainLevel-1))
} # end dataset loop
grainlevels = data.frame(grainlevels)
colnames(grainlevels) = c("datasetID", "NumGrains")
write.csv(grainlevels, "output/tabular_data/grainlevel.csv", row.names=FALSE)
# Merge all output files into 1 file
#grainlevels = read.csv("output/tabular_data/grainlevels.csv", header = TRUE)
files = list.files("data/spatialGrainAnalysis/propOcc_datasets")
bigfile = c()
#scale = c()
for(file in files){
nfile= read.csv(paste("data/spatialGrainAnalysis/propOcc_datasets/", file, sep = ""), header=TRUE)
scale = substring(file, 18,last = 18)
bigfile = rbind(bigfile, nfile)
#scale=rbind(scale, unique(bigfile$datasetID))
}
bigfile=data.frame(bigfile)
#scale = data.frame(scale)
# --- Attach taxon labels to the proportional-occupancy table and cache it ---
bigfile_taxa = merge(bigfile, dataformattingtable[,c('dataset_ID', 'taxa')], by.x = 'datasetID', by.y = "dataset_ID")
#biggile_scale= merge(bigfile, dataformattingtable[,c('dataset_ID', 'taxa')], )
write.csv(bigfile_taxa, "output/tabular_data/propOcc_w_taxa.csv", row.names=FALSE)
##### If just running analysis #####
propOcc_w_taxa = read.csv("output/tabular_data/propOcc_w_taxa.csv", header = TRUE) # read in file if not running whole code
# rbind site_summary files
# The spatial grain (scale) is parsed from a fixed character position in each
# file name -- assumes a single-digit scale at position 22; TODO confirm naming.
summfiles = list.files("data/spatialGrainAnalysis/siteSummaries")
allsummaries = c()
for(file in summfiles){
nfile= read.csv(paste("data/spatialGrainAnalysis/siteSummaries/", file, sep = ""), header= TRUE)
nfile$scale = as.numeric(substring(file, 22,last = 22))
nfile$site = as.factor(nfile$site)
allsummaries = rbind(allsummaries, nfile)
}
allsummaries = data.frame(allsummaries)
# rbind propOcc files (scale parsed from position 18 here -- TODO confirm)
propOccfiles = list.files("data/spatialGrainAnalysis/propOcc_datasets")
allpropOcc = c()
for(file in propOccfiles){
nfile= read.csv(paste("data/spatialGrainAnalysis/propOcc_datasets/", file, sep = ""), header= TRUE)
nfile$scale = as.numeric(substring(file, 18,last = 18))
nfile$site = as.factor(nfile$site)
allpropOcc = rbind(allpropOcc, nfile)
}
allpropOcc = data.frame(allpropOcc)
# count up spRich with and without transients (for Fig 4)
# "Transient" species are those with occupancy <= 1/3.
notransrich = allpropOcc %>% filter(propOcc > 1/3) %>% dplyr::count(datasetID, site, scale)
write.csv(notransrich, "output/tabular_data/notransrich.csv", row.names = FALSE)
allrich = allpropOcc %>% dplyr::count(datasetID, site, scale)
write.csv(allrich, "output/tabular_data/allrich.csv", row.names = FALSE)
# Summary statistics by datasetID/site, i.e. mean occupancy, % transient species (<=1/3)
summaries_taxa = merge(allsummaries, dataformattingtable[,c("dataset_ID","taxa","Raw_spatial_grain", "Raw_spatial_grain_unit")], by.x = 'datasetID', by.y = "dataset_ID", all.x=TRUE)
# summaries_taxa = summaries_taxa[! datasetID %in% c(207, 210, 217, 218, 222, 223, 225, 238, 241,258, 282, 322, 280,317)]
#write.csv(summaries_taxa, "output/tabular_data/summaries_grains_w_taxa.csv", row.names=FALSE)
#summaries_taxa = read.csv("output/summaries_grains_w_taxa.csv", header = TRUE) # read in file if not running whole code
# merge in conversion table
conversion_table = read.csv("output/tabular_data/conversion_table.csv", header =TRUE)
summaries_grains_w_taxa = merge(summaries_taxa, conversion_table, by.x = "Raw_spatial_grain_unit", by.y = "intl_units")
# Per dataset/site: mean occupancy plus shares of transient (<= 1/3),
# core (> 2/3), and intermediate species.
mean_occ_by_site = propOcc_w_taxa %>%
group_by(datasetID, site) %>%
dplyr::summarize(meanOcc = mean(propOcc),
pctTrans = sum(propOcc <= 1/3)/n(),
pctCore = sum(propOcc > 2/3)/n(),
pctNeither = 1-(pctTrans + pctCore))
occ_taxa = merge(mean_occ_by_site, summaries_grains_w_taxa, by = c("datasetID", "site"))
occ_taxa = occ_taxa[order(occ_taxa$datasetID, occ_taxa$scale, occ_taxa$site, decreasing = F), ]
write.csv(occ_taxa,"output/tabular_data/occ_taxa.csv", row.names=FALSE)
# Calculating number of core, trans, and total spp for each dataset/site combo
propOcc_demog = merge(propOcc_w_taxa, occ_taxa, by = c("datasetID", "site"))
propOcc_w_taxa$spptally = 1
totalspp = propOcc_w_taxa %>% group_by(datasetID, site) %>% tally(spptally)
# Counts of transients at three occupancy cutoffs (1/3, 1/4, 1/10).
numCT= propOcc_w_taxa %>% group_by(datasetID, site) %>%
dplyr::summarize(numTrans33 = sum(propOcc <= 1/3), #33%
numTrans25 = sum(propOcc <= 1/4), #25%
numTrans10 = sum(propOcc <= 1/10), #10%
numCore=sum(propOcc > 2/3),
n = sum(spptally),
perTrans33 = sum(propOcc <= 1/3)/n, #33%
perTrans25 = sum(propOcc <= 1/4)/n, #25%
perTrans10 = sum(propOcc <= 1/10)/n, #10%
meanOcc = mean(propOcc, na.rm = T))
numCT = merge(propOcc_w_taxa[,c("datasetID", "site", "taxa")], numCT, by= c("datasetID", "site"))
write.csv(numCT,"output/tabular_data/numCT.csv", row.names=FALSE)
spptotals = merge(totalspp, numCT, by= c("datasetID", "site"))
# for each dset - the propocc as response and the # of grain levels, community size, and random effect of taxa would be the predictor variables
# Fixed taxon ordering and matching colour palette used for plotting.
taxorder = c('Bird', 'Plant', 'Mammal', 'Fish', 'Arthropod', 'Benthos', 'Plankton', 'Invertebrate')
col.palette=c("blue","green", "purple", "light blue","gold", "dark blue", "red", "dark green")
taxcolors = data.frame(taxa = taxorder, color = col.palette)
# calc area at ALL scales
# summaries_grains_w_taxa
# our model
# NOTE(review): `(1|taxa) * scale` is unusual lme4 formula syntax -- verify the
# intended model (e.g. `scale + (1|taxa)` or `(scale|taxa)`).
mod1 = lmer(pctTrans ~ (1|taxa) * scale, data=occ_taxa)
|
fe87e4a9ade9062263058a64308f11f6a351f92f
|
c31f112a1fb43dd4fd3c808e905600cfec50bec3
|
/app_server.R
|
bed4c7217f40bdfc6b04a3b4c0ecc5e6cf25048f
|
[] |
no_license
|
bmetsker/finalproject_info201
|
20d32691a4714bce0ea7d349b1efc63865e51874
|
a33f2a8c4a02327bddffdec26adb7b6428f73d9e
|
refs/heads/master
| 2020-08-29T19:59:39.197574
| 2019-12-04T21:16:50
| 2019-12-04T21:16:50
| 218,157,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 953
|
r
|
app_server.R
|
library("ggplot2")
library("plotly")
library("dplyr")
source("Metric1Analysis.R")
source("Metric2Analysis.R")
source("scripts/Metric3Analysis(Kunpei).R")
# Shiny server: wires the three metric plots (adjusted close, volume,
# fluctuation) to their date-range and stock selectors. The plotting helpers
# (FedCompareGraph, VolumeGraph, fluctuationCompareGraph) and the data objects
# come from the scripts source()d at the top of this file.
server <- function(input, output) {
  output$Metric1Graph <- renderPlotly(
    FedCompareGraph(input$Metric1Date[1], input$Metric1Date[2], paste(input$Metric1Stock,"Adj.Close"), AdjData)
  )
  output$Metric2Graph <- renderPlotly(
    VolumeGraph(input$Metric2Date[1], input$Metric2Date[2], paste(input$Metric2Stock,"Volume"), VolumeData)
  )
  output$Metric3Graph <- renderPlotly(
    fluctuationCompareGraph(input$Metric3Date[1], input$Metric3Date[2], input$Metric3Stock, FluctuationData)
  )
  # Map a sector index (1-4) to its display name; NULL for any other code.
  # NOTE(review): currently unused by the outputs above.
  convert <- function(input) {
    if (input == 1) {
      return("Consumer Goods")
    } else if (input == 2) {
      return("Technology")
    } else if (input == 3) {
      return("Healthcare")
    } else if (input == 4) {
      return("RealEstate")
    } else {
      # BUG FIX: was `return(null);` -- `null` is undefined in R; the
      # sentinel for "no value" is NULL (and R needs no trailing semicolon).
      return(NULL)
    }
  }
}
|
b99aa2bafc8c426166f502c13cf6000612760a15
|
a36df1ec0c5bfb81e09479011d03d9c18e595400
|
/man/getInfoTabela.Rd
|
1644b10b6837fa5088d4483659521fe15381783e
|
[] |
no_license
|
vilaralmeida/qualificajampa
|
63eb893c87749e0d87bc7ea214077648148b5fb6
|
746b82c493aecdb80f5368a86f6109126fcde3ef
|
refs/heads/master
| 2021-04-28T16:04:53.441409
| 2018-02-19T23:23:42
| 2018-02-19T23:23:42
| 122,005,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 641
|
rd
|
getInfoTabela.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getInfoTabela.R
\name{getInfoTabela}
\alias{getInfoTabela}
\title{get Info Tabela}
\usage{
getInfoTabela(tabela = "pessoal")
}
\arguments{
\item{tabela}{Qual Tabela terá a Informação Recuperada.
Opções: pessoal, publicidade, receitas, despesas, empenho, receitasdespesasextras, convenios, propostas}
}
\value{
Informacoes sobre a tabela
}
\description{
Retorna Informacoes Sobre os Campos dos Dados Abertos
Disponivel em: transparencia.joaopessoa.pb.gov.br/download
}
\examples{
getInfoTabela() # Retorna Colunas e Tipo de Cada coluna no formato data.frame
}
|
ef8e9fcf6e722139e0c0904a96e7145f262090ec
|
2ff0e105ced702c9ec86e8cdad170e9b4e4e22b6
|
/XGB.r
|
848f8e34897dccfa78b46c97e632ee04f363640f
|
[] |
no_license
|
MaheshParamati/MachineLearning-Projects
|
e5f3b20118a3681e1541c7b7acbc13b405fde04c
|
fa8e09d9f57f3b1bf1663473b02e0631ec52d481
|
refs/heads/master
| 2021-01-17T15:51:25.800660
| 2017-03-06T19:55:26
| 2017-03-06T19:55:26
| 84,114,852
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,694
|
r
|
XGB.r
|
# ---------------------------------------------------------------------------
# Water-pump status classifier trained with XGBoost: load data, engineer a
# "years in operation" feature, drop redundant columns, factor categoricals,
# and split into train / validation / test.
# NOTE(review): setwd() to an absolute path makes the script machine-specific;
# prefer running from the project root instead.
# ---------------------------------------------------------------------------
setwd("C:/Workspace/")
traindata <- read.csv("traindata.csv", header = T) #Read Train Data
labeldata <- read.csv("label.csv", header = T) #Read Train Data Labels
testdata <- read.csv("testdata.csv", header = T) #Read Test Data
traindata$status_group <- labeldata$status_group #Add Train Data Labels to Actual Data
#Creating a New attribute called 'years in operation(yip)'
tmp <- format(as.Date(traindata$date_recorded, '%Y-%m-%d'),'%Y')
tmp <- as.numeric(tmp)
traindata$yip <- tmp - traindata$construction_year
# Rows with construction_year == 0 yield a huge bogus yip; reset those to 0.
traindata$yip[traindata$yip > 2000] <- 0
#Removing Attributes-based on redundancy and logical reasoning
traindata$num_private <- NULL
traindata$wpt_name <- NULL
traindata$subvillage <- NULL
traindata$region_code <- NULL
traindata$region <- NULL
traindata$ward <- NULL
traindata$recorded_by <- NULL
traindata$scheme_name <- NULL
traindata$permit <- NULL
traindata$extraction_type <- NULL
traindata$extraction_type_class <- NULL
traindata$management_group <- NULL
traindata$quality_group <- NULL
traindata$quantity_group <- NULL
traindata$waterpoint_type_group <- NULL
traindata$source_type <- NULL
# traindata$latitude <- NULL
# traindata$longitude <- NULL
#traindata$funder <- NULL
#traindata$installer <- NULL
traindata$date_recorded <- NULL
#factoring attributes
traindata$basin <- as.factor(traindata$basin)
#traindata$region <- as.factor(traindata$region)
traindata$public_meeting <- as.factor(traindata$public_meeting)
traindata$scheme_management <- as.factor(traindata$scheme_management)
traindata$extraction_type_group <- as.factor(traindata$extraction_type_group)
traindata$management <- as.factor(traindata$management)
traindata$payment <- as.factor(traindata$payment)
traindata$payment_type <- as.factor(traindata$payment_type)
traindata$water_quality <- as.factor(traindata$water_quality)
traindata$waterpoint_type <- as.factor(traindata$waterpoint_type)
traindata$quantity <- as.factor(traindata$quantity)
traindata$source <- as.factor(traindata$source)
traindata$source_class <- as.factor(traindata$source_class)
traindata$status_group <- as.factor(traindata$status_group)
#Removing NAs from attribute
# Missing public_meeting values are folded into an explicit "" level.
traindata$public_meeting <- factor(traindata$public_meeting, levels = c("FALSE","TRUE",""))
traindata$public_meeting[is.na(traindata$public_meeting)] <- ""
#Creating New attribute called 'YIP' for testdata
tmp1 <- format(as.Date(testdata$date_recorded, '%Y-%m-%d'),'%Y')
tmp1 <- as.numeric(tmp1)
testdata$yip <- tmp1 - testdata$construction_year
testdata$yip[testdata$yip > 2000] <- 0
#Removing redundant and unimportant attributes
testdata$num_private <- NULL
testdata$wpt_name <- NULL
testdata$subvillage <- NULL
testdata$region_code <- NULL
testdata$region <- NULL
testdata$ward <- NULL
testdata$recorded_by <- NULL
testdata$scheme_name <- NULL
testdata$permit <- NULL
testdata$extraction_type <- NULL
testdata$extraction_type_class <- NULL
testdata$management_group <- NULL
testdata$quality_group <- NULL
testdata$quantity_group <- NULL
testdata$waterpoint_type_group <- NULL
testdata$source_type <- NULL
# testdata$latitude <- NULL
# testdata$longitude <- NULL
#testdata$funder <- NULL
#testdata$installer <- NULL
testdata$date_recorded <- NULL
testdata$public_meeting <- casefold(testdata$public_meeting, upper = TRUE)
testdata$public_meeting <- as.factor(testdata$public_meeting)
# NOTE(review): assigning levels() like this relabels test factor codes by
# position, not by matching label -- confirm the level orders actually align.
levels(testdata$scheme_management) <- levels(traindata$scheme_management)
#Diving the training data into Training and Validation Set
labeldata <- traindata$status_group
labeldata <- as.numeric(labeldata)
TL <- labeldata[1:53460]
VL <- labeldata[53461:59400]
traindata <- traindata[-grep('status_group',colnames(traindata))]
validdata <- traindata[53461:59400,] #validation set-10% of data
traindata <- traindata[1:53460,] #training set-90% of data
alldata <- rbind(traindata, validdata,testdata) #Combining all the data
# Shift labels to 0-based codes as required by xgboost's multi:softmax.
TL <- TL - 1
VL <- VL - 1
# library(Matrix)
# install.packages("chron", dependencies = T)
# require(xgboost)
#Converting all the data to numeric and factoring levels
# Every column of the combined frame must be numeric before it can be turned
# into the matrix fed to xgb.DMatrix below. Character columns are encoded via
# an intermediate factor; factor columns map directly to their integer codes.
cols <- names(alldata)
for (i in cols) {
  if (inherits(alldata[[i]], "character")) {
    lv <- unique(alldata[[i]])
    # BUG FIX: the original called factor(D[[i]], ...) but no object `D`
    # exists anywhere in this script -- the column being encoded is
    # alldata[[i]] itself.
    alldata[[i]] <- as.numeric(factor(alldata[[i]], levels = lv))
  }
  if (inherits(alldata[[i]], "factor")) {
    alldata[[i]] <- as.numeric(alldata[[i]])
  }
}
#Subsetting the data
# Drop identifier columns and coerce everything to a plain numeric matrix.
alldata.xgb <- subset(alldata, select = c(-district_code,-id))
alldata.xgb <- as.matrix(as.data.frame(lapply(alldata.xgb,as.numeric)))
#Breaking the dataset again
# Same fixed row ranges as the earlier train/validation/test split.
train.xgb <- alldata.xgb[1:53460,]
valid.xgb <- alldata.xgb[53461:59400,]
test.xgb <- alldata.xgb[59401:74250,]
#Converting the data into DMatrix format
newtrain <- xgb.DMatrix(train.xgb, label = TL)
newvalid <- xgb.DMatrix(valid.xgb, label = VL)
newtest <- xgb.DMatrix(test.xgb)
print("hello")
accuracies <- 0
# A single seed (9) is run here; the loop skeleton allows trying more seeds.
for(i in 9:9){
set.seed(i)
print(i)
#set.seed(2*i+1)
#list of parameters for Gradient Boosting
param.xgb <- list(objective = "multi:softmax",eval_metric = "merror",num_class = 3,
booster = "gbtree",eta = 0.2,subsample = 0.7,colsample_bytree = 0.4,
max_depth = 14)
print("hello")
# 10-fold CV with early stopping; results used to pick nrounds below.
results <- xgb.cv(params = param.xgb, newtrain, nrounds = 200, nfold = 10,
early.stop.round = 20,maximize = FALSE, print.every.n = 10)
#Creating the Gradient Boosting Model
model.xgb <- xgb.train(data = newtrain, param.xgb, nrounds = 38, watchlist = list(valid = newvalid, train = newtrain),
nfold = 10, early.stop.round = 20, print.every.n = 10,
maximize = FALSE,save_name = "model.xgb")
#early.stop.round = 20,
#Testing the model against Validation Set
pred.valid <- predict(model.xgb, newvalid)
output.valid <- data.frame(id = validdata$id, status_group = pred.valid)
# Decode the 0/1/2 class codes back into their label strings.
output.valid$status_group[output.valid$status_group == 0] = "functional";
output.valid$status_group[output.valid$status_group == 1] = "functional needs repair";
output.valid$status_group[output.valid$status_group == 2] = "non functional";
VL[VL == 0] = "functional"
VL[VL == 1] = "functional needs repair"
VL[VL == 2] = "non functional"
# Validation accuracy.
print(mean(output.valid$status_group == VL))
#accuracies[i] <- mean(output.valid$status_group == VL)
}
#Testing the model against test data
pred.test <- predict(model.xgb, newtest)
output.test <- data.frame(id = testdata$id, status_group = pred.test)
output.test$status_group[output.test$status_group == 0] = "functional";
output.test$status_group[output.test$status_group == 1] = "functional needs repair";
output.test$status_group[output.test$status_group == 2] = "non functional";
print("hello")
write.csv(output.test, file = "submissionSeed9_1.csv", row.names = F)
|
37dbd7a4912507c900f0d382881a571699711b63
|
5900c665320c9d3d2075820471bf48b0d5998cb5
|
/Chap6_ClusterAnalysis/cluster_analysis.R
|
e5869515abcccc5227acb4e4dd907b5715d995ea
|
[] |
no_license
|
LaguaFluc/Data-Mining-and-Modeling
|
756e3e3f1c60f55d56395b396c4636e6c7ce90f2
|
0bf0ad1ff4eae70fca92e446612d35ca99ff96fa
|
refs/heads/main
| 2023-04-03T05:57:03.759486
| 2021-04-14T00:43:14
| 2021-04-14T00:43:14
| 353,004,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,160
|
r
|
cluster_analysis.R
|
# ----------- 1. Load the data and standardise -----------
iris.4 = iris[, 1:4]
iris.4 = scale(iris.4, center=T, scale=T)
# ----------- 2. Cluster the first four columns -----------
## k-means clustering
K <- 4
cluster.iris <- kmeans(iris.4,centers = K,iter.max = 99,nstart=25)
# Cluster sizes (printed when run interactively).
cluster.iris$size
# ----------- 3. Plot, coloured by cluster assignment -----------
plot(iris.4, col=cluster.iris$cluster)
# Draw two scatter plots (columns 1-2 "sepal", columns 3-4 "petal") of the
# input, coloured by k-means cluster assignment, and return the input data.
#
# Args:
#   data: numeric matrix/data frame with at least four columns (iris layout).
#   k:    number of clusters requested from kmeans.
# Returns:
#   The input `data`, unchanged; the plots are a side effect.
plot_data = function(data, k=4){
  # BUG FIX: `centers` was hard-coded to 5, silently ignoring the k argument
  # (and contradicting its default of 4).
  cluster.data <- kmeans(data, centers = k, iter.max = 99, nstart = 25)
  print(length(cluster.data$cluster))
  sepal = data[, 1:2]
  petal = data[, 3:4]
  color = cluster.data$cluster
  plot(sepal, xlab=colnames(sepal)[1], ylab=colnames(sepal)[2],
       main=paste("Scatter of " , colnames(sepal)[1], "and", colnames(sepal)[2]),
       col=color)
  plot(petal, xlab=colnames(petal)[1], ylab=colnames(petal)[2],
       main=paste("Scatter of " , colnames(petal)[1] , "and", colnames(petal)[2]),
       col=color)
  return(data)
}
# Draw the sepal/petal scatter plots for the scaled iris data.
plot_data(iris.4)
# ----------- 4. Find the best number of clusters (pseudo-F statistic) -----------
N <- dim(iris.4)[1]
pseudo_li = seq(2, 8, 1)
i = 1
for (k in 2:8){
clustercars <- kmeans(iris.4,centers = k,iter.max = 99,nstart=25)
# Pseudo-F: between-cluster variance over within-cluster variance,
# each scaled by its degrees of freedom.
pseudo = (clustercars$betweenss / (k - 1)) / (clustercars$tot.withinss / (N - k))
pseudo_li[i] = pseudo
print(paste(k, ": ", pseudo))
i = i + 1
}
plot(seq(2, 8, 1), pseudo_li)
# ----------- 5. Multidimensional scaling (MDS) for a clearer 2-D visualisation
library(ggplot2)
# 1. k-means cluster the full-dimensional (scaled) iris variables
# 2. project the clustered observations down to 2 dimensions via classical MDS
# 3. plot the 2-D points, coloured by cluster assignment
cluster.data <- kmeans(iris.4, centers = 3, iter.max = 99,nstart=25)
color = cluster.data$cluster
# BUG FIX: the original used `as.matrix(data[, 1:4])`, but no object named
# `data` exists here (`data` resolves to base R's data() function). The
# distance matrix must be built from iris.4, exactly as section 7 does below.
m.data = as.matrix(iris.4)
dis.data = dist(m.data)
MD = cmdscale(dis.data, k=2)
p <- ggplot(data=as.data.frame(MD), mapping=aes(x=MD[, 1], y=MD[, 2]))
d <- p + geom_point(aes(colour=color)) + ggtitle(label="2-D points after Multidimensional scaling analysis") + scale_color_gradientn(colours =rainbow(4))
d
# --------- 6. Hierarchical clustering ---------
help("hclust")
tree <- hclust(dist(iris.4),method = "average")
# method="average" selects average-linkage clustering.
# Plot the dendrogram.
plot(tree)
# Cluster assignments when the tree is cut into 2 groups.
out <- cutree(tree,k = 2)
out
table(out) # tabulate cluster membership counts
# ----------- 7. MDS view of the hierarchical clustering result -----------
library(ggplot2)
m.data = as.matrix(iris.4)
dis.data = dist(m.data)
MD = cmdscale(dis.data, k=2)
color = out
p <- ggplot(data=as.data.frame(MD), mapping=aes(x=MD[, 1], y=MD[, 2]))
d <- p + geom_point(aes(colour=color)) + ggtitle(label="2-D points after Multidimensional scaling analysis")+ scale_color_gradientn(colours =rainbow(4))
d
# ----------- Use NbClust to cluster; effectively searches the optimal K -----------
library(NbClust)
# Load the NbClust package, which provides the NbClust function.
help(NbClust)
nbcluster <- NbClust(iris.4,method = "average")
# method = "average" means average-linkage hierarchical clustering.
# Inspect the result components returned by NbClust.
names(nbcluster)
# Cluster membership of each observation under the best K across all indices.
nbcluster$Best.partition
# Deprecated (author's note): the helper below is superseded.
# DEPRECATED helper: k-means the data, append the cluster labels, project to
# 2-D with classical MDS, and build a ggplot of the projection coloured by
# cluster; the ggplot chain is the function's (implicit) return value.
# NOTE(review): `data[["cluster"]] = ...` requires a data frame -- this fails
# when called with a matrix such as iris.4; confirm intended input type.
multi_scale_plot <- function(data, k=3){
cluster.data <- kmeans(data,centers = k, iter.max = 99,nstart=25)
data[["cluster"]] = cluster.data$cluster
m.data = as.matrix(data[, 1:4])
dis.data = dist(m.data)
MD = cmdscale(dis.data, k=2)
# p <- ggplot(data=as.data.frame(MD), mapping=aes(x=MD[, 1], y=MD[, 2]))
# d <- p + geom_point(aes(colour=data$cluster))
ggplot(data=as.data.frame(MD), mapping=aes(x=MD[, 1], y=MD[, 2])) + geom_point(aes(colour=data$cluster))
# qplot(MD[, 1], MD[, 2], data = as.data.frame(MD), colour = data$cluster)
# plot(MD[, 1], MD[, 2])
# plot(MD, col=data$cluster,
# main="2-D points after Multidimensional scaling analysis")
# return(data)
}
iris.cluster = multi_scale_plot(iris.4)
# Find the best k (same pseudo-F criterion as section 4; values only printed).
help("kmeans")
for (k in 2:8){
clustercars <- kmeans(iris.4,centers = k,iter.max = 99,nstart=25)
pseudo = (clustercars$betweenss / (k - 1)) / (clustercars$tot.withinss / (N - k))
print(pseudo)
}
|
9a39952d4a70f353f226afaf4801cbdc1189df0c
|
6a811d1fbe579e346b9813857f9059be010a67a7
|
/scripts/tables/loop.R
|
20c25d8a93fd4c63b7c8ca1620dfb67ffc509d28
|
[] |
no_license
|
HongyuanWu/rkipcreg
|
317634c4d3b500ad23cf39c6f50843e889237692
|
c50b5e3d0d6576c268fe160ac37f0db6e3cc3f11
|
refs/heads/master
| 2021-10-10T23:28:30.353773
| 2019-01-19T04:04:05
| 2019-01-19T04:04:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 842
|
r
|
loop.R
|
# loading required libraries
library(tidyverse)
library(xtable)
# load data
# Provides the `loop` data frame used below.
load('data/tf.rda')
# generate table
# Summarise correlations per (tf, feature) pair, collapse contributing TCGA
# studies into one string, and print the result as a LaTeX table.
# NOTE(review): "regulaiton" in the caption string is a typo that will appear
# verbatim in the LaTeX output -- fix upstream if the caption text may change.
loop %>%
mutate(study = str_split(study, '_', simplify = TRUE)[, 1]) %>%
group_by(tf, feature) %>%
summarise(n = n(),
ave = mean(cor), sd = sd(cor),
study = paste(study, collapse = ', ')) %>%
setNames(c('TF', 'Gene', 'N', 'Average', 'SD', 'TCGA Studies')) %>%
xtable(caption = '\\textbf{Positive feedback regulaiton between common transcription regulators.}',
align = 'cllcccp{.25\\textwidth}',
label = 'tab:loop') %>%
print(include.rownames = FALSE,
booktabs = TRUE,
caption.placement = 'top',
table.placement = 'H',
sanitize.text.function = identity,
comment = FALSE,
file = paste('output/tables', 'loop.tex', sep = '/'))
|
4a7cfbdce5dd512dd2b1e64c9c421e6291ea3b82
|
880f4265c481c2e9364341003023ea3f6ebf9ec2
|
/R/farforecast.R
|
cef22a80a52178f0349975ecd275fb9499c02b77
|
[] |
no_license
|
cran/ftsa
|
a2205b07d06d14ea71b354123544fe66e6ef66ea
|
65415c7011cbb1bdd6d5b9de5895d2061d3fb471
|
refs/heads/master
| 2023-06-09T10:33:28.167641
| 2023-06-01T16:00:02
| 2023-06-01T16:00:02
| 17,696,159
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,716
|
r
|
farforecast.R
|
# Forecast a functional time series: fit an FTS model (ftsm) and jointly model
# its coefficient series with a vector autoregression from the `vars` package,
# then reconstruct curve forecasts from the predicted coefficients.
#
# Args:
#   object:     functional time series (provides $y, $x, $xname).
#   h:          forecast horizon (number of steps ahead).
#   var_type:   deterministic-term type passed to vars::VAR.
#   Dmax_value: maximum D considered by method.FPE when selecting the order.
#   Pmax_value: maximum VAR lag considered by method.FPE.
#   level:      interval coverage in percent (default 80).
#   PI:         if TRUE, also compute prediction intervals from empirical
#               quantiles of in-sample one-step-ahead residuals.
# Returns:
#   list(point_fore, order_select) and, when PI = TRUE, also PI_lb / PI_ub.
farforecast <- function(object, h = 10, var_type = "const", Dmax_value,
Pmax_value, level = 80, PI = FALSE)
{
# order_select[1] = VAR lag, order_select[2] = number of FTS components.
order_select = method.FPE(object = object, D = Dmax_value, var_type = var_type, Pmax = Pmax_value)
order = order_select[2]
ftsm_object = ftsm(y = object, order = order)
if(requireNamespace("vars", quietly = TRUE))
{
# Columns 2:(order+1) of $coeff are the component scores (column 1 is the mean).
var_pred = predict(vars::VAR(ftsm_object$coeff[, 2:(order + 1)], p = order_select[1],
type = var_type), n.ahead = h, ci = level/100)
}
else
{
stop("Please install vars")
}
qconf <- qnorm(0.5 + level/200)
meanfcast <- varfcast <- matrix(NA, nrow = h, ncol = order)
for(i in 1:order)
{
# fcst columns: [,1] point forecast, [,2] lower, [,3] upper bound.
var_fit_pred = var_pred$fcst[[i]]
meanfcast[, i] = var_fit_pred[, 1]
# Back out the forecast variance from the interval half-width.
varfcast[, i] = ((var_fit_pred[, 3] - var_fit_pred[, 2])/(2 * qconf))^2
}
# Reconstruct curves: basis functions times forecast scores, plus mean curve.
point_fore = ftsm_object$basis[, 2:(order + 1)] %*% t(meanfcast) + ftsm_object$basis[, 1]
x = as.numeric(rownames(object$y))
rownames(point_fore) = x
colnames(point_fore) = 1:h
point_fore_fts = fts(x, point_fore, yname = "Forecasts", xname = object$xname)
if (PI == TRUE)
{
n.curve = ncol(object$y)
# Burn-in length for the expanding-window in-sample forecasts.
L = max(round(n.curve/5), order)
insample_fore = matrix(NA, nrow(object$y), (ncol(object$y) - L))
for(i in 1:(ncol(object$y) - L))
{
# Refit on the first L+i-1 curves and forecast one step ahead.
dum = ftsm(fts(object$x, object$y[, 1:(L + i - 1)]), order = order)
dum_coeff = dum$coeff[, 2:(order + 1)]
var_pred = predict(vars::VAR(dum_coeff, lag.max = nrow(dum_coeff) - 2, type = var_type), n.ahead = 1)
meanfcast = matrix(NA, nrow = 1, ncol = order)
for(j in 1:order)
{
var_fit_pred = var_pred$fcst[[j]]
meanfcast[, j] = var_fit_pred[, 1]
}
insample_fore[, i] = dum$basis[, 2:(order + 1)] %*% t(meanfcast) + dum$basis[, 1]
}
# Interval bounds = point forecast + empirical residual quantiles per point.
insample_test = object$y[, (L + 1):ncol(object$y)]
resi = insample_test - insample_fore
lb_resi = apply(resi, 1, quantile, (100 - level)/200, na.rm = TRUE)
ub_resi = apply(resi, 1, quantile, (100 + level)/200, na.rm = TRUE)
lb = point_fore + lb_resi
ub = point_fore + ub_resi
colnames(lb) = colnames(ub) = 1:h
PI_lb = fts(x, lb, yname = "Lower bound", xname = object$xname)
PI_ub = fts(x, ub, yname = "Upper bound", xname = object$xname)
return(list(point_fore = point_fore_fts, order_select = order_select,
PI_lb = PI_lb, PI_ub = PI_ub))
}
else
{
return(list(point_fore = point_fore_fts, order_select = order_select))
}
}
|
5be1d5bd6962c542684b95e2972d139e0dd2397b
|
cbcd97b5a92511dd94390e0fed7aa6de4e14f45b
|
/cachematrix.R
|
8d3de0b4558ceab4276d3320196c73e046aa2838
|
[] |
no_license
|
mcm987/ProgrammingAssignment2
|
6ea331609830c5078048442f15c5628eccccbc34
|
a1ecff17d013eae7560015f234dec90e27fe0aab
|
refs/heads/master
| 2020-12-24T14:01:10.132820
| 2016-02-17T12:04:14
| 2016-02-17T12:04:14
| 51,905,042
| 0
| 0
| null | 2016-02-17T07:55:38
| 2016-02-17T07:55:37
| null |
UTF-8
|
R
| false
| false
| 2,202
|
r
|
cachematrix.R
|
## These two functions are build analog to the example "Caching the Mean of a
## Vector" in the assignment description. The function "makeCacheMatrix" creates
## a special "matrix" object and provides a list with functions to 1) set the
## value of the matrix, 2) get the value of the matrix, 3) set the value of
## the inverse matrix, 4) get the value of the inverse matrix. The function
## "cacheSolve" checks, if the inverse matrix of the input matrix has already
## been calculated. If this is the case, the inverse matrix is return from the
## cache. If the inverse matrix has not been calculated yet, "cacheSolve"
## calculates the inverse matrix.
## How does the function "makeCacheMatrix" work? When we start out NULL is
## assigned to inv (for "inverse matrix"). If we call "set", a new matrix "y"
## is assigned to our working variable "x" and inv is set to NULL with the
## superassignment operator ("<<-"). With "get" we can get our matrix printed,
## with "setinv" we solve for our inverse matrix and with "getinv" we display
## the inverse matrix.
## Build a caching wrapper around a matrix: the closure stores the matrix and
## a lazily-filled inverse, and exposes four accessors. Replacing the matrix
## via set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinv = function(solve) inv <<- solve,
    getinv = function() inv
  )
}
## How does the function "cacheSolve" work? The inverse matrix of matrix x is
## assigned to the variable "inv" (for "inverse matrix"). If "inv" != NULL, then
## cached data is returned (by "return(inv)"). If "inv" equals NULL (because
## we just started out or because we set a new matrix using "set" in the function
## "makeCacheMatrix", then the matrix data is called into the cacheSolve function
## using x$get() and then the inverse matrix is computed and set using the
## superassignment operator.
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## The inverse is computed with solve() only on the first call; subsequent
## calls announce "getting cached data" and serve the stored copy.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
3de39b2cbc3fb0b6e86e159f4f59798fa4702228
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/samplingbook/examples/Sprop.Rd.R
|
9d1bc05ae5f7f46f2de6aec57ebf94dfb564a707
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 783
|
r
|
Sprop.Rd.R
|
# Auto-extracted example script for samplingbook::Sprop (sampling proportion
# estimation); each call prints a proportion estimate with confidence bounds.
library(samplingbook)
### Name: Sprop
### Title: Sampling Proportion Estimation
### Aliases: Sprop
### ** Examples
# 1) Survey in company to upgrade office climate
Sprop(m=45, n=100, N=300)
Sprop(m=2, n=100, N=300)
# 2) German opinion poll for 03/07/09 with
# (http://www.wahlrecht.de/umfragen/politbarometer.htm)
# a) 302 of 1206 respondents who would elect SPD.
# b) 133 of 1206 respondents who would elect the Greens.
Sprop(m=302, n=1206, N=Inf)
Sprop(m=133, n=1206, N=Inf)
# 3) Rare disease of animals (sample size n=500 of N=10.000 animals, one infection)
# for 95% one sided confidence level use level=0.9
Sprop(m=1, n=500, N=10000, level=0.9)
# 4) call with data vector y
y <- c(0,0,1,0,1,0,0,0,1,1,0,0,1)
Sprop(y=y, N=200)
# is the same as
Sprop(m=5, n=13, N=200)
|
04c41c3bd58284eb0d1a9210a9dd53d73c342062
|
3dfcad5e4ca29823a6e7899dcd22aaf7f5df971c
|
/man/getOutputDataSet.AromaTransform.Rd
|
31473dfd3b14ba691f69ab1d40e6d5bdd48b14de
|
[] |
no_license
|
HenrikBengtsson/aroma.core
|
f22c931029acf55f3ad2fdb6eb3bc2f0d2ba04e4
|
1bf20e2b09f4b8c0ca945dfb26fdf1902c187109
|
refs/heads/master
| 2022-12-01T09:01:49.295554
| 2022-11-15T18:29:17
| 2022-11-15T18:29:52
| 20,845,682
| 2
| 3
| null | 2018-04-21T02:06:48
| 2014-06-15T01:32:43
|
R
|
UTF-8
|
R
| false
| false
| 1,847
|
rd
|
getOutputDataSet.AromaTransform.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% AromaTransform.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{getOutputDataSet.AromaTransform}
\alias{getOutputDataSet.AromaTransform}
\alias{AromaTransform.getOutputDataSet}
\alias{getOutputDataSet,AromaTransform-method}
\title{Gets the transformed data set}
\description{
Gets the transformed data set, if processed.
}
\usage{
\method{getOutputDataSet}{AromaTransform}(this, onMissing=c("dropall", "drop", "NA", "error"), ...,
incomplete=FALSE, className=NULL, force=FALSE, verbose=FALSE)
}
\arguments{
\item{...}{Arguments passed to static method \code{byPath()} of
the class of the input \code{\link{AromaMicroarrayDataSet}}.}
\item{onMissing}{A \code{\link[base]{character}} string specifying how non-processed files
should be returned.
If \code{"drop"}, they are ignored and not part of the returned
data set.
If \code{"dropall"}, \code{\link[base]{NULL}} is returned unless all files are processed.
If \code{"NA"}, they are represented as a "missing" file.
If \code{"error"}, they are not accepted and an exception is thrown.
}
\item{incomplete}{[DEPRECATED] If the output data set is incomplete,
then \code{\link[base]{NULL}} is returned unless \code{incomplete} is \code{\link[base:logical]{TRUE}}.}
\item{force}{If \code{\link[base:logical]{TRUE}}, any in-memory cached results are ignored.}
\item{verbose}{See \code{\link[R.utils]{Verbose}}.}
}
\value{
Returns an \code{\link{AromaMicroarrayDataSet}} or \code{\link[base]{NULL}}.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{AromaTransform}}.
}
\keyword{internal}
\keyword{methods}
|
a6b0cc7578a07abdd1b93695b38d1c898398ba9f
|
ded24dcc44f53ec1adcf336ed211e4b8c4076c70
|
/dataset_labelselect.R
|
5c72cf5142262e7970a5e7546a81ce1d18b4ce0b
|
[] |
no_license
|
hying99/capstry
|
76ecd155d3a3a11a1e9e512249a34b3835235140
|
e7288ab030e72cdee53bc882f9727d798aa5d2a7
|
refs/heads/master
| 2023-07-04T07:46:47.850022
| 2021-08-08T08:42:09
| 2021-08-08T08:42:09
| 393,864,850
| 1
| 0
| null | 2021-08-08T08:42:10
| 2021-08-08T05:15:42
|
Java
|
UTF-8
|
R
| false
| false
| 474
|
r
|
dataset_labelselect.R
|
###select.labels
# Labelselect: for each sample (row) of a 0/1 membership table, collect the
# names of the labels (columns) flagged with 1.
#
# Args:
#   select.table: matrix or data.frame with samples as rows and labels as
#                 columns; an entry of 1 marks that the sample has the label.
# Returns:
#   A named list with one element per sample holding a character vector of
#   that sample's label names; samples with no labels stay NULL (matching the
#   original loop-based behaviour).
Labelselect <- function(select.table)
{
  samplenames <- row.names(select.table)
  labelsnames <- colnames(select.table)
  select.labels <- vector("list", length = nrow(select.table))
  names(select.labels) <- samplenames
  # Vectorised over columns: replaces the original O(rows*cols) inner loop
  # that grew each vector with c(); which() also tolerates NA entries.
  for (i in seq_len(nrow(select.table))) {
    picked <- labelsnames[which(select.table[i, ] == 1)]
    if (length(picked) > 0) {
      select.labels[[samplenames[i]]] <- picked
    }
  }
  return(select.labels)
}
|
404434dd1e118beebaf4336824cfb8b7ae45b15d
|
3c9734e0a0506e25dbb0d34802c3ef3eb27caf47
|
/tests/testthat/test_population.R
|
0cedd9637fe540adcb6c265a2dc2cfda0e32ada3
|
[] |
no_license
|
Jorisvdoorn/lab5group8
|
3a1f7368315af62e18442f35bbcd62d44c4f9a61
|
366d5a9b5dad18ab55cdd8e84cc449a75a601004
|
refs/heads/master
| 2020-08-03T17:47:15.162998
| 2019-10-07T00:19:11
| 2019-10-07T00:19:11
| 211,832,865
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
r
|
test_population.R
|
context("population")
test_that("population rejects errounous input", {
expect_error(population_mod <- population$new("Municipality"))
})
test_that("class is correct", {
population_mod <- population$new()
expect_true(class(population_mod)[1] == "population")
})
|
4f071261fd70f24ea51a584fbe55a4e6891fb7c0
|
4adbd5b00d14eba6b814f0d52bddbec57d623fed
|
/harvest-cloud/scripts/effect_origin/linear_hypotheses.R
|
a2c456db3a247d650749e865eacf5f6d6279bb00
|
[
"MIT"
] |
permissive
|
PerinatalLab/metaGWAS
|
809f6ebaec7a6321f3fc6b4ed11a8f732c71a34d
|
494ef021b0d17566389f6f3716d1e09f15e50383
|
refs/heads/master
| 2023-04-08T08:50:31.042826
| 2022-11-10T21:14:33
| 2022-11-10T21:14:33
| 267,803,075
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,671
|
r
|
linear_hypotheses.R
|
library(data.table)
library(dplyr)
library(tidyr)
library(car)
# Transpose a haplotype table (variants in rows; columns 5+ hold one dosage
# column per pregnancy ID) into one row per pregnancy, with one column per
# variant named "chr:pos:ref:eff" plus a PREG_ID column.
format_haps <- function(hap) {
  variant_ids <- paste(hap$chr, hap$pos, hap$ref, hap$eff, sep = ":")
  preg_ids <- names(hap)[-(1:4)]
  flipped <- as.data.frame(t(hap[, -(1:4)]))
  names(flipped) <- variant_ids
  flipped$PREG_ID <- preg_ids
  flipped
}
# --- Load the four phased haplotype tables and the phenotype file (snakemake I/O) ---
h1= fread(snakemake@input[[1]])
h2= fread(snakemake@input[[2]])
h3= fread(snakemake@input[[3]])
h4= fread(snakemake@input[[4]])
# Reshape each table to one row per pregnancy, one column per variant.
h1= format_haps(h1)
h2= format_haps(h2)
h3= format_haps(h3)
h4= format_haps(h4)
pheno= fread(snakemake@input[[5]])
# Keep spontaneous deliveries only.
pheno= filter(pheno, spont== 1)
pheno$PREG_ID= as.character(pheno$PREG_ID)
print(nrow(pheno))
# Write the tab-separated header row of the results file once, up front.
write( paste('snp', 'n', 'freq_h1', 'freq_h2', 'freq_h3', 'beta_h1', 'se_h1', 'pvalue_h1', 'beta_h2', 'se_h2', 'pvalue_h2', 'beta_h3', 'se_h3', 'pvalue_h3', 'pval_maternal', 'pval_fetal', 'pval_poe', 'pval_h2_vs_h3', sep= '\t'), snakemake@output[[1]], append= T)
# For each variant column (all but the trailing PREG_ID): regress gestational
# duration on the h1-h3 haplotype dosages plus covariates, then test linear
# hypotheses on the haplotype coefficients, appending one result row per SNP.
results_list= lapply(names(h1)[1:(length(names(h1))-1)], function(snp) {
if (grepl('X', snp)){
# Chromosome X variants are skipped (see message).
print('Not sure how to handle chromosome X.')
} else {
h1_temp= h1[, c('PREG_ID', snp)]
h2_temp= h2[, c('PREG_ID', snp)]
h3_temp= h3[, c('PREG_ID', snp)]
h4_temp= h4[, c('PREG_ID', snp)]
names(h1_temp)= c('PREG_ID', 'h1')
names(h2_temp)= c('PREG_ID', 'h2')
names(h3_temp)= c('PREG_ID', 'h3')
names(h4_temp)= c('PREG_ID', 'h4')
# Note: h4 is prepared but not joined into the model data below.
d= inner_join(pheno, h1_temp, by= 'PREG_ID') %>% inner_join(., h2_temp, by= 'PREG_ID') %>% inner_join(., h3_temp, by= 'PREG_ID')
m1= lm(SVLEN_UL_DG~ h1 + h2 + h3 + PARITY + cohort + PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10, d)
n= length(resid(m1))
# Rows 2-5 of the coefficient table are h1, h2, h3 and the first covariate;
# only the first three rows are read below.
coefs= summary(m1)$coefficients[2:5,]
beta_h1= coefs[1,1]
se_h1= coefs[1,2]
pvalue_h1= coefs[1,4]
beta_h2= coefs[2,1]
se_h2= coefs[2,2]
pvalue_h2= coefs[2,4]
beta_h3= coefs[3,1]
se_h3= coefs[3,2]
pvalue_h3= coefs[3,4]
freq_h1= mean(d$h1, na.rm= T)
freq_h2= mean(d$h2, na.rm= T)
freq_h3= mean(d$h3, na.rm= T)
# tryCatch guards return NA whenever a hypothesis test warns or errors.
pval_maternal= tryCatch(linearHypothesis(m1, 'h1 + h2 = h3')[['Pr(>F)']][2], warning= function(w){NA}, error= function(w) {NA})
pval_fetal= tryCatch(linearHypothesis(m1, 'h1 + h3 = h2')[['Pr(>F)']][2], warning= function(w){NA}, error= function(w) {NA})
pval_poe= tryCatch(linearHypothesis(m1, 'h1 - h2 = h3')[['Pr(>F)']][2], warning= function(w){NA}, error= function(w) {NA})
pval_h2_vs_h3= tryCatch(linearHypothesis(m1, 'h1 = h2')[['Pr(>F)']][2], warning= function(w){NA}, error= function(w) {NA})
print(pval_maternal)
results= paste(snp, n, freq_h1, freq_h2, freq_h3, beta_h1, se_h1, pvalue_h1, beta_h2, se_h2, pvalue_h2, beta_h3, se_h3, pvalue_h3, pval_maternal, pval_fetal, pval_poe, pval_h2_vs_h3, sep= '\t')
write(results, file= snakemake@output[[1]], append=TRUE)
}
}
)
|
6c41e8a488c002340396812276d448ee0628cd95
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleadmindirectoryv1.auto/man/CalendarResource.Rd
|
71324ce4ce9e2354fe279bca9f3b2f97b2c4bc22
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,119
|
rd
|
CalendarResource.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_objects.R
\name{CalendarResource}
\alias{CalendarResource}
\title{CalendarResource Object}
\usage{
CalendarResource(etags = NULL, resourceDescription = NULL,
resourceEmail = NULL, resourceId = NULL, resourceName = NULL,
resourceType = NULL)
}
\arguments{
\item{etags}{ETag of the resource}
\item{resourceDescription}{The brief description of the calendar resource}
\item{resourceEmail}{The read-only email ID for the calendar resource}
\item{resourceId}{The unique ID for the calendar resource}
\item{resourceName}{The name of the calendar resource}
\item{resourceType}{The type of the calendar resource}
}
\value{
CalendarResource object
}
\description{
CalendarResource Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
JSON template for Calendar Resource object in Directory API.
}
\seealso{
Other CalendarResource functions: \code{\link{directory.resources.calendars.insert}},
\code{\link{directory.resources.calendars.patch}},
\code{\link{directory.resources.calendars.update}}
}
|
2086c4bc68540fd781ad9352118bf391a1217955
|
251ccd25d1fa210198b8c68f465b086d3cf76797
|
/plot6.R
|
e3d3fc282dfbe131bdc9e46fc9f0de6a85bd33c8
|
[] |
no_license
|
Joe-Lindenmayer/EDA-Week-4-Project
|
0abfbf79f6b72ebf76a61f6cc56a432f68cbc56f
|
27ba350e61e42e203b7f37fa06f85b919f0bbe05
|
refs/heads/main
| 2023-01-28T04:19:01.372896
| 2020-12-11T15:06:41
| 2020-12-11T15:06:41
| 320,350,101
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,266
|
r
|
plot6.R
|
# EDA course project, plot 6: compare motor-vehicle PM2.5 emissions in
# Baltimore City vs Los Angeles County, 1999-2008.
library(ggplot2)
archiveFile <- "exdata_data_NEI_data.zip"
# Download the archive only when it is not already present locally.
if(!file.exists(archiveFile)) {
archiveURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url=archiveURL,destfile=archiveFile)
}
# Unzip only when either RDS payload is missing.
if(!(file.exists("summarySCC_PM25.rds") && file.exists("Source_Classification_Code.rds"))) {
unzip(archiveFile)
}
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Motor-vehicle sources: any SCC whose level-two label mentions "vehicle".
vehicles <- grepl("vehicle",SCC$SCC.Level.Two, ignore.case=TRUE)
vehiclesSCC <- SCC[vehicles,]$SCC
vehiclesNEI <- NEI[NEI$SCC %in% vehiclesSCC,]
# fips 24510 = Baltimore City, 06037 = Los Angeles County.
baltimoreVehiclesNEI <- subset(vehiclesNEI,fips=="24510")
baltimoreVehiclesNEI$city <- "Baltimore City"
LAVehiclesNEI <- subset(vehiclesNEI,fips=="06037")
LAVehiclesNEI$city <- "Los Angeles County"
bothNEI <- rbind(baltimoreVehiclesNEI,LAVehiclesNEI)
#for some reason I could not get theme_gray or theme_bw to work like in previous plots, used the default theme
# NOTE(review): `fill=city` in ggplot() is overridden by `aes(fill=year)` in
# geom_bar() -- confirm which fill mapping is actually intended.
ggp3 <- ggplot(bothNEI, aes(factor(year),y=Emissions, fill=city)) + geom_bar(aes(fill=year),stat="identity")+facet_grid(scales="free",space="free",.~city)+guides(fill=FALSE)+labs(x="Year", y="Total Emission (Kilo-Tons)")+labs(title="Motor Vehicle Source Emissions in Baltimore & LA in 1999-2008")
print(ggp3)
|
d6f1b8d4271691f68e40686fbf9014b914cdf5cd
|
28bef7897d90393b7c0c0f50cab0ecfa717b2627
|
/schedule_and_results.R
|
fc2a01af535f41a19f0d3add4342e80d0bdd0ee2
|
[
"CC0-1.0"
] |
permissive
|
200256845/psy6009
|
9838a939a2df1774b4d4a66fe92b6bcfeb362b6c
|
c2d4722e03bc7bdabd9afc57e5c6c8d6a203482d
|
refs/heads/master
| 2023-07-27T04:38:17.563872
| 2021-09-13T14:49:59
| 2021-09-13T14:49:59
| 403,243,211
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,159
|
r
|
schedule_and_results.R
|
# The NBA schedule and results, of basketball-reference.com, are scraped below
# for the 2016-17, 2017-18, 2018-19, 2019-20 and 2020-21 seasons
# Packages
library(rvest)
library(xml2)
library(data.table)
library(here)
library(dplyr)
library(lubridate)
library(XML)
library(stringr)
library(tidyverse)
# Note:
# The NBA box score URLs are also scraped
# (the NBA box score URLs are utilized to obtain total basic box scores
# (please see total_basic_box_scores.R for further information))
# Step one: Create the NBA schedule and results URLs
# Create a years and a months variable
# Note:
# The years and months variables are utilized for the NBA schedule and results URLs
# See for example https://www.basketball-reference.com/leagues/NBA_2017_games-october.html
# 2017 stands for the 2016-17 season
# October stands for the month
years <- c(
"2017", "2017", "2017", "2017", "2017", "2017", "2017", "2017", "2017",
"2018", "2018", "2018", "2018", "2018", "2018", "2018", "2018", "2018",
"2019", "2019", "2019", "2019", "2019", "2019", "2019", "2019", "2019",
"2020", "2020", "2020", "2020", "2020", "2020", "2020", "2020", "2020", "2020",
"2021", "2021", "2021", "2021", "2021", "2021", "2021", "2021"
)
# Note:
# We need to repeat each year multiple times as each month has a separate URL
# 2017 = 9
# 2018 = 9
# 2019 = 9
# 2020 = 10
# 2021 = 8
# Check to see if we have the correct number of years
table(years)
## years
## 2017 2018 2019 2020 2021
##    9    9    9   10    8
# We have the correct number of years, lets continue.
# 'months' must stay aligned element-for-element with 'years' above.
months <- c(
"october", "november", "december", "january", "february",
"march", "april", "may", "june",
"october", "november", "december", "january", "february",
"march", "april", "may", "june",
"october", "november", "december", "january", "february",
"march", "april", "may", "june",
"october-2019", "november", "december", "january", "february",
"march", "july", "august", "september", "october-2020",
"december", "january", "february", "march", "april", "may", "june",
"july"
)
# Note:
# Due to the COVID-19 pandemic two URLs are slightly different
# We have october-2019 and october-2020
# October = 3
# October-2019 = 1
# October-2020 = 1
# November = 4
# December = 5
# January = 5
# February = 5
# March = 5
# April = 4
# May = 4
# June = 4
# July = 2
# August = 1
# September = 1
# Check to see if we have the correct number of months
table(months)
## months
##        april       august     december     february      january         july         june
##            4            1            5            5            5            2            4
##        march          may     november      october october-2019 october-2020    september
##            5            4            4            3            1            1            1
# We have the correct number of months, lets continue.
# Create a df of months
df <- as.data.frame(months)
# Add the other URL elements
df$months <- paste0( # paste0 ensures that there are no spaces in our URLs
"https://www.basketball-reference.com/leagues/NBA_", years, # years adds in our years variable
"_games-", df$months, ".html" # df$months adds in our months
)
# Note if you look at the df you can see the NBA schedule and results URLs
# for the 2016-17, 2017-18, 2018-19, 2019-20 and 2020-21 seasons
# Step two: download the NBA schedule and results HTMLs
# Assign df$months to an object (urls)
urls <- df$months
# Download HTMLs
for (url in urls) { # loop through each URL and download the page into the working directory
  download_html(url)
}
# Note this may take some time
# If you wish to see the progress please click on the files tab
# You should be able to see each HTML, once it has been downloaded
# Build the list of downloaded HTML files once, after all downloads finish.
# (Previously this was recomputed on every loop iteration and never used.)
htmls <- list.files("...", pattern = ".html")
# Once our HTMLs are downloaded, we need to ensure that they are in the correct order
# Step three: ensure that the HTMLs remain in the predetermined order
# Create a new df with solely HTML names (utilize df)
df_htmls <- df %>%
  mutate_at("months", str_replace, "https://www.basketball-reference.com/leagues/", "")
# Assign the correctly ordered HTML names to an object
order_htmls <- df_htmls$months
# Obtain the file paths of the order_htmls object
paths <- here(order_htmls)
# Create the 45 replacement names a01.html ... a45.html; zero-padding keeps
# alphabetical order identical to the predetermined download order.
# (Replaces the previous a/b data.frame + rbind construction with sprintf.)
numbers_htmls <- sprintf("a%02d.html", seq_along(paths))
# Rename the HTMLs (and ensure that they are renamed in the correct order)
for (i in seq_along(paths)) {
  file.rename(paths[i], numbers_htmls[i])
}
# Step four: scrape the data
# Create an HTML file list
html_list <- list.files("...", pattern = ".html")
# Use the function readLines to read all text lines of the HTMLs (and assign them to an object)
# Note:
# This is needed for our NBA schedule and results tables
urltxt <- lapply(html_list, function(x) try(readLines(x)))
# Use the function read_html to read all HTMLs (and assign them to an object)
# Note:
# This is needed for our box score URLs
webpages <- lapply(html_list, function(x) try(read_html(x)))
# Parse the urltxt
doc <- htmlParse(urltxt)
# Note this may take some time
# Retrieve all <table> tags
tables <- xpathApply(doc, "//table")
# Read in the tables of interest
schedule_results <- lapply(tables, function(i) readHTMLTable(i))
# Create a df from schedule_results
schedule_and_results_2016_2020 <- data.table::rbindlist(schedule_results)
# Step five: save schedule_and_results_2016_2020 in the raw data folder
write.csv(schedule_and_results_2016_2020, file = here("data", "raw", "schedule_and_results_2016_2020.csv"))
# Step six: obtain box score URLs
# Create an empty box_score_urls df
box_score_urls <- data.frame()
# Create a for loop to loop over the web pages
for (webpage in webpages) {
  # Code to extract box score URLs
  boxscore_links <- webpage %>%
    html_nodes("table#schedule > tbody > tr > td > a") %>% # where to look for the box score URLs (in each html)
    html_attr("href") %>% # retrieve href attributes (each href attribute contains a partial box score URL)
    paste("https://www.basketball-reference.com", ., sep = "") # prepend the host to obtain complete box score URLs
  # Create a df using boxscore_links
  urls_df <- as.data.frame(boxscore_links)
  # rbind box_score_urls with urls_df (accumulates box score URLs for every season, month and game)
  box_score_urls <- rbind(box_score_urls, urls_df)
}
# Step seven: save box_score_urls in the raw data folder
write.csv(box_score_urls, file = here("data", "raw", "box_score_urls_2016_2020.csv"))
# step eight: clean-up!
# Remove the downloaded HTMLs from our folder.
# unlink() is vectorized, so a single call deletes every file; the original
# looped 45 times and deleted the same file list on each iteration.
unlink(html_list)
##### ----
# The schedule_and_results_2016_2020 data is pre-processed below
# Clean the environment
# NOTE(review): rm(list = ls()) wipes the entire user workspace; avoid in shared scripts.
rm(list = ls())
# Read in schedule_and_results_2016_2020.csv
df_2016_2020 <- read.csv(here("data", "raw", "schedule_and_results_2016_2020.csv"), row.names = "X")
# Rename the columns
names(df_2016_2020) <- c(
"date", "start_time", "away_team", "points_away",
"home_team", "points_home", "box_score", "overtime",
"attendance", "remarks"
)
# Select the columns of interest
df_2016_2020 <- df_2016_2020 %>%
select(date, away_team, points_away, home_team, points_home, attendance)
# Remove "Playoffs" rows: 1231, 2541 and 3854
# NOTE(review): hard-coded row positions are fragile -- they break silently if the
# scrape ever returns a different number of rows; confirm against a fresh scrape.
df_2016_2020 <- df_2016_2020[-c(1231, 2541, 3854), ]
# Ensure that each column has the correct class
df_2016_2020$date <- mdy(df_2016_2020$date)
# Strip thousands separators ("19,812") before converting attendance to numeric.
df_2016_2020$attendance <- as.numeric(gsub(",", "", df_2016_2020$attendance))
# Code to add a column to indicate a home win (1 = home team scored more points)
df_2016_2020 <- df_2016_2020 %>%
add_column(
home_win = if_else(df_2016_2020$points_away < df_2016_2020$points_home, 1, 0),
.after = "attendance"
)
# Change NAs in attendance to zero
df_2016_2020$attendance[is.na(df_2016_2020$attendance)] <- 0
# Most of the NAs were during the 2019 bubble time period
# Seven of the NAs were in the 2020-2021 season
# nba.com was utilized to double check if attendance was zero for each NA attendance game (in the 2020-21 season)
# Create point_difference column (signed: positive = home team won by that margin)
df_2016_2020$point_difference <- (df_2016_2020$points_home - df_2016_2020$points_away)
# Create point difference column based on absolute values
df_2016_2020$abs_point_dif <- abs(df_2016_2020$points_home - df_2016_2020$points_away)
# Create a function to separate the NBA seasons: keeps the rows of the global
# df_2016_2020 whose date falls in [x, y] (both inclusive).
myfunc_dates <- function(x, y) {
df_2016_2020[df_2016_2020$date >= x & df_2016_2020$date <= y, ]
}
# Create separated dfs for each season
df_2016_17 <- myfunc_dates(as.Date("2016-10-25"), as.Date("2017-06-12"))
df_2017_18 <- myfunc_dates(as.Date("2017-10-17"), as.Date("2018-06-08"))
df_2018_19 <- myfunc_dates(as.Date("2018-10-16"), as.Date("2019-06-13"))
df_2019_20 <- myfunc_dates(as.Date("2019-10-22"), as.Date("2020-10-11"))
df_2020_21 <- myfunc_dates(as.Date("2020-12-22"), as.Date("2021-07-22"))
# Add in a season column to each df
df_2016_17$season <- "2016-17"
df_2017_18$season <- "2017-18"
df_2018_19$season <- "2018-19"
df_2019_20$season <- "2019-20"
df_2020_21$season <- "2020-21"
# rbind the separate dfs
df_2016_2020 <- rbind(df_2016_17, df_2017_18, df_2018_19, df_2019_20, df_2020_21)
# Create win percentage column (per home_team within each season)
# NOTE(review): the result stays grouped (no ungroup()) when written to disk.
df_2016_2020 <- df_2016_2020 %>% group_by(home_team, season) %>% mutate(win_percentage = mean(home_win) * 100)
# Save df_2016_2020 in the processed folder
write.csv(df_2016_2020, file = here("data", "processed", "schedule_and_results_2016_2020.csv"))
##### ----
# The box_score_urls_2016_2020 data is pre-processed below
# Clean the environment
rm(list = ls())
# Read in box_score_urls_2016_2020.csv
box_score_urls <- read.csv(here("data", "raw", "box_score_urls_2016_2020.csv"), row.names = "X")
# Step one: separate boxscores from teams URLs
# Create a box scores object (everything that is NOT a /teams/ URL)
boxscores <- box_score_urls[!grepl(
"https://www.basketball-reference.com/teams/",
box_score_urls$boxscore_links
), ]
# Create a teams object (everything that is NOT a /boxscores/ URL)
teams <- box_score_urls[!grepl(
"https://www.basketball-reference.com/boxscores/",
box_score_urls$boxscore_links
), ]
# Create dfs for box scores and teams
boxscores <- as.data.frame(boxscores)
teams <- as.data.frame(teams)
# Step two: separate home and away team URLs
# odd = away team; even = home team)
row_odd <- seq_len(nrow(teams)) %% 2
# Create a data_row_odd object
data_row_odd <- teams[row_odd == 1, ]
# Create a data_row_even object
data_row_even <- teams[row_odd == 0, ]
# Create dfs for data_row_odd data_row_even
data_row_odd <- as.data.frame(data_row_odd)
data_row_even <- as.data.frame(data_row_even)
# cbind boxscores, data_row_even and data_row_odd
box_scores <- cbind(boxscores, data_row_even, data_row_odd)
# Remove unnecessary details (strip the host/season parts of the team URLs,
# leaving only the three-letter team abbreviation)
box_scores <- box_scores %>%
mutate_at("data_row_even", str_replace, "https://www.basketball-reference.com/teams/", "")
box_scores <- box_scores %>%
mutate_at("data_row_odd", str_replace, "https://www.basketball-reference.com/teams/", "")
removes <- c("data_row_even", "data_row_odd")
for (remove in removes) {
box_scores <- box_scores %>%
mutate_at(remove, str_replace, "/2017.html", "")
box_scores <- box_scores %>%
mutate_at(remove, str_replace, "/2018.html", "")
box_scores <- box_scores %>%
mutate_at(remove, str_replace, "/2019.html", "")
box_scores <- box_scores %>%
mutate_at(remove, str_replace, "/2020.html", "")
box_scores <- box_scores %>%
mutate_at(remove, str_replace, "/2021.html", "")
}
# Assign new column names
names(box_scores) <- c("urls", "home", "away")
# Step three: save box_scores
write.csv(box_scores, file = here("data", "processed", "box_score_urls_2016_2020.csv"))
|
94351caa7664a7130e2b1b3f5c358995827209a0
|
3215b0e4972171cd74c178d9492b8e8c3977c0fc
|
/MCMC_Diabetes_Drug_Effect.R
|
cc81cee7c1cfb0a68ccd491a7f12234736a1a71c
|
[] |
no_license
|
YidiJiang/Applied-Bayesian-Methods
|
585bfe4553ff118260f32d8d152ce3d1addb2ed4
|
99c178917cd555b1fcce8f540143df3da64dbd32
|
refs/heads/master
| 2020-06-26T03:59:57.626517
| 2019-07-29T20:35:31
| 2019-07-29T20:35:31
| 199,522,326
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,873
|
r
|
MCMC_Diabetes_Drug_Effect.R
|
# Fit models to the data with a Bayesian analysis using OpenBUGS.
# Fit model 1: Yi is the difference between two analyses (meta analysis with i=12), delta is a normal
# distribution with mean= theta (true mean: difference between treatment group and control group), there is
# only one true mean for 12 studies.
library(R2OpenBUGS)
# NOTE(review): file.choose() requires an interactive session; header=T is
# discouraged style (use header=TRUE); attach() risks masking other objects.
DiabetesDrugEffect <- read.table(file.choose(), sep=",", header=T)
attach(DiabetesDrugEffect)
# Names of the data objects passed to OpenBUGS (found via attach() above).
data=list("StudyID", "StudyN", "diff", "Sediff")
# Initial values for each MCMC chain.
init.fun=function(){list(
delta=rnorm(12,-1.5,0.25),
theta=rnorm(1,-1.5,0.25),
sd0=runif(1,0,1)
)}
# Parameters to monitor.
param=c("delta", "theta", "tau0", "sd0")
# Hierarchical (random-effects) model: each study's observed difference is
# normal around a study-specific delta[i], which in turn is normal around the
# common true mean theta with between-study precision tau0.
cat(" model{
for(i in 1:12){
diff[i]~dnorm(delta[i], tau[i])
delta[i]~dnorm(theta, tau0)
tau[i]<-1/Sediff[i]/Sediff[i]}
theta~dnorm(0,1)
sd0~dunif(0,1)
}", file="DiabetesDrugEffect.txt")
# Run 5 chains, 30000 iterations each, discarding the first 10000 as burn-in.
diabetes=bugs(data, init.fun, param
, model.file="DiabetesDrugEffect.txt",
n.chains=5, n.iter=30000, n.burnin=10000,
n.thin=5 ,debug=TRUE)
print(diabetes, digits.summary = 3)
output<-diabetes$sims.array
# Write autocorrelation plots (chain 1) for all monitored nodes to a PDF.
pdf("ACFPlotBetaS.pdf")
par(mfrow=c(3,2))
for(i in 1:12){
acf(output[,1,paste0("delta[",i,"]")], main=paste0("delta[",i,"]"))
}
acf(output[,1,"theta"], main="theta")
acf(output[,1,"tau0"], main="tau0")
acf(output[,1,"sd0"], main="sd0") #sd0 is sigma0
dev.off()
# plots of autocorrelation (repeated on the interactive device)
par(mfrow=c(3,2))
for(i in 1:12){
acf(output[,1,paste0("delta[",i,"]")], main=paste0("delta[",i,"]"))
}
acf(output[,1,"theta"], main="theta")
acf(output[,1,"tau0"], main="tau0")
acf(output[,1,"sd0"], main="sd0")
dev.off()
# Fit model 2, every study has its own variance: the observed difference of each
# study is normal around the common mean theta, with a variance that is the sum
# of a common between-study component (sd0) and the study's own sampling variance.
DiabetesDrugEffect <- read.table(file.choose(), sep=",", header=TRUE)
attach(DiabetesDrugEffect)
data=list("StudyID", "StudyN", "diff", "Sediff")
# FIX: model 2 has no `delta` node, so supplying delta inits makes OpenBUGS
# fail with an "undefined variable" error; only theta and sd0 are initialized.
init.fun=function(){list(
theta=rnorm(1,-1.5,0.25),
sd0=runif(1,0,1)
)}
parameters=c("sd", "theta", "sd0")
# FIX: the original model string used pow(sd_0,2), but the stochastic node
# declared below is `sd0` (no underscore) -- OpenBUGS would abort on the
# undefined node sd_0.
cat(" model{
for(i in 1:12){
diff[i]~dnorm(theta, tau[i])
sd[i]<-pow(sd0,2)+pow(Sediff[i],2)
tau[i]<-1/sd[i]/sd[i]}
theta~dnorm(0,1)
sd0~dunif(0,1)
}", file="DiabetesDrugEffect.txt")
# Run 5 chains, 30000 iterations each, 10000 burn-in, thinning by 5.
diabetes=bugs(data, init.fun, parameters
, model.file="DiabetesDrugEffect.txt",
n.chains=5, n.iter=30000, n.burnin=10000,
n.thin=5 ,debug=TRUE)
print(diabetes, digits.summary = 3)
output<-diabetes$sims.array
# Autocorrelation plots (chain 1) for all monitored nodes, written to PDF.
pdf("ACFPlotBetaS.pdf")
par(mfrow=c(3,2))
for(i in 1:12){
acf(output[,1,paste0("sd[",i,"]")], main=paste0("sd[",i,"]"))
}
acf(output[,1,"theta"], main="theta")
acf(output[,1,"sd0"], main="sd0")
dev.off()
# NOTE(review): the block below is a verbatim re-run of the model-2 fit from the
# original script; it is kept for reproducibility of the original workflow.
DiabetesDrugEffect <- read.table(file.choose(), sep=",", header=TRUE)
attach(DiabetesDrugEffect)
data=list("StudyID", "StudyN", "diff", "Sediff")
init.fun=function(){list(
theta=rnorm(1,-1.5,0.25),
sd0=runif(1,0,1)
)}
parameters=c("sd", "theta", "sd0")
cat(" model{
for(i in 1:12){
diff[i]~dnorm(theta, tau[i])
sd[i]<-pow(sd0,2)+pow(Sediff[i],2)
tau[i]<-1/sd[i]/sd[i]}
theta~dnorm(0,1)
sd0~dunif(0,1)
}", file="DiabetesDrugEffect.txt")
diabetes=bugs(data, init.fun, parameters
, model.file="DiabetesDrugEffect.txt",
n.chains=5, n.iter=30000, n.burnin=10000,
n.thin=5 ,debug=TRUE)
print(diabetes, digits.summary = 3)
output<-diabetes$sims.array
pdf("ACFPlotBetaS.pdf")
par(mfrow=c(3,2))
for(i in 1:12){
acf(output[,1,paste0("sd[",i,"]")], main=paste0("sd[",i,"]"))
}
acf(output[,1,"theta"], main="theta")
acf(output[,1,"sd0"], main="sd0")
dev.off()
# Plots of autocorrelation (repeated on the interactive device)
par(mfrow=c(3,2))
for(i in 1:12){
acf(output[,1,paste0("sd[",i,"]")], main=paste0("sd[",i,"]"))
}
acf(output[,1,"theta"], main="theta")
acf(output[,1,"sd0"], main="sd0")
dev.off()
|
cef26b2469c4f8545cf2a44f02557d0847aa9bfc
|
777ab372f7f9a7741256a6e562cf9e450a8a798f
|
/R_Scripts/piclapply_run_sim.R
|
fde3936e13c75a8123ce8fcac72a57af655b984c
|
[] |
no_license
|
dan410/SimStudy_weighted_cov_est
|
cb2d166e9f60c5c63e795ef582799e9f9fb60caf
|
8d37e48531adc9f1fe75c19548c32c42f7066113
|
refs/heads/master
| 2021-01-18T21:21:00.868125
| 2015-08-30T19:49:09
| 2015-08-30T19:49:09
| 22,741,502
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,352
|
r
|
piclapply_run_sim.R
|
# Driver setup for the weighted covariance-estimation simulation study.
# NOTE(review): setwd() with an absolute home path makes the script machine-specific.
setwd('~/Dissertation_projects/SimStudy_weighted_cov_est')
##### MLE using piclapply #####
# NOTE(review): require() returns FALSE instead of erroring when the package is
# missing; library(picRutils) would fail fast here.
require(picRutils)
# obj.fun: for one simulated dataset, fit a weighted covariance-function
# estimate and return the (approximate) L2 distance to the true covariance.
#
# dat    - data frame with columns ID (site index, assumed 1..n), Peptide/response
#          columns consumed by estimate_cov_function(), and a wt column added here.
# weight - exponent applied to the inverse sampling intensity of each site.
#
# Returns a one-row data.frame with column L2, or NA if estimation fails.
# NOTE(review): `intensity` and `cov.true` are resolved from the calling
# environment (shipped to workers via piclapply's needed.objects).
obj.fun <- function(dat, weight){
  # assign weights to each point based on the intensity values
  # (assumes IDs are the integers 1..number of sites -- TODO confirm)
  for (j in seq_along(unique(dat$ID))) {
    dat$wt[dat$ID == j] <- (1 / intensity[j])^(weight)
  }
  ########################################################
  # estimate the covariance function using weights
  ########################################################
  # If estimation fails, return NA for this replicate instead of crashing the
  # whole run. (The original assigned the tryCatch result to an unused variable,
  # so a failed fit still crashed below on the missing `cov.est`; its warning
  # handler also silently aborted the estimation.)
  cov.est <- tryCatch(
    estimate_cov_function(dat, n.marginal.knots = 5),
    error = function(e) NULL
  )
  if (is.null(cov.est)) {
    return(NA)
  }
  ### estimate the L2 distance between estimated cov fun and true cov fun
  tt <- seq(0, 1, length = 20)
  grid <- expand.grid(t1 = tt, t2 = tt)
  cov.true.pts <- mapply(cov.true, x = grid[, 1], y = grid[, 2])
  cov.est.pts <- mapply(sseigfun:::cov.fn, x = grid[, 1], y = grid[, 2],
                        MoreArgs = list(knots = cov.est$knots, fit.obj = cov.est))
  # Mean squared difference over the evaluation grid (approximate L2 distance).
  dist.L2 <- sum((cov.true.pts - cov.est.pts)^2) / nrow(grid)
  data.frame(L2 = dist.L2)
}
### true covariance function: sum_k k^(-2*alpha) * cos(k*pi*x) * cos(k*pi*y)
# x, y     - evaluation points (scalars or equal-length numeric vectors)
# alpha    - eigenvalue decay rate; default 2 reproduces the original hard-coded value
# n.terms  - number of cosine terms; default 3 reproduces the original k = 1:3
cov.true <- function(x, y, alpha = 2, n.terms = 3) {
  k <- seq_len(n.terms)
  sum(k^(-2 * alpha) * cos(k * pi * x) * cos(k * pi * y))
}
### Select Grid ###
# Which spatial-location configuration to analyse (1-based index into sim_locs$grid).
grid_ID <- 2
## read in object with different configuration of spatial locations
sim_locs <- readRDS("Data/sim_locs.rds")
# Global sampling intensities for the selected grid; obj.fun() reads this
# from the environment (it is shipped to workers via needed.objects below).
intensity <- subset(sim_locs, grid == grid_ID)$intensity
# List of simulated datasets for the selected grid.
DAT <- readRDS(paste("Data/GRID", grid_ID, ".rds", sep = ""))
# This will be a slow computation in serial, but we can do it quickly with
# piclapply. We will request 2 nodes to do our job (64 CPUs). Of course,
# when you do it, use your own account and your own PNNL e-mail address.
# Intensity-weight exponent passed through to obj.fun().
weight <- 0.8
L2 <- piclapply(DAT,
obj.fun,
weight = weight,
account = 'spyglass',
needed.objects = c('obj.fun', 'cov.true', 'intensity'),
packages = "sseigfun",
numNodes = 2,
partition = 'short',
time.limit.mins = 60,
jobName = 'CovEstgrid2wt1',
email.notification = 'daniel.fortin@pnnl.gov',
verbose = TRUE)
# Persist the per-dataset L2 distances, tagged by grid and weight.
save(L2, file = paste("piclapply_Res/grid", grid_ID, "_wt_", weight, ".RData", sep=""))
|
5de9ea7c2a06b0d2f87f9e8a0723789234b3aeca
|
89e5a4b76e9899aa767da1f1df372db7d4aa7ff2
|
/analiza/analiza.r
|
58f3d750a5c65ecb522d70ec30cffbd0bec2a20c
|
[
"MIT"
] |
permissive
|
ZilavecM14/APPR-2015-16
|
940f7ef1547aa13e9398d5372f3fe13473a465ef
|
292e9daecdb74eaa88dfae808928b02198ceb0d7
|
refs/heads/master
| 2020-12-01T13:06:16.233841
| 2016-03-31T09:35:11
| 2016-03-31T09:35:11
| 45,672,237
| 0
| 1
| null | 2015-11-06T09:25:21
| 2015-11-06T09:25:21
| null |
UTF-8
|
R
| false
| false
| 3,231
|
r
|
analiza.r
|
# Phase 4: data analysis (forecasting scholarship counts and amounts up to 2022)
novo <- skupaj %>% filter(vrsta_kratka == "Skupaj")
# Plot with forecasts up to 2022 for all scholarships combined
analiza1<-ggplot(data=novo, aes(x=leto, y=stevilo))+ xlim(2008, 2022) +
geom_line(size=0.5)+
geom_point(size=3, fill="black")+
ggtitle("Napoved števila štipendij do leta 2022")+
geom_smooth(method = "lm",formula = y ~ x+I(x^2)+I(x^3),
size = 1, fullrange = TRUE)
# Fit a linear model (slope and intercept)
lin <- lm(stevilo ~ leto, data = novo)
# Predict the number of scholarships for the years 2015 through 2022
predict(lin, data.frame(leto = c(2015:2022)))
novo1 <- studenti %>% filter(vrsta_kratka == "Skupaj")
lin <- lm(stevilo ~ leto, data=novo1)
predict(lin, data.frame(leto = c(2015:2022)))
# Plot the observed trend over 2008-2014 for university students
g <- ggplot (novo1, aes(x=leto, y=stevilo))+ geom_point(fill = "black")
z <- lowess(novo1$leto, novo1$stevilo)
m <- g + geom_line(color="red")
# NOTE(review): loess() has no 'color' argument and this result is discarded -- confirm intent
loess(data = novo1, stevilo ~ leto, color="red")
m + geom_smooth(method = "loess", size=1,fullrange = TRUE)
# Forecast up to 2022
# NOTE(review): gam() is not loaded in this script, its result is discarded, and
# the formula regresses leto on stevilo (likely reversed) -- verify
gam (data = novo1, leto ~ stevilo, color = "red")
analiza2 <- ggplot (novo1, aes(x=leto, y=stevilo))+ xlim (2008,2022) +
geom_line () + geom_point(fill="black") +
geom_smooth (method = "gam",formula = y ~ splines::bs(x, 3),
fullrange = TRUE)
novo2 <- dijaki %>% filter(vrsta_kratka == "Skupaj")
lin <- lm(stevilo ~ leto, data=novo2)
predict(lin, data.frame(leto = c(2015:2022)))
# Plot the observed trend over 2008-2014 for secondary-school students
g <- ggplot (novo2, aes(x=leto, y=stevilo))+ geom_point(fill = "black")
z <- lowess(novo2$leto, novo2$stevilo)
m <- g + geom_line(color="red")
# NOTE(review): same issues as the loess()/gam() calls above
loess(data = novo2, stevilo ~ leto, color="red")
m + geom_smooth(method = "loess", size=1,fullrange = TRUE)
# Forecast up to 2022
gam (data = novo2, leto ~ stevilo, color = "red")
analiza3 <- ggplot (novo2, aes(x=leto, y=stevilo))+ xlim (2008,2022) +
geom_line () + geom_point(fill="black") +
geom_smooth (method = "gam",formula = y ~ splines::bs(x, 4),
fullrange = TRUE)
# Filter by scholarship amount for secondary-school students
novo3 <- dijakivisina %>% filter (vrsta_kratka == "Skupaj")
# Fit a linear model of scholarship amount on year
lin <- lm(visina ~ leto, data=novo3)
predict(lin, data.frame(leto = c(2015:2022)))
# Forecast up to 2022
analiza4 <- ggplot(data=novo3, aes(x=leto, y=visina))+ xlim (2008,2022) +
geom_point(size=3, fill="black")+
ggtitle("Napoved višine štipendij za dijake do leta 2022")+
geom_smooth(method ="lm",formula = y ~ x+I(x^2)+I(x^3),
fill ="blue", colour="darkblue", size=1, alpha=0.2,
fullrange=TRUE)
# Filter by scholarship amount for university students
novo4 <- studentivisina %>% filter (vrsta_kratka == "Skupaj")
# Fit a linear model of scholarship amount on year
lin <- lm(visina ~ leto, data=novo4)
predict(lin, data.frame(leto = c(2015:2022)))
# Forecast up to 2022
analiza5 <- ggplot(data=novo4, aes(x=leto, y=visina))+ xlim (2008,2022) +
geom_point(size=3, fill="black")+
ggtitle("Napoved višine štipendij za študente do leta 2022")+
geom_smooth(method="gam",formula = y ~ x+I(x^2)+I(x^3),
fill ="red", colour="orange", size=1, alpha=0.2,
fullrange=TRUE)
|
b0f5d4d8588ece6f9712f528276b1794c4b0ee18
|
7578a071a321f9c7d30842bc6d142dae824d3b9c
|
/cachematrix.R
|
01ce3a241c07859122a9a305a42899380f0cf9dd
|
[] |
no_license
|
teconomix/ProgrammingAssignment2
|
50b6dfa28627a29d8f02eebb832f243aebde2c94
|
a0fc8fd70045078230d7fcd36514fb5e3ff831ff
|
refs/heads/master
| 2021-01-17T06:31:45.523773
| 2014-10-24T15:08:21
| 2014-10-24T15:08:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,172
|
r
|
cachematrix.R
|
## These functions cache the inverse of a matrix so that repeated requests
## for the inverse retrieve the stored result instead of recomputing it.
## makeCacheMatrix builds a special "matrix" object: a list of accessor
## closures around a matrix `x` and its cached inverse. set()/get() replace
## and read the matrix (setting a new matrix invalidates the cache);
## setinv()/getinv() store and retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL  # cached inverse; NULL means "not computed yet"
        set <- function(y) {
                x <<- y
                inv <<- NULL  # invalidate the cache whenever the matrix changes
        }
        get <- function() x
        # parameter renamed from `mean` (a copy-paste leftover that shadowed
        # base::mean) to `inverse`
        setinv <- function(inverse) inv <<- inverse
        getinv <- function() inv
        list(set = set, get = get,
             setinv = setinv,
             getinv = getinv)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. A previously computed inverse is served from the cache;
## otherwise the inverse is computed with solve(), stored via setinv(), and
## returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
        cached <- x$getinv()
        if (is.null(cached)) {
                # Cache miss: compute, store, and return the fresh inverse.
                inverse <- solve(x$get(), ...)
                x$setinv(inverse)
                return(inverse)
        }
        # Cache hit: reuse the stored inverse.
        message("getting cached inverse")
        cached
}
|
43aa5baeaf8fc66e5ad2100a0a2802826de7f24f
|
d97e2169ce9cd893920a54cffa3e754d1e309e6f
|
/man/getap.Rd
|
9244a52b942606b866e43c458b6ea202781ac6e1
|
[] |
no_license
|
bpollner/aquap2
|
5ccef0ba4423413e56df77a1d2d83967bffd1d04
|
7f7e2cf84056aad4c8a66f55d099b7bdaa42c0be
|
refs/heads/master
| 2021-07-22T15:07:22.912086
| 2021-05-27T12:50:22
| 2021-05-27T12:50:22
| 30,932,899
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,780
|
rd
|
getap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prep_metadata.r
\name{getap}
\alias{getap}
\title{Get Analysis Procedure}
\usage{
getap(fn = "def", ...)
}
\arguments{
\item{fn}{Character length one. The filename of the analysis procedure file
to load. If left at 'def', the default filename for an analysis procedure
file as specified in the settings (factory default is "anproc.r") is read in.
Provide any other valid name of an analysis procedure file to load it. (Do not
forget the '.r' at the end.)}
\item{...}{Any of the arguments of the analysis procedure - please see
\code{\link{anproc_file}}. Any argument/value provided via \code{...} will
override the value in the analysis procedure .r file.}
}
\value{
A list with the analysis procedure.
}
\description{
Read in the analysis procedure from the default or a custom
analysis procedure file located in the metadata-folder. By providing any of
the arguments of the analysis procedure file (see \code{\link{anproc_file}})
to the function you can override the values in the file with the provided
values.
}
\details{
The name of the default analysis procedure file can be specified in
the settings. The provided value and defaults will be checked in
\code{\link{gdmm}} and the resulting \code{\link{aquap_cube}} contains the
final analysis procedure in its slot @anproc.
}
\examples{
\dontrun{
ap <- getap(); str(ap); names(ap)
ap <- getap("myFile.r")
ap <- getap(pca.colorBy="C_Group") # change the value of 'pca.colorBy' from the .r file to 'C_Group'
ap <- getap(do.sim=FALSE) # switch off the calculation of SIMCA models
ap <- getap(spl.var="C_Group") # change the split variable to "C_Group"
}
}
\seealso{
\code{\link{anproc_file}}, \code{\link{getmd}}, \code{\link{gdmm}}
}
|
d5663103afb36f0834b34c66680b23a29c57bd7f
|
a40c81bdda0a632276c6a157fb6f2b83b051c300
|
/R/resp.patterns.R
|
ca7d79e822f54cbb0bd5d9cb8a6b67c8780c0809
|
[] |
no_license
|
cran/mRm
|
e6d5b2cdc18095d71356cdbe4b8de6ee3dd3bc5f
|
ccfce449042ef17167b2e626f16121ac3a4d52f8
|
refs/heads/master
| 2020-05-30T07:35:20.249582
| 2016-12-27T13:15:45
| 2016-12-27T13:15:45
| 17,697,237
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,305
|
r
|
resp.patterns.R
|
# Function: Determines different response patterns in a sample.
# Copyright (C) 2011 David Preinerstorfer
# david.preinerstorfer@univie.ac.at
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details. A copy may be obtained at
# http://www.r-project.org/Licenses/
# Determine the distinct response patterns in a (dichotomous) data matrix,
# together with how often each pattern occurs and its raw score.
# Returns the patterns/count/scores matrix with rows ordered by raw score.
resp.patterns <- function(data.matrix){
  dm <- na.omit(as.matrix(data.matrix))
  # Encode each response vector as one string, e.g. c(1, 0, 1) -> "101"
  keys <- apply(dm, 1, function(row) paste(row, collapse = ""))
  # Tabulate how often each distinct pattern occurs
  tab <- table(keys)
  # Decode the distinct pattern strings back into numeric response vectors
  patterns <- matrix(as.numeric(unlist(strsplit(names(tab), ""))),
                     ncol = ncol(dm), byrow = TRUE)
  # Raw score of each pattern (row sum)
  scores <- apply(patterns, 1, sum)
  # Combine patterns, counts and scores; order rows by raw score
  cbind(patterns, count = as.numeric(tab), scores)[order(scores), ]
}
|
6bb50a10aef5d00c39076fc3c1c6090ad0b0e98e
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/cran/paws.migration/man/sms_list_apps.Rd
|
4d1f39602ac76516dbe8833f6ab097df521d3732
|
[
"Apache-2.0"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 761
|
rd
|
sms_list_apps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sms_operations.R
\name{sms_list_apps}
\alias{sms_list_apps}
\title{Returns a list of summaries for all applications}
\usage{
sms_list_apps(appIds, nextToken, maxResults)
}
\arguments{
\item{appIds}{The unique application IDs to list.}
\item{nextToken}{The token for the next set of results.}
\item{maxResults}{The maximum number of results to return in a single call. The default
value is 50. To retrieve the remaining results, make another call with
the returned \code{NextToken} value.}
}
\description{
Returns a list of summaries for all applications.
}
\section{Request syntax}{
\preformatted{svc$list_apps(
appIds = list(
"string"
),
nextToken = "string",
maxResults = 123
)
}
}
\keyword{internal}
|
e110000ebd3200723c428c3f25568029a8717efa
|
5ad1da3166c1239e68ab5497fd1cda807e0847c1
|
/20_process_data/src/downsizeForNAs.R
|
f08e21549e48e8f1c6c13127a50a5f4aecbdb717
|
[] |
no_license
|
srcorsi-USGS/BeachBMPs
|
20f46f5416b68caaee14497b939230012c66ef8c
|
1e9fc132f1c7526c044939393f6df5138c2412cc
|
refs/heads/master
| 2021-06-08T22:04:01.996276
| 2021-01-22T22:39:56
| 2021-01-22T22:39:56
| 112,625,301
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 811
|
r
|
downsizeForNAs.R
|
# Keep only observations with a non-missing response, then explore how many
# independent variables are available per observation over time (63rd St beach).
df63rd <- make("df63rd")
response <- "Ecoli"
df <- df63rd[which(!is.na(df63rd[,response])),]
dfMaxRows <- df[,colSums(is.na(df)) <= nrow(df)*0.3] #Remove columns with more than 30% NAs
dfMaxRows <- na.omit(dfMaxRows)
plot(dfMaxRows$pdate,dfMaxRows$Ecoli)
plot(df$pdate,df$Ecoli)
# Number of non-missing values (available IVs) per observation (row).
IVcount <- apply(df,MARGIN = 1,function(x)sum(!is.na(x)))
# Explore number of IVs available for each observation over the years
plot(df$pdate,IVcount,xlab="",ylab="")
mtext("Independent variable availability",side=2,line=2.5,cex=1.5,font=2)
mtext("Date",side=1,line=3,cex=1.5,font=2)
mtext("63rd St: Independent Variables Available for E. coli observations",side = 3, line = 2,font = 2, cex = 1.5)
# Drop columns of 'df' that contain more than 'max_na' missing values and
# return the filtered data (always keeping two dimensions).
# 'max_na' defaults to 150, the threshold that was previously hard-coded.
countNAs <- function(df, max_na = 150){
  na_per_col <- colSums(is.na(df))
  # Logical indexing fixes the original `-which(test > 150)` bug: when no
  # column exceeded the threshold, `-integer(0)` selected ZERO columns and
  # silently dropped the entire data frame.
  df[, na_per_col <= max_na, drop = FALSE]
}
|
31b4bfa05764a10c80b539c1005c2a83ab99c0b5
|
9477c6d638c2918ca4b03eefe0cbe067b790ceaa
|
/R/find_linear_peptides.R
|
a2a002a6cb31ac3e42e831fdca6fcd09d03b9fa0
|
[] |
no_license
|
cran/RHybridFinder
|
bbbfddab71db4890cd3d6c5ebf463731e6803542
|
3a89236619fc5f249a816ae80fd4ea35daf251df
|
refs/heads/master
| 2023-07-07T14:29:51.392978
| 2021-08-17T15:30:24
| 2021-08-17T15:30:24
| 379,599,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 966
|
r
|
find_linear_peptides.R
|
#' @title find_linear_peptides
#' @description Non-exportable helper: checks which high-ALC unassigned
#' de novo peptides occur verbatim (i.e. as linear peptides) in the proteome.
#' @param input_for_HF the prepare_input_for_HF output
#' @param proteome_db proteome database
#' @return list (one element per extraid) of proteome annotations matching the
#' peptide, or empty where there was no hit
#' @details For every peptide sequence, the proteome database is searched for
#' an exact (fixed-string) substring match; the annotations of the matching
#' entries are returned, grouped by the peptide's extraid.
#' @noRd
#' @keywords internal
#' @importFrom seqinr getAnnot
find_linear_peptides <- function(input_for_HF, proteome_db) {
  # Exact substring lookup of one peptide against all proteome entries.
  lookup_one <- function(pep) {
    hits <- proteome_db[grep(pep, proteome_db, fixed = TRUE)]
    seqinr::getAnnot(hits)
  }
  tapply(input_for_HF$Peptide, input_for_HF$extraid, lookup_one)
}
|
08c72e900e420b38b06a27d211eb75cb10208918
|
ed48b04e4ecf2f68aa301987c029e7c11c65540c
|
/R/mymlnorm.R
|
79293c5a20286b9e6c952731d4a0b1f427ee7f9c
|
[
"MIT"
] |
permissive
|
RILEYBYNUM/MATH4753BYNUM
|
2296e1f9fe6a96ec75e9fd51c7528f7e1b8d1b70
|
e7c969c5741279b1bb6b659674f0083e734a2b9d
|
refs/heads/master
| 2023-04-10T03:11:06.922694
| 2021-04-22T21:33:55
| 2021-04-22T21:33:55
| 334,220,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,498
|
r
|
mymlnorm.R
|
#' @title mymlnorm
#'
#' @description Grid search for the maximum-likelihood estimates of a normal
#'   distribution's mean and standard deviation. Draws a contour plot of the
#'   likelihood surface L(mu, sigma) with the grid maximum and the analytic
#'   MLEs marked.
#'
#' @param x numeric sample vector
#' @param mu numeric vector of candidate values for the mean
#' @param sig numeric vector of candidate values for the standard deviation
#' @param ... further graphical parameters passed on to \code{contour}
#'
#' @return A list with \code{x} (the sample), \code{coord} (row = mu index,
#'   col = sig index of the likelihood maximum on the grid) and \code{maxl}
#'   (the maximum likelihood value). A contour plot is drawn as a side effect.
#' @export
#'
#' @examples
#' # mymlnorm(x = rnorm(20, 10, 2), mu = seq(8, 12, length = 50), sig = seq(1, 3, length = 50))
mymlnorm=function(x,mu,sig,...){  #x sample vector
nmu=length(mu) # number of values in mu
nsig=length(sig)
n=length(x) # sample size
zz=c() ## initialize a new vector
lfun=function(x,m,p) log(dnorm(x,mean=m,sd=p)) # log lik for normal
for(j in 1:nsig){
z=outer(x,mu,lfun,p=sig[j]) # z a matrix
# col 1 of z contains lfun evaluated at each x with first value of mu,
# col2 each x with 2nd value of m
# all with sig=sig[j]
y=apply(z,2,sum)
# y is a vector filled with log lik values,
# each with a difft mu and all with the same sig[j]
zz=cbind(zz,y)
## zz is the matrix with each column containing log L values, rows difft mu, cols difft sigmas
}
# Likelihood surface; NOTE(review): exp() of large-|loglik| values can
# underflow to 0 for big samples, making the argmax ill-defined.
maxl=max(exp(zz))
coord=which(exp(zz)==maxl,arr.ind=TRUE)
maxlsig=apply(zz,1,max)
contour(mu,sig,exp(zz),las=3,xlab=expression(mu),ylab=expression(sigma),axes=TRUE,
main=expression(paste("L(",mu,",",sigma,")",sep="")),...)
# Analytic MLEs: sample mean and the (biased, 1/n) standard deviation.
mlx=round(mean(x),2) # theoretical
mly=round(sqrt((n-1)/n)*sd(x),2)
#axis(1,at=c(0:20,mlx),labels=sort(c(0:20,mlx)))
#axis(2,at=c(0:20,mly),labels=TRUE)
abline(v=mean(x),lwd=2,col="Green")
abline(h=sqrt((n-1)/n)*sd(x),lwd=2,col="Red")
# Now find the estimates from the co-ords
muest=mu[coord[1]]
sigest=sig[coord[2]]
abline(v=muest, h=sigest)
return(list(x=x,coord=coord,maxl=maxl))
}
|
67ee342ee00d452ebfa65dde659d40314ce32979
|
a226f4b4cf54dd0e8164a727d24dca99e79e1354
|
/R/stringsAsNumeric.R
|
5b17869f0c76dca800ebd9657303fa7888618ccc
|
[] |
no_license
|
beckerbenj/eatGADS
|
5ef0bdc3ce52b1895aaaf40349cbac4adcaa293a
|
e16b423bd085f703f5a548c5252da61703bfc9bb
|
refs/heads/master
| 2023-09-04T07:06:12.720324
| 2023-08-25T11:08:48
| 2023-08-25T11:08:48
| 150,725,511
| 0
| 1
| null | 2023-09-12T06:44:54
| 2018-09-28T10:41:21
|
R
|
UTF-8
|
R
| false
| false
| 1,498
|
r
|
stringsAsNumeric.R
|
#### As numeric
#############################################################################
#' Transform string to numeric.
#'
#' Transform a string variable within a \code{GADSdat} or \code{all_GADSdat} object to a numeric variable.
#'
#' Applied to a \code{GADSdat} or \code{all_GADSdat} object, this function uses \code{\link[eatTools]{asNumericIfPossible}} to
#' change the variable class and changes the \code{format} column in the meta data.
#'
#'@param GADSdat \code{GADSdat} object imported via \code{eatGADS}.
#'@param varName Character string of a variable name.
#'
#'@return Returns the \code{GADSdat} object with the changed variable.
#'
#'
#'@export
stringAsNumeric <- function(GADSdat, varName) {
  UseMethod("stringAsNumeric")
}
#'@export
stringAsNumeric.GADSdat <- function(GADSdat, varName) {
  check_GADSdat(GADSdat)
  if (!varName %in% namesGADS(GADSdat)) stop("varName is not a variable in the GADSdat.")
  # Coerce the column to numeric where possible; values that cannot be
  # coerced become NA with a warning instead of failing hard.
  GADSdat$dat[[varName]] <- eatTools::catch_asNumericIfPossible(
    x = GADSdat$dat[[varName]],
    warn = paste0("Some or all values for ", varName,
                  " cannot be coerced to numeric and are therefore changed to NA. \n"),
    maintain.factor.scores = TRUE, force.string = TRUE, transform.factors = TRUE)
  # Update the SPSS format metadata to a numeric format to match the new class.
  GADSdat_out <- changeSPSSformat(GADSdat, varName = varName, format = "F10")
  check_var_type(GADSdat_out)
  GADSdat_out
}
|
f12ab4ea690dc990b3693fb7542ee46943084c36
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Introduction_to_Probability_Statistics_Using_R/code/17-appendix.R
|
9a7726ca8914f92106a55cb684e9ddbaa7401194
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,462
|
r
|
17-appendix.R
|
# Book-appendix code transcript: each call is an interactive example, and
# several depend on external files or packages not loaded here.
sessionInfo()
# Atomic vectors; mixing types in c() coerces everything to character.
x <- c(3, 5, 9)
y <- c(3, "5", TRUE)
# Matrices: column-major fill by default, row-major with byrow = TRUE;
# mixed input types are coerced to a common (character) type.
matrix(letters[1:6], nrow = 2, ncol = 3)
matrix(letters[1:6], nrow = 2, ncol = 3, byrow = TRUE)
matrix(c(1,"2",NA, FALSE), nrow = 2, ncol = 3)
A <- matrix(1:6, 2, 3)
B <- matrix(2:7, 2, 3)
A + B
A * B
try(A * B) # NOTE(review): this succeeds (elementwise product of two 2x3 matrices); the original "# an error" label presumably intended non-conformable shapes -- confirm against the book text
A %*% t(B) # this is alright
solve(A %*% t(B)) # input matrix must be square
array(LETTERS[1:24], dim = c(3,4,2))
# Data frames built from vectors; renaming columns with names<-.
x <- c(1.3, 5.2, 6)
y <- letters[1:3]
z <- c(TRUE, FALSE, TRUE)
A <- data.frame(x, y, z)
A
names(A) <- c("Fred","Mary","Sue")
A
# Expand the Titanic contingency table into one row per observation.
# NOTE(review): `untable` is not base R (reshape/DescTools provide one);
# no such package is loaded here -- confirm which is intended.
A <- as.data.frame(Titanic)
head(A)
B <- with(A, untable(A, Freq))
head(B)
C <- B[, -5]
rownames(C) <- 1:dim(C)[1]
head(C)
# A small named contingency table built by hand.
tab <- matrix(1:6, nrow = 2, ncol = 3)
rownames(tab) <- c('first', 'second')
colnames(tab) <- c('A', 'B', 'C')
tab # Counts
# Lady-tasting-tea style counts via expand.grid + xtabs.
p <- c("milk","tea")
g <- c("milk","tea")
catgs <- expand.grid(poured = p, guessed = g)
cnts <- c(3, 1, 1, 3)
D <- cbind(catgs, count = cnts)
xtabs(count ~ poured + guessed, data = D)
# Reading SPSS files (requires an actual "foo.sav" on disk).
library("foreign")
read.spss("foo.sav")
# Sorting data frames by one or more columns, ascending and descending;
# xtfrm() makes a factor usable with unary minus for reverse order.
Tmp <- Puromycin[order(Puromycin$conc), ]
head(Tmp)
with(Puromycin, Puromycin[order(conc), ])
with(Puromycin, Puromycin[order(state, conc), ])
Tmp <- with(Puromycin, Puromycin[order(-conc), ])
head(Tmp)
Tmp <- with(Puromycin, Puromycin[order(-xtfrm(state)), ])
head(Tmp)
# Weaving an ODF document with the odfWeave package (external files needed).
library("odfWeave")
odfWeave(file = "infile.odt", dest = "outfile.odt")
# NOTE(review): formula-style summary() is provided by Hmisc's
# summary.formula; base summary() does not accept a formula -- confirm.
summary(cbind(Sepal.Length, Sepal.Width) ~ Species, data = iris)
save.image("R/IPSUR.RData")
|
c90396a3067167ac58f64a282d1ea52809c29fc8
|
e833729350807bebe936487cbe037f1d29d7fea0
|
/19_SQL vs tidyverse/chinook_12_tidyverse_recursivitate.R
|
c6dc965d3c23eb4342671595101547583ff4018a
|
[] |
no_license
|
catalinaelenaleonte/Baze-de-date-I
|
1993ff47429f0c9434647e373cd2ed0ca039bbfd
|
707b82c1358b27daf1321ea36e676d76f6aa8da5
|
refs/heads/master
| 2020-08-30T22:02:54.768911
| 2019-10-14T12:53:22
| 2019-10-14T12:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,808
|
r
|
chinook_12_tidyverse_recursivitate.R
|
# # -- tidyverse vs SQL queries - Chinook DB - IE and SPE:
# # --
# # -- 12: (Pseudo) Recursion
# # --
# # -- last update: 2019-05-10
# #
library(tidyverse)
library(lubridate)
# NOTE(review): absolute, machine-specific path; consider a project-relative
# working directory when reusing this script.
setwd('/Users/marinfotache/Google Drive/Baze de date 2019/Studii de caz/chinook')
load("chinook.RData")
# #
# #
# # -- ############################################################################
# # -- A. Recursive queries for `pseudo-recursive` problems
# # -- ############################################################################
# #
# #
# # -- ############################################################################
# # -- Knowing that `trackid` follows the order of the tracks on each album,
# # -- number all the tracks on all the albums of the band
# # -- `Led Zeppelin`; albums are to be ordered alphabetically
# # -- ############################################################################
# #
# #
# # -- SQL
# #
# #
# # -- a simpler solution which, instead of recursion, uses grouping and
# # -- the `string_agg` function
# # -- (see also the script `chinook_08_sql_subconsultari_in_where_si_having.sql`)
# # WITH track_numbering AS
# # (SELECT album.albumid, title as album_title, artist.name as artist_name,
# # ROW_NUMBER() OVER (PARTITION BY title ORDER BY trackid) AS track_no,
# # track.name AS track_name
# # FROM artist
# # NATURAL JOIN album
# # INNER JOIN track ON album.albumid = track.albumid
# # ORDER BY 2, 4
# # )
# # SELECT albumid, album_title, artist_name,
# # string_agg(DISTINCT CAST(RIGHT(' ' || track_no,2) || ':' || track_name AS VARCHAR), '; '
# # ORDER BY CAST(RIGHT(' ' || track_no,2) || ':' || track_name AS VARCHAR))
# # AS all_tracks_from_this_album
# # FROM track_numbering
# # GROUP BY albumid, album_title, artist_name
# #
# #
# #
###
### tidyverse
###
# The only `native` tidyverse solution is the following one, close in logic
# to the SQL query above.
# NOTE(review): unlike the task statement, this pipeline numbers tracks for
# ALL artists, not only `Led Zeppelin` -- confirm whether a filter is intended.
temp <- artist %>%
  rename (artist_name = name) %>%
  inner_join(album) %>%
  inner_join(track) %>%
  arrange(artist_name, title, trackid) %>%
  group_by(artist_name, title) %>%
  mutate (track_no = row_number()) %>%
  summarise(all_tracks_from_this_album = paste(paste0(track_no, ':', name), collapse = '; ')) %>%
  ungroup()
# #
# # -- ############################################################################
# # -- B. Recursive queries for `recursive` problems
# # -- ############################################################################
# #
# #
### `tidyverse` does not (yet) have a non-procedural mechanism for recursion.
### Depending on the nature of the problem, solutions can be procedural or
### based on packages that process graphs (e.g. `tidygraph`)
|
caf93070544067de30d9c18e7dc26d8d8d761648
|
92d0ec74ce59f8d98e858d02598cced7b0d30c54
|
/data analysis/simulation_code/Figure5/plot_Figure5.R
|
3a59753433d96422f0780c2336ef6f6737c7fca1
|
[] |
no_license
|
Miaoyanwang/ordinal_tensor
|
1a8bd71f227294c16e12c164af12edaa134f3067
|
ba90e6df7ac5865c755373c84370042d4f1d528d
|
refs/heads/master
| 2023-04-22T07:24:17.635342
| 2021-05-10T02:35:09
| 2021-05-10T02:35:09
| 209,680,209
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 911
|
r
|
plot_Figure5.R
|
### Figure 5: MSE vs. number of ordinal levels; half observations ##
library(ggplot2)
# Simulation dimensions behind Figure5.RData: tensor dimension `d`, candidate
# ranks `rlist`, and ordinal-level counts `Klist`.
# NOTE(review): `d` and `rlist` are not referenced below -- presumably kept to
# document the simulation setup; confirm before removing.
d <- rep(20, 3)
rlist <- c(3, 5, 8)
Klist <- 2:7
################ plot ############
# `Figure5.RData` is expected to provide a data frame `Output` with columns
# `K`, `MSE`, `sd`, and `rank`.
load("Figure5.RData")
# Relabel the rank factor so the legend reads "r=3", "r=5", "r=8".
Output$rank <- as.factor(Output$rank)
levels(Output$rank) <- c("r=3", "r=5", "r=8")
# Base line plot: relative MSE against the number of ordinal levels L.
p <- ggplot(data = Output, aes(x = K, y = MSE)) +
  geom_line(aes(color = rank), size = 1.2) +
  labs(x = expression('number of ordinal levels'~(L)), y = 'relative MSE') +
  xlim(2, 7) +
  theme(text = element_text(size = rel(4)),
        legend.text = element_text(size = 15),
        axis.text.x = element_text(size = 15),
        axis.text.y = element_text(size = 15)) +
  ylim(0, 2)
# Recover the colours ggplot assigned to the lines so the error bars can be
# drawn in matching colours, one per rank, repeated across the K values.
g <- ggplot_build(p)
col <- unlist(unique(g$data[[1]]["colour"]))[c(1, 2, 3)]
p <- p +
  geom_errorbar(aes(ymin = MSE - sd, ymax = MSE + sd), width = 0.1,
                position = position_dodge(0),
                color = col[rep(rep(1:3), length(Klist))], size = 0.5) +
  geom_point(aes(shape = rank), size = 2) +
  scale_shape_manual(values = c(0, 2, 16))
# Write the finished figure to PDF.
pdf("error_level.pdf", width = 5, height = 4)
p
dev.off()
|
d3b0c048872d98f226821dc530c5f6b71b8197ff
|
9d3350a99175dd0a11846549330297ccef72aeae
|
/R/create_binned_data.R
|
76e553886fe878c3103576b057e195a60a1c0870
|
[] |
no_license
|
xingzhis/NeuroDecodeR
|
9cb11ff0f7c7eab76239379e4cb18372a402de9d
|
7fb38d4e28a1a1f2cd0f1ea23e72f3258979152c
|
refs/heads/master
| 2022-12-23T04:01:11.780678
| 2020-10-06T02:30:01
| 2020-10-06T02:30:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,135
|
r
|
create_binned_data.R
|
#' Convert data from raster format to binned format
#'
#' This function takes the name of a directory that contains files in raster
#' format and averages the data within a specified bin width at specified
#' sampling interval increments to create data in binned format used for
#' decoding.
#'
#'
#' @param raster_dir_name A string that contains the path to a directory that
#' has files in raster format. These files will be combined into binned format
#' data.
#'
#' @param save_prefix_name A string with a prefix that will be used name of file
#' that contains the saved binned format data.
#'
#' @param bin_width A number that has the bin width that data should be averaged over.
#'
#' @param sampling_interval A number that has the specifies the sampling
#' interval between successive binned data points.
#'
#' @param start_time A number that specifies at which time should the binning
#' start. By default it starts at the first time in the raster data.
#'
#' @param end_time A number that specifies at which time should the binning
#' end. By default is to end at the last time in the raster data.
#'
#' @param files_contain A string that specifies that only raster files that
#' contain this string should be included in the binned format data.
#'
#' @examples
#' # create binned data with 150 ms bin sizes sampled at 10 ms intervals
#' raster_dir_name <- file.path(
#' "..", "data-raw", "raster",
#' "Zhang_Desimone_7objects_raster_data_rda", ""
#' )
#' \dontrun{
#' binned_file_name <- create_binned_data(raster_dir_name, "ZD", 150, 50)
#' }
#'
#' @export
create_binned_data <- function(raster_dir_name,
                               save_prefix_name,
                               bin_width,
                               sampling_interval,
                               start_time = NULL,
                               end_time = NULL,
                               files_contain = "") {
  # if the directory name does not end with a slash, add a slash
  raster_dir_name <- trimws(file.path(dirname(raster_dir_name), basename(raster_dir_name), " "))
  file_names <- list.files(raster_dir_name, pattern = files_contain)
  # Accumulates one data frame of binned activity per site, stacked row-wise.
  binned_data <- NULL
  # loop through all raster data files and bin them
  for (iSite in seq_along(file_names)) {
    # print message to show progress the number of sites that have been binned;
    # the "\b" backspaces overwrite the previous progress message in place
    if (iSite == 1) {
      message(sprintf("binning site %-5s", iSite))
    } else {
      message(paste0(rep("\b", 19), collapse = ""), sprintf("binning site %-5s", iSite))
    }
    # load() returns the name(s) of the object(s) it restored into scope
    binned_data_object_name <- load(paste0(raster_dir_name, file_names[iSite]))
    if (binned_data_object_name != "raster_data") {
      stop('Data stored in raster files must contain an object called "raster_data"')
      # added this line to get rid of R CMD check note: no visible binding for global variable 'raster_data'
      # (unreachable at runtime -- it exists only to satisfy the static check)
      raster_data <- NULL
    }
    one_binned_site <- bin_saved_data_one_site(raster_data, bin_width, sampling_interval, start_time, end_time)
    # append siteID to raster data, which is then appended to binned data
    one_binned_site$siteID <- rep(iSite, dim(one_binned_site)[1])
    binned_data <- rbind(binned_data, one_binned_site)
  }
  # make the siteID be in the first column of binned data
  binned_data <- binned_data %>% select(.data$siteID, everything())
  # add the class attributes binned_data, data.frame to the binned data
  attr(binned_data, "class") <- c("binned_data", "data.frame")
  # Build the output file name from the prefix, bin width, sampling interval,
  # and (when explicitly given) the start/end times, then save to .Rda.
  saved_binned_data_file_name <- paste0(
    save_prefix_name, "_", bin_width, "bins_", sampling_interval, "sampled")
  start_time_name <- ""
  end_time_name <- ""
  if (!is.null(start_time)) {
    start_time_name <- paste0("_start", start_time)
  }
  if (!is.null(end_time)) {
    end_time_name <- paste0("_end", end_time)
  }
  saved_binned_data_file_name <- paste0(saved_binned_data_file_name, start_time_name, end_time_name, ".Rda")
  save("binned_data", file = saved_binned_data_file_name, compress = TRUE)
  # Return the name of the file the binned data was written to.
  saved_binned_data_file_name
} # end function
# A helper function for create_binned_data() to bin the data from one site:
# averages the "time.<t>" activity columns of one site's raster data over
# windows of `bin_width` columns, stepping by `sampling_interval`.
bin_saved_data_one_site <- function(raster_data,
                                    bin_width,
                                    sampling_interval,
                                    start_time = NULL,
                                    end_time = NULL) {
  # Split the raster into label columns and the "time.*" activity columns.
  labels_df <- dplyr::select(raster_data, -starts_with("time"))
  spike_df <- dplyr::select(raster_data, starts_with("time"))
  # start_df_ind is the index of the time column in spike_df
  # start_df_ind and start_time are the same if the time starts at 1
  if (is.null(start_time)) {
    # NOTE(review): "time." is treated as a regex here ("." matches any
    # character); fixed = TRUE would be stricter -- confirm column names are
    # always of the form "time.<t>".
    start_time <- as.numeric(gsub("time.", "", colnames(spike_df)[1]))
  }
  start_df_ind <- match(paste0("time.", start_time), colnames(spike_df))
  if (is.null(end_time)) {
    temp_length <- dim(spike_df)[2]
    end_time <- as.numeric(gsub("time.", "", colnames(spike_df)[temp_length]))
  }
  end_df_ind <- match(paste0("time.", end_time), colnames(spike_df))
  # Left edge of every bin, stepped by the sampling interval; the matching
  # right edges lie bin_width - 1 columns further along.
  all_start_df_inds <- seq(start_df_ind, end_df_ind - (bin_width - 1), by = sampling_interval)
  all_end_df_inds <- all_start_df_inds + (bin_width - 1)
  # One output column per bin, one row per trial.
  dfCurr_site_binned_data <- as.data.frame(matrix(nrow = dim(raster_data)[1], ncol = length(all_start_df_inds)))
  for (iBin in seq_along(all_start_df_inds)) {
    if (all_start_df_inds[iBin] == all_end_df_inds[iBin]) {
      # if binning at the same resolution as the original file, return original data
      # add start_df_ind to offset the prestimlus time
      dfCurr_site_binned_data[, iBin] <- spike_df[, all_start_df_inds[iBin]]
    } else {
      # otherwise, actually bin the data
      dfCurr_site_binned_data[, iBin] <- rowMeans(spike_df[, all_start_df_inds[iBin]:all_end_df_inds[iBin]])
    }
  }
  # Name columns "time.<binstart>_<binend>", translating data-frame column
  # indices back to raster time stamps via the start/end offsets.
  names(dfCurr_site_binned_data) <- paste0("time.", all_start_df_inds + (start_time - start_df_ind), "_", all_end_df_inds + (end_time - end_df_ind))
  # Re-attach the label columns in front of the binned activity columns.
  dfCurr_site_binned_data <- cbind(labels_df, dfCurr_site_binned_data)
  dfCurr_site_binned_data
}
|
461e9fa36b6a57be5a76f0a795f848cf79961f64
|
01c34cd1967cc13fc4e24d9dc030673977d0473e
|
/Untitled.R
|
9701d8c5404c9540d030c5bb0c6e5d68378e0b5a
|
[] |
no_license
|
wupi/course-explanatory-analysis
|
e1bf6b242f1e15a0901ce2061e8471785893a359
|
2a834b5a97a6e61a83e29028e8c8c3f24f332def
|
refs/heads/master
| 2021-01-17T15:59:20.765238
| 2016-06-13T04:54:57
| 2016-06-13T04:54:57
| 59,265,300
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 398
|
r
|
Untitled.R
|
# Exploratory analysis - final project
# Plot 1: total PM2.5 emissions per year, drawn as a line chart on the
# screen device and then copied to a PNG file.
scc <- readRDS("./course-explanatory-analysis/final_project/Source_Classification_Code.rds")
nei <- readRDS("./course-explanatory-analysis/final_project/summarySCC_PM25.rds")
yearly_totals <- aggregate(Emissions ~ year, nei, sum)
plot(Emissions ~ year, data = yearly_totals, type = "l")
dev.copy(png, "./course-explanatory-analysis/final_project/plot1.png")
dev.off()
|
f305144fbc4e58425710f2d5a9f4f5c512fa8926
|
32f2c862c462c6dcb41fa83e0eb7a9ab8a054e44
|
/scripts/00b_Hwavi_Visuals.R
|
670ecabb861a5f88f2354afe25cad2c44eab0605
|
[] |
no_license
|
JosueVega/Hwaviness
|
5e36bfb26ee524d2eb4bc6bf9f9c95d0a3b07a58
|
5074a768e90e414791c6f2a1df7861aa9866cb2b
|
refs/heads/master
| 2021-09-07T19:47:10.122962
| 2018-02-28T03:49:38
| 2018-02-28T03:49:38
| 112,789,780
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 57,979
|
r
|
00b_Hwavi_Visuals.R
|
#Josue Vega
# Separate visual projects for HWaviness
#################################
# Table 1: ANOVA table for hyphal waviness
# NOTE(review): rm(list=ls()) wipes the user's workspace when sourced; each
# section of this script is apparently written to run standalone.
rm(list=ls())
library(lme4)
library(readr)
setwd("~/../Desktop/B. cinera/Hwaviness")
mydata <- read.csv("../BR_JV_ManualHyphalDat_032817(edit).csv")
# Mixed Model 2-Way ANOVA -> Parametric Test
# Mixed model with PlateBlock as a random intercept.
# NOTE(review): mydataANOVA1 is computed but never used below -- confirm
# whether it should also be exported.
mydataANOVA1 <- anova(lmer(Phenotype ~ Isolate + (1|PlateBlock) + Date, mydata))
# Fixed-effects ANOVA including the Isolate x PlateBlock interaction.
mydataANOVA2 <- aov(Phenotype ~ Isolate + (Isolate*PlateBlock) + Date, mydata)
mydataANOVA2 <- summary(mydataANOVA2) #tabular form of the data, normal distr of information
mydataANOVA2
# NOTE(review): summary.aov returns a list, not a data frame; verify that
# write.csv produces the intended table here.
write.csv(mydataANOVA2, "../../HWaviness Parts/Completed/ANOVA_Hwavi.csv") # Shows Response (Phenotype) and ANOVA Table
#################################
## Histogram of phenotype Counts
# Draws SNP-count-per-gene histograms for the 99.9% and 99% thresholds.
rm(list = ls())
library(ggplot2)
setwd("~/../Desktop/B. cinera/Hwaviness")
# 99.9% threshold
annots_thr999 <- read.csv("data/07_SNPdat_annot/AllAnnots_999Thr_byGene.csv")
hist(annots_thr999[["pheno_count"]],
     breaks = 5, axes = TRUE, plot = TRUE, labels = TRUE,
     xlab = "Number of SNPs per Gene",
     main = "99.9% Treshold: SNP Frequency Distribution")
# 99% threshold
annots_thr99 <- read.csv("data/07_SNPdat_annot/AllAnnots_99Thr_byGene.csv")
hist(annots_thr99[["pheno_count"]],
     breaks = 5, axes = TRUE, plot = TRUE, labels = TRUE,
     xlab = "Number of SNPs per Gene",
     main = "99% Threshold: SNP Frequency Distribution")
#################################
#Merging GeneID and Function and Estimate
# Joins per-gene effect estimates (99% threshold SNP windows) with PFAM
# functional annotations, keyed on geneID, and writes the merged table.
rm(list=ls())
library(ggplot2)
setwd("~/../Desktop/B. cinera/Hwaviness")
IDgene_99_Estimate <- read.csv("data/06_snpdat/99Thr_snpGenes_2kbwin.csv")
IDgene_99_Estimate <- IDgene_99_Estimate [,c("Estimate","geneID", "Chrom")]
# NOTE(review): base unique.data.frame has no `by` argument -- `by = "geneID"`
# is silently ignored and rows are deduplicated over all retained columns;
# confirm whether data.table-style `unique(..., by = )` semantics were intended.
IDgene_99_Estimate <- unique(IDgene_99_Estimate, by = "geneID")
# View(IDgene_99_Estimate)
annots_99_Func <- read.csv("data/07_SNPdat_annot/AllAnnots_99Thr_byGene.csv")
annots_99_Func <- annots_99_Func [,c("PFAM_NAME","PFAM_DESCRIPTION", "geneID")]
annots_99_Func <- unique(annots_99_Func, by = "geneID")
# View() calls are for interactive inspection only.
View(annots_99_Func)
# View(annots_99_Func)
overall <- merge(IDgene_99_Estimate,annots_99_Func,by="geneID")
View(overall)
# annots_99_Func <- annots_99_Func [,c("PFAM_NAME","PFAM_DESCRIPTION")]
# count(unique(annots_99_Func))
# IDgene_99_Estimate <- IDgene_99_Estimate [,c("Estimate")]
# count(unique(IDgene_99_Estimate))
write.csv(overall, "../HWavinessVisuals/HWaviGeneID_fxn_estimateThresh99.csv")
#################################
# scatter plot of hyphal waviness vs. pectin growth on sugar agar
# Correlates mean hyphal waviness per isolate with isolate growth on
# pectin/sugar media at 48 h and 72 h; Spearman p and rho are shown on each
# plot. NOTE(review): the Pearson p-values (pvaluePear) are computed in each
# sub-section but never displayed -- confirm whether they are still needed.
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
library(readr)
# Least-squares means of growth on Pectin/Sugar at 48/72 h, one row per isolate.
SugarPectin <- read_delim("C:/Users/vegaj/Desktop/B. cinera/HWavinessVisuals/SugarPectin_Lsmeans.csv", ";", escape_double = FALSE, trim_ws = TRUE)
SugarPectin <- SugarPectin [,c("GenoRename", "P72", "P48", "S72", "S48")]
colnames(SugarPectin)<-c("Isolate", "P72", "P48", "S72", "S48")
HwaviBC <- read.csv("BR_JV_ManualHyphalDat_032817(edit).csv")
Hwavi <- HwaviBC
# Per-isolate summary statistics of the waviness phenotype.
Isolate <- Hwavi %>%
  group_by(Isolate) %>%
  summarise(avg_pheno = mean(Phenotype, na.rm = TRUE),
            min_pheno = min(Phenotype, na.rm = TRUE),
            max_pheno = max(Phenotype, na.rm = TRUE),
            sd_pheno = sd(Phenotype, na.rm = TRUE),
            total = n())
SugarPectinWaviness <- merge(SugarPectin, Isolate, by = "Isolate")
#View(SugarPectinWaviness)
#Scatter Plot of Pectin/Sugar(72+48hrs) v Waviness
##P72
pvaluePear <- summary(lm(avg_pheno~P72, SugarPectinWaviness))$coefficients["P72","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(SugarPectinWaviness$avg_pheno,SugarPectinWaviness$P72, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(SugarPectinWaviness$avg_pheno,SugarPectinWaviness$P72, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/Pectin72hScatter_Waviness.pdf")
plot1 <- ggplot(SugarPectinWaviness, aes(P72,avg_pheno)) +
  geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
  annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
  annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
  xlab("Isolate Growth on Pectin after 72hr") + ylab("Average Hyphal Waviness") +
  ggtitle(NULL) +
  #ggtitle("Comparison of Isolate growth on Pectin after 72hr to Isolate Hyphal Waviness") +
  geom_smooth(method='lm')
plot1
dev.off()
##P48
pvaluePear <- summary(lm(avg_pheno~P48, SugarPectinWaviness))$coefficients["P48","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(SugarPectinWaviness$avg_pheno,SugarPectinWaviness$P48, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(SugarPectinWaviness$avg_pheno,SugarPectinWaviness$P48, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/Pectin48hScatter_Waviness.pdf")
plot2 <- ggplot(SugarPectinWaviness, aes(P48,avg_pheno)) +
  geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
  annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
  annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
  xlab("Isolate Growth on Pectin after 48hr") + ylab("Average Hyphal Waviness") +
  ggtitle(NULL) +
  #ggtitle("Comparison of Isolate growth on Pectin after 48hr to Isolate Hyphal Waviness") +
  geom_smooth(method='lm')
plot2
dev.off()
##S72
pvaluePear <- summary(lm(avg_pheno~S72, SugarPectinWaviness))$coefficients["S72","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(SugarPectinWaviness$avg_pheno,SugarPectinWaviness$S72, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(SugarPectinWaviness$avg_pheno,SugarPectinWaviness$S72, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/Sugar72hScatter_Waviness.pdf")
plot3 <- ggplot(SugarPectinWaviness, aes(S72,avg_pheno)) +
  geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
  annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
  annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
  xlab("Isolate Growth on Sugar after 72hr") + ylab("Average Hyphal Waviness") +
  ggtitle(NULL) +
  #ggtitle("Comparison of Isolate growth on Sugar after 72hr to Isolate Hyphal Waviness") +
  geom_smooth(method='lm')
plot3
dev.off()
##S48
pvaluePear <- summary(lm(avg_pheno~S48, SugarPectinWaviness))$coefficients["S48","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(SugarPectinWaviness$avg_pheno,SugarPectinWaviness$S48, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(SugarPectinWaviness$avg_pheno,SugarPectinWaviness$S48, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/Sugar48hScatter_Waviness.pdf")
plot4 <- ggplot(SugarPectinWaviness, aes(S48,avg_pheno)) +
  geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
  annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
  annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
  xlab("Isolate Growth on Sugar after 48hr") + ylab("Average Hyphal Waviness") +
  ggtitle(NULL) +
  #ggtitle("Comparison of Isolate growth on Sugar after 48hr to Isolate Hyphal Waviness") +
  geom_smooth(method='lm')
plot4
dev.off()
##Multiple Plots in one
# Combine the four scatter plots into one 2x2 panel figure.
library(cowplot)
gridPlot <- plot_grid( plot2, plot1,plot4, plot3, labels = c("A", "B", "C", "D"), scale = .75)
gridPlot
#################################
#################################
# scatter plot of hyphal waviness vs. lesion size on tomato, by isolate
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
BC_tomato <- read.csv("HWavinessVisuals/Bc_tomatoLesion.csv")
HwaviBC <- read.csv("BR_JV_ManualHyphalDat_032817(edit).csv")
Hwavi <- HwaviBC
Isolate <- Hwavi %>%
group_by(Isolate) %>%
summarise(avg_pheno = mean(Phenotype, na.rm = TRUE),
min_pheno = min(Phenotype, na.rm = TRUE),
max_pheno = max(Phenotype, na.rm = TRUE),
sd_pheno = sd(Phenotype, na.rm = TRUE),
total = n())
BC_TomatoWaviness <- merge(BC_tomato, Isolate, by = "Isolate")
#Scatter Plot mean Lesion v Waviness
pvaluePear <- summary(lm(avg_pheno~meanLesion, BC_TomatoWaviness))$coefficients["meanLesion","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_TomatoWaviness$avg_pheno,BC_TomatoWaviness$meanLesion, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_TomatoWaviness$avg_pheno,BC_TomatoWaviness$meanLesion, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/TomatoScatter_Lesion_Waviness.pdf")
plotTom <- ggplot(BC_TomatoWaviness, aes(meanLesion,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Mean Lesion Size on Tomato") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Tomato against Hyphal Waviness per Isolate") +
geom_smooth(method='lm', se = FALSE)
plotTom
dev.off()
# 2nd scatter plot of hyphal waviness vs. lesion size on different interaction types, by isolate
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
BC_Interact <- read.csv("HWavinessVisuals/DataFor100IsoCollection.csv")
BC_Interact <- BC_Interact [,c("Isolate","Cendivia","Brapa","Cintybus","Glycine", "Helianthus", "Solanum")]
HwaviBC <- read.csv("BR_JV_ManualHyphalDat_032817(edit).csv")
Hwavi <- HwaviBC
Isolate <- Hwavi %>%
group_by(Isolate) %>%
summarise(avg_pheno = mean(Phenotype, na.rm = TRUE),
min_pheno = min(Phenotype, na.rm = TRUE),
max_pheno = max(Phenotype, na.rm = TRUE),
sd_pheno = sd(Phenotype, na.rm = TRUE),
total = n())
BC_InteractWaviness <- merge(BC_Interact, Isolate, by = "Isolate")
#Scatter Plot mean Interactions Lesion v Waviness
##C. endivia - dicot
pvaluePear <- summary(lm(avg_pheno~Cendivia, BC_InteractWaviness))$coefficients["Cendivia","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Cendivia, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Cendivia, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/CendiviaScatter_Lesion_Waviness.pdf")
plot1 <- ggplot(BC_InteractWaviness, aes(Cendivia,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) + #geom_text(label = (PvalueSpea), parse = TRUE) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Mean Lesion Size on C. endivia") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Cichorium endivia against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot1
dev.off()
##B. rapa - dicot
pvaluePear <- summary(lm(avg_pheno~Brapa, BC_InteractWaviness))$coefficients["Brapa","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Brapa, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Brapa, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/BrapaScatter_Lesion_Waviness.pdf")
plot2 <- ggplot(BC_InteractWaviness, aes(Brapa,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Lesion Size on B. rapa") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Brassica rapa against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot2
dev.off()
##C. intybus - dicot
pvaluePear <- summary(lm(avg_pheno~Cintybus, BC_InteractWaviness))$coefficients["Cintybus","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Cintybus, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Cintybus, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/CintybusScatter_Lesion_Waviness.pdf")
plot3 <- ggplot(BC_InteractWaviness, aes(Cintybus,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Lesion Size on C. intybus") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Cichorium intybus against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot3
dev.off()
##Glycine max
# Scatter of mean lesion size on G. max vs. average hyphal waviness per isolate,
# annotated with the Spearman p-value and rho. The Pearson p-value (pvaluePear)
# is computed for reference but not drawn on the plot.
glycine_fit <- lm(avg_pheno ~ Glycine, BC_InteractWaviness)
pvaluePear <- format(round(summary(glycine_fit)$coefficients["Glycine", "Pr(>|t|)"], 5), nsmall = 4)
# Single cor.test call; both the p-value and the rho estimate come from it.
glycine_cor <- cor.test(BC_InteractWaviness$avg_pheno, BC_InteractWaviness$Glycine,
                        method = "spearman", exact = FALSE)
PvalueSpea <- format(round(glycine_cor$p.value, 5), nsmall = 4)
rhovalueSpea <- format(round(glycine_cor$estimate, 5), nsmall = 4)
# Write the annotated scatter to PDF.
pdf("../HWaviness Parts/Completed/GlycineScatter_Lesion_Waviness.pdf")
plot4 <- ggplot(BC_InteractWaviness, aes(Glycine, avg_pheno)) +
  geom_point(color = 'red') +
  annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
  annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
  geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
  xlab("Mean Lesion Size on G. max") + ylab("Average Hyphal Waviness") +
  ggtitle(NULL) +
  geom_smooth(method = 'lm')
plot4
dev.off()
##Helianthus - dicot
# Scatter of mean lesion size on H. annuus vs. average hyphal waviness,
# annotated with Spearman p and rho. pvaluePear is computed but not plotted.
pvaluePear <- summary(lm(avg_pheno~Helianthus, BC_InteractWaviness))$coefficients["Helianthus","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Helianthus, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Helianthus, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/HelianthusScatter_Lesion_Waviness.pdf")
plot5 <- ggplot(BC_InteractWaviness, aes(Helianthus,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Mean Lesion Size on H. annuus") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Helianthus annuus against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot5
dev.off()
##Solanum - dicot
# Scatter of mean lesion size on Solanum vs. average hyphal waviness,
# annotated with Spearman p and rho.
# BUG FIX: the lm()/cor.test() calls previously used the Cendivia column, so the
# p-value and rho annotated on the Solanum plot came from the wrong host
# (compare the matching LsMeans Solanum section below, which uses Solanum).
# All statistics now use the Solanum column that is actually plotted.
pvaluePear <- summary(lm(avg_pheno~Solanum, BC_InteractWaviness))$coefficients["Solanum","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Solanum, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$avg_pheno,BC_InteractWaviness$Solanum, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/SolanumScatter_Lesion_Waviness.pdf")
plot6 <- ggplot(BC_InteractWaviness, aes(Solanum,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Lesion Size on Solanum") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Solanum against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot6
dev.off()
##Average Eudicot Data
##Multiple Plots in one
# Combine the six host scatter plots plus the tomato plot into one labelled grid.
# NOTE(review): relies on plot1..plot6 from the sections above and plotTom
# from a section earlier in the file — run those first.
library(cowplot)
gridPlot <- plot_grid(plot1, plot2, plot3, plot4, plot5, plot6, plotTom, labels = c("A", "B", "C", "D", "E", "F", "G"), scale = .75)
gridPlot
#################################
# BOXPLOT: distribution of hyphal waviness per isolate, isolates ordered by
# their mean phenotype. Writes a wide PDF to the Completed folder.
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
HwaviBC <- read.csv("BR_JV_ManualHyphalDat_032817(edit).csv")
Hwavi <- HwaviBC
pdf("../HWaviness Parts/Completed/Boxplot_Hwavi_Isolate.pdf", width = 15, height = 5)
# Build the plot as a named object, then print it into the open device.
waviness_box <- ggplot(Hwavi, aes(reorder(Isolate, Phenotype, mean), Phenotype)) +
  theme_bw() +
  geom_boxplot() +
  # Rotate isolate labels so the many x-axis categories stay readable.
  theme(axis.text.x = element_text(angle = -90, hjust = 0)) +
  labs(x = "Isolate", y = "Phenotype Distribution")
print(waviness_box)
dev.off()
#################################
# VIOLIN PLOT: comparison of hyphal waviness across isolates
# Same layout as the boxplot section above but drawn as violins (trim = FALSE
# extends the tails to the full data range).
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/Hwaviness/")
HwaviBC <- read.csv("../BR_JV_ManualHyphalDat_032817(edit).csv")
Hwavi <- HwaviBC
pdf("../../HWaviness Parts/Completed/Violin_Hwavi_Isolate.pdf", width = 15, height = 5)
ggplot(Hwavi, aes(reorder(Isolate, Phenotype, mean),Phenotype)) +
theme_bw() +
geom_violin(fill='#56B4E9', trim = FALSE) +
theme(axis.text.x=element_text(angle = -90, hjust = 0)) +
xlab("Isolate") + ylab("Hyphal Waviness")
dev.off()
#################################
#Scatter of Eccentricity of Isolates
# Merge per-isolate lesion eccentricity estimates with per-isolate summary
# statistics of hyphal waviness, then plot average waviness vs. eccentricity
# with Spearman p/rho annotations.
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
Ecc <- read.csv("Hwaviness/data/BcAtPhenos/BcAtPhenosGWAS_lsmeans.fxmod1_clean.csv")
HwaviBC <- read.csv("BR_JV_ManualHyphalDat_032817(edit).csv")
Hwavi <- HwaviBC
# Per-isolate phenotype summaries (mean/min/max/sd and observation count).
Isolate <- Hwavi %>%
group_by(Isolate) %>%
summarise(avg_pheno = mean(Phenotype, na.rm = TRUE),
min_pheno = min(Phenotype, na.rm = TRUE),
max_pheno = max(Phenotype, na.rm = TRUE),
sd_pheno = sd(Phenotype, na.rm = TRUE),
total = n())
BC_EccWaviness <- merge(Ecc, Isolate, by = "Isolate")
# NOTE(review): pvaluePear is computed but never drawn — only the Spearman
# statistics are annotated on the plot.
pvaluePear <- summary(lm(avg_pheno~Estimate, BC_EccWaviness))$coefficients["Estimate","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_EccWaviness$avg_pheno,BC_EccWaviness$Estimate, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_EccWaviness$avg_pheno,BC_EccWaviness$Estimate, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/EccenScatter_Lesion_Waviness.pdf")
plot6 <- ggplot(BC_EccWaviness, aes(Estimate,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
# NOTE(review): likely leftover debug layer duplicating the p-value at (2, 6).
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Eccentricity") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Solanum against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot6
dev.off()
##################################################################
#LsMean comparisons of the plots instead of the averages
##Pectin+Sugar x LsMeans
# Merge in-vitro growth on pectin/sugar media (48 h and 72 h) with LsMeans
# hyphal waviness per isolate, then produce one annotated scatter per medium
# and time point. Each subsection follows the same pattern:
#   Pearson R^2/p (computed, not plotted) -> Spearman p/rho (annotated) ->
#   pdf() -> ggplot scatter + lm smooth -> print -> dev.off().
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
library(readr)
# NOTE(review): hard-coded absolute Windows path; breaks on any other machine —
# consider a path relative to the working directory set above.
SugarPectin <- read_delim("C:/Users/vegaj/Desktop/B. cinera/HWavinessVisuals/SugarPectin_Lsmeans.csv", ";", escape_double = FALSE, trim_ws = TRUE)
SugarPectin <- SugarPectin [,c("GenoRename", "P72", "P48", "S72", "S48")]
# Rename GenoRename -> Isolate so merge() keys line up with the waviness table.
colnames(SugarPectin)<-c("Isolate", "P72", "P48", "S72", "S48")
HwaviBC <- read.csv("WavyGWAS_lsmeans.fxmod1_R_output.csv")
Isolate <- HwaviBC
SugarPectinWaviness <- merge(SugarPectin, Isolate, by = "Isolate")
View(SugarPectinWaviness)
#Scatter Plot of Pectin/Sugar(72+48hrs) v Waviness
##P72
rvaluePear <- summary(lm(HwaviEstimate~P72, SugarPectinWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~P72, SugarPectinWaviness))$coefficients["P72","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(SugarPectinWaviness$HwaviEstimate,SugarPectinWaviness$P72, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(SugarPectinWaviness$HwaviEstimate,SugarPectinWaviness$P72, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/Pectin72hScatter_LsWaviness.pdf")
plot1 <- ggplot(SugarPectinWaviness, aes(P72,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Isolate Growth on Pectin after 72hr") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Comparison of Isolate growth on Pectin after 72hr to Isolate Hyphal Waviness") +
geom_smooth(method='lm')
plot1
dev.off()
##P48
rvaluePear <- summary(lm(HwaviEstimate~P48, SugarPectinWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~P48, SugarPectinWaviness))$coefficients["P48","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(SugarPectinWaviness$HwaviEstimate,SugarPectinWaviness$P48, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(SugarPectinWaviness$HwaviEstimate,SugarPectinWaviness$P48, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/Pectin48hScatter_LsWaviness.pdf")
plot2 <- ggplot(SugarPectinWaviness, aes(P48,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Isolate Growth on Pectin after 48hr") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Comparison of Isolate growth on Pectin after 48hr to Isolate Hyphal Waviness") +
geom_smooth(method='lm')
plot2
dev.off()
##S72
rvaluePear <- summary(lm(HwaviEstimate~S72, SugarPectinWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~S72, SugarPectinWaviness))$coefficients["S72","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(SugarPectinWaviness$HwaviEstimate,SugarPectinWaviness$S72, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(SugarPectinWaviness$HwaviEstimate,SugarPectinWaviness$S72, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/Sugar72hScatter_LsWaviness.pdf")
plot3 <- ggplot(SugarPectinWaviness, aes(S72,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Isolate Growth on Sugar after 72hr") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Comparison of Isolate growth on Sugar after 72hr to Isolate Hyphal Waviness") +
geom_smooth(method='lm')
plot3
dev.off()
##S48
rvaluePear <- summary(lm(HwaviEstimate~S48, SugarPectinWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~S48, SugarPectinWaviness))$coefficients["S48","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(SugarPectinWaviness$HwaviEstimate,SugarPectinWaviness$S48, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(SugarPectinWaviness$HwaviEstimate,SugarPectinWaviness$S48, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/Sugar48hScatter_LsWaviness.pdf")
plot4 <- ggplot(SugarPectinWaviness, aes(S48,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Isolate Growth on Sugar after 48hr") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Comparison of Isolate growth on Sugar after 48hr to Isolate Hyphal Waviness") +
geom_smooth(method='lm')
plot4
dev.off()
##Multiple Plots in one
# Grid of the four media scatters (48 h before 72 h within each medium).
library(cowplot)
gridPlot <- plot_grid( plot2, plot1,plot4, plot3, labels = c("A", "B", "C", "D"), scale = .75)
gridPlot
#Scatter of Eccentricity of Isolates x LsMeans
# Same as the eccentricity section above, but against LsMeans waviness
# (HwaviEstimate) instead of raw per-isolate averages.
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
Ecc <- read.csv("Hwaviness/data/BcAtPhenos/BcAtPhenosGWAS_lsmeans.fxmod1_clean.csv")
HwaviBC <- read.csv("WavyGWAS_lsmeans.fxmod1_R_output.csv")
Isolate <- HwaviBC
BC_EccWaviness <- merge(Ecc, Isolate, by = "Isolate")
# Pearson R^2/p computed for reference only; plot annotates Spearman p/rho.
rvaluePear <- summary(lm(HwaviEstimate~Estimate, BC_EccWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~Estimate, BC_EccWaviness))$coefficients["Estimate","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_EccWaviness$HwaviEstimate,BC_EccWaviness$Estimate, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_EccWaviness$HwaviEstimate,BC_EccWaviness$Estimate, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/EccenScatter_Lesion_LsWaviness.pdf")
plot6 <- ggplot(BC_EccWaviness, aes(Estimate,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
# NOTE(review): likely leftover debug layer duplicating the p-value at (2, 6).
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Eccentricity") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Solanum against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot6
dev.off()
# scatter plot of Lsmeanshyphal waviness vs. lesion size on tomato, by isolate
# Merge tomato lesion data with LsMeans waviness and plot with Spearman p/rho.
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
BC_tomato <- read.csv("HWavinessVisuals/Bc_tomatoLesion.csv")
HwaviBC <- read.csv("WavyGWAS_lsmeans.fxmod1_R_output.csv")
Isolate <- HwaviBC
BC_TomatoWaviness <- merge(BC_tomato, Isolate, by = "Isolate")
#Scatter Plot mean Lesion v Waviness
# Pearson R^2/p computed for reference only; plot annotates Spearman p/rho.
rvaluePear <- summary(lm(HwaviEstimate~meanLesion, BC_TomatoWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~meanLesion, BC_TomatoWaviness))$coefficients["meanLesion","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_TomatoWaviness$HwaviEstimate,BC_TomatoWaviness$meanLesion, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_TomatoWaviness$HwaviEstimate,BC_TomatoWaviness$meanLesion, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/TomatoScatter_Lesion_Waviness.pdf")
plotTom <- ggplot(BC_TomatoWaviness, aes(meanLesion,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Mean Lesion Size on Tomato") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Tomato against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plotTom
dev.off()
# 2nd scatter plot of hyphal waviness vs. lesion size on different interaction types, by isolate
# One annotated scatter per host species (C. endivia, B. rapa, C. intybus,
# G. max, H. annuus, Solanum) against LsMeans hyphal waviness; finishes with a
# combined cowplot grid. Reuses `Isolate` (LsMeans table) from the section above.
setwd("~/../Desktop/B. cinera/")
BC_Interact <- read.csv("HWavinessVisuals/DataFor100IsoCollection.csv")
BC_Interact <- BC_Interact [,c("Isolate","Cendivia","Brapa","Cintybus","Glycine", "Helianthus", "Solanum")]
BC_InteractWaviness <- merge(BC_Interact, Isolate, by = "Isolate")
#Scatter Plot mean Interactions Lesion v Waviness
##C. endivia - dicot
# Pearson R^2/p computed for reference only; plots annotate Spearman p/rho.
rvaluePear <- summary(lm(HwaviEstimate~Cendivia, BC_InteractWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~Cendivia, BC_InteractWaviness))$coefficients["Cendivia","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Cendivia, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Cendivia, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/CendiviaScatter_Lesion_Waviness.pdf")
plot1 <- ggplot(BC_InteractWaviness, aes(Cendivia,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) + #geom_text(label = (PvalueSpea), parse = TRUE) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Mean Lesion Size on C. endivia") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Cichorium endivia against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot1
dev.off()
##B. rapa - dicot
rvaluePear <- summary(lm(HwaviEstimate~Brapa, BC_InteractWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~Brapa, BC_InteractWaviness))$coefficients["Brapa","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Brapa, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Brapa, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/BrapaScatter_Lesion_Waviness.pdf")
plot2 <- ggplot(BC_InteractWaviness, aes(Brapa,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
# NOTE(review): likely leftover debug layer duplicating the p-value at (2, 6);
# same layer appears in several subsections below.
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Lesion Size on B. rapa") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Brassica rapa against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot2
dev.off()
##C. intybus - dicot
rvaluePear <- summary(lm(HwaviEstimate~Cintybus, BC_InteractWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~Cintybus, BC_InteractWaviness))$coefficients["Cintybus","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Cintybus, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Cintybus, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/CintybusScatter_Lesion_Waviness.pdf")
plot3 <- ggplot(BC_InteractWaviness, aes(Cintybus,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Lesion Size on C. intybus") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Cichorium intybus against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot3
dev.off()
##Glycine max
rvaluePear <- summary(lm(HwaviEstimate~Glycine, BC_InteractWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~Glycine, BC_InteractWaviness))$coefficients["Glycine","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Glycine, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Glycine, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/GlycineScatter_Lesion_Waviness.pdf")
plot4 <- ggplot(BC_InteractWaviness, aes(Glycine,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Lesion Size on G. max") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Glycine max against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot4
dev.off()
##Helianthus - dicot
rvaluePear <- summary(lm(HwaviEstimate~Helianthus, BC_InteractWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~Helianthus, BC_InteractWaviness))$coefficients["Helianthus","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Helianthus, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Helianthus, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/HelianthusScatter_Lesion_Waviness.pdf")
plot5 <- ggplot(BC_InteractWaviness, aes(Helianthus,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Mean Lesion Size on H. annuus") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Helianthus annuus against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot5
dev.off()
##Solanum - dicot
rvaluePear <- summary(lm(HwaviEstimate~Solanum, BC_InteractWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~Solanum, BC_InteractWaviness))$coefficients["Solanum","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Solanum, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BC_InteractWaviness$HwaviEstimate,BC_InteractWaviness$Solanum, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/SolanumScatter_Lesion_Waviness.pdf")
plot6 <- ggplot(BC_InteractWaviness, aes(Solanum,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Mean Lesion Size on Solanum") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
#ggtitle("Mean Lesion Size on Solanum against Hyphal Waviness per Isolate") +
geom_smooth(method='lm')
plot6
dev.off()
##Multiple Plots in one
# Combined grid of the six host scatters plus the tomato scatter from above.
library(cowplot)
gridPlot <- plot_grid(plot1, plot2, plot3, plot4, plot5, plot6, plotTom, labels = c("A", "B", "C", "D", "E", "F", "G"), scale = .75)
gridPlot
##################################################################
# scatter plot of Lsmeanshyphal waviness vs. Ecc per plant, by isolate
# One annotated scatter per Arabidopsis genotype (anac055, coi1, col0, npr1,
# pad3) of lesion eccentricity vs. LsMeans hyphal waviness.
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
BcAt <- read.csv("Hwaviness/data/BcAtPhenos/BcAtPhenosGWAS_lsmeans.fxmod1_organiz.csv")
HwaviBC <- read.csv("WavyGWAS_lsmeans.fxmod1_R_output.csv")
Isolate <- HwaviBC
BcAt_LsWaviness <- merge(BcAt, Isolate, by = "Isolate")
#Scatter Plot mean Interactions Lesion v Waviness
## anac088 - dicot
# NOTE(review): the data column used is `anac055` but the file name and axis
# label below say "anac088" — one of the two is wrong; confirm which genotype
# this column actually holds before publishing the figure.
rvaluePear <- summary(lm(HwaviEstimate~anac055, BcAt_LsWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~anac055, BcAt_LsWaviness))$coefficients["anac055","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$anac055, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$anac055, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/anac088Scatter_Lesion_LsWaviness.pdf")
plot1 <- ggplot(BcAt_LsWaviness, aes(anac055,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) + #geom_text(label = (PvalueSpea), parse = TRUE) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on anac088") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
plot1
dev.off()
##coi1 - dicot
rvaluePear <- summary(lm(HwaviEstimate~coi1, BcAt_LsWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~coi1, BcAt_LsWaviness))$coefficients["coi1","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$coi1, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$coi1, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/coi1Scatter_Lesion_LsWaviness.pdf")
plot2 <- ggplot(BcAt_LsWaviness, aes(coi1,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
# NOTE(review): likely leftover debug layer duplicating the p-value at (2, 6);
# same layer appears in the col0/npr1 subsections below.
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Lesion Eccentricity on coi1") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
plot2
dev.off()
##col0 - dicot
rvaluePear <- summary(lm(HwaviEstimate~col0, BcAt_LsWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~col0, BcAt_LsWaviness))$coefficients["col0","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$col0, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$col0, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/col0Scatter_Lesion_LsWaviness.pdf")
plot3 <- ggplot(BcAt_LsWaviness, aes(col0,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Lesion Eccentricity on col0") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
plot3
dev.off()
##npr1
rvaluePear <- summary(lm(HwaviEstimate~npr1, BcAt_LsWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~npr1, BcAt_LsWaviness))$coefficients["npr1","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$npr1, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$npr1, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/npr1Scatter_Lesion_LsWaviness.pdf")
plot4 <- ggplot(BcAt_LsWaviness, aes(npr1,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
geom_text(x = 2, y = 6, label = (PvalueSpea), parse = TRUE) +
xlab("Lesion Eccentricity on npr1") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
plot4
dev.off()
##pad3 - dicot
rvaluePear <- summary(lm(HwaviEstimate~pad3, BcAt_LsWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~pad3, BcAt_LsWaviness))$coefficients["pad3","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$pad3, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$pad3, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/pad3Scatter_Lesion_LsWaviness.pdf")
plot5 <- ggplot(BcAt_LsWaviness, aes(pad3,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on pad3") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
plot5
dev.off()
##tga3 - dicot
# Scatter of lesion eccentricity on tga3 vs. LsMeans hyphal waviness,
# annotated with Spearman p and rho (Pearson values computed for reference).
rvaluePear <- summary(lm(HwaviEstimate~tga3, BcAt_LsWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(HwaviEstimate~tga3, BcAt_LsWaviness))$coefficients["tga3","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
PvalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$tga3, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BcAt_LsWaviness$HwaviEstimate,BcAt_LsWaviness$tga3, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/LsMeanScatters/tga3Scatter_Lesion_LsWaviness.pdf")
plot6 <- ggplot(BcAt_LsWaviness, aes(tga3,HwaviEstimate)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on tga3") + ylab("LsMeans Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
# BUG FIX: previously this printed plot5 (the pad3 plot) into the tga3 PDF;
# print the tga3 plot (plot6) that was just built.
plot6
dev.off()
##Multiple Plots in one
# Combined grid of the six Arabidopsis-genotype scatters built above.
library(cowplot)
gridPlot <- plot_grid(plot1, plot2, plot3, plot4, plot5, plot6, labels = c("A", "B", "C", "D", "E", "F"), scale = .75)
gridPlot
##################################################################
# scatter plot of Lsmeanshyphal waviness vs. Ecc per plant, by isolate
rm(list=ls())
library(ggplot2)
library(dplyr)
setwd("~/../Desktop/B. cinera/")
BcAt <- read.csv("Hwaviness/data/BcAtPhenos/BcAtPhenosGWAS_lsmeans.fxmod1_organiz.csv")
HwaviBC <- read.csv("BR_JV_ManualHyphalDat_032817(edit).csv")
Hwavi <- HwaviBC
Isolate <- Hwavi %>%
group_by(Isolate) %>%
summarise(avg_pheno = mean(Phenotype, na.rm = TRUE),
min_pheno = min(Phenotype, na.rm = TRUE),
max_pheno = max(Phenotype, na.rm = TRUE),
sd_pheno = sd(Phenotype, na.rm = TRUE),
total = n())
BcAt_LsWaviness <- merge(BcAt, Isolate, by = "Isolate")
#Scatter Plot mean Interactions Lesion v Waviness
## anac088 - dicot
# NOTE(review): the labels/filename below say "anac088" but the data column
# used throughout is `anac055` -- confirm which genotype is intended.
# Pearson fit: R^2 and slope p-value (computed but only the Spearman values
# are shown on the plot).
rvaluePear <- summary(lm(avg_pheno~anac055, BcAt_LsWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(avg_pheno~anac055, BcAt_LsWaviness))$coefficients["anac055","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
# Spearman rank correlation; exact=FALSE because of ties in the ranks.
PvalueSpea <- cor.test(BcAt_LsWaviness$avg_pheno,BcAt_LsWaviness$anac055, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BcAt_LsWaviness$avg_pheno,BcAt_LsWaviness$anac055, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/anac088Scatter_Lesion_LsWaviness.pdf")
# Scatter with linear trend; p and rho annotated in the bottom-right corner.
plot1 <- ggplot(BcAt_LsWaviness, aes(anac055,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) + #geom_text(label = (PvalueSpea), parse = TRUE) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on anac088") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
plot1
dev.off()
##coi1 - dicot
## Pearson fit (R^2 and slope p-value); fit the model once and reuse it.
fit_coi1 <- summary(lm(avg_pheno~coi1, BcAt_LsWaviness))
rvaluePear <- format(round(fit_coi1$r.squared, 5), nsmall = 4)
pvaluePear <- format(round(fit_coi1$coefficients["coi1","Pr(>|t|)"], 5), nsmall = 4)
## Spearman rank correlation (shown on the plot); run the test once.
spea_coi1 <- cor.test(BcAt_LsWaviness$avg_pheno, BcAt_LsWaviness$coi1, method="spearman", exact=FALSE)
PvalueSpea <- format(round(spea_coi1$p.value, 5), nsmall = 4)
rhovalueSpea <- format(round(spea_coi1$estimate, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/coi1Scatter_Lesion_LsWaviness.pdf")
## FIX: dropped the stray `geom_text(x = 2, y = 6, ...)` layer that duplicated
## the p-value annotation at fixed coordinates; the other genotype sections do
## not have it and it can distort the axis limits.
plot2 <- ggplot(BcAt_LsWaviness, aes(coi1,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on coi1") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
print(plot2)  # explicit print so the plot renders when the script is source()d
dev.off()
##col0 - dicot
## Pearson fit (R^2 and slope p-value); fit the model once and reuse it.
fit_col0 <- summary(lm(avg_pheno~col0, BcAt_LsWaviness))
rvaluePear <- format(round(fit_col0$r.squared, 5), nsmall = 4)
pvaluePear <- format(round(fit_col0$coefficients["col0","Pr(>|t|)"], 5), nsmall = 4)
## Spearman rank correlation (shown on the plot); run the test once.
spea_col0 <- cor.test(BcAt_LsWaviness$avg_pheno, BcAt_LsWaviness$col0, method="spearman", exact=FALSE)
PvalueSpea <- format(round(spea_col0$p.value, 5), nsmall = 4)
rhovalueSpea <- format(round(spea_col0$estimate, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/col0Scatter_Lesion_LsWaviness.pdf")
## FIX: dropped the stray `geom_text(x = 2, y = 6, ...)` layer that duplicated
## the p-value annotation at fixed coordinates; the other genotype sections do
## not have it and it can distort the axis limits.
plot3 <- ggplot(BcAt_LsWaviness, aes(col0,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on col0") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
print(plot3)  # explicit print so the plot renders when the script is source()d
dev.off()
##npr1
## Pearson fit (R^2 and slope p-value); fit the model once and reuse it.
fit_npr1 <- summary(lm(avg_pheno~npr1, BcAt_LsWaviness))
rvaluePear <- format(round(fit_npr1$r.squared, 5), nsmall = 4)
pvaluePear <- format(round(fit_npr1$coefficients["npr1","Pr(>|t|)"], 5), nsmall = 4)
## Spearman rank correlation (shown on the plot); run the test once.
spea_npr1 <- cor.test(BcAt_LsWaviness$avg_pheno, BcAt_LsWaviness$npr1, method="spearman", exact=FALSE)
PvalueSpea <- format(round(spea_npr1$p.value, 5), nsmall = 4)
rhovalueSpea <- format(round(spea_npr1$estimate, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/npr1Scatter_Lesion_LsWaviness.pdf")
## FIX: dropped the stray `geom_text(x = 2, y = 6, ...)` layer that duplicated
## the p-value annotation at fixed coordinates; the other genotype sections do
## not have it and it can distort the axis limits.
plot4 <- ggplot(BcAt_LsWaviness, aes(npr1,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on npr1") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
print(plot4)  # explicit print so the plot renders when the script is source()d
dev.off()
##pad3 - dicot
# Pearson fit: R^2 and slope p-value (computed but only the Spearman values
# are shown on the plot).
rvaluePear <- summary(lm(avg_pheno~pad3, BcAt_LsWaviness))$r.squared
rvaluePear <- format(round(rvaluePear, 5), nsmall = 4)
pvaluePear <- summary(lm(avg_pheno~pad3, BcAt_LsWaviness))$coefficients["pad3","Pr(>|t|)"]
pvaluePear <- format(round(pvaluePear, 5), nsmall = 4)
# Spearman rank correlation; exact=FALSE because of ties in the ranks.
PvalueSpea <- cor.test(BcAt_LsWaviness$avg_pheno,BcAt_LsWaviness$pad3, method="spearman", exact=FALSE)$p.value
PvalueSpea <- format(round(PvalueSpea, 5), nsmall = 4)
rhovalueSpea <- cor.test(BcAt_LsWaviness$avg_pheno,BcAt_LsWaviness$pad3, method="spearman", exact=FALSE)$estimate
rhovalueSpea <- format(round(rhovalueSpea, 5), nsmall = 4)
pdf("../HWaviness Parts/Completed/pad3Scatter_Lesion_LsWaviness.pdf")
# Scatter with linear trend; p and rho annotated in the bottom-right corner.
plot5 <- ggplot(BcAt_LsWaviness, aes(pad3,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on pad3") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
plot5
dev.off()
##tga3 - dicot
## Pearson fit (R^2 and slope p-value); fit the model once and reuse it.
fit_tga3 <- summary(lm(avg_pheno~tga3, BcAt_LsWaviness))
rvaluePear <- format(round(fit_tga3$r.squared, 5), nsmall = 4)
pvaluePear <- format(round(fit_tga3$coefficients["tga3","Pr(>|t|)"], 5), nsmall = 4)
## Spearman rank correlation (shown on the plot); run the test once.
spea_tga3 <- cor.test(BcAt_LsWaviness$avg_pheno, BcAt_LsWaviness$tga3, method="spearman", exact=FALSE)
PvalueSpea <- format(round(spea_tga3$p.value, 5), nsmall = 4)
rhovalueSpea <- format(round(spea_tga3$estimate, 5), nsmall = 4)
## NOTE(review): filename says "_Waviness" while the other sections use
## "_LsWaviness" -- kept as-is, confirm intended name.
pdf("../HWaviness Parts/Completed/tga3Scatter_Lesion_Waviness.pdf")
plot6 <- ggplot(BcAt_LsWaviness, aes(tga3,avg_pheno)) +
geom_point(color='red') + #geom_text(aes(label=Isolate), position = position_nudge(y = -0.1), size=3) +
annotate(geom = 'text', label = paste('p =', PvalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -1) +
annotate(geom = 'text', label = paste('rho = ', rhovalueSpea), x = Inf, y = -Inf, hjust = 1, vjust = -2.5) +
xlab("Lesion Eccentricity on tga3") + ylab("Average Hyphal Waviness") +
ggtitle(NULL) +
geom_smooth(method='lm')
## BUG FIX: the original printed plot5 here, so the tga3 PDF contained the
## pad3 figure. Explicit print() also renders correctly under source().
print(plot6)
dev.off()
##Multiple Plots in one
# Combine the six per-genotype scatter plots into one labelled panel figure.
library(cowplot)
gridPlot <- plot_grid(plot1, plot2, plot3, plot4, plot5, plot6, labels = c("A", "B", "C", "D", "E", "F"), scale = .75)
gridPlot
|
727494f463adfa777241d84aa7563df9f04bfb18
|
2099a2b0f63f250e09f7cd7350ca45d212e2d364
|
/ACL-Dataset/Summary_rnd/P14-2114.xhtml.A.R
|
08d95a8c3001c6b1c8f9d28c50c38a97d773c98e
|
[] |
no_license
|
Angela7126/SLNSumEval
|
3548301645264f9656b67dc807aec93b636778ef
|
b9e7157a735555861d2baf6c182e807e732a9dd6
|
refs/heads/master
| 2023-04-20T06:41:01.728968
| 2021-05-12T03:40:11
| 2021-05-12T03:40:11
| 366,429,744
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
P14-2114.xhtml.A.R
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:4, WORD_NUM:115">
</head>
<body bgcolor="white">
<a href="#0" id="0">Still, we observe that compared to the baseline approach, the performance of our proposed framework evaluated on the 4-tuple achieves nearly 17% improvement on precision.</a>
<a href="#1" id="1">We set the hyperparameters α=β=γ=η=λ=0.5 and run Gibbs sampler for 10,000 iterations and stop the iteration once the log-likelihood of the training data converges under the learned model.</a>
<a href="#2" id="2">A Simple Bayesian Modelling Approach to Event Extraction from Twitter.</a>
<a href="#3" id="3">In this model, we assume that each tweet message m∈{1..M} is assigned to one event instance e , while e is modeled as a joint distribution over the named entities y , the date/time d when the event occurred, the location l where the event occurred and the event-related keywords k .</a>
</body>
</html>
|
82d4fd852c6200636ca18e9efeb0e984e66c9417
|
094a4be67a12253c075fabff45e0148bed39246b
|
/6sense.r
|
a18aea6e12814eda60c5082c5268d9e6fb048f54
|
[] |
no_license
|
bamhand/gems
|
6abafbc0eb9ee01cbd5122530a44a232cda4a70b
|
016cc18b9ca71d4d59187de1e3543534d319e443
|
refs/heads/master
| 2021-01-20T19:39:56.566314
| 2017-07-28T15:17:52
| 2017-07-28T15:17:52
| 62,260,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,230
|
r
|
6sense.r
|
# NOTE(review): rm(list=ls()) and absolute Windows paths are script
# anti-patterns -- kept as-is to preserve behavior.
rm(list=ls())
library(dplyr)
library(magrittr)
library(tidyr)
library(doParallel)
# Raw event log: one row per (user, timestamp, action). The first three
# columns are unnamed in the file, so they are copied to named columns and
# the originals dropped.
train<-read.csv(file="c:/Users/f367573/Desktop/R/6sense/training.tsv", sep="")
train$id<-train[,1]
train$date<-train[,2]
train$action<-train[,3]
train<-train[,4:6]
# Wide per-user action counts: one column per action type.
train<-group_by(train, id, action) %>% summarize(count = n()) %>% spread(action, count)
train[is.na(train)]<-0
# Binary purchase label. Assigning "Purchase" into the integer vector
# coerces the whole column to character, so the subsequent == 0 comparison
# matches the coerced "0" entries -- it works, but relies on implicit coercion.
train$Purchase1<-as.integer(pmin(train$Purchase, 1))
train$Purchase1[train$Purchase1 == 1]<-"Purchase"
train$Purchase1[train$Purchase1 == 0]<-"NoPurchase"
train$Purchase1<-as.factor(train$Purchase1)
# Same preprocessing for the test set (no label columns are built).
test<-read.csv(file="c:/Users/f367573/Desktop/R/6sense/test.tsv", sep="")
test$id<-test[,1]
test$date<-test[,2]
test$action<-test[,3]
test<-test[,4:6]
test<-group_by(test, id, action) %>% summarize(count = n()) %>% spread(action, count)
test[is.na(test)]<-0
library(caret)
library(glmnet)
# 4-fold CV with class probabilities so twoClassSummary can compute ROC.
# NOTE(review): `repeats` is only meaningful for method="repeatedcv"; with
# method="cv" caret ignores it (newer versions warn). Also T/F should be
# spelled TRUE/FALSE.
gbm_ctrl <-trainControl(method="cv"
,number = 4
,repeats = 1
,classProbs= T
,allowParallel=T
,verboseIter = F
,summaryFunction = twoClassSummary)
# Tuning grid: depths 9..21 (step 3), 1000..1500 trees, fixed shrinkage.
gbm_tunegrid <- expand.grid(interaction.depth = (3:7)*3
,n.trees = (10:15)*100
,shrinkage = 0.01
,n.minobsinnode = 10)
vars<-c("EmailClickthrough", "EmailOpen", "FormSubmit", "PageView", "WebVisit")
registerDoParallel(3)
# Start the clock!
ptm <- proc.time()
# NOTE(review): a right-hand-side-only formula plus a separate `y` argument
# is an unusual way to call caret::train -- confirm this resolves to the
# intended predictors/response.
gbm_mdl <- train(as.formula(paste("~", paste(vars, collapse="+")))
,y = train$Purchase1
,data = train
,method = 'gbm'
,trControl = gbm_ctrl
,tuneGrid = gbm_tunegrid
,verbose = T
,metric = "ROC")
proc.time() - ptm
# Variable importance, sorted descending for inspection.
gbm_VI <- varImp(gbm_mdl,scale=F)$importance
gbm_VI$var <- row.names(gbm_VI)
gbm_VI <- gbm_VI[order(gbm_VI$Overall,decreasing=T),]
plot(gbm_mdl)
confusionMatrix(gbm_mdl)
# Class probabilities on the test set (used later for the ensemble rank).
test_predict <- predict(gbm_mdl, test, type = "prob")
library(nnet)
library(e1071)
library(Hmisc)
# 4-fold cross-validation for training.
# NOTE(review): repeats = 0 with method = 'repeatedcv' looks wrong --
# repeats should be >= 1, or method should be plain 'cv'. TODO confirm.
fitControl <- trainControl(
method = 'repeatedcv',
number = 4,
repeats = 0)
# Tuning grid: 2-10 hidden nodes with decay of 0.001, single hidden layer.
nngrid <- expand.grid(size=c(2:10), decay = 0.001)
trainnet = train(
x=train[,c(vars)],
y=train$Purchase1,
method = 'nnet',
verbose = F,
tuneGrid = nngrid,
trControl = fitControl)
plot(trainnet)
# Confusion matrix for the training data (test labels are not available).
table(predict(trainnet, train, type='raw'), train$Purchase1)
nnet_predict<-predict(trainnet, test, type='prob')
# Simple two-model ensemble: average the gbm and nnet purchase
# probabilities and rank test users by the average. `diff` is computed
# but never used afterwards.
diff<-nnet_predict$Purchase-test_predict$Purchase
avg<-(nnet_predict$Purchase+test_predict$Purchase)/2
test$rank<-avg
sorted<-test[order(-test$rank),]
# Export the 1000 highest-scoring users.
write.csv(sorted[1:1000,],file="6sense.csv")
confusionMatrix(trainnet)
# Pairwise interaction features between the raw action counts.
# NOTE(review): the FormSubmit*WebVisit and FormSubmit... (submit/visit)
# pair appears to be missing from this list -- confirm whether that is
# intentional.
train$clickopen<-train$EmailOpen*train$EmailClickthrough
train$clicksubmit<-train$EmailClickthrough*train$FormSubmit
train$clickview<-train$EmailClickthrough*train$PageView
train$clickvisit<-train$EmailClickthrough*train$WebVisit
train$opensubmit<-train$EmailOpen*train$FormSubmit
train$openview<-train$EmailOpen*train$PageView
train$openvisit<-train$EmailOpen*train$WebVisit
train$submitview<-train$FormSubmit*train$PageView
train$viewvisit<-train$PageView*train$WebVisit
# All predictors except identifiers and the label columns.
lvars<-names(train[,!names(train) %in% c("id","CustomerSupport","Purchase","Purchase1")])
# na.fail: stop rather than silently dropping rows with missing values.
options(na.action="na.fail")
x = model.matrix(as.formula(paste("~", paste(lvars, collapse="+"))), train)
set.seed(12345)
registerDoParallel(3)
# Start the clock!
ptm <- proc.time()
# Cross-validated lasso logistic regression (glmnet default alpha = 1).
lasso_lambda<-cv.glmnet(x=x,y=train$Purchase1,family="binomial", nfolds = 4, parallel = T)
proc.time() - ptm
plot(lasso_lambda)
lasso_lambda$lambda.min
lasso_lambda$lambda.1se
coef(lasso_lambda, s=lasso_lambda$lambda.min)
# In-sample class predictions at the 1-SE lambda; the two sums count
# correct and incorrect training predictions respectively.
predict(lasso_lambda, x, type = "class", s=lasso_lambda$lambda.1se)
sum(predict(lasso_lambda, x, type = "class", s=lasso_lambda$lambda.1se)==train$Purchase1)
sum(predict(lasso_lambda, x, type = "class", s=lasso_lambda$lambda.1se)!=train$Purchase1)
|
a98bc7367f4d02b46bc753cec6c839ae5f9204bd
|
62b4a877ce27a7a7a3b00ec565d5a5b86a1f8c0c
|
/asymmetry.R
|
1c1d72ba147c343374c73fbcabc913410711d548
|
[
"MIT"
] |
permissive
|
sdwfrost/exploratory-phylodynamics
|
4d18bdcc3dbadbcc53d4ca4c2ff7966d60e708c9
|
2720e48759406d0a427ac7e0fec9a2443b51fbe0
|
refs/heads/master
| 2021-01-24T16:09:30.167642
| 2018-08-15T14:17:51
| 2018-08-15T14:17:51
| 40,710,041
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,234
|
r
|
asymmetry.R
|
## Tree-imbalance analysis of the LASV chronogram: compare observed cherry
## counts and Sackin's index through time against a null distribution of
## simulated trees (treeImbalance::getSimTree).
library(ape)
library(magrittr)
library(treeImbalance)

lasv.chronos <- read.tree("LASV_chronos.nwk")
## Tip dates are encoded as the last "-"-separated field of each tip label.
lasv.tipdates <- strsplit(lasv.chronos$tip.label,"-",fixed=TRUE) %>% lapply(.,tail,1) %>% unlist %>% as.double

## Observed statistics through time.
lasv.cherries.obs <- ct(lasv.chronos)
lasv.sackins.obs <- snt(lasv.chronos)

## Null distribution: replicate() preallocates the list of simulated trees
## instead of growing it element-by-element in a for-loop.
nsims <- 1000
treelist <- replicate(nsims, getSimTree(lasv.chronos), simplify = FALSE)
lasv.cherries.sim <- lapply(treelist,ct)
lasv.sackins.sim <- lapply(treelist,snt)

## Hoist the loop-invariant most-recent tip date used to convert
## "time before present" into calendar years.
max.tipdate <- max(lasv.tipdates)

par(mfrow=c(1,2),pty="s")
plot(max.tipdate-lasv.cherries.obs[[1]],lasv.cherries.obs[[2]],col="red",xlab="Year",ylab="Cherries",las=1,ylim=c(0,30),type="n",main="Cherries")
for(i in seq_len(nsims)){
  lines(max.tipdate-lasv.cherries.sim[[i]][[1]],lasv.cherries.sim[[i]][[2]],type="s",col="gray")
}
lines(max.tipdate-lasv.cherries.obs[[1]],lasv.cherries.obs[[2]],type="s",col="red",lwd=2)
plot(max.tipdate-lasv.sackins.obs[[1]],lasv.sackins.obs[[2]],col="red",xlab="Year",ylab="Sackins",las=1,type="n",main="Sackin's index")
for(i in seq_len(nsims)){
  lines(max.tipdate-lasv.sackins.sim[[i]][[1]],lasv.sackins.sim[[i]][[2]],type="s",col="gray")
}
lines(max.tipdate-lasv.sackins.obs[[1]],lasv.sackins.obs[[2]],type="s",col="red",lwd=2)
|
2e2de578f904d2f59f65c26b9b010b384eea412c
|
8eb4b0e89610dc7fdcc68463bdc57e5adbf10849
|
/R/xclip.R
|
a236994ad82d29cd6149b1c2d5dc37da0b74e524
|
[] |
no_license
|
cmcouto-silva/snpsel
|
f0b1fa1675fb10e15329cae874441f241f3a5e15
|
e3898308f5b0b2ae071cefdc111f5334a0319cf7
|
refs/heads/master
| 2023-05-07T22:19:08.444440
| 2021-06-06T15:55:12
| 2021-06-06T15:55:12
| 127,948,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 457
|
r
|
xclip.R
|
#' @title Remove additional spaces/paragraphs from clipboard text
#' @description Reads the current clipboard contents, collapses runs of
#' whitespace within each line, joins all lines into a single block of text,
#' and writes the result back to the clipboard.
#'
#' @return Single block text in the clipboard.
#' @export
#'
#' @author Cainã Max Couto-Silva
xclip <- function() {
  cleaned <- tm::stripWhitespace(clipr::read_clip())
  clipr::write_clip(paste(cleaned, collapse = " "))
}
|
3655efc855f403f16274adc75e3842d69c40b31d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/memery/examples/meme.Rd.R
|
3808c1b267b1b51e8baaea686c4fdf51ec2f8f23
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,370
|
r
|
meme.Rd.R
|
# Extracted example script for memery::meme / memery::meme_gif.
library(memery)
### Name: meme
### Title: Generate a meme
### Aliases: meme meme meme_gif
### ** Examples
# Prepare data and make a graph
library(ggplot2)
x <- seq(0, 2*pi, length.out = 50)
panels <- rep(c("Plot A", "Plot B"), each = 50)
d <- data.frame(x = x, y = sin(x), grp = panels)
txt <- c("Philosoraptor's plots", "I like to make plots",
"Figure 1. (A) shows a plot and (B) shows another plot.")
# Two-panel sine-wave plot used as the inset graphic on the memes below.
p <- ggplot(d, aes(x, y)) + geom_line(colour = "cornflowerblue", size = 2) +
geom_point(colour = "orange", size = 4) + facet_wrap(~grp) +
labs(title = txt[1], subtitle = txt[2], caption = txt[3])
# meme image background and text labels
img <- system.file("philosoraptor.jpg", package = "memery")
lab <- c("Title meme text", "Subtitle text")
## Not run:
##D
##D # Not run due to file size
##D # basic meme
##D meme(img, lab[1:2], "meme_basic.jpg")
##D # data analyst's meme
##D meme(img, lab[1:2], "meme_data.jpg", size = 2, inset = p, mult = 2)
## End(Not run)
# data meme with additional content control
vp_bg <- list(fill = "#FF00FF50", col = "#FFFFFF80") # graph background
# arbitrary number of labels, placement, and other vectorized attributes
lab <- c(lab, "Middle plot text")
# Per-label width/height and x/y positions in normalized [0, 1] coordinates.
pos <- list(w = rep(0.9, 3), h = rep(0.3, 3), x = c(0.35, 0.65, 0.5),
y = c(0.95, 0.85, 0.3))
fam <- c("Impact", "serif", "Impact")
clrs1 <- c("black", "orange", "white")
clrs2 <- clrs1[c(2, 1, 1)]
meme(img, lab, "meme_data2.jpg", size = c(2, 1.5, 1), family = fam, col = clrs1,
shadow = clrs2, label_pos = pos, inset = p, inset_bg = vp_bg, mult = 2)
## Not run:
##D
##D # Not run due to file size and software requirements
##D # GIF meme. Requires Imagemagick and magick package. See details.
##D p <- ggplot(d, aes(x, y)) + geom_line(colour = "white", size = 2) +
##D   geom_point(colour = "orange", size = 1) + facet_wrap(~grp) +
##D   labs(title = "The wiggles", subtitle = "Plots for cats",
##D        caption = "Figure 1. Gimme sine waves.")
##D lab <- c("R plots for cats", "Sine wave sine wave sine wave sine wave...")
##D pos <- list(w = rep(0.9, 2), h = rep(0.3, 2), x = rep(0.5, 2), y = c(0.9, 0.75))
##D img <- "http://forgifs.com/gallery/d/228621-4/Cat-wiggles.gif"
##D meme_gif(img, lab, "meme_data3.gif", size = c(1.5, 0.75), label_pos = pos,
##D   inset = p, inset_bg = list(fill = "#00BFFF80"), mult = 1.5, fps = 20)
## End(Not run)
|
f26f795f561d515cc233d124b865bde20586fe64
|
ea651d3961b61d5692e2e87b085ec86905ccf21d
|
/plot3.R
|
810472b955d6f3fd5964791d5c4a34c72dbf6748
|
[] |
no_license
|
SMinkes/ExDataPA2
|
bd6b1cd346d8d55e4fa902d431d707fdc6f6fe40
|
1c92f3a898e6b2cd9fbd281f89e4d6b6bbabfb7c
|
refs/heads/master
| 2021-01-21T07:45:44.136918
| 2015-02-22T15:07:04
| 2015-02-22T15:07:04
| 31,166,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 834
|
r
|
plot3.R
|
## Load required packages, installing only the ones that are missing
## (the original unconditionally re-installed all three on every run).
pkgs <- c("plyr", "dplyr", "ggplot2")
missing_pkgs <- pkgs[!vapply(pkgs, requireNamespace, logical(1), quietly = TRUE)]
if (length(missing_pkgs) > 0) install.packages(missing_pkgs)
library(plyr)
library(dplyr)
library(ggplot2)
## Loading the data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Create the subset for Baltimore (fips code 24510)
Baltimore <- subset(NEI, fips == "24510")
## Emission totals per year and source type for Baltimore
TotalBaltSep <- ddply(Baltimore, .(year, type), numcolwise(sum))
# Open PNG device
png(filename="figures/plot3.png")
## Line plot of total PM2.5 by year, one panel per source type.
## labs() takes named arguments directly -- wrapping them in list() is a
## deprecated pattern. print() is required for ggplot objects to render
## on a device when the script is source()d.
print(
  ggplot(TotalBaltSep, aes(year, Emissions)) + geom_line() +
    facet_wrap(~ type, nrow = 2, ncol = 2) +
    scale_x_continuous(breaks = c(1999, 2002, 2005, 2008)) +
    labs(title = "Total PM2.5 emission for Baltimore City (MD) by type.",
         y = "Emission PM2.5 (tons)", x = "Year")
)
# Turn off PNG device
dev.off()
|
6c8383e8513d7461cffdccbb3272c63035b0871c
|
98b08ae1f7356558e8699ca8ee1d08fb1bec8dc5
|
/man-roxygen/all_form_types.R
|
363f59f13063d850e6a76ca931275c2d91e1a91b
|
[
"MIT"
] |
permissive
|
MrJoan/hubspot
|
edf4695a98f1d0b30fcf29a6abb3ddc1b00f502e
|
c816d3f7a709efdddcafce830c9b1ca0a4414900
|
refs/heads/master
| 2022-04-05T16:59:01.240250
| 2020-02-25T09:58:58
| 2020-02-25T09:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
r
|
all_form_types.R
|
#' @param all_form_types By default non-marketing forms are filtered out of this endpoint. To request all forms, use this parameter with the value of TRUE.
|
661342acf0486463be0149e445f462262731228e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/beautier/examples/get_site_model_names.Rd.R
|
a811e2f5d96877f354dcbb5aeb646f4297f4cd85
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 340
|
r
|
get_site_model_names.Rd.R
|
library(beautier)
### Name: get_site_model_names
### Title: Get the site models' names
### Aliases: get_site_model_names
### ** Examples
# Check that every expected substitution model is reported.
# (Local variable renamed from `names` to avoid masking base::names().)
site_model_names <- get_site_model_names()
testit::assert("JC69" %in% site_model_names)
testit::assert("HKY" %in% site_model_names)
testit::assert("TN93" %in% site_model_names)
testit::assert("GTR" %in% site_model_names)
|
697aecbdb14ae10727b380af19a6fe4a5427d54b
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/tsDyn/R/TVECM.sim.R
|
2af2102367da7d42610e421f9c92d0ded35163a5
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,217
|
r
|
TVECM.sim.R
|
#' @export
#' @rdname TVECM.sim
# Thin wrapper around TVECM.sim() that fixes nthresh = 0 (a linear VECM).
VECM.sim <- function(data,B,VECMobject, beta, n=200, lag=1,
type=c("simul","boot", "check"),
include = c("const", "trend","none", "both"),
starting=NULL, innov=rmnorm(n, varcov=varcov), varcov=diag(1,k),
show.parMat=FALSE, seed){
# `k` (number of variables) must be assigned here because the lazily
# evaluated default arguments varcov = diag(1, k) and innov (which uses
# varcov) look it up in this function's frame when TVECM.sim forces them.
k<- if(!missing(VECMobject)) VECMobject$k else if(!missing(B)) nrow(B) else if(!missing(data)) ncol(data)
TVECM.sim(data=data,B=B,TVECMobject=VECMobject, nthresh=0, beta=beta, n=n,
lag=lag, type=type, include = include,
starting=starting, innov=innov, varcov=varcov,
show.parMat=show.parMat, seed=seed)
}
#' @export
#' @rdname TVECM.sim
#' @param check When performing a bootstrap replication, check if taking original residuals (instead of resampled)
#' leads to the original data.
# Residual bootstrap of a fitted linear VECM. Optionally verifies first
# (via type = "check") that re-simulating with the original residuals
# reproduces the original data, warning if it does not.
VECM.boot <- function(VECMobject, show.parMat=FALSE, seed, check=TRUE){
# Exogenous regressors are not supported by the simulation machinery.
if(VECMobject$num_exogen!=0) stop("VECM.boot() does not work for VECM() with exogen variables")
if(check){
ch <- TVECM.sim(TVECMobject=VECMobject, type="check")
# Compare only the first k columns (the endogenous series) of the model data.
if(!isTRUE(all.equal(as.matrix(ch), as.matrix(VECMobject$model[,1:VECMobject$k]), check.attributes=FALSE)))
warning("Pseudo Bootstrap was not able to replicate original data, there might be an issue")
}
TVECM.sim(TVECMobject=VECMobject, type="boot", show.parMat=show.parMat, seed=seed)
}
#' @export
#' @rdname TVECM.sim
# Residual bootstrap of a fitted threshold VECM. Optionally verifies first
# (via type = "check") that re-simulating with the original residuals
# reproduces the original data, warning if it does not.
TVECM.boot <- function(TVECMobject, show.parMat=FALSE, seed, check=TRUE){
if(check){
ch <- TVECM.sim(TVECMobject=TVECMobject, type="check")
# Compare only the first k columns (the endogenous series) of the model data.
if(!isTRUE(all.equal(as.matrix(ch), as.matrix(TVECMobject$model[,1:TVECMobject$k]), check.attributes=FALSE)))
warning("Pseudo Bootstrap was not able to replicate original data, there might be an issue")
}
TVECM.sim(TVECMobject=TVECMobject, type="boot", show.parMat=show.parMat, seed=seed)
}
# Returns TRUE if the "check" simulation reproduces the original VECM data,
# FALSE otherwise (internal diagnostic for VECM.boot).
# NOTE(review): show.parMat, seed and check are accepted but never used here.
check.VECM.boot <- function(VECMobject, show.parMat=FALSE, seed, check=TRUE){
if(VECMobject$num_exogen!=0) stop("VECM.boot() does not work for VECM() with exogen variables")
ch <- TVECM.sim(TVECMobject=VECMobject, type="check")
res <- isTRUE(all.equal(as.matrix(ch), as.matrix(VECMobject$model[,1:VECMobject$k]), check.attributes=FALSE))
res
}
# Returns TRUE if the "check" simulation reproduces the original TVECM data,
# FALSE otherwise (internal diagnostic for TVECM.boot).
check.TVECM.boot <- function(TVECMobject){
ch <- TVECM.sim(TVECMobject=TVECMobject, type="check")
res <- isTRUE(all.equal(as.matrix(ch), as.matrix(TVECMobject$model[,1:TVECMobject$k]), check.attributes=FALSE))
res
}
# Convert a "ts" object to a plain matrix: strip the class, drop the
# time-series attribute ("tsp") and any row names so only raw values remain.
# (Originally implemented by Diethelm Wuertz.)
as.matrix.ts <- function(x, ...) {
  res <- as.matrix.default(unclass(x))
  attr(res, "tsp") <- NULL
  rownames(res) <- NULL  # column names are intentionally kept
  res
}
#'Simulation and bootstrap of bivariate VECM/TVECM
#'
#'Estimate or bootstraps a multivariate Threshold VAR
#'
#'This function offers the possibility to generate series following a
#'VECM/TVECM from two approaches: bootstrap or simulation. \code{VECM.sim} is
#'just a wrapper for \code{\link{TVECM.sim}}.
#'
#'When the argument \code{matrix} is given, on can only simulate a VECM
#'(\code{nthresh}=0) or TVECM (\code{nthresh}=1 or 2). One can have a
#'specification with constant (\code{"const"}), \code{"trend"}, \code{"both"}
#'or \code{"none"} (see argument \code{include}). Order for the parameters is
#'ECT/include/lags for VECM and ECT1/include1/lags1/ECT2/include2/lags2 for
#'TVECM. To be sure that once is using it correctly, setting \code{show.parMat
#'= TRUE} will show the matrix of parameters together with their values and
#'names.
#'
#'The argument \code{beta} is the cointegrating value on the right side of the
#'long-run relationship, and hence the function uses the vector (1,-beta). The
#'\code{innov} argument specifies the innovations. It should be given as a
#'matrix of dim nxk, (here \var{n} does not include the starting values!), by
#'default it uses a multivariate normal distribution, with covariance matrix
#'specified by \code{varcov}.
#'
#'The starting values (of dim lags x k) can be given through argument
#'\code{starting}. The user should take care for their choice, since it is not
#'sure that the simulated values will cross the threshold even once. Notice
#'that only one cointegrating value is allowed. User interested in simulating a
#'VECM with more cointegrating values should do use the VAR representation and
#'use \code{\link{TVAR.sim}}.
#'
#'The second possibility is to bootstrap series. This is done on a object
#'generated by \code{\link{TVECM}} (or \code{\link{VECM}}). A simple residual
#'bootstrap is done, or one can simulate a series with the same parameter
#'matrix and with normal distributed residuals (with variance pre-specified),
#'corresponding to Monte-carlo simulations.
#'
#'One can alternatively give only the series, and then the function will call
#'internally \code{\link{TVECM}}.
#'
#'@aliases TVECM.sim VECM.sim
#'@param data matrix of parameter to simulate
#'@param B Matrix of coefficients to simulate
#'@param TVECMobject,VECMobject Object computed by function \code{\link{TVECM}}
#'or linear \code{\link{VECM}}
#'@param nthresh number of threshold (see details)
#'@param Thresh The threshold value(s). Vector of length nthresh
#'@param beta The cointegrating value
#'@param n Number of observations to create when type="simul"
#'@param lag Number of lags to include in each regime
#'@param type Whether a bootstrap or simulation is to employ. See details
#'@param include Type of deterministic regressors to include. NOT WORKING
#'PROPERLY CURRENTLY if not const
#'@param starting Starting values when a simulation with given parameter matrix
#'is made
#'@param innov Innovations used for simulation. Should be matrix of dim nxk. By
#'default multivariate normal.
#'@param varcov Variance-covariance matrix for the innovations. By default
#'multivariate normal is used.
#'@param show.parMat Logical. Should the parameter matrix be shown? Useful to
#'understand how to give right input
#'@param seed Optional. Seed for the random number generation.
#'@return A matrix with the simulated/bootstraped series.
#'@author Matthieu Stigler
#'@seealso \code{\link{TVECM}} to estimate a TVECM, \code{\link{VAR.sim}} to
#'simulate/bootstrap a VAR.
#'@keywords ts
#'@export
#'@examples
#'
#'
#'###reproduce example in Enders (2004, 2 edition) p. 350,
#' # (similar example in Enders (2010, 3 edition) 301-302).
#'
#'if(require(mnormt)){
#'#see that the full "VAR" coefficient matrix is:
#' A <- matrix(c(-0.2, 0.2, 0.2, -0.2), byrow=TRUE, ncol=2)
#'
#'# but this is not the input of VECM.sim. You should decompose into the a and b matrix:
#' a<-matrix(c(-0.2, 0.2), ncol=1)
#' b<-matrix(c(1,-1), nrow=1)
#'
#'# so that:
#' a%*%b
#'
#'# The a matrix is the input under argument B, while the b matrix is under argument beta:
#' # (the other zeros in B are for the not-specified lags)
#' innov<-rmnorm(100, varcov=diag(2))
#' startVal <- matrix(0, nrow=2, ncol=1)
#' Bvecm <- rbind(c(-0.2, 0,0), c(0.2, 0,0))
#' vecm1 <- VECM.sim(B=Bvecm, beta=1,n=100, lag=1,include="none", innov=innov, starting=startVal)
#' ECT <- vecm1[,1]-vecm1[,2]
#'
#'#add an intercept as in panel B
#' Bvecm2 <- rbind(c(-0.2, 0.1,0,0), c(0.2,0.4, 0,0))
#' vecm2 <- VECM.sim(B=Bvecm2, n=100,beta=1, lag=1,include="const", innov=innov, starting=startVal)
#'
#' par(mfrow=c(2,1))
#' plot(vecm1[,1], type="l", main="Panel a: no drift or intercept", ylab="", xlab="")
#' lines(vecm1[,2], lty=2)
#' plot(vecm2[,1], type="l", main="Panel b: drift terms (0.1)", ylab="", xlab="")
#' lines(vecm2[,2], lty=2)
#'}
#'##Bootstrap a TVAR with 1 threshold (two regimes)
#'data(zeroyld)
#'dat<-zeroyld
#'TVECMobject<-TVECM(dat, nthresh=1, lag=1, ngridBeta=20, ngridTh=20, plot=FALSE)
#'TVECM.sim(TVECMobject=TVECMobject,type="boot")
#'
#'##Check the bootstrap
#' TVECM.sim.check <- TVECM.sim(TVECMobject=TVECMobject,type="check")
#' all(TVECM.sim.check==dat)
#'
TVECM.sim<-function(data,B,TVECMobject, nthresh=1, Thresh, beta, n=200, lag=1,
type=c("simul","boot", "check"),
include = c("const", "trend","none", "both"),
starting=NULL, innov=rmnorm(n, varcov=varcov), varcov=diag(1,k),
show.parMat=FALSE, seed){
if(!missing(data)&!missing(B))
stop("You have to provide either B or y, but not both")
p<-lag
type<-match.arg(type)
include<-match.arg(include)
isMissingB <- missing(B)
###check correct arguments
if(!nthresh%in%c(0,1,2))
stop("Arg nthresh should be either 0, 1 or 2")
if(!missing(n)&any(!missing(data), !missing(TVECMobject)))
stop("arg n should not be given with arg data or TVECMobject")
if(!missing(TVECMobject)&any(!missing(Thresh), !missing(nthresh), !missing(lag)))
warning("When object TVECMobject is given, only args 'type' and 'round' are relevant, others are not considered")
##include term
ninc<- switch(include, "none"=0, "const"=1, "trend"=1, "both"=2)
incVal<- switch(include, "none"=NULL, "const"="const", "trend"="trend", "both"=c("const","trend"))
### possibility 1: only parameters matrix is given
if(!missing(B)){
if(missing(beta))
stop("please provide arg beta (cointegrating value)")
if(type!="simul"){
type<-"simul"
warning("Type check or boot are only avalaible with pre specified data. The type simul was used")
}
nB<-nrow(B)
if(nB==1) stop("B matrix should at least have two rows for two variables\n")
ndig<-4
esp<-p*nB+1+ninc #number of lags +ecm
## naming of variables:
pa<-switch(as.character(nthresh), "0"="", "1"=c("_low", "_upr"),"2"=c("_low", "_mid","_upr"))
lags<-as.vector(outer("L{x", 1:nB, paste, sep=""))
lags2<-paste(rep(lags, times=p),"}{", rep(1:p,each=p),"}",sep="")
if(esp*(nthresh+1)!=ncol(B)){
colnames_Matrix_input<-as.vector(outer(c("ECT",incVal, lags2), pa, paste, sep=""))
cat("Matrix B badly specified: should have ", esp*(nthresh+1), "columns, but has", ncol(B), "\n")
print(matrix(NA, nrow=nB, ncol=esp*(nthresh+1), dimnames=list(paste("Equ x", 1:nB, sep=""), colnames_Matrix_input)))
stop()
}
rownames(B)<- paste("Equ x", 1:nB, ":",sep="")
y<-matrix(0,ncol=nB, nrow=n)
if(!is.null(starting)){
if(all(dim(as.matrix(starting))==c(nB,p)))
y[seq_len(p),]<-starting
else
stop("Bad specification of starting values. Should have nrow = lag and ncol = number of variables")
}
Bmat<-B
k <- ncol(y) #Number of variables
T <- nrow(y) #Size of start sample
if(is.vector(beta)){
if(length(beta)==k-1) beta <- c(1, -beta)
tBETA<-matrix(beta, nrow=1)
r <- 1
} else {
if(nrow(beta)!=k) stop("beta should have k rows and r cols")
r <- ncol(beta)
tBETA <- t(beta)
}
}
### possibility 2: only data is given: compute it with linear or selectSETAR
else if(!missing(data)){
if(nthresh==0){
TVECMobject<-lineVar(data, lag=p, include=include, model="VECM")
} else {
if(!missing(Thresh)){
if(nthresh==1) {
TVECMobject<-TVECM(data, lag=p, include=include, nthresh=nthresh, plot=FALSE, trace=FALSE, th1=list(exact=Thresh))
} else if(nthresh==2){
TVECMobject<-TVECM(data, lag=p, include=include, nthresh=nthresh, plot=FALSE, trace=FALSE, th1=list(exact=Thresh[1]),th2=list(exact=Thresh[2]))
}
} else {
TVECMobject<-TVECM(data, lag=p, include=include, nthresh=nthresh, plot=FALSE, trace=FALSE)
}
}
}
### possibility 3: setarobject is given by user (or by poss 2)
if(!missing(TVECMobject)){
k<-TVECMobject$k
T<-TVECMobject$T
p<-TVECMobject$lag
include<-TVECMobject$include
if(include %in% c("trend", "both"))
warning(paste("Accuracy of function (tested with arg type=check) is not good when arg include=",include," is given\n"))
modSpe<-TVECMobject$model.specific
LRinclude <- modSpe$LRinclude
nthresh <- modSpe$nthresh
if(nthresh>0 &&modSpe$model=="only_ECT") stop("TVECM.sim() does not work for 'common=only_ECT'")
if(LRinclude!="none") stop("TVECM.sim() does not work for 'LRinclude!='none'")
beta<- -modSpe$coint[2,1]
tBETA <- t(modSpe$coint)
r <- modSpe$r
res<-residuals(TVECMobject)
Bmat<-coefMat(TVECMobject)
y<-as.matrix(TVECMobject$model)[,1:k]
ndig<-getndp(y[,1])
if(nthresh>0){
Thresh<-modSpe$Thresh
nthresh<-modSpe$nthresh
}
}
t <- T-p-1 #Size of end sample
npar<-k*(p+ninc+1)
##### put coefficients vector in right form according to arg include (arg both need no modif)
if(include!="both"){
aa1 <- r+switch(include, "none"=1:2, "const"=2, "trend"=1, "both"=NULL)
aa <- sort(rep(aa1, each=nthresh+1)+ (0:nthresh)*(p*k+max(aa1)))
Bmat<-myInsertCol(Bmat, c=aa, 0)
}
nparBmat<-p*k+2+1
##############################
###Reconstitution boot/simul
##############################
#initial values
#initial values
Yb<-matrix(0, nrow=nrow(y), ncol=k)
Yb[1:(p+1),]<-y[1:(p+1),]
trend<-c(rep(NA, T-t),1:t)
#resampling/ simulation of residual/innovations
if(type=="simul"&&dim(innov)!=c(n,k))
stop(paste("input innov is not of right dim, should be matrix with", n,"rows and ", k, "cols\n"))
if(!missing(seed)) set.seed(seed)
resids<-switch(type, "boot"=res[sample(seq_len(t), replace=TRUE),], "simul"= innov, "check"=res)
resb<-rbind(matrix(0,nrow=p+1, ncol=k),resids)
if(nthresh==0){
for(i in (p+2):T){
ECT<-Bmat[,1:r]%*%tBETA%*%matrix(Yb[i-1,], ncol=1)
Yb[i,]<-rowSums(cbind(Yb[i-1,],Bmat[,r+1], Bmat[,r+2]*trend[i], ECT,Bmat[,-c(1:(r+2))]%*%matrix(t(Yb[i-c(1:p),]-Yb[i-c(2:(p+1)),]), ncol=1),resb[i,]))
}
} else if(nthresh==1){
BD<-Bmat[,seq_len(nparBmat)]
BU<-Bmat[,-seq_len(nparBmat)]
for(i in (p+2):(nrow(y))){
ECT<-tBETA%*%matrix(Yb[i-1,], ncol=1)
if(round(ECT,ndig)<=Thresh){
Yb[i,]<-rowSums(cbind(Yb[i-1,],BD[,1]%*%ECT, BD[,2], BD[,3]*trend[i],BD[,-c(1,2,3)]%*%matrix(t(Yb[i-c(1:p),]-Yb[i-c(2:(p+1)),]), ncol=1),resb[i,]))
} else{
Yb[i,]<-rowSums(cbind(Yb[i-1,],BU[,1]%*%ECT, BU[,2], BU[,3]*trend[i],BU[,-c(1,2,3)]%*%matrix(t(Yb[i-c(1:p),]-Yb[i-c(2:(p+1)),]), ncol=1),resb[i,]))
}
}
} else if(nthresh==2){
BD <- Bmat[,seq_len(nparBmat)]
BM <- Bmat[,seq_len(nparBmat)+nparBmat]
BU <- Bmat[,seq_len(nparBmat)+2*nparBmat]
for(i in (p+2):(nrow(y))){
ECT<-tBETA%*%matrix(Yb[i-1,], ncol=1)
if(round(ECT,ndig)<=Thresh[1]){
Yb[i,]<-rowSums(cbind(Yb[i-1,],BD[,1]%*%ECT,BD[,2], BD[,3]*trend[i], BD[,-c(1,2,3)]%*%matrix(t(Yb[i-c(1:p),]-Yb[i-c(2:(p+1)),]), ncol=1),resb[i,]))
} else if(round(ECT,ndig)>Thresh[2]) {
Yb[i,]<-rowSums(cbind(Yb[i-1,],BU[,1]%*%ECT,BU[,2], BU[,3]*trend[i],BU[,-c(1,2,3)]%*%matrix(t(Yb[i-c(1:p),]-Yb[i-c(2:(p+1)),]), ncol=1),resb[i,]))
} else{
Yb[i,]<-rowSums(cbind(Yb[i-1,],BM[,1]%*%ECT,BM[,2], BM[,3]*trend[i],BM[,-c(1,2,3)]%*%matrix(t(Yb[i-c(1:p),]-Yb[i-c(2:(p+1)),]), ncol=1),resb[i,]))
}
}
}
if(show.parMat){
if(!isMissingB){
colnames_Matrix_system<-as.vector(outer(c("ECT","Const", "Trend", lags2), pa, paste, sep=""))
colnames(Bmat)<- colnames_Matrix_system
} else if(include!="both"){
add <- switch(include, "const"="Trend", "trend"="Const", "none"=c("Const", "Trend"))
colnames(Bmat)[aa] <- rep(add, nthresh+1)
}
print(Bmat)
}
res<-round(Yb, ndig)
return(res)
}
# Developer-only demos / regression checks for TVECM.sim, wrapped in
# if(FALSE) so nothing here runs when the file is sourced.  Kept for
# manual testing against the tsDyn package.
if(FALSE){
library(tsDyn)
environment(TVECM.sim)<-environment(star)
##Simulation of a TVAR with 1 threshold
B<-rbind(c(-0.2, 0,0), c(0.2, 0,0))
a<-TVECM.sim(B=B, nthresh=0, beta=1, lag=1,include="none", starting=c(2,2))
# ECT: error-correction term implied by the cointegrating vector (1, -1)
ECT<-a[,1]-a[,2]
layout(matrix(1:2, ncol=1))
plot(a[,1], type="l", xlab="", ylab="", ylim=range(a, ECT))
lines(a[,2], col=2, type="l")
plot(ECT, type="l")
B<-rbind(c(0.2, 0.11928245, 1.00880447, -0.009974585, 0.3, -0.089316, 0.95425564, 0.02592617),c( -0.1, 0.25283578, 0.09182279, 0.914763741, 0.35,-0.0530613, 0.02248586, 0.94309347))
sim<-TVECM.sim(B=B,beta=1, nthresh=1,n=500, type="simul",Thresh=5, starting=c(5.2, 5.5))
#estimate the new serie
TVECM(sim, lag=1)
##Bootstrap a TVAR with two threshold (three regimes)
#data(zeroyld)
dat<-zeroyld
TVECMobject<-TVECM(dat, lag=1, nthresh=2, plot=FALSE, trace=FALSE, th1=list(exact=-1),th2=list(exact=1))
TVECMobject<-TVECM(dat, lag=1, nthresh=2)#, plot=FALSE, trace=FALSE, th1=list(exact=7),th2=list(exact=9))
TVECM.sim(data=dat,nthresh=2, type="boot", Thresh=c(7,9))
##Check the bootstrap
# type="check" should reproduce the input data exactly from a fitted
# object; every all(...) below is expected to print TRUE.
linObject<-lineVar(dat, lag=1, model="VECM")
all(TVECM.sim(TVECMobject=linObject,type="check")==dat)
all(TVECM.sim(TVECMobject=lineVar(dat, lag=1, model="VECM", include="none"),type="check")==dat)
#not working: (probably trend coefficients too small so digits errors)
all(TVECM.sim(TVECMobject=lineVar(dat, lag=1, model="VECM", include="trend"),type="check")==dat)
all(TVECM.sim(TVECMobject=lineVar(dat, lag=1, model="VECM", include="both"),type="check")==dat)
#nthresh=1
TVECMobject<-TVECM(dat, nthresh=1, lag=1, ngridBeta=20, ngridTh=20, plot=FALSE)
all(TVECM.sim(TVECMobject=TVECMobject,type="check")==dat)
all(TVECM.sim(TVECMobject=TVECM(dat, nthresh=1, lag=2, ngridBeta=20, ngridTh=20, plot=FALSE),type="check")==dat)
all(TVECM.sim(TVECMobject=TVECM(dat, nthresh=1, lag=1, ngridBeta=20, ngridTh=20, plot=FALSE, include="none"),type="check")==dat)
all(TVECM.sim(TVECMobject=TVECM(dat, nthresh=1, lag=2, ngridBeta=20, ngridTh=20, plot=FALSE, include="none"),type="check")==dat)
#nthresh=2
TVECMobject2<-TVECM(dat, nthresh=2, lag=1, ngridBeta=20, ngridTh=20, plot=FALSE)
all(TVECM.sim(TVECMobject=TVECMobject2,type="check")==dat)
all(TVECM.sim(TVECMobject=TVECM(dat, nthresh=2, lag=2, ngridBeta=20, ngridTh=20, plot=FALSE),type="check")==dat)
all(TVECM.sim(TVECMobject=TVECM(dat, nthresh=2, lag=1, ngridBeta=20, ngridTh=20, plot=FALSE, include="none"),type="check")==dat)
#famous rounding problem...
all(TVECM.sim(TVECMobject=TVECM(dat, nthresh=2, lag=2, ngridBeta=20, ngridTh=20, plot=FALSE, include="none"),type="check")==dat)
###TODO:
#improve trend/both case
#TVECM: possibility to give args!
TVECM(dat, nthresh=1, lag=2, ngridBeta=20, ngridTh=20, plot=FALSE, th1=list(exact=-1.4),include="none")
TVECM(dat, nthresh=1, lag=2, ngridBeta=20, ngridTh=20, plot=FALSE, th1=list(exact=-1.4),beta=list(exact=1),include="none")
TVECM(dat, nthresh=2, lag=2, ngridBeta=20, ngridTh=20, plot=FALSE, th1=list(exact=-1.4),th2=list(exact=0.5),include="none")
}
|
d4e36824e2f13bd113878598f064b8c06fb732ee
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/anominate/examples/densplot.anominate.Rd.R
|
ff41cec3abef21b0fdf839a2a92d6f72f1b1ccfa
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 303
|
r
|
densplot.anominate.Rd.R
|
# Auto-extracted example from the 'anominate' package help page: draws
# density plots for a fitted alpha-NOMINATE object.
library(anominate)
### Name: densplot.anominate
### Title: alpha-NOMINATE Density Plot Function
### Aliases: densplot.anominate
### Keywords: ideal point estimation, NOMINATE, Bayesian latent variable
### models
### ** Examples

# sen111: bundled roll-call data; sen111_anom: a pre-fitted anominate
# object (shipped with the package so the example runs instantly).
data(sen111)
data(sen111_anom)
densplot.anominate(sen111_anom)
0f61e930614fae525e5398bdf2ec1150e0a5b25d
|
c282d03b8dfdf943cf80c03b5ad2d50fb5a06509
|
/overview.R
|
c55f249282ee2eea37f2e64f5ea4b30e6f8c0477
|
[] |
no_license
|
sprocketsullivan/attention_confidence
|
14131ec10bae6e1a7a5753ada1435d829a27c543
|
c93174b6a5efa63a835062c9116587bd91fdce51
|
refs/heads/master
| 2021-05-02T01:03:58.993851
| 2017-04-04T14:24:08
| 2017-04-04T14:24:08
| 78,653,698
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 124
|
r
|
overview.R
|
#description of analysis files and execution
#read_in_data.R
#reads in data, cleans data, and produces the following plots:
#
|
3c89cad1cd404d8e1d99e337976195a4aa35b909
|
9964189e463815a446c2659f57b9c726408f8313
|
/script/week8_assignment.R
|
90b172cdf74843b77c3042808b3f4e76b8de8ff0
|
[] |
no_license
|
fish497-2018/Cohen-reef_fish
|
3845ee51dcb77ba875e02c5dd4a184d4a7329fd1
|
0aed17913596b149d419ec1552d4897469832d16
|
refs/heads/master
| 2020-03-17T11:53:41.401990
| 2018-05-31T17:24:17
| 2018-05-31T17:24:17
| 133,567,777
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,705
|
r
|
week8_assignment.R
|
# Week 8 assignment: exploratory analysis of Reef Life Survey fish counts.
#load the packages needed
library(dplyr)
library(ggplot2)
library(tidyr)
#read in the data
# NOTE(review): relative path assumes the working directory is the project
# root containing data/ -- confirm before running.
reef_fish <- read.csv('data/RLSreeffishdataset.csv')
View(reef_fish)
#start looking for a trend in data
ggplot(reef_fish, aes(Depth)) +
geom_histogram()
ggplot(reef_fish, aes(Diver, Family)) +
geom_point()
ggplot(reef_fish, aes(Diver, Total)) +
geom_point() +
facet_wrap("Family")
#by site
ggplot(reef_fish, aes(Total)) +
geom_histogram() +
facet_wrap("Site")
# Subset site CG10: all observations, plus only "large" counts (Total > 7).
CG10_site <- select(reef_fish, SiteCode, Diver, Family, Total) %>%
filter(SiteCode == "CG10")
CG10_site_large <- select(reef_fish, SiteCode, Diver, Family, Total) %>%
filter(SiteCode == "CG10") %>%
filter(Total > 7)
#create a graph with the new data set
ggplot(CG10_site, aes(Diver, Total, color = Diver)) +
geom_point() +
labs(x = "Diver", y = "Total_Fish_Count")
# Number of data entries per diver at CG10.
ggplot(CG10_site, aes(Diver, color = Diver)) +
geom_bar() +
labs(x = "Diver", y = "Entries")
ggplot(CG10_site_large, aes(Diver, Total)) +
geom_point() +
labs(x = "Diver", y = "Total_Fish_Count")
# Same comparisons faceted by fish family.
ggplot(CG10_site, aes(Diver, Total, color = Diver)) +
geom_point() +
facet_wrap("Family") +
labs(x = "Diver", y = "Total_Fish_Count", color = "Divers")
ggplot(CG10_site_large, aes(Diver, Total, color = Diver)) +
geom_point() +
facet_wrap("Family") +
labs(x = "Diver", y = "Total_Fish_Count", color = "Divers")
#Another site to see how it compares
CG11_site <- select(reef_fish, SiteCode, Diver, Family, Total) %>%
filter(SiteCode == "CG11")
ggplot(CG11_site, aes(Diver, Total, color = Diver)) +
geom_point() +
labs(x = "Diver", y = "Total_Fish_Count")
ggplot(CG11_site, aes(Diver, color = Diver)) +
geom_bar() +
labs(x = "Diver", y = "Entries")
CG11_site_large <- select(reef_fish, SiteCode, Diver, Family, Total) %>%
filter(SiteCode == "CG11") %>%
filter(Total > 7)
ggplot(CG11_site, aes(Diver, Total, color = Diver)) +
geom_point() +
facet_wrap("Family") +
labs(x = "Diver", y = "Total_Fish_Count", color = "Divers")
ggplot(CG11_site_large, aes(Diver, Total, color = Diver)) +
geom_point() +
facet_wrap("Family") +
labs(x = "Diver", y = "Total_Fish_Count", color = "Divers")
# NOTE(review): installing a package mid-script reruns on every execution
# and rmarkdown is not used below -- consider installing it once instead.
install.packages("rmarkdown")
# Flip the comparison: families on the x axis, faceted by diver.
ggplot(CG11_site, aes(Family, Total, color = Family)) +
geom_point() +
facet_wrap("Diver") +
labs(x = "Family", y = "Total_Fish_Count", color = "Family")
ggplot(CG10_site, aes(Family, Total, color = Family)) +
geom_point() +
facet_wrap("Diver") +
labs(x = "Family", y = "Total_Fish_Count", color = "Family")
#we will make a new dataframe with tidyr to show the data in a new way
# Keep the raw depth observations for the four focal families at site CG10.
# BUG FIX: the original pipeline ended with
#   mutate("avg_Depth") %>% group_by(Family) %>% summarise(avg_Depth, mean(Depth))
# which errors because `avg_Depth` is never a real column, and even if it
# ran it would drop the Depth column that the per-family summaries below
# (Scorp_depth, Poma_depth, ...) rely on.
new_data <- reef_fish %>%
  select(SiteCode, Family, Depth) %>%
  filter(SiteCode == "CG10") %>%
  filter(Family %in% c("Scorpididae", "Pomacentridae", "Plesiopidae", "Enoplosidae"))
# Per-family average depth at sites CG10 and CG11, one column per site.
# Same result as before: restrict to the four focal families, average
# Depth by Family x SiteCode, then spread the sites into columns.
target_families <- c("Scorpididae", "Pomacentridae", "Plesiopidae", "Enoplosidae")
new_data2 <- reef_fish %>%
  select(SiteCode, Family, Depth) %>%
  filter(Family %in% target_families) %>%
  group_by(Family, SiteCode) %>%
  summarise(avg_Depth = mean(Depth)) %>%
  spread(SiteCode, avg_Depth) %>%
  select(CG10, CG11)
# Per-family mean depth at CG10.
# NOTE(review): these summaries rely on new_data keeping a raw Depth
# column -- the new_data pipeline must not summarise Depth away.
Scorp_depth <- filter(new_data, Family == "Scorpididae") %>%
summarize(avg_depth = mean(Depth))
head(Scorp_depth)
Poma_depth <- filter(new_data, Family == "Pomacentridae") %>%
summarize(avg_depth = mean(Depth))
head(Poma_depth)
Plesi_depth <- filter(new_data, Family == "Plesiopidae") %>%
summarize(avg_depth = mean(Depth))
head(Plesi_depth)
Eno_depth <- filter(new_data, Family == "Enoplosidae") %>%
summarize(avg_depth = mean(Depth))
head(Eno_depth)
|
2bb13da0214fa1d7c1afcc4c8eaee8c1df4d7143
|
81b443dbe06b02c7ce9c36878a34d79c5863db7b
|
/chapitre-5.R
|
925dd2ae0c859e0214ee98b71edfeb1b9d7520d4
|
[] |
no_license
|
aejb22122/Analyse-de-donnees-these-paris-saclay
|
7367be49a56938a9fa5a2f3f3113744aa3a45d01
|
53e278ff71e5de0050cce269606162e65ac760a2
|
refs/heads/master
| 2020-12-02T16:28:50.690770
| 2020-01-14T22:25:23
| 2020-01-14T22:25:23
| 96,557,982
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,532
|
r
|
chapitre-5.R
|
# ---- Doctoral thesis of Annick Eudes JEAN-BAPTISTE ----
# Replication code for the computations of Chapter 5 - Empirical context
# ---- Preliminaries ----
# Adding the packages used in this analysis
# NOTE(review): install.packages() reruns on every execution; consider
# installing the packages once interactively instead.
install.packages("xlsx") # read Excel files
install.packages("ggplot2") # plotting
install.packages("calibrate") # add point labels to a scatter plot
install.packages("reshape2") # reshaping / melting data
install.packages("ggpubr") # ggpubr: 'ggplot2' Based Publication Ready Plots -
# stat() for the Pearson correlation in the plot
# Loading the required packages :
library("xlsx")
library("ggplot2")
library(calibrate)
library(reshape2)
library(ggpubr)
# Turn off scientific notation in printed output
options(scipen=999)
# Clear the session of any other objects in the environment
# NOTE(review): remove(list = ls()) and a hard-coded setwd() make this
# script non-portable; kept because it is a personal analysis script.
remove(list = ls())
ls()
# setting the working directory
setwd("~/OneDrive/Documents/2_Data_analysis_research/GitHub/Analyse-de-donnees-these-paris-saclay/datasets")
# ---- Figure # 38 ----
# Figure 38. Urban/rural population transition of the typical case
library(readxl)
df <- read_excel("rural_urbain_long.xlsx",
col_types = c("numeric", "numeric", "numeric"))
View(df)
str(df)
# Line plot of both series against Date.  Legend labels stay in French
# because they are rendered inside the published figure.
ggplot(df, aes(df$Date)) +
geom_line(aes(y = df$Urban_population, color = "Population urbaine")) +
geom_line(aes(y = df$Rural_population, color = "Population rurale")) +
scale_color_discrete(name = "Couleur") +
xlab("Années") +
ylab("En million d'habitants")
# ---- Figure # 47 ----
# Figure 47. Evolution of the revenues of several municipalities and the
# combined impact of the election and of the 2015 finance-law measures
df <- read_excel("Revenus_fiscaux_2011_2016.xlsx")
# BUG FIX: the original called View(Df) before View(df); R is
# case-sensitive and the object is named df, so that line failed with
# "object 'Df' not found".
View(df)
ls()
# One line per municipality, revenues (USD) against Date.
ggplot(df, aes(df$Date)) +
  geom_line(aes(y = df$Ouanaminthe, color = "Ouanaminthe")) +
  geom_line(aes(y = df$Caracol, color = "Caracol")) +
  geom_line(aes(y = df$Acul_du_Nord, color = "Acul du Nord")) +
  geom_line(aes(y = df$Carrefour, color = "Carrefour")) +
  # BUG FIX: legend label was misspelled "Limonage" for the Limonade series.
  geom_line(aes(y = df$Limonade, color = "Limonade")) +
  geom_line(aes(y = df$Cape_Haitian, color = "Cap-Haitien")) +
  geom_line(aes(y = df$Kenscoff, color = "Kenscoff")) +
  geom_line(aes(y = df$Delmas, color = "Delmas")) +
  geom_line(aes(y = df$Saint_Marc, color = "Saint-Marc")) +
  xlab("Années") +
  ylab("En USD")
# ---- Figure # 50 ----
# Figure 50. Map of mixed-financing local development projects recorded
# from 2013 to 2016
# Spatial analysis of the geo-referenced local development (LED) projects
# in the communes of the typical LDC under study.
# ------------------ Packages ----
# First we need to install some required packages :
# NOTE(review): installs rerun on every execution; install once instead.
install.packages("ggmap")
install.packages("mapproj")
# ------------------ Importing the geo coded data ----
library(readxl)
geodata <- read_excel("geodataled.xlsx")
View(geodata)
str(geodata)
# Loading the ggplot package and ggmap, if not already done
library(ggplot2)
library(ggmap)
# NOTE(review): attach() puts geodata's columns on the search path; the
# plots below still reference geodata$... explicitly, so this is
# redundant and a known source of masking bugs.
attach(geodata)
# ------------------ Plotting the data ----
ggplot(geodata, aes(x= geodata$Longitude, geodata$Latitude)) + geom_point()
# To reduce overplotting, add position_jitter() inside geom_point().
ggplot(geodata, aes(x= geodata$Longitude, geodata$Latitude)) + geom_point(position = position_jitter(w = 0.3, h = 0.3)) + xlab("Longitude") + ylab("Latitude")
# Same plot with blue points (colour = 'blue', size = 2) in the
# geom_point() arguments
ggplot(geodata, aes(x= geodata$Longitude, geodata$Latitude)) + geom_point(position = position_jitter(w = 0.3, h = 0.3), colour = 'blue', size = 2) + xlab("Longitude") + ylab("Latitude")
# ------------------ Maps ----
# Coordinates of Haiti (centred on Hinche), stored as named vectors:
haiti <- c(lon = -72.01667, lat = 19.15)
cap_haitien <- c(lon = -72.206768, lat = 19.737036)
# Haiti - centred on Hinche:
# Plot map at zoom level 5 (too far out: shows the whole Caribbean basin)
map_ht5 <- get_map(location = haiti, zoom = 5, scale = 1)
ggmap(map_ht5)
# plot map at zoom level 9
map_ht9 <- get_map(location = haiti, zoom = 9, scale = 1)
ggmap(map_ht9)
# Haiti - centred on Cap-Haitien:
map_cap <- get_map(location = cap_haitien, zoom = 9, scale = 1)
ggmap(map_cap)
# We can change the map type by adding : (maptype = "satellite") in the arguments
map_ht99 <- get_map(location = haiti, zoom = 9, scale = 1, maptype = "satellite")
ggmap(map_ht99)
# ------------ Map centred on Hinche ----
# Add the project points to the map (the normal, non-satellite version).
# Keep the color argument inside aes() within geom_point() to get the
# gradient legend.
ggmap(map_ht9) + geom_point(aes(geodata$Longitude, geodata$Latitude), data = geodata)
ggmap(map_ht9) + geom_point(aes(geodata$Longitude, geodata$Latitude), data = geodata, colour = "red", alpha = 0.1, size = 5)
ggmap(map_ht9) + geom_point(aes(geodata$Longitude, geodata$Latitude), data = geodata, colour = "red", alpha = 0.3, size = 7)
# Satellite version of the same plots.
ggmap(map_ht99) + geom_point(aes(geodata$Longitude, geodata$Latitude), data = geodata, color = geodata$Budget)
ggmap(map_ht99) + geom_point(aes(geodata$Longitude, geodata$Latitude), data = geodata, colour = "red", alpha = 0.1, size = 7)
ggmap(map_ht99) + geom_point(aes(geodata$Longitude, geodata$Latitude), data = geodata, colour = "red", alpha = 0.3, size = 7)
ggmap(map_ht99) + geom_point(aes(geodata$Longitude, geodata$Latitude), data = geodata, colour = "red", alpha = 0.1, size = 7) + scale_fill_gradient(low = "blue", high = "red")
# ------------ Adding the budgets to the points ----
# Centred on Hinche -- this is the preferred version:
ggmap(map_ht9) + geom_point(aes(geodata$Longitude, geodata$Latitude,
color = geodata$Budget),
data = geodata,
alpha = 0.5,
size = 8)
# Map with the financing amounts:
# colour the points by fiscal revenues (Revenus_t3).
ggmap(map_ht9) +
geom_point(aes(geodata$Longitude,
geodata$Latitude,
color = geodata$Revenus_t3),
data = geodata,
alpha = 0.3, size = 8)
# Centred on Cap-Haitien:
ggmap(map_cap) +
geom_point(aes(geodata$Longitude,
geodata$Latitude,
color = geodata$Budget),
data = geodata,
alpha = 0.6, size = 6)
# Adding investment budget to the map
ggmap(map_ht9) +
geom_point(aes(geodata$Longitude,
geodata$Latitude,
color = geodata$Budget),
data = geodata,
alpha = 0.3, size = 8)
# ---- Different types of maps ----
# https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/ggmap/ggmapCheatsheet.pdf
# Step 1: choose a maptype with get_map(); step 2: plot it with ggmap().
# Each assignment below overwrites map_ht, so only the last one executed
# is used by the plots that follow.
### 1e. Adding the maptypes --> get_map :
# a) maptype = toner version - black and white):
map_ht <- get_map(haiti, zoom = 9, source = "stamen", maptype = "toner") # somewhat black and white
# b) maptype = stamen: watercolor ):
map_ht <- get_map(haiti, zoom = 9, source = "stamen", maptype = "watercolor")
# scale_color_discrete(name = "Budget d'investissement")
# c) maptype = stamen: terrain from stamen):
map_ht <- get_map(haiti, zoom = 9, source = "stamen", maptype = "terrain")
# d) maptype = stamen: terrain from google):
map_ht <- get_map(haiti, zoom = 9, source = "google", maptype = "terrain")
# e) maptype = roadmap - from google):
map_ht <- get_map(haiti, zoom = 9, source = "google", maptype = "roadmap")
# f) maptype = google: hybrid):
map_ht <- get_map(haiti, zoom = 9, source = "google", maptype = "hybrid")
### 2e. Plotting the get_map in the ggmap()
ggmap(map_ht) +
geom_point(aes(geodata$Longitude, geodata$Latitude, color = geodata$Budget),
data = geodata, alpha = 0.5, size = 10) +
xlab("Longitude") +
ylab("Latitude")
# Map with the financing amounts on whichever map type is in "map_ht",
# faceted by city.
ggmap(map_ht) +
geom_point(aes(geodata$Longitude, geodata$Latitude, color = geodata$Budget),
data = geodata, alpha = 0.5, size = 10) +
facet_wrap(~ City) +
xlab("Longitude") +
ylab("Latitude")
# -------------------- Other types of map -----
# A quick alternative :
qmplot(Longitude, Latitude, data = geodata, geom = "point", color = Budget) + facet_wrap(~ City)
# Heat map (2D density of project locations):
ggmap(map_ht9, extent = "device") + geom_density2d(data = geodata, aes(x = geodata$Longitude, y = geodata$Latitude), size = 0.5) + stat_density2d(data = geodata, aes(x = geodata$Longitude, y = geodata$Latitude, fill = ..level.., alpha = ..level..), size = 0.01, bins = 16, geom = "polygon") + scale_fill_gradient(low = "green", high = "red") + scale_alpha(range = c(0, 0.3), guide = FALSE)
|
307ceb376a51917c0c5ffe8cf408602c80ffef7a
|
b59cc783d2da2f32737432c1b13cf72c5802f067
|
/man/inla.mesh.2d.Rd
|
376b8036995478294c354cf3c93f5281e04b2281
|
[] |
no_license
|
jdsimkin04/shinyinla
|
9a16007b375975a3f96b6ca29a1284aa6cafb180
|
e58da27a2a090557058b2a5ee63717b116216bf7
|
refs/heads/master
| 2023-06-05T08:34:34.423593
| 2021-06-24T00:27:04
| 2021-06-24T00:27:04
| 330,322,338
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,414
|
rd
|
inla.mesh.2d.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mesh.R
\name{inla.mesh.2d}
\alias{inla.mesh.2d}
\title{High-quality triangulations}
\usage{
inla.mesh.2d(
loc = NULL,
loc.domain = NULL,
offset = NULL,
n = NULL,
boundary = NULL,
interior = NULL,
max.edge = NULL,
min.angle = NULL,
cutoff = 1e-12,
max.n.strict = NULL,
max.n = NULL,
plot.delay = NULL,
crs = NULL
)
}
\arguments{
\item{loc}{Matrix of point locations to be used as initial triangulation
nodes. Can alternatively be a \code{SpatialPoints} or
\code{SpatialPointsDataFrame} object.}
\item{loc.domain}{Matrix of point locations used to determine the domain
extent. Can alternatively be a \code{SpatialPoints} or
\code{SpatialPointsDataFrame} object.}
\item{offset}{The automatic extension distance. One or two values, for an
inner and an optional outer extension. If negative, interpreted as a factor
relative to the approximate data diameter (default=-0.10???)}
\item{n}{The number of initial nodes in the automatic extensions
(default=16)}
\item{boundary}{A list of one or two \code{\link[=inla.mesh.segment]{inla.mesh.segment()}} objects
describing domain boundaries.}
\item{interior}{An \code{\link[=inla.mesh.segment]{inla.mesh.segment()}} object describing desired
interior edges.}
\item{max.edge}{The largest allowed triangle edge length. One or two
values.}
\item{min.angle}{The smallest allowed triangle angle. One or two values.
(Default=21)}
\item{cutoff}{The minimum allowed distance between points. Points at most
this far apart are replaced by a single vertex prior to the mesh
refinement step.}
\item{max.n.strict}{The maximum number of vertices allowed, overriding
\code{min.angle} and \code{max.edge} (default=-1, meaning no limit). One or
two values, where the second value gives the number of additional vertices
allowed for the extension.}
\item{max.n}{The maximum number of vertices allowed, overriding
\code{max.edge} only (default=-1, meaning no limit). One or two values,
where the second value gives the number of additional vertices allowed for
the extension.}
\item{plot.delay}{On Linux (and Mac if appropriate X11 libraries are
installed), specifying a nonnegative numeric value activates a rudimentary
plotting system in the underlying \code{fmesher} program, showing the
triangulation algorithm at work, with waiting time factor \code{plot.delay}
between each step.
On all systems, specifying any negative value activates displaying the
result after each step of the multi-step domain extension algorithm.}
\item{crs}{An optional \code{CRS} or \code{inla.CRS} object}
}
\value{
An \code{inla.mesh} object.
}
\description{
Create a triangle mesh based on initial point locations, specified or
automatic boundaries, and mesh quality parameters.
}
\examples{
loc <- matrix(runif(10 * 2), 10, 2)
if (require("splancs")) {
boundary <- list(
inla.nonconvex.hull(loc, 0.1, 0.15),
inla.nonconvex.hull(loc, 0.2, 0.2)
)
offset <- NULL
} else {
boundary <- NULL
offset <- c(0.1, 0.2)
}
mesh <- inla.mesh.2d(loc, boundary = boundary, offset = offset, max.edge = c(0.05, 0.1))
plot(mesh)
}
\seealso{
\code{\link[=inla.mesh.create]{inla.mesh.create()}}, \code{\link[=inla.delaunay]{inla.delaunay()}},
\code{\link[=inla.nonconvex.hull]{inla.nonconvex.hull()}}
}
\author{
Finn Lindgren \email{finn.lindgren@gmail.com}
}
|
eecb660fb5e3741af481c9b68d2f9963bb384b6f
|
fff9ee52053ff5acd4d358add0793bf4ed6b2aba
|
/Exract_Names_From_Text_String_v2.R
|
8f7d7f0c01780f4e9704c82e02bc4c26e5c2de91
|
[] |
no_license
|
jfedgerton/Cleaning_CHAMP_Speakers
|
a6baabb378521c0cdd0d6e930b977cb2a36ff480
|
b57c3e376a0c49d2fff361027b5f20adaa90ba7b
|
refs/heads/master
| 2020-04-21T13:09:12.997216
| 2019-02-07T14:59:05
| 2019-02-07T14:59:05
| 169,588,961
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 22,133
|
r
|
Exract_Names_From_Text_String_v2.R
|
## Script: attribute speakers to transcript rows by searching show text
## for known names (CHAMP show transcripts, 2000-2010).
## Clear working directory
# NOTE(review): rm(list = ls()) wipes the user's session; kept because
# the script was written to run standalone.
rm(list = ls())
## Load R libraries
library('tidyverse')
library('foreach')
library('doParallel')
## Load R data
# Loads a data frame called `names` (speaker-name counts).  It shadows
# base::names as a variable, although names() calls still resolve to the
# base function.
load("CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text_Search.Rda")
str(names)
names <- names %>%
mutate(.,
Name = as.character(Name)) %>%
arrange(.,
-Count)
## Extract all the show files
# NOTE(review): pattern = ".Rda" is a regex where '.' matches any
# character; "\\.Rda$" would be stricter -- confirm before changing.
all_shows <- list.files(path = "CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text Search",
pattern = ".Rda",
all.files = FALSE,
full.names = FALSE,
recursive = FALSE,
ignore.case = FALSE,
include.dirs = FALSE,
no.. = FALSE)
## Create a frequency of names for text extraction: one table per show file.
# Preallocate and iterate with seq_along() so an empty all_shows does not
# iterate over c(1, 0) the way 1:length(all_shows) would.
frequency_list <- vector("list", length(all_shows))
for (i in seq_along(all_shows)) {
  # Each .Rda file is expected to load a data frame called `temp`.
  load(paste0("CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/", all_shows[i]))
  frequency_list[[i]] <- as.data.frame(table(temp$New_Speaker3))
  if (nrow(frequency_list[[i]]) > 0) {
    colnames(frequency_list[[i]]) <- c("Name", "Frequency")
  }
}
names <- do.call(rbind, frequency_list)
# Aggregate per-show counts into one total per speaker and keep only the
# frequently appearing speakers (>= 250 mentions).
names <- names %>%
  plyr::ddply(.,
              ~Name,
              summarize,
              Count = sum(Frequency, na.rm = TRUE)) %>%
  arrange(.,
          -Count) %>%
  filter(.,
         Count >= 250)
str(names)
names <- mutate(names,
                Name = as.character(Name))
# Append a comma to names lacking one so every entry splits as "LAST, FIRST".
# BUG FIX: the original assigned paste0(names$Name, ",") -- built from the
# FULL vector -- into only the comma-less subset, so R recycled the wrong
# values into those slots.  Subset both sides consistently instead.
no_comma <- !grepl(",", names$Name)
names$Name[no_comma] <- paste0(names$Name[no_comma], ",")
# Split "LAST, FIRST" pairs.  NOTE(review): relies on exactly one comma per
# name AND non-empty text after it (strsplit drops trailing empty strings,
# which would misalign the pairing) -- confirm against the data.
split_data <- unlist(strsplit(names$Name, ","))
names$lastnames <- trimws(split_data[seq(from = 1, to = length(split_data)-1, by = 2)])
names$firstnames <- trimws(split_data[seq(from = 2, to = length(split_data), by = 2)])
## Take out all first names that are just one letter (bare initials).
# Vectorised %in% replaces the original loop over LETTERS; %in% never
# yields NA, so this is also safe if firstnames already contains NAs.
names$firstnames[names$firstnames %in% LETTERS] <- NA
## Fix incorrect names
# Speakers whose first name could not be parsed are corrected by hand.
missing_names <- subset(names, is.na(firstnames))
not_missing_names <- subset(names, !is.na(firstnames))
# Manual corrections as a lookup table (old name -> corrected name).
# BUG FIX: the original mapped "KENNEDY, T" to "SMITH, GARY" -- an apparent
# copy/paste slip from the SMITH line above it; it now maps to
# "KENNEDY, TED", consistent with the other KENNEDY variants below.
name_fixes <- c(
  "MCCAIN, J" = "MCCAIN, JOHN",
  "LIEBERMAN, J" = "LIEBERMAN, JOHN",
  "BUSH, JEB" = "BUSH, JEB",
  "HOLMES, TJ," = "HOLMES, TJ",
  "CLINTON, R" = "CLINTON, ROGER",
  "SMITH, GARY B" = "SMITH, GARY",
  "KENNEDY, T" = "KENNEDY, TED",
  "KERRY, J" = "KERRY, JOHN",
  "CROSLIN, TIMMY" = "CROSLIN, MISTY",
  "CROSLIN, TIMMY MISTY" = "CROSLIN, MISTY",
  "EDWARDS, E" = "EDWARDS, ELIZABETH",
  "EDWARDS, J" = "EDWARDS, JOHN",
  "CYRUS, M" = "CYRUS, MILEY",
  "RAMSEY, P" = "RAMSEY, PATSY",
  "KENNEDY, ED" = "KENNEDY, TED",
  "KENNEDY, ED M" = "KENNEDY, TED",
  "KENNEDY, E" = "KENNEDY, TED",
  "MEIER, T" = "MEIER, TINA",
  "MARTIN, T" = "MARTIN, TIM",
  "MARTIN, TI" = "MARTIN, TIM",
  "AGOSTINO, D" = "DAGOSTINO, MARK",
  "DARBY, J" = "DARBY, JOE",
  "HEENE, R" = "HEENE, RICHARD",
  "WILLIAMS, E" = "WILLIAMS, ERIC",
  "RODHAM, H" = "CLINTON, HILLARY",
  "SULEMAN, N" = "SULEMAN, NADYA"
)
# Apply the table: exact matches only, order-independent (same effect as
# the original sequence of == assignments).
fix_idx <- match(missing_names$Name, names(name_fixes))
missing_names$Name[!is.na(fix_idx)] <- name_fixes[fix_idx[!is.na(fix_idx)]]
## Create a new frequency with corrected names
names <- rbind(missing_names,
               not_missing_names)
names <- names %>%
  plyr::ddply(.,
              ~Name,
              summarize,
              Count = sum(Count, na.rm = TRUE)) %>%
  arrange(.,
          -Count)
# Re-append commas to comma-less names before re-splitting.
# BUG FIX: the original assigned paste0(names$Name, ",") (built from the
# FULL vector) into the comma-less subset, recycling wrong values into
# those slots.  Subset both sides consistently instead.
no_comma <- !grepl(",", names$Name)
names$Name[no_comma] <- paste0(names$Name[no_comma], ",")
split_data <- unlist(strsplit(names$Name, ","))
names$lastnames <- trimws(split_data[seq(from = 1, to = length(split_data)-1, by = 2)])
names$firstnames <- trimws(split_data[seq(from = 2, to = length(split_data), by = 2)])
# Vectorised replacement of single-letter initials (was two loops over
# LETTERS); %in% never yields NA so this is NA-safe.
names$firstnames[names$firstnames %in% LETTERS] <- NA
names$lastnames[names$lastnames %in% LETTERS] <- NA
# Keep only speakers with a usable first name.
names <- names %>%
  filter(.,
         !is.na(firstnames))
## Separate the first and last names for text extraction purposes
split_data <- unlist(strsplit(names$Name, ","))
lastnames <- trimws(split_data[seq(from = 1, to = length(split_data)-1, by = 2)])
firstnames <- trimws(split_data[seq(from = 2, to = length(split_data), by = 2)])
# Extra on-air names not present in the frequency table, kept as parallel
# vectors (lastnames[i] pairs with firstnames[i]).  Built in one shot
# instead of the original's ten repeated c() appends per vector.
extra_last <- c("WEIR", "WEINER", "BARZ", "CASTRO", "SANDLER",
                "MARRIS", "DESCHAINE", "WINKLER", "DESCHAINE", "DOLGOFF")
extra_first <- c("BILL", "NANCY", "MIKE", "MARYSOL", "ADAM",
                 "JACQUELINE", "ROB", "KELLY", "MEGAN", "JOANNA")
lastnames <- c(lastnames, extra_last)
firstnames <- c(firstnames, extra_first)
# Honorifics/titles stripped from transcript text before name matching.
# Duplicates in this vector (e.g. PRESIDENT) are harmless: gsub with the
# same pattern twice is a no-op the second time.
remove_titles <- c("DR. ", "DOCTOR", "PRESIDENT", "PRES. ", "NOMINEE", "SEN. ", "SENATOR ", "MR. ", "MRS. ", "MS. ", "GOVERNOR", "SENATOR", "PRESIDENT", "GOV. ", "SECRETARY ", "SEC'Y ", "SEC. ", "CONGRESSMAN", "CONGRESSWOMAN", "SPEAKER", "REP. ", "REPRESENTATIVE")
# Skip the bookkeeping files; they are not show transcripts.
# (Logical subscript used directly instead of the original `drop == T`.)
keep <- !(all_shows %in% c("Names_and_show.Rda", "names_frequency.Rda", "Text_Search.Rda"))
all_shows <- all_shows[keep]
## Main extraction pass: for show files 8-17, fill in New_Speaker4 by
## searching transcript text for known first+last name pairs.
# NOTE(review): the 8:17 range is hard-coded (earlier files were
# presumably handled in a previous run -- confirm); `&` is used where
# scalar `&&` would be conventional, left as-is in this doc pass.
for (i in 8:17){
## Loop through all files
load(paste0("CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/", all_shows[i]))
temp$unique.observation <- as.character(temp$unique.observation)
## Change the text variable from factor to string
temp$Text <- toupper(as.character(temp$Text))
# Split rows into those with/without an identified speaker; only the
# missing ones go through text extraction.
missing_speaker <- filter(temp, is.na(New_Speaker3))
if (nrow(missing_speaker) != 0){
missing_speaker$New_Speaker4 <- NA
}
not_missing_speaker <- filter(temp, !(unique.observation %in% missing_speaker$unique.observation))
not_missing_speaker$New_Speaker4 <- not_missing_speaker$New_Speaker3
# Text_Extract flags whether the speaker came from text search (T) or was
# already known (F).
if (nrow(missing_speaker) != 0){
missing_speaker$Text_Extract <- T
}
if (nrow(not_missing_speaker) != 0){
not_missing_speaker$Text_Extract <- F
}
## Loop through the frequency of last names for each row
for (j in 1:length(lastnames)){
if (nrow(missing_speaker) != 0){
missing_speaker$last_name_flag <- rep(F, nrow(missing_speaker))
missing_speaker$first_name_flag <- rep(F, nrow(missing_speaker))
# Only the first 30 characters of each line are searched, after
# stripping honorifics/titles.
missing_speaker$temp_text <- substr(toupper(missing_speaker$Text), 1, 30)
for (q in 1:length(remove_titles)){
missing_speaker$temp_text <- gsub(remove_titles[q], "", missing_speaker$temp_text)
}
missing_speaker$last_name_flag <- grepl(lastnames[j], missing_speaker$temp_text)
missing_speaker$first_name_flag <- grepl(firstnames[j], missing_speaker$temp_text)
# Rows matching BOTH the first and the last name are attributed to
# that speaker.
temp_missing <- subset(missing_speaker,
last_name_flag == T &
first_name_flag == T)
temp_missing <- dplyr::select(temp_missing, -temp_text)
# Progress reporting at each decile of the name list.
if (j == round(length(lastnames)*0.1)){
print(paste0("10% done with ", all_shows[i]))
}
if (j == round(length(lastnames)*0.2)){
print(paste0("20% done with ", all_shows[i]))
}
if (j == round(length(lastnames)*0.3)){
print(paste0("30% done with ", all_shows[i]))
}
if (j == round(length(lastnames)*0.4)){
print(paste0("40% done with ", all_shows[i]))
}
if (j == round(length(lastnames)*0.5)){
print(paste0("50% done with ", all_shows[i]))
}
if (j == round(length(lastnames)*0.6)){
print(paste0("60% done with ", all_shows[i]))
}
if (j == round(length(lastnames)*0.7)){
print(paste0("70% done with ", all_shows[i]))
}
if (j == round(length(lastnames)*0.8)){
print(paste0("80% done with ", all_shows[i]))
}
if (j == round(length(lastnames)*0.9)){
print(paste0("90% done with ", all_shows[i]))
}
# Move newly attributed rows from the missing pool to the resolved
# pool so later names cannot re-match them.
if (nrow(temp_missing) != 0){
temp_missing$New_Speaker4 <- paste0(lastnames[j], ", ", firstnames[j])
not_missing_speaker <- temp_missing %>%
dplyr::select(.,
-first_name_flag,
-last_name_flag) %>%
rbind(.,
not_missing_speaker)
missing_speaker <- filter(missing_speaker,
!(unique.observation %in% not_missing_speaker$unique.observation))
}
}
# After the last name has been tried, reassemble `temp` from the
# still-missing and resolved rows before saving.
if (j == length(lastnames) & nrow(missing_speaker) != 0){
temp <- missing_speaker %>%
dplyr::select(.,
-first_name_flag,
-last_name_flag,
-temp_text) %>%
rbind(.,
not_missing_speaker)
}
if (j == length(lastnames) & nrow(missing_speaker) == 0){
temp <- not_missing_speaker
}
}
save(temp, file = paste0("CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text Search/", all_shows[i]))
print(paste0("Pct Files Processed ", round(i/length(all_shows), 3)))
}
## Manual clean-up of speaker names recovered by the text search: collapse
## spelling variants/typos to a canonical "LAST, FIRST" form and blank out
## values that are not real names.  The ~130 individual replacement
## statements are expressed as lookup tables; no mapping's output is another
## mapping's input, and no fix target appears in the junk list, so a single
## vectorized pass is equivalent to the original sequential assignments.
## (Identity mappings such as "BUSH, JEB" -> "BUSH, JEB" were dropped.)
speaker_fixes <- c(
  "MCCAIN, J"            = "MCCAIN, JOHN",
  "LIEBERMAN, J"         = "LIEBERMAN, JOHN",
  "HOLMES, TJ,"          = "HOLMES, TJ",
  "CLINTON, R"           = "CLINTON, ROGER",
  "SMITH, GARY B"        = "SMITH, GARY",
  ## BUG FIX: previously mapped to "SMITH, GARY" (copy-paste from the line
  ## above); all other KENNEDY variants map to "KENNEDY, TED".
  "KENNEDY, T"           = "KENNEDY, TED",
  "KERRY, J"             = "KERRY, JOHN",
  "CROSLIN, TIMMY"       = "CROSLIN, MISTY",
  "CROSLIN, TIMMY MISTY" = "CROSLIN, MISTY",
  "EDWARDS, E"           = "EDWARDS, ELIZABETH",
  "EDWARDS, J"           = "EDWARDS, JOHN",
  "CYRUS, M"             = "CYRUS, MILEY",
  "RAMSEY, P"            = "RAMSEY, PATSY",
  "KENNEDY, ED"          = "KENNEDY, TED",
  "KENNEDY, ED M"        = "KENNEDY, TED",
  "KENNEDY, E"           = "KENNEDY, TED",
  "MEIER, T"             = "MEIER, TINA",
  "MARTIN, T"            = "MARTIN, TIM",
  "MARTIN, TI"           = "MARTIN, TIM",
  "AGOSTINO, D"          = "DAGOSTINO, MARK",
  "DARBY, J"             = "DARBY, JOE",
  "HEENE, R"             = "HEENE, RICHARD",
  "WILLIAMS, E"          = "WILLIAMS, ERIC",
  "RODHAM, H"            = "CLINTON, HILLARY",
  "FREMD, VON"           = "VON FREMD, MIKE",
  "SULEMAN, N"           = "SULEMAN, NADYA",
  "ABC, JAKE TAPPER"     = "TAPPER, JACK",
  "ABRAHAM, LYNNE M"     = "ABRAHAM, LYNNE",
  "BLITZER, WOLFF"       = "BLITZER, WOLF",
  "BLITZTER, WOLF"       = "BLITZER, WOLF",
  "FBLITZER, WOL"        = "BLITZER, WOLF",
  "HOST, WOLF BLITZER"   = "BLITZER, WOLF",
  "KING, LARRY,"         = "KING, LARRY",
  "KING, LARY"           = "KING, LARRY",
  ## NOTE(review): target spelling "LLEWELLYLN" looks like a typo, but it is
  ## preserved from the original — confirm which spelling the data uses.
  "KING, LLEWELLYN"      = "KING, LLEWELLYLN",
  "OREILLY, B"           = "OREILLY, BILL",
  "OREILLY, BIILL"       = "OREILLY, BILL",
  "OREILLY, BILL,"       = "OREILLY, BILL",
  "SMITH, HARR"          = "SMITH, HARRY",
  "COOPER, ANDERCON"     = "COOPER, ANDERSON",
  "COOPER, ANDERON"      = "COOPER, ANDERSON",
  "COOPER, ANDERSOJN"    = "COOPER, ANDERSON",
  "COOPER, ANDERSON,"    = "COOPER, ANDERSON",
  "COOPER, ANDERSONS"    = "COOPER, ANDERSON",
  "DOBBS, LOUB"          = "DOBBS, LOU",
  "DOBBS, LOUD"          = "DOBBS, LOU",
  "MATHEWS, CHRIS"       = "MATTHEWS, CHRIS",
  "BECK, GLEN"           = "BECK, GLENN",
  "CHEN, JULE"           = "CHEN, JULIE",
  "CHEN, JULEI"          = "CHEN, JULIE",
  "CHEN, JULIE,"         = "CHEN, JULIE",
  "CHEN, JULIES"         = "CHEN, JULIE",
  "CHEN, JULLIE"         = "CHEN, JULIE",
  "SUSTEREN, VAM"        = "VAN SUSTEREN, GRETA",
  "SUSTEREN, VAN"        = "VAN SUSTEREN, GRETA",
  "SUSTEREN, VAN,"       = "VAN SUSTEREN, GRETA",
  "SUSTEREN, VANE"       = "VAN SUSTEREN, GRETA",
  "PHILLIPS, KYRAN"      = "PHILLIPS, KYRA",
  "GUMBEL, BYANT"        = "GUMBEL, BRYANT",
  "LEMON, DAN"           = "LEMON, DON",
  "LEMON, DOM"           = "LEMON, DON"
)
## Values that are not speaker names (leading-comma entries are presumably
## parse failures of the "LAST, FIRST" format); these are set to NA.
junk_speakers <- c(
  "CAROLINA PANTHERS CHEERLEADER", "DURHAM, NORTH CAROLINA",
  ", ANDREW", ", CARRIE", ", CHARLOTTE", ", JOHN", ", MARIO RICCIO",
  ", MEIR", ", NAMTHIP", ", STUDENT", ", SUDJADNAN", ", SUWAT", ", TODD",
  ", TONY FARRANTE", ", UNIDENTIFIED", ", WILLA MAE",
  "A, A US", "A, ADEL", "A, AI", "A, ANNA", "A, AOL", "A, AOLTV",
  "A, AYMAN", "A, BALDWIN", "A, BEGALA", "A, BJ", "A, CASEY", "A, CPL",
  "A, DONNA", "A, DS", "A, ERIKA", "A, FBI", "A, GD", "A, GHAZI",
  "A, HAFEZ", "A, HANCOCKS", "A, HUSSAIN", "A, ICO TO", "A, ID",
  "A, IMAM GIUMAA", "A, IN DEPTH", "A, JANELLE", "A, JEANNE", "A, JJ",
  "A, KAGAN", "A, KAREN", "A, KASICH", "A, KELSEY SMITH", "A, KOPPEL",
  "A, MAHMOUD", "A, ME", "A, MIKE", "A, MOWAFFAK", "A, MUNAF",
  "A, MUSHIR", "A, NASA", "A, NBC", "A, NOW JAN", "A, OK", "A, SARS",
  "A, SGT", "A, SPC", "A, US", "A, WILL"
)
for (i in seq_along(all_shows)){
  load(paste0("CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text Search/", all_shows[i]))
  ## Apply the canonical-name fixes in one vectorized pass.  NA entries are
  ## never matched by %in%, mirroring the original ==-based assignments.
  hit <- temp$New_Speaker4 %in% names(speaker_fixes)
  temp$New_Speaker4[hit] <- unname(speaker_fixes[temp$New_Speaker4[hit]])
  ## Blank out non-name values.
  temp$New_Speaker4[temp$New_Speaker4 %in% junk_speakers] <- NA
  save(temp, file = paste0("CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text Search/", all_shows[i]))
}
## Flag rows whose recovered speaker exactly matches a known "LAST, FIRST"
## name pair.  The original wrapped this in `for (i in 1:length(lastnames))`,
## re-using `i` and shadowing the show index; the loop body did not depend on
## the name index at all, so the computation is done once per show instead.
name_find <- paste0(lastnames, ", ", firstnames)
for (i in seq_along(all_shows)){
  load(paste0("CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text Search/", all_shows[i]))
  ## NOTE(review): this errors if `temp` lacks a `check` column — confirm
  ## every file in the Text Search folder carries one.
  temp <- dplyr::select(temp, -check)
  ## TRUE when the speaker is an exact known-name match (same result as the
  ## original FALSE-then-TRUE two-step assignment).
  temp$flag <- temp$New_Speaker4 %in% name_find
  correct_names <- temp %>%
    filter(.,
           flag == TRUE)
  incorrect_names <- temp %>%
    filter(.,
           !(unique.observation %in% correct_names$unique.observation))
  ## NOTE(review): correct_names / incorrect_names are overwritten on each
  ## iteration and never saved — confirm whether results should be stored.
}
## Aggregate recovered-speaker counts across shows and save two summary
## tables: speaker counts per show (names_and_show, min 50 mentions) and
## overall speaker frequencies (names_frequency).
name_list <- list()
for (i in 1:length(all_shows)){
  load(paste0("CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text Search/", all_shows[i]))
  if (all_shows[i] != "Text_Search.Rda"){
    ## NOTE(review): dplyr's n() inside plyr::ddply's summarize — this mix
    ## only works with dplyr attached; confirm it counts rows as intended.
    speakers <- temp %>%
      plyr::ddply(.,
                  ~New_Speaker4,
                  summarize,
                  Count = n())
    ## Show label = file name minus its last 9 characters (presumably a
    ## year tag plus the ".Rda" suffix — verify against the file names).
    speakers$Show <- substr(all_shows[i], 1, nchar(all_shows[i])-9)
    name_list[[i]] <- speakers
  }
}
## Skipped files leave NULL slots in name_list; do.call(rbind, ...) drops them.
all_names <- do.call(rbind, name_list)
## Per speaker-show totals keyed as "NAME@SHOW"; "@" is the separator, so it
## must not occur in a speaker name or show label.
names_and_show <- all_names %>%
  filter(.,
         !is.na(New_Speaker4)) %>%
  mutate(.,
         ID = paste0(New_Speaker4, "@", Show)) %>%
  plyr::ddply(.,
              ~ID,
              summarize,
              Count = sum(Count)) %>%
  filter(.,
         Count >= 50)
## Split "NAME@SHOW" back into two columns (odd positions = name, even = show).
split_data <- unlist(strsplit(names_and_show$ID, "@"))
names_and_show$Names <- trimws(split_data[seq(from = 1, to = length(split_data)-1, by = 2)])
names_and_show$show <- trimws(split_data[seq(from = 2, to = length(split_data), by = 2)])
names_and_show <- names_and_show %>%
  dplyr::select(.,
                -ID)
## Overall frequency of each recovered speaker across all shows.
names_frequency <- all_names %>%
  dplyr::select(.,
                -Show) %>%
  plyr::ddply(.,
              ~New_Speaker4,
              summarize,
              Count = sum(Count))
save(names_and_show, file = "CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text Search/Names_and_show.Rda")
save(names_frequency, file = "CHAMP-Net/Data/Show-Year CSVs/formatted_data/2000 to 2010/Updated/Text Search/names_frequency.Rda")
|
2ba8d5934e3747b61eac2725e089ddacbf67e2f0
|
0e279497c0cd75ff64aede481a49b3389104a403
|
/R/stoch_ord2.R
|
7acd7fc172208da871c6ee202df57b0e61a38ff9
|
[
"BSD-2-Clause"
] |
permissive
|
spertus/permuter
|
b1391c45cc33f7f1bf93cec0373ec631f53c6bc0
|
f2f03c65ff553bcda45efe07ba269615d2fa0249
|
refs/heads/master
| 2021-06-22T00:44:03.284030
| 2017-08-28T20:40:59
| 2017-08-28T20:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,707
|
r
|
stoch_ord2.R
|
#' Permutation test for stochastic ordering across K ordered groups
#'
#' For each of the K - 1 binary splits of the ordered group levels, computes
#' a partial statistic on the observed data and on B permutations, then
#' combines the per-split permutation p-values with Fisher's combining
#' function (both conversions via `t2p_old`, defined elsewhere).
#'
#' @param y Response vector (numeric, or integer category codes when
#'   `cat = 1`).
#' @param x Group labels; splits follow the order of `sort(unique(x))`.
#' @param z Unit identifiers, used only when `rep = TRUE` to permute whole
#'   repeated-measure units instead of individual observations.
#' @param alt Direction of the alternative: `-1` or `1`.  Only the first
#'   element is used, so the historical default `c(-1, 1)` resolves to `-1`.
#' @param B Number of permutations.
#' @param cat `0` for continuous responses (pooled-variance t-type
#'   statistic), `1` for categorical responses (cumulative-count statistic).
#' @param rep `TRUE` when observations are repeated measures grouped by `z`.
#' @param seed Random seed for the permutation draws (side effect: mutates
#'   the caller's RNG state).
#' @return Vector of combined p-values from `t2p_old`; element 1 corresponds
#'   to the observed data.  Returns `1` immediately when only one group.
stoch.ord2 <- function(y, x, z = NULL, alt = c(-1, 1), B = 1000, cat = 0, rep = FALSE,
    seed = 101) {
    ## BUG FIX: with the default, `alt` was a length-2 vector, so every
    ## scalar comparison `if (alt == -1)` below compared a vector — a
    ## warning historically and an error in R >= 4.2.  Use the first element.
    alt <- alt[1]
    K <- length(unique(x))
    g <- unique(sort(x))
    n <- table(x)
    if (K == 1) {
        return(1)
    }
    ## Statistic matrix: row 1 = observed data, rows 2..B+1 = permutations,
    ## one column per binary split.  (Renamed from `T`, which shadowed the
    ## base constant.)
    stat <- array(0, dim = c((B + 1), K - 1))
    for (j in 1:(K - 1)) {
        ID <- g[1:j]
        ID.not <- g[-c(1:j)]
        if (cat == 0) {
            ## Pooled-variance standardized difference of means between the
            ## lower (ID) and upper (ID.not) halves of the split.
            s <- (sum((y[x %in% ID] - mean(y[x %in% ID]))^2) + sum((y[x %in% ID.not] -
                mean(y[x %in% ID.not]))^2))/(sum(n) - 2)
            if (alt == -1) {
                stat[1, j] <- (mean(y[x %in% ID]) - mean(y[x %in% ID.not]))/sqrt(s)
            }
            if (alt == 1) {
                stat[1, j] <- (mean(y[x %in% ID.not]) - mean(y[x %in% ID]))/sqrt(s)
            }
        }
        if (cat == 1) {
            ## Cumulative category counts for the two pseudo-samples.
            label <- as.integer(names(table(y)[table(y) > 0]))  ## categories
            l <- length(label)
            N <- array(0, dim = c(l, 2))
            rownames(N) <- label
            y1 <- y[x %in% ID]
            y2 <- y[x %in% ID.not]
            for (i in seq_len(l)) {
                N[i, ] <- c(sum(y1 %in% label[i]), sum(y2 %in% label[i]))
            }
            N <- apply(N, 2, cumsum)
            if (alt == 1) {
                stat[1, j] <- sum(N[, 1]/(apply(N, 1, sum) * (sum(N) - apply(N, 1,
                  sum)))^0.5)
            }
            if (alt == -1) {
                stat[1, j] <- sum(N[, 2]/(apply(N, 1, sum) * (sum(N) - apply(N, 1,
                  sum)))^0.5)
            }
        }  ## end cat
    }  ## end j
    ## Permutation distribution.
    set.seed(seed)
    for (bb in 2:(B + 1)) {
        if (rep == FALSE) {
            y.perm <- sample(y)
        }
        if (rep == TRUE) {
            ## Permute whole units defined by `z` (assumed coded 1..n).
            ## NOTE(review): this overwrites `n` (previously table(x)), which
            ## changes the meaning of sum(n) in the statistic below —
            ## preserved from the original; verify this is intended.
            y.perm <- y
            n <- length(unique(z))
            n.star <- sample(n)
            for (i in 1:n) {
                y.perm[z == i] <- y[z == n.star[i]]
            }
        }
        for (j in 1:(K - 1)) {
            ID <- g[1:j]
            ID.not <- g[-c(1:j)]
            if (cat == 0) {
                s <- (sum((y.perm[x %in% ID] - mean(y.perm[x %in% ID]))^2) + sum((y.perm[x %in%
                  ID.not] - mean(y.perm[x %in% ID.not]))^2))/(sum(n) - 2)
                if (alt == -1) {
                  stat[bb, j] <- (mean(y.perm[x %in% ID]) - mean(y.perm[x %in% ID.not]))/sqrt(s)
                }
                if (alt == 1) {
                  stat[bb, j] <- (mean(y.perm[x %in% ID.not]) - mean(y.perm[x %in%
                    ID]))/sqrt(s)
                }
            }
            if (cat == 1) {
                label <- as.integer(names(table(y)[table(y) > 0]))  ## categories
                l <- length(label)
                N <- array(0, dim = c(l, 2))
                rownames(N) <- label
                y1 <- y.perm[x %in% ID]
                y2 <- y.perm[x %in% ID.not]
                for (i in seq_len(l)) {
                  N[i, ] <- c(sum(y1 %in% label[i]), sum(y2 %in% label[i]))
                }
                N <- apply(N, 2, cumsum)
                if (alt == 1) {
                  stat[bb, j] <- sum(N[, 1]/(apply(N, 1, sum) * (sum(N) - apply(N,
                    1, sum)))^0.5)
                }
                if (alt == -1) {
                  stat[bb, j] <- sum(N[, 2]/(apply(N, 1, sum) * (sum(N) - apply(N,
                    1, sum)))^0.5)
                }
            }  ## end cat
        }  ## end j
    }  ## end bb
    ## Per-split permutation p-values, Fisher combination, combined p-values.
    P <- t2p_old(stat)
    T1 <- apply(P, 1, function(x) {
        -2 * log(prod(x))
    })
    P1 <- t2p_old(T1)
    P1
}
|
8d035b81c90c7c431691c86cb9fe92143632180c
|
d5597a9ffb8565d723a2068c069228d28d39181c
|
/Plot3.R
|
ac8df610ce0e8202ef2949802ac59ca4bf7c67f2
|
[] |
no_license
|
darthvelez/ExData_Plotting1
|
11a485c0cf1c6645425cb8d8fb4d52c9ed472ca9
|
607ba31c37ccc44fe7b1c7e6e2a8436c9bdace9b
|
refs/heads/master
| 2020-12-24T14:18:09.984156
| 2016-08-28T12:50:46
| 2016-08-28T12:50:46
| 40,596,442
| 0
| 0
| null | 2015-08-12T11:12:12
| 2015-08-12T11:12:12
| null |
UTF-8
|
R
| false
| false
| 1,174
|
r
|
Plot3.R
|
#Download File#
# The archive is saved as "household_power_consumption.zip", so that is the
# file to test for.  BUG FIX: the original checked
# "exdata-data-household_power_consumption.zip", a name this script never
# creates, which forced a fresh download on every run.
if (!file.exists("household_power_consumption.zip")) {
  download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "household_power_consumption.zip")
  unzip("household_power_consumption.zip")
}
#Subsetting the Data#
# Keep only the two target days (dates are in d/m/Y form in the raw file).
hpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", stringsAsFactors = FALSE)
hpc_subset <- hpc[(hpc$Date == "1/2/2007") | (hpc$Date == "2/2/2007"), ]
# Coerce the three sub-metering columns to numeric (via character so the
# code is safe even if the columns arrive as factors).
Sub_metering_1 <- as.numeric(as.character(hpc_subset$Sub_metering_1))
Sub_metering_2 <- as.numeric(as.character(hpc_subset$Sub_metering_2))
Sub_metering_3 <- as.numeric(as.character(hpc_subset$Sub_metering_3))
# Combine Date + Time into POSIXlt timestamps for the x axis.
timeseries <- strptime(paste(hpc_subset$Date, hpc_subset$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# Draw the three sub-metering series into plot3.png (480x480).
png("plot3.png", width = 480, height = 480)
plot(timeseries, Sub_metering_1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(timeseries, Sub_metering_2, type = "l", col = "red")
lines(timeseries, Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
|
050c521dcb702be0708cb603af9d2b248adae156
|
d3d58e46ed61ee6d4bc61fa4c004100b7b1fd708
|
/R/query-db.R
|
4dc1d12069b166168b29f44fdf7f354aacc59572
|
[
"MIT"
] |
permissive
|
poissonconsulting/subfoldr2
|
b6abd83463710d9ce6ad02e3b77668a71db5c1a8
|
6766176b1ce575a87e59da849d51515c96818777
|
refs/heads/main
| 2023-07-19T19:08:52.225956
| 2023-07-07T01:01:00
| 2023-07-07T01:01:00
| 162,794,273
| 1
| 0
|
NOASSERTION
| 2023-07-07T01:01:02
| 2018-12-22T08:20:25
|
R
|
UTF-8
|
R
| false
| false
| 645
|
r
|
query-db.R
|
#' Query Existing Database
#'
#' Thin wrapper around [DBI::dbGetQuery()]: opens the subfoldr2-managed
#' database (which must already exist), runs the statement, and closes the
#' connection on exit.
#'
#' @inheritParams sbf_save_object
#' @inheritParams sbf_open_db
#' @param sql A string of the SQL statement to execute.
#' @return A data frame of the rows returned by the query (the value of
#'   `DBI::dbGetQuery()`), not a count of affected rows.
#' @family database functions
#' @export
sbf_query_db <- function(sql, db_name = sbf_get_db_name(),
                         sub = sbf_get_sub(),
                         main = sbf_get_main()) {
  chk_string(sql)
  # Reject empty statements (chk_gt's comparison value defaults to 0).
  chk_gt(nchar(sql))
  conn <- sbf_open_db(db_name, sub = sub, main = main, exists = TRUE)
  # Guarantee the connection is closed even if the query errors.
  on.exit(sbf_close_db(conn))
  DBI::dbGetQuery(conn, sql)
}
|
d9f70b7039d89ae614e207ebb37c7e416e8b1566
|
d283ddaddffd4938149732aee730db4d32ea4009
|
/TMB-survival/Survival&Maxstat.R
|
99fd2c6054aebb3050ec3e4268ba7fde7abc703c
|
[] |
no_license
|
kkang97/TMB-melanoma
|
f0bef605c3462b596354def1ad70fa025cd29444
|
62c6a0d5d96f09a403fd89c8c1acfca1b7dd1bc6
|
refs/heads/master
| 2022-11-30T18:40:21.863026
| 2020-08-06T12:15:26
| 2020-08-06T12:15:26
| 281,552,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,475
|
r
|
Survival&Maxstat.R
|
#install.packages('survival')
#source("https://bioconductor.org/biocLite.R")
#biocLite("qvalue")
# Survival analysis: find an optimal expression cut-point per gene/feature
# (survminer::surv_cutpoint), dichotomize into high/low, then run Cox and
# log-rank tests and plot KM curves for significant variables.
# NOTE(review): hard-coded Windows working directory — adjust per machine.
setwd("C:\\Users")
options(stringsAsFactors=FALSE)
library(survival)
library(survminer)
outTab=data.frame()
picDir="picture"
dir.create(picDir)
# NOTE(review): library(survival) is attached twice — harmless but redundant.
library(survival)
library(qvalue)
rt=read.table("ExpTimeImmune.txt",# input file name
              header=T,sep="\t",row.names=1,check.names=F)
# Convert follow-up time from days to years.
rt[,"futime"]=rt[,"futime"]/365
## Find the optimal cut-point for each variable and categorize
# Columns 1-2 are assumed to be futime/fustat; 3..ncol are the variables.
sur.cut <- surv_cutpoint(rt, time="futime", event="fustat",
                         variables=colnames(rt[,3:ncol(rt)])
)
summary(sur.cut)
# Legend labels: ">cutpoint" (high) and "<=cutpoint" (low) per variable.
labels <- paste(c(">", "<="),
                rep(summary(sur.cut)[,"cutpoint"], each=2), sep="")
# One cut-point diagnostic plot per variable, written as PDF.
for(i in colnames(rt[,3:ncol(rt)])){
  # outfile <- paste("picture/cox_cutoff_", i, ".jpg", sep="")
  # jpeg(outfile, width=20, height=20, units="cm", res=350)
  # print(plot(sur.cut, i, palette="npg"))
  # dev.off()
  outPdf=paste("picture/cox_cutoff", i,".pdf", sep="")
  pdf(file=outPdf, onefile=FALSE)
  print(plot(sur.cut, i, palette="npg"))
  dev.off()
}
# surv_cutpoint chokes on "-" in names; sanitize, rerun, then restore below.
oldnames <- colnames(rt)
colnames(rt) <- gsub("-", ".", colnames(rt))
sur.cut <- surv_cutpoint(rt, time="futime", event="fustat",
                         variables=colnames(rt[,3:ncol(rt)])
)
summary(sur.cut)
# Dump the full cut-point object to a text file for reference.
sink("cutoff.unreadable.txt")
sur.cut
sink()
#End
# Replace each variable with its "high"/"low" dichotomization.
rt <- surv_categorize(sur.cut, variables=colnames(rt[,3:ncol(rt)]))
head(rt)
colnames(rt) <- oldnames
outlst <- list()
for(i in colnames(rt[,3:ncol(rt)])){
  # Cox model of survival on the dichotomized variable.
  cox <- coxph(Surv(futime, fustat) ~ rt[, i], data=rt)
  coxSummary = summary(cox)
  coxP=coxSummary$coefficients[,"Pr(>|z|)"]
  # Median survival (with CI) separately for the high and low groups.
  rt1=rt[rt[, i] == "high",]
  rt2=rt[rt[, i] == "low",]
  n1=nrow(rt1)
  n2=nrow(rt2)
  surTab1=summary(survfit(Surv(futime, fustat) ~ 1, data = rt1))
  surTab2=summary(survfit(Surv(futime, fustat) ~ 1, data = rt2))
  medianTab1=surTab1$table
  medianTab2=surTab2$table
  # Log-rank test across the high/low groups (df = #groups - 1).
  model <- survdiff(Surv(futime, fustat) ~ rt[, i], data = rt)
  chisq <- model[["chisq"]]
  df <- length(model[["n"]]) - 1
  pvalue <- pchisq(chisq, df, lower.tail=FALSE)
  # Pick this variable's ">cut"/"<=cut" label pair built above.
  label <- labels[rep(colnames(rt[,3:ncol(rt)]), each=2) == i]
  outlst[[i]] <- cbind(
    data.frame(var=i, level=c("high", "low"), label=label),
    rbind(medianTab1, medianTab2), pvalue=pvalue)
  # NOTE(review): the log-rank test is computed a second time here with
  # df fixed at 1 (equivalent for two groups).
  diff=survdiff(Surv(futime, fustat) ~ rt[,i],data = rt)
  fit <- survfit(Surv(futime, fustat) ~ rt[,i], data = rt)
  pValue=1-pchisq(diff$chisq, df=1)
  outTab=rbind(outTab,cbind(gene=i,coxSummary$coefficients,coxSummary$conf.int,KM=pValue,
                            H_med=medianTab1["median"],H_0.95LCL=medianTab1["0.95LCL"],H_0.95UCL=medianTab1["0.95UCL"],
                            L_med=medianTab2["median"],L_0.95LCL=medianTab2["0.95LCL"],L_0.95UCL=medianTab2["0.95UCL"]))
  # Format the p-value: scientific when significant, rounded otherwise.
  pval=0
  if(pValue<0.05){
    pval=signif(pValue,4)
    pval=format(pval, scientific = TRUE)
  }else{
    pval=round(pValue,3)
  }
  # KM plot (TIFF + PDF) only for significant variables.
  if(pValue<0.05){
    fig <- ggsurvplot(fit, data=rt, conf.int=FALSE, pval=TRUE,risk.table=FALSE,
                      legend.title=i, legend.labs=label,
                      ggtheme=theme(
                        legend.key=element_rect(fill="white", colour=NA),
                        panel.border=element_rect(fill="transparent", colour="black"),
                        panel.background=element_rect(fill="white") #,
                        #panel.grid.major=element_line(colour="grey"),
                        #panel.grid.minor=element_line(colour="grey")
                      ))
    # Use the part of the name before any "|" as the gene symbol.
    geneName=unlist(strsplit(i,"\\|"))[1]
    tiffFile=paste(geneName,".survival.tiff",sep="")
    # NOTE(review): backslash path separator is Windows-only; file.path()
    # would be portable.
    outTiff=paste(picDir,tiffFile,sep="\\")
    tiff(file=outTiff,width = 15,height = 15,units ="cm",compression="lzw",bg="white",res=600)
    #plot(fit, col=c("blue","red"), xlab="Time (years)", ylab="Overall survival",
    #     main=paste(geneName,"(p=",pval, ")", sep=""),mark.time=T,ylim=c(0,1.1),
    #    lwd = 2, cex.main=1.3, cex.lab=1.2, cex.axis=1.2, font=1.2)
    #legend("topright", c(paste("Low expression"),
    #                     paste("High expression")),
    #       col=c("blue","red"), bty="n", lwd = 2, cex=0.8)
    print(fig)
    dev.off()
    outPdf=paste("picture/", geneName,".survival.pdf",sep="")
    pdf(file=outPdf, onefile=FALSE)
    print(fig)
    dev.off()
  }
}
# Write the per-variable median-survival summary.
outdf <- do.call(rbind, outlst)
write.csv(outdf, "surv_median.csv", row.names=FALSE, na="")
|
38b9397b1a4a3364767db68f4d01b0f275b577fb
|
5b1fda0d0baf10a436c0f63ceec471417c5fe417
|
/R/Model.R
|
925b1df5586e5c31d5a0c92f15d38006367b97cb
|
[] |
no_license
|
DMKM1517/WebOfScience1
|
43fb592fe7ed355f164822fa401c555ef817fadc
|
def6a141b78acf0173bd8f2707a0f6b91ddb3277
|
refs/heads/master
| 2021-01-21T04:35:16.835218
| 2016-08-01T21:57:11
| 2016-08-01T21:57:11
| 53,050,005
| 1
| 0
| null | 2016-03-31T21:33:18
| 2016-03-03T12:52:36
|
Mathematica
|
UTF-8
|
R
| false
| false
| 5,740
|
r
|
Model.R
|
library(RMySQL)
library(dplyr)
library(tm)
library(plyr)
library(stringdist)
library(stringi)
library(stringr)
library(phonics)
################## FUNCTIONS ##################
phonetics<- function(author){
  # Soundex code of the first alphabetic token of `author` (the surname in
  # "LASTNAME Initials" records), after stripping accents.
  #Step1 - Strip accents from Authors
  authors_noaccent <- stri_trans_general(author,"Latin-ASCII")
  firstname <- gsub("([A-Za-z]+).*", "\\1", authors_noaccent)
  #Step2 - Soundex
  # NOTE(review): `phonetic()` is assumed to come from an attached package
  # (likely stringdist) — confirm, since it is not namespaced here.
  phonetic(firstname) #Deal with -Abdekabel Ab de kabel-
}
phon_nysiis<- function(author){
#Step1 - Strip accents from Authors
authors_noaccent <- stri_trans_general(author,"Latin-ASCII")
firstname <- gsub("([A-Za-z]+).*", "\\1", authors_noaccent)
#Step2 - Nysiis
nysiis(firstname, maxCodeLen = 8)
}
#Notes: For authors we can use Levenshtein distance or Jairo Wirkler
Combinations = function(data){
  # Build every unordered pair of distinct values of the first column
  # (assumed to be a record id), take the first matching row for each id,
  # and bind the two rows side by side; the second copy's columns get a
  # ".1" suffix from data.frame().
  ids = combn(unique(data[,1]),2)
  df = data.frame(data[match(ids[1,], data[,1]), ], data[match(ids[2,], data[,1]), ])
  #Subset out combinations within the same phonetic
  # NOTE(review): relies on the data having an `id` column (so the pair gets
  # id / id.1); self-pairs are dropped.
  df = df %>% filter(id!=id.1)
  #df = df %>% filter(phonetic == phonetic.1)
  return(df)
}
year_distance<- function(data){
  # Absolute gap, in years, between the paired records' `year` and `year.1`
  # columns.  Going through as.character() first keeps factor-coded years
  # from being converted to their level indices.
  first_year <- as.numeric(as.character(data$year))
  second_year <- as.numeric(as.character(data$year.1))
  abs(first_year - second_year)
}
jaccard_distance<- function(data,var1,var2){
  # Jaccard string distance between the columns named by var1 and var2.
  # stringdist() can return Inf for comparisons it cannot score; cap those
  # at the maximum distance of 1 so downstream models get a finite feature.
  d <- stringdist(data[[var1]], data[[var2]], method = "jaccard")
  d[d == Inf] <- 1
  as.numeric(d)
}
cosine_distance<- function(data,var1,var2){
  # Cosine string distance between the columns named by var1 and var2,
  # with any Inf results capped at the maximum distance of 1.
  d <- stringdist(data[[var1]], data[[var2]], method = "cosine")
  d[d == Inf] <- 1
  as.numeric(d)
}
jarowinker_distance<- function(data,var1,var2){
  # Jaro-Winkler string distance (stringdist method "jw") between the
  # columns named by var1 and var2, with Inf capped at 1.
  # (Function name keeps the original's "jarowinker" spelling — callers
  # elsewhere depend on it.)
  d <- stringdist(data[[var1]], data[[var2]], method = "jw")
  d[d == Inf] <- 1
  as.numeric(d)
}
#Jaccard distance from the first name initial of the authors
fname_initial_distance <- function(var1, var2){
  # Compare the second whitespace-separated token of each author string
  # (the initials in "LASTNAME Initials" records) with a Jaccard distance.
  # vapply() replaces sapply() so the result type is stable even for
  # zero-length input; strings without a second token yield NA, as before.
  second_token <- function(parts) parts[2]
  t1 <- vapply(strsplit(var1, " "), second_token, character(1))
  t2 <- vapply(strsplit(var2, " "), second_token, character(1))
  stringdist(t1, t2, method = "jaccard")
}
features <- function(df){
  # Append the pairwise similarity features used by the disambiguation
  # models, then keep only identifier/title columns plus those features.
  # Expects the paired columns produced by Combinations() (".1" suffix).
  ##### Create Features for Training#####
  #Author Last Name Distance
  df$dist_author = jarowinker_distance(df,"author","author.1")
  #Author Initial's Distance
  df$dist_initials = fname_initial_distance(df$author,df$author.1)
  #Title Distance
  df$dist_title = cosine_distance(df, "title","title.1")
  #Year
  df$dist_year = year_distance(df)
  #Coauthors Distance (jaccard)
  df$dist_coauthor = jaccard_distance(df,"coauthors","coauthors.1")
  #Keyword Distance (cosine)
  df$dist_keyword = cosine_distance(df,"keyword","keyword.1")
  #Journal Distance
  df$dist_journal = cosine_distance(df,"journal","journal.1")
  #Institution Distance
  df$dist_institution = cosine_distance(df,"institution","institution.1")
  #Label
  #df$label = as.numeric(df$authorid==df$authorid.1)
  df = df %>% select(sigID, author, id,title, sigID.1, author.1, id.1,title.1,
                     dist_author,dist_initials,dist_title,dist_year,dist_coauthor,dist_keyword,dist_journal,dist_institution)
  return(df)
}
#######FUNCTIONS####
#######################################################
#############Connect and Read from DB##################
# Open a MySQL connection and load the authors_signature table used for
# blocking below.
# SECURITY NOTE(review): the database password is hard-coded in source —
# move it to an environment variable or a config file outside version
# control before sharing this script.
drv<- dbDriver("MySQL")
pw<- {"dmkm1234"}
ucscDb <- dbConnect( MySQL(), dbname="dmkm_articles",
                     host= "127.0.0.1", port=8889,
                     user="root", password=pw
)
rm(pw)
signature<- dbReadTable(ucscDb, "authors_signature")
#############Connect and Read from DB##################
#######################################################
#######################################################
############# MODEL ##################
# Disambiguation demo for a single hard-coded author/title pair: block on
# the NYSIIS phonetic code, build candidate pairs, compute distance
# features, score with several pre-trained models, and combine them into a
# weighted ensemble.
#Get author and article
author = "Ben Ouezdou F"
title = "From Force Control and Sensory-Motor Informations to Mass Discrimination"
#### Create block ####
phon = phon_nysiis(author)
data = subset(signature, phonetic == phon)
df = Combinations(data)
# NOTE(review): the filter repeats the literals instead of using the
# `author`/`title` variables above (dplyr would resolve the bare names as
# columns) — keep the two in sync when changing the query.
df = df %>% filter(author == "Ben Ouezdou F" & title == "From Force Control and Sensory-Motor Informations to Mass Discrimination")
##### Create Features #####
df <- features(df)
# #Author Name Distance
# df$dist_author = jarowinker_distance(df,"author","author.1")
# #Author Initial's Distance
# df$dist_initials = fname_initial_distance(df$author,df$author.1)
# #Title Distance
# df$dist_title = jaccard_distance(df, "title","title.1")
# #Year
# df$dist_year = year_distance(df)
# #Coauthors Distance (jaccard)
# df$dist_coauthor = jaccard_distance(df,"coauthors","coauthors.1")
# #Keyword Distance (cosine)
# df$dist_keyword = cosine_distance(df,"keyword","keyword.1")
# #Journal Distance
# df$dist_journal = cosine_distance(df,"journal","journal.1")
# #Institution Distance
# df$dist_institution = cosine_distance(df,"institution","institution.1")
#Prediction
#Load Models
library(caTools)
library(randomForest)
library(rpart)
library(rpart.plot)
library(e1071)
set.seed(123)
# Pre-trained model objects saved by the (separate) training script.
load(file = "AuthorForest.rda")
load(file = "AuthorSVM.rda")
load("AuthorGLM.rda")
load(file = "AuthorCTree.rda")
load(file = "AuthorCART.rda")
load(file = "AuthorNB.rda")
PredictForest = predict(AuthorForest , newdata = df)
PredictSVM = predict(AuthorSVM, newdata= df)
PredictGLM = predict(AuthorGLM, newdata = df, type='response')
# Threshold the GLM probabilities at 0.5 to get 0/1 class labels.
PredictGLM = ifelse(PredictGLM > 0.5,1,0)
PredictCTree = predict(AuthorCTree, newdata = df)
PredictCART = predict(AuthorCART, newdata = df, type="class")
PredictNB = predict(AuthorNB, newdata = df)
#Ensemble
# Convert factor predictions to 0/1 numerics before averaging.
factorToNum = function(factor){as.numeric(as.character(factor))}
# Weighted vote: NB and CART count double (weights 1+1+2+2 over 5); predict
# a match when the weighted mean reaches 0.5.  NOTE(review): PredictSVM and
# PredictCTree are computed but not used here — confirm that is intentional.
predictEnsemble = ifelse(((factorToNum(PredictForest) + factorToNum(PredictGLM) + 2*factorToNum(PredictNB) + 2*factorToNum(PredictCART)) / 5) >= .5 , 1,0)
df$label = predictEnsemble
# Titles the ensemble attributes to the same author.
titles = df %>% filter(label == 1) %>% select("Author"=author.1, "ID" = id.1 , "Title" =title.1)
titles
|
5a37073309de3ac3cc7fda1dc48697a38f1d1dd9
|
6708c52f2ce9f941e92d6d741dd9b4c9c06a3834
|
/session2/02_vectors_and_lists.R
|
f34202feb3c96a9a1612cfba2a5182834366c90a
|
[] |
no_license
|
zhiconghu/am10.mam2022
|
a30cf78f6d263c25fe8128fd2ce06e9fca4806ed
|
25c19ec4d2bed417cf782bc468d4bfb01e72f8b9
|
refs/heads/main
| 2023-08-27T12:47:08.929208
| 2021-11-10T13:36:50
| 2021-11-10T13:36:50
| 427,504,394
| 0
| 0
| null | 2021-11-12T21:47:04
| 2021-11-12T21:47:03
| null |
UTF-8
|
R
| false
| false
| 2,381
|
r
|
02_vectors_and_lists.R
|
# Session 2: vectors and lists -- teaching script; the `# [1] ...` style
# comments record the console output of the line above them.
library(tidyverse)
library(gapminder)
# Vectors - one dimensional, one type (all elements share a single type)
my_colours <- c("grey80", "tomato")
# subset and get the first element from this character vector
my_colours[1]
# [1] "grey80"
# Highlight Rwanda: draw every other country's line in grey, then overlay
# Rwanda's life-expectancy trajectory as a thick tomato line.
gapminder %>%
  mutate(rwanda = ifelse(country == "Rwanda", TRUE, FALSE)) %>%
  ggplot(aes(x = year, y = lifeExp, colour = rwanda, group = country)) +
  geom_line(
    # the data argument is a function that takes the original df, and filter(x,!rwanda)
    data = function(x) filter(x, !rwanda),
    # all lines where rwanda==FALSE, will be coloured 'grey80'
    colour = my_colours[1]
  ) +
  geom_line(
    data = function(x) filter(x, rwanda),
    # all lines where rwanda==TRUE, will be coloured 'tomato' and be size=3
    colour = my_colours[2],
    size = 3
  ) +
  theme_minimal()
# Lists - many dimensions, many types (elements can be anything, even functions)
fubar <- list(a = "Hello world", b = 1, c = TRUE, d = rnorm(100), e = mean)
glimpse(fubar)
# List of 5
#  $ a: chr "Hello world"
#  $ b: num 1
#  $ c: logi TRUE
#  $ d: num [1:100] 3.361 1.246 0.549 1.334 -1.274 ...
#  $ e:function (x, ...)
# skim() cannot handle the function element, so this errors (intentional demo):
skimr::skim(fubar)
# Error in as.data.frame.default(x[[i]], optional = TRUE) :
#   cannot coerce class ‘"function"’ to a data.frame
grades <- list(
  a = rnorm(20, mean = 78, sd = 8.3),
  b = rnorm(15, mean = 87, sd = 5.6),
  c = rnorm(18, mean = 82, sd = 6.7)
)
# three ways to subset a list:
grades[1]    # [ ]  keeps the container -> a list of length 1
grades$a     # $    extracts the element -> a numeric vector
grades[[1]]  # [[ ]] extracts the element -> a numeric vector
class(grades[1])    # "list"
class(grades$a)     # "numeric"
class(grades[[1]])  # "numeric"
# dataframes/tibbles are lists, so when you subset with [] you get a tibble
head(gapminder["pop"])
# when you subset with [[]] or $ you get a vector
head(gapminder[["pop"]])
head(gapminder$pop)
# vectorized operations work on vectors
sum(grades$a)
sum(grades[[1]])
# vectorized operations don't work on lists (this call errors)
sum(grades[1])
# on to purrr::map()
library(purrr)
set.seed(42)
x_list <- list(x = rnorm(100), # generates 100 random numbers from N(0,1)
               y = rnorm(100),
               z = rnorm(100))
map(x_list, mean) # take the list and apply the function 'mean' to each item on list
# find the standard deviation (sd) for all numeric variables in gapminder
# using map() returns a list
gapminder %>%
  dplyr::select_if(is.numeric) %>%
  map(sd)
# find the standard deviation (sd) for all numeric variables in gapminder
# using map_dbl() returns a (named) numeric vector
gapminder %>%
  dplyr::select_if(is.numeric) %>%
  map_dbl(sd)
|
81093a963735701c2a5994a0578a78c5613a9535
|
73ef006799b04993516d7852a5bb7d177526d4d0
|
/4-results-discussion/data-n-analyses/results-analysis/R/scripts/4-ordination-classification-fitted-vars.R
|
a23254e9096758080d2b9b24eecf99e14aad9568
|
[] |
no_license
|
sklayn/phd
|
8c1bb1fb375b00c27b8cbe94da919ddc12bfd323
|
46438fd0dd33255828433dd5c2501563fc406d2b
|
refs/heads/master
| 2021-08-16T07:32:52.391780
| 2021-05-08T10:54:06
| 2021-05-08T10:54:06
| 61,437,700
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,614
|
r
|
4-ordination-classification-fitted-vars.R
|
#### Ordination (nMDS) on the abundance data ####
# set seed so the metaMDS run (random starts) is reproducible
set.seed(1)
mds.sand <- metaMDS(num.zoo.abnd.sand)
# basic summary of the MDS (stress, dimensions, convergence)
mds.sand
# diagnostic plots for the MDS, saved side by side on one pdf page
pdf(file = file.path(figs.dir, "explor_diagnostic-plots-mds_sand.pdf"),
    paper = "a4r",
    width = 12,
    height = 12,
    useDingbats = FALSE)
par(mfrow = c(1, 2))
# stressplot: observed dissimilarity vs ordination distance (Shepard diagram)
stressplot(mds.sand)
# goodness-of-fit plot
# first plot the nMDS ordination with sites (as text labels)
plot(mds.sand, display = 'sites', type = 't', main = 'Goodness of fit')
# then, add the points with size reflecting goodness of fit (smaller = better fit)
points(mds.sand, display = 'sites', cex = goodness(mds.sand)*200)
dev.off()
par(mfrow = c(1, 1))
# save the MDS result for reuse in later scripts
saveRDS(mds.sand, file = file.path(save.dir, "mds_sand.rds"))
# plot and save the MDS (ggplot2; plot_mds is a custom helper defined elsewhere)
pdf(file = file.path(figs.dir, "explor_mds_sand.pdf"), useDingbats = FALSE)
plot_mds(mds.sand, factors.zoo.sand$stations)
dev.off()
#### Fit environmental variables to the ordination (datasets from environmental data script) ####
## envfit (vegan) on the (imputed) environmental data.
# use the long-term water column data - averaged over years/months, by station
water.sand.by.st <- ddply(water.sand.imp.df[, !names(water.sand.imp.df) %in% c("year", "month")],
                          .(station),
                          colwise(mean, .cols = is.numeric))
# repeat each station row 9 times - to match the number of replicate zoobenthic
# samples per station
# (unfortunately, the replicate count is hardcoded here and below for expediency)
water.sand.envfit <- water.sand.by.st[rep(seq_len(nrow(water.sand.by.st)), each = 9), ]
rownames(water.sand.envfit) <- 1:nrow(water.sand.envfit)
# and year (these were only measured in 2013-2014 anyway)
other.env.sand.by.st <- ddply(other.env.sand.imp.df,
.(station, year, month),
colwise(mean, .cols = is.numeric))
# repeat each row 3 times to match the number of replicate zoobenthic samples
other.env.sand.envfit <- other.env.sand.st[rep(seq_len(nrow(other.env.sand.st)), each = 3), ]
rownames(other.env.sand.envfit) <- 1:nrow(other.env.sand.envfit)
# quick workspace cleanup
rm(water.sand.by.st, other.env.sand.by.st)
# add the heavy metals
# import heavy metal data first, if not already in the workspace:
# heavy.metals.sand <- read.csv(file.path(data.dir, "heavy-metals-sand.csv"), header = TRUE)
# repeat each row 9 times - to match the number of replicate zoobenthic samples
heavy.metals.sand.envfit <- heavy.metals.sand[rep(seq_len(nrow(heavy.metals.sand)), each = 9), ]
rownames(heavy.metals.sand.envfit) <- 1:nrow(heavy.metals.sand.envfit)
# remove the month and year from the heavy metals dataset (only measured once, so
# not relevant) + the stations - not needed for envfit
heavy.metals.sand.envfit <- heavy.metals.sand.envfit[, !names(heavy.metals.sand.envfit) %in% c("month", "year", "station")]
# join all 3 (water column, sediments and heavy metals) together; rows are
# aligned by construction (each table repeated to one row per zoobenthic replicate)
env.all.sand.envfit <- cbind(other.env.sand.envfit,
                             heavy.metals.sand.envfit,
                             water.sand.envfit[, !names(water.sand.envfit) == "station"])
## SAVE IN CASE WE NEED IT
saveRDS(env.all.sand.envfit, file = file.path(save.dir, "env-all-for-envfit_clean_sand.rds"))
# set seed so the permutation test is reproducible
set.seed(1)
# fit each environmental variable to the ordination (999 permutations)
envfit.mds.sand <- envfit(mds.sand,
                          env.all.sand.envfit[, !names(env.all.sand.envfit) %in% c("station", "month", "year")],
                          permutations = 999)
# apply Bonferroni correction for multiple testing to the p-values, because there is a large number
# of tested variables (calls custom function p_adjust_envfit, defined elsewhere!).
envfit.mds.sand <- p_adjust_envfit(envfit.mds.sand)
## SAVE THE ENVFIT
saveRDS(envfit.mds.sand, file = file.path(save.dir, "envfit-mds_sand.rds"))
# extract significant variables (p < 0.05) & order by p-value, then descending r2
# (extract_envfit_scores is a custom helper defined elsewhere)
envfit.sand.sign.vars <- extract_envfit_scores(envfit.mds.sand, p = 0.05, r2 = TRUE)
envfit.sand.sign.vars <- arrange(envfit.sand.sign.vars, pvals, desc(r2))
#### PLOT VARIABLES AS SURFACES OVERLAID ON THE MDS (ORDISURF) ####
# ordisurf needs the original numeric values of the environmental variables chosen
# for plotting (= the significant variables from envfit, p < 0.05)
sign.vars.sand.ordisurf <- subset(env.all.sand.envfit,
                                  select = names(env.all.sand.envfit) %in% envfit.sand.sign.vars$vars)
dim(sign.vars.sand.ordisurf) # just to check that everything we need is there
# apply ordisurf sequentially to all significant environmental variables,
# with plotting suppressed (get back a list of ordisurf objects, one fitted
# surface per environmental variable)
ordisurf.list.all.sand <- apply(sign.vars.sand.ordisurf,
                                MARGIN = 2,
                                FUN = function(x) ordi <- ordisurf(mds.sand ~ x, plot = FALSE))
# check out the summaries of the fits
lapply(ordisurf.list.all.sand, summary)
## SAVE FITTED ORDISURFS AS WELL (in case)
saveRDS(ordisurf.list.all.sand, file = file.path(save.dir, "ordisurf-mds_sand.rds"))
# rearrange list to have the plots in the desired order.
# Here: first the sediment parameters, then the water column parameters, and
# finally the heavy metals.
names(ordisurf.list.all.sand)
ordisurf.list.all.sand <- ordisurf.list.all.sand[c("Ninorg", "Ntot", "NH4", "NO2", "PO4",
                                                   "O2.bottom", "O2.average", "Secchi.depth",
                                                   "seston", "dist.innermost", "depth", "sand",
                                                   "gravel", "sorting", "mean.grain.size",
                                                   "org.matter", "heavy.metals.noFe", "heavy.metals.all",
                                                   "Mn", "Pb", "Ni", "Cu")]
# human-readable plot titles, in the same order as the rearranged list above
var.labels.sand <- c("N-inorganic", "N-total", "NH4", "NO2", "PO4",
                     "O2 bottom", "O2 average", "Secchi depth",
                     "seston", "distance to innermost station", "depth", "% sand",
                     "% gravel", "sorting", "mean grain size",
                     "organic matter", "heavy metals (no Fe)", "total heavy metals",
                     "Mn", "Pb", "Ni", "Cu")
# set the file name and properties for the output graph --> REDO TO FIT MULTIPLE PLOTS/PAGE!
pdf(file = file.path(figs.dir, "mds_ordisurf_sand_most_sign_vars.pdf"),
    paper = "a4",
    useDingbats = FALSE)
# plot all variables, using the custom plot_mds_ordisurf function, and adding the
# corresponding main title (variable name) on each subplot
mapply(function(m, n) {
  plot_mds_ordisurf(mds.sand, m)
  title(main = n, col.main = "grey28")
},
ordisurf.list.all.sand,
var.labels.sand)
dev.off()
# clean up workspace
rm(envfit.sand.sign.vars,
   ordisurf.list.all.sand,
   sign.vars.sand.ordisurf,
   var.labels.sand,
   heavy.metals.sand.envfit,
   water.sand.envfit,
   other.env.sand.envfit)
#### Classification of the communities ####
# dendrogram of dissimilarities between samples (Bray-Curtis on sqrt abundances,
# average linkage / UPGMA)
set.seed(1)
dendr.sand <- hclust(vegdist(sqrt(num.zoo.abnd.sand), method = "bray"),
                     "average")
# add station names as labels
dendr.sand$labels <- factors.zoo.sand$stations
# plot and examine the dendrogram
pdf(file.path(figs.dir, "explor_dendrogram-sand.pdf"), useDingbats = FALSE)
plot(dendr.sand, hang = -1,
     main = "", ylab = "Distance (Bray-Curtis)", xlab = "")
rect.hclust(dendr.sand, k = 4) # here, it appears there are about 4 distinct groups
dev.off()
gr.dendr.sand <- cutree(dendr.sand, k = 4) # or the tree can be cut at any given height, too
# reorder dendrogram by some variable (variables - same as aggregated data frame
# used for ordisurf & plotting over ordination)
# NOTE(review): `sign.vars.mean` is not created in this script -- it must come
# from the workspace / an earlier script; also reorder() has a dendrogram
# method rather than an hclust one -- confirm this call runs as intended.
## => plots can be redone - leaves colored by cluster; values of the variable used
## for rearranging the tree plotted below as a colored bar..
dendr.sand.by.O2 <- with(sign.vars.mean, reorder(dendr.sand, O2.bottom))
pdf(file.path(figs.dir, "explor_dendrogram-by-O2_sand.pdf"), useDingbats = FALSE)
plot(dendr.sand.by.O2, hang = -1,
     main = "O2 bottom", xlab = "", ylab = "Distance (Bray-Curtis)")
rect.hclust(dendr.sand.by.O2, k = 4)
dev.off()
# numerical analysis of the grouping: does bottom O2 differ between the 4 clusters?
anova(lm(O2.bottom ~ gr.dendr.sand, data = sign.vars.mean))
#### ANOSIM - groups = stations ####
## This is a non-parametric permutation procedure applied to the rank (similarity)
## matrix underlying the ordination or classification of the samples. R statistic:
## -1 to 1; 1 = all replicates within sites are more similar to each other than
## to any other replicate from a different site; 0 = H0 is true (the same average
## similarities between and within sites). Usually 0 < R < 1 => some degree of
## difference observed between sites.
## Best course of analysis: 1) global ANOSIM - overall difference between groups;
## if significant - 2) where does the main between-group difference come from?
## => examine R values for each pairwise comparison: large = complete separation,
## small - little or no difference.
# global test: do communities differ between stations? (sqrt-transformed abundances)
anosim.sand <- anosim(vegdist(sqrt(num.zoo.abnd.sand), method = "bray"),
                      grouping = factors.zoo.sand$stations)
anosim.sand
## import and reclassify environmental variables to use for grouping - can be used
## to colour/order classification or ordination, too
env.qualit <- read.csv(file.path(data.dir, "env-qualit-vars_sand.csv"), header = TRUE)
str(env.qualit)
names(env.qualit)
# copy each row 3 times (to match the replicates in the zoo abundance data frame)
env.qualit <- env.qualit[rep(seq_len(nrow(env.qualit)), each=3),]
rownames(env.qualit) <- 1:nrow(env.qualit)
## repeat the ANOSIM using each qualitative environmental variable as the grouping
# NOTE(review): apply() coerces the data frame to a character matrix here; the
# column values then act as group labels, which is presumably fine for anosim,
# but lapply(env.qualit, ...) would avoid the coercion -- confirm.
anosim.sand2 <- apply(env.qualit, 2, function(x)
  anos <- anosim(vegdist(sqrt(num.zoo.abnd.sand), method = "bray"),
                 grouping = x))
anosim.sand2
#### SIMPER to id the species with the highest contribution to the differences between groups ####
## Good discriminating species - high contribution + small sd.
simper.sand <- simper(sqrt(num.zoo.abnd.sand),
                      group = factors.zoo.sand$stations)
summary(simper.sand, ordered = TRUE)
# SIMPER per qualitative environmental grouping (same coercion caveat as above)
simper.sand2 <- apply(env.qualit, 2, function(x) simp <- simper(sqrt(num.zoo.abnd.sand),
                                                                group = x))
|
436591647b0ad168f68d580e1fc42219c2d34234
|
80c6644d745190dd3702b5668ef5eae423a27859
|
/funcion_hipsometria.R
|
46acf3574f91f2c49ed5df1554d39072638358e9
|
[] |
no_license
|
fmanquehual/proyecto_agua_en_R
|
356bfdf9c4156587a2ccfd3addaadfd634aed537
|
e4a51c5192f174708a982b62a42626fff51078a1
|
refs/heads/master
| 2023-01-14T13:22:57.448036
| 2020-11-13T05:15:59
| 2020-11-13T05:15:59
| 300,746,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,063
|
r
|
funcion_hipsometria.R
|
# Hypsometric curve of a catchment computed from a DEM raster.
#
# x        RasterLayer with elevation values (NA outside the catchment).
# absolute when TRUE, plot in absolute units (area in km2 vs elevation in m);
#          when FALSE, both axes are rescaled to [0, 1].
# main,col plot title and curve colour.
# AddDen   overlay the elevation density function (top axis shows density).
# colDen   colour of the density overlay.
# ...      further arguments passed on to plot().
#
# Returns a named vector: mean/max/min elevation, catchment area (m2, from the
# non-NA cell count times cell size), the hypsometric integral and its
# numerical integration error.
hipsometria <- function (x, absolute = TRUE, main = "", col = "blue",
                         AddDen = TRUE, colDen = "red", ...)
{
  library(raster)
  library(rgdal)
  z <- getValues(x)
  zmin <- minValue(x)
  zmax <- maxValue(x)
  # catchment area: number of non-NA cells times the cell size
  areaCN <- sum(!is.na(z)) * xres(x) * yres(x)
  # NOTE: plot.stepfun() draws the ECDF as a side effect and invisibly returns
  # the step coordinates (t = elevations, y = cumulative fraction of cells).
  sfun <- plot.stepfun(ecdf(z))
  # mean elevation: linear interpolation of the ECDF at probability 0.5.
  # (A dead exact-match branch that was unconditionally overwritten by this
  # interpolation -- and produced a coercion warning via any(which(...)) --
  # has been removed.)
  x2id <- which(sfun$y > 0.5)[1]
  x1id <- which(sfun$y < 0.5)[length(which(sfun$y < 0.5))]
  x2 <- sfun$y[x2id]
  x1 <- sfun$y[x1id]
  y1 <- sfun$t[x1id]
  y2 <- sfun$t[x2id]
  zmean <- (0.5 - x1) * (y2 - y1) / (x2 - x1) + y1
  # hypsometric integral: area under the relative elevation vs relative area curve
  relative.area <- (1 - sfun$y[-1])
  relative.elev <- (sfun$t[-c(1, length(sfun$t))] - zmin) / (zmax - zmin)
  f <- splinefun(relative.area, relative.elev, method = "fmm")
  integral <- integrate(f = f, lower = 0, upper = 1)
  absolute.area <- (areaCN - sfun$y * areaCN)[-1] / 1e+6   # km2
  absolute.elev <- sfun$t[-c(1, length(sfun$t))]
  # legend with the integral value and its numerical error (shared by all branches)
  leg.integral <- function(pos) {
    legend(pos, bty = "n",
           c(paste("Integral:", round(integral$value, 3)),
             paste("Error:", round(integral$abs.error, 3))), cex = .8)
  }
  if (absolute) {
    plot(absolute.area, absolute.elev, main = main, type = "l", col = col, ...)
    if (AddDen) {
      # elevation density, rescaled so its maximum spans the area axis
      xyd <- density(getValues(x), na.rm = TRUE, from = zmin, to = zmax)
      cte <- areaCN / max(xyd$y)
      xden <- (xyd$y * cte) / 1e+6
      yden <- xyd$x
      lines(xden, yden, type = "l", col = colDen)
      # top axis labelled in original density units
      axis(3, at = pretty(xden),
           labels = format(pretty(xden) / cte * 1e+6, digits = 2))
      mtext("Densidad", side = 3, line = 2)
      legend("topright", bty = "n",
             c("Curva hipsométrica", "Función densidad"),
             lty = c(1, 1), col = c(col, colDen), xjust = 1, yjust = 1, cex = 0.8)
      leg.integral("topleft")
    } else {
      leg.integral("bottomleft")
    }
  } else {
    plot(relative.area, relative.elev, main = main, type = "l", col = col, ...)
    if (AddDen) {
      # elevation density, with both axes rescaled to [0, 1]
      xyd <- density(getValues(x), na.rm = TRUE)
      cte <- 1 / max(xyd$y)
      xden <- xyd$y * cte
      yden <- (xyd$x - xyd$x[1]) / (xyd$x[length(xyd$x)] - xyd$x[1])
      lines(xden, yden, type = "l", col = colDen)
      axis(3, at = pretty(xden), labels = format(pretty(xden) / cte, digits = 2))
      mtext("Densidad", side = 3, line = 2)
      legend("bottomleft", bty = "n",
             c("Curva hipsométrica", "Función densidad"),
             lty = c(1, 1), col = c(col, colDen), xjust = 1, yjust = 1, cex = 0.8)
      leg.integral("topright")
    } else {
      leg.integral("bottomleft")
    }
  }
  resultados <- c("Elev Media" = zmean, "Elev Max" = zmax, "Elev Min" = zmin,
                  "Area" = areaCN, "Integral" = integral$value,
                  "Error" = integral$abs.error)
  resultados
}
# fuente: https://rpubs.com/joeantonio/71409
# - https://rstudio-pubs-static.s3.amazonaws.com/71408_b030e34a487f46d4ac1a0e5ecf67f5d5.html
|
4f5d26569fdb99d7d088644f16a445990641f140
|
f0ae1581d7b319f995c1caef0adfc2d14d1071e5
|
/CpGisl.level.info.R
|
2e5f87295b83cb1557a8e28f79e55f62f4a9e34a
|
[] |
no_license
|
gluebeck/Scope-of-methylomic-drift-in-BE
|
2391d7e43352f716164a08b5e8a565efa7bf9b7d
|
07ac9e6ed587250575a2a40fe5c1b3423a33bcd4
|
refs/heads/master
| 2021-01-17T18:06:07.222666
| 2017-03-10T01:08:57
| 2017-03-10T01:08:57
| 70,959,079
| 1
| 3
| null | 2017-03-09T22:22:33
| 2016-10-15T01:39:20
|
R
|
UTF-8
|
R
| false
| false
| 2,673
|
r
|
CpGisl.level.info.R
|
# Summarise CpG methylation data at the CpG-island level.
#
# set:  character vector of island names of interest
# isls: island name for each CpG in `cpgs` (same length as `cpgs`)
# cpgs: CpG probe names associated with the islands in `isls`
# dat:  methylation data matrix (rows = CpG probes, columns = samples)
# full: if TRUE, take all CpGs on each island from the global `manifestData`
#       annotation; if FALSE, use only the CpGs supplied via `cpgs`/`isls`
# prom: if TRUE, keep only islands with promoter-associated (TSS) CpGs and
#       restrict the per-island summary to those CpGs
#
# Relies on globals defined elsewhere: manifestData, slope.ccf, CpGs.ccf
# (and the objects used as default arguments).
#
# Returns a list: per-island mean/sd methylation (Mvals, sd.Mvals), annotated
# gene names, retained island names, a promoter flag, and the mean CCF-derived
# drift slope per island (bslope).
CpGisl.level.info = function(set = ILS.hypo.drift5, isls = Islands.hypo.drift5,
                             cpgs = CpGs.hypo.drift5.isl, dat, full = T, prom = T) {
  len = length(set) # number of Islands
  # result containers, sized for all islands but filled only for retained ones
  Mvals = sd.Mvals = matrix(0,ncol=ncol(dat),nrow=len)
  bslope = sd.bslope = numeric()
  # requires input: aux1 = slope.ccf, aux2 = CpGs.ccf
  genes = list()
  islands = vector()
  Prom = rep(T,len)
  # ii indexes retained islands only; islands skipped via `next` leave no gap
  ii = 1
  for (i in 1:len) {
    # collect the CpGs belonging to the current island
    if(full==T) {
      cpgsi = manifestData$Name[manifestData$Islands_Name == set[i]] # cpgs on one of the islands
    } else {
      cpgsi = cpgs[isls == set[i]] # cpgs on one of the islands
    }
    # remove shelf CpGs (shores are kept) !!! already checked
    dum = manifestData[cpgsi,"Relation_to_Island"]
    cpgsi = cpgsi[dum!="N_Shelf" & dum!="S_Shelf"] # & dum!="N_Shore" & dum!="S_Shore"]
    # promoter filter: CpGs annotated to a TSS region
    dum = manifestData[cpgsi,"UCSC_RefGene_Group"]
    tmp = cpgsi[grepl("TSS",dum)] # & !grepl("Body",dum) & !grepl("3'UTR",dum)]
    if(prom == T) {
      # with prom=TRUE, islands without any TSS CpG are dropped entirely
      if(length(tmp)==0) {next} else {cpgsi = tmp}
    }
    # with prom=FALSE, flag non-promoter islands instead of dropping them
    if(length(tmp)==0) Prom[ii]=FALSE
    # see which cpgs are in dat, if any ....
    idum = na.omit(match(cpgsi,rownames(dat)))
    len.idum = length(idum)
    if(len.idum == 0) {next}
    if(len.idum ==1) {
      # single CpG: the island "mean" is that CpG's profile, sd is 0
      Mvals[ii,]=dat[idum,]; sd.Mvals[ii,]=0
    } else {
      Mvals[ii,] = apply(dat[idum,],2,mean,na.rm=T); sd.Mvals[ii,] = apply(dat[idum,],2,sd,na.rm=T)
    }
    ## get b-slope (methylation drift slope) from CCF data
    idum = na.omit(match(cpgsi,CpGs.ccf))
    bslope[ii] = mean(slope.ccf[idum])
    # idum = na.omit(match(cpgsi,CpGs))
    # bslope[ii] = mean(ancova.BE$rate[idum])
    # sd.bslope[ii] = sd(aux1[idum])
    # get gene name(s) annotated to this island
    genes[[ii]] = dum = unique(unlist(strsplit(manifestData[cpgsi,"UCSC_RefGene_Name"],split=';')))
    islands[ii] = set[i]
    ii = ii+1
  }
  # NOTE(review): row names are assigned from `set` *before* the matrices are
  # cut down to the ii-1 retained islands, so if any island was skipped the
  # row names no longer match the rows; `islands` holds the correct names.
  rownames(Mvals) = rownames(sd.Mvals) = set
  colnames(Mvals) = colnames(sd.Mvals) = colnames(dat)
  # truncate containers to the retained islands
  Mvals = Mvals[1:(ii-1),]; sd.Mvals = sd.Mvals[1:(ii-1),]
  bslope = bslope[1:(ii-1)]
  # split any remaining ';'-joined gene names and deduplicate
  tmp = lapply(genes,strsplit,';')
  tmp = lapply(tmp,unlist)
  genes = lapply(tmp,unique)
  Prom = Prom[1:(ii-1)]
  # return(list(Mvals=Mvals, sd.Mvals=sd.Mvals, bslope=bslope, sd.bslope=sd.bslope, genes=genes))
  return(list(Mvals=Mvals, sd.Mvals=sd.Mvals, genes=genes, islands=islands, prom=Prom, bslope=bslope))
}
|
617907e4c639ea457c83661d84129794ff9ed666
|
948209c5a164e300d001bf5e5fb4d94e158c5759
|
/SSI Modeling.R
|
970cf5466b96f7150fae88cff2c28652dffc4fee
|
[
"MIT"
] |
permissive
|
samirsafwan/Predicting-Balloon-Altitude-Using-ML
|
64ee9a277713588e5d3191dfe23919ec2185a578
|
ef0089c8e6ce6f6186470119b85c95b78945c83d
|
refs/heads/master
| 2021-05-08T03:05:22.283364
| 2017-11-02T01:31:06
| 2017-11-02T01:31:06
| 108,226,273
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,220
|
r
|
SSI Modeling.R
|
# Working directory containing the flight log.
# NOTE(review): hard-coded user path; a relative path would make this portable.
setwd("~/Downloads")
data <- read.csv("ssi54-3.csv")

# --- Gradient boosting (GBM), walk-forward validation ---
# The log has 27,920 rows split into 20 consecutive folds of 1,396 rows each:
# train on folds 1..i, predict fold i + 1.
library(gbm)
set.seed(1)
boost.mse <- rep(0, 19)
predicted.trajectory <- vector()
for (i in 1:19) {
  sample <- data[1:(1396 * i), ]
  test <- data[(1396 * i + 1):(1396 * (i + 1)), ]
  boost.fit <- gbm(altitude ~ ., data = sample, distribution = 'gaussian',
                   n.trees = 2000, shrinkage = 0.1, cv.folds = 5)
  boost.pred <- predict(boost.fit, test, n.trees = gbm.perf(boost.fit, method = 'cv'))
  predicted.trajectory <- c(predicted.trajectory, boost.pred)
  boost.mse[i] <- mean((boost.pred - test$altitude)^2)
}
boost.mse # mean mse = 414,710

# Observed (black) vs predicted (red) altitude, with fold boundaries in grey.
plot(data$time, data$altitude, type = 'l')
lines(data[1397:27920, ]$time, predicted.trajectory, type = 'l', col = 'red')
for (i in 1:18) {
  # BUG FIX: the original wrote data[1396*i:1396*i,]$time; because `:` binds
  # tighter than `*`, that indexed rows 1396 * (i:1396) * i (mostly NA rows),
  # so the intended single fold-boundary line was never drawn.
  abline(v = data$time[1396 * i], col = 'gray')
}
# --- GBM without temperature ---
# Same walk-forward scheme with the temperature predictor dropped, to gauge
# how much of the fit depends on it (per the recorded MSEs, roughly 8x worse).
library(gbm)
set.seed(1)
boost.mse2 <- rep(0, 19)
predicted.trajectory2 <- vector()
for (i in 1:19) {
  sample <- data[1:(1396 * i), ]
  test <- data[(1396 * i + 1):(1396 * (i + 1)), ]
  boost.fit <- gbm(altitude ~ . - temperature, data = sample, distribution = 'gaussian',
                   n.trees = 2000, shrinkage = 0.1, cv.folds = 5)
  boost.pred <- predict(boost.fit, test, n.trees = gbm.perf(boost.fit, method = 'cv'))
  predicted.trajectory2 <- c(predicted.trajectory2, boost.pred)
  boost.mse2[i] <- mean((boost.pred - test$altitude)^2)
}
boost.mse2 # mean mse = 3,415,947

plot(data$time, data$altitude, type = 'l')
lines(data[1397:27920, ]$time, predicted.trajectory2, type = 'l', col = 'red')
for (i in 1:18) {
  # BUG FIX: same `:` vs `*` precedence error as above -- draw one grey line
  # per fold boundary.
  abline(v = data$time[1396 * i], col = 'gray')
}
# --- Lasso regression (glmnet), walk-forward validation ---
# Train on folds 1..k using columns 1:12 as predictors; the penalty is chosen
# by cross-validation (lambda.min) and fold k + 1 is scored.
library(glmnet)
set.seed(1)
lasso.mse <- rep(0, 19)
for (k in seq_len(19)) {
  train.rows <- 1:(1396 * k)
  test.rows <- (1396 * k + 1):(1396 * (k + 1))
  train.x <- as.matrix(data[train.rows, 1:12])
  train.y <- as.matrix(data[train.rows, ]$altitude)
  test.x <- as.matrix(data[test.rows, 1:12])
  cv.fit <- cv.glmnet(train.x, train.y, alpha = 1)
  preds <- predict(cv.fit, s = cv.fit$lambda.min, newx = test.x)
  lasso.mse[k] <- mean((preds - data[test.rows, ]$altitude)^2)
}
lasso.mse # mean mse = 3,415,296
# --- GAM with smoothing splines on time and temperature ---
# Same walk-forward scheme: fit on folds 1..fold, score fold + 1.
library(splines)
library(gam)
set.seed(1)
gam.mse <- rep(0, 19)
for (fold in seq_len(19)) {
  train.set <- data[1:(1396 * fold), ]
  holdout <- data[(1396 * fold + 1):(1396 * (fold + 1)), ]
  fit <- gam(altitude ~ s(time) + s(temperature), data = train.set)
  preds <- predict(fit, holdout)
  gam.mse[fold] <- mean((preds - holdout$altitude)^2)
}
gam.mse # mean mse = 718,245
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.