blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c37e25f6040ff163f54141a99a6b3ea14ff50427
|
384de1d56a2b831e905712680e8a9ed70411051d
|
/binom.R
|
99cb5d961f9676c68bafbc642f030594f833ba85
|
[] |
no_license
|
grenaud/GenStat
|
facb409b8ea85e26185c8fc74b011107c7900f90
|
69d1a2c9f2f4914fa147153168122af76ec5a075
|
refs/heads/master
| 2016-09-06T11:16:41.145242
| 2014-05-15T20:40:31
| 2014-05-15T20:40:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 211
|
r
|
binom.R
|
#!/usr/bin/env Rscript
# Compute a binomial-test confidence interval from two command-line counts:
# argument 1 = number of successes, argument 2 = number of failures.
# Prints the lower and upper confidence bounds, one per line.
library(boot)

cli_args <- commandArgs(TRUE)
n_success <- strtoi(cli_args[1])
n_failure <- strtoi(cli_args[2])
n_total <- n_success + n_failure

test_result <- binom.test(n_success, n_total)
# $conf partially matches the conf.int component of the htest object.
cat(test_result$conf[1], "\n")
cat(test_result$conf[2], "\n")
|
b19b2ad8b7bddca7ac5f62620dc652ae15935bcf
|
5f9998837e6d5811fb1679189dee7832eb1fc2ca
|
/man/make_filename.Rd
|
ae1f3389045cb44b7540acda6ace78e1479df3ce
|
[] |
no_license
|
TRSperzel/Coursera-R-Package
|
47c79b6218edbe6416771022a30dc8f4d3e60c29
|
d950a45393e8ba812b5d08a1c0cabdc413a300d2
|
refs/heads/master
| 2021-05-04T12:59:29.864703
| 2018-02-07T14:24:44
| 2018-02-07T14:24:44
| 120,305,408
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,022
|
rd
|
make_filename.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_data.R
\name{make_filename}
\alias{make_filename}
\title{Make FARS Filename.}
\usage{
make_filename(year)
}
\arguments{
\item{year}{An integer, or a string or numeric that can be coerced to a string,
of the year of interest.}
}
\value{
this function returns a string that is the proper FARS data
filename for the given year. Will return a filename with NA for the
year slot in the name if the year parameter cannot be coerced to an
integer.
}
\description{
\code{make_filename} makes a properly formatted FARS filename given a year
as input.
}
\details{
This function takes a year as input and produces a valid FARS filename. This
function is "dumb" in that it will not check whether the file
actually exists, or if the year is reasonable -- you could enter any number
you like.
}
\examples{
make_filename(year = '2013')
make_filename(year = 2013)
\dontrun{
make_filename(year = 'two thousand thirteen') # error
}
}
|
561688f0bfae6c21d2775fdec2d0da1100796cac
|
2d5a117e58cd5c1c8e7dddb19aaf8fa1a73564fd
|
/R/tps.R
|
954ee0fe38f8152b011813fa56c6676d6d92e9ca
|
[] |
no_license
|
cran/funfits
|
1a04ea02b6c072bcab78850e5509d9446066ea49
|
a9d82048b8c59ae55a0bdac71e0fd41113f07518
|
refs/heads/master
| 2020-05-19T23:34:36.834062
| 1977-08-08T00:00:00
| 1977-08-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,659
|
r
|
tps.R
|
"tps"<-
function(x, y, lambda = NA, df = NA, cost = 1, knots, weights = rep(1, length(y
)), m, power, scale.type = "unit.sd", x.center, x.scale,
return.matrices = T, nstep.cv = 80, method = "GCV", rmse = NA,
link.matrix = NA, verbose = F, subset = NULL, tol = 0.0001,
print.warning = T, yname = NULL)
{
out <- list()
out$tag <- 1
class(out) <- c("tps", "funfits")
out$call <- match.call() ##
## S function to find minizier of
## || y- Xb||^2 + lambda b^T H b where H is a nonnegative definite
## matrix
## Solution for b is b= (X^T*X + lambda*H)^(-1) X^T*Y
## the H matrix is consructed to be the thin plate spline roughness
## matrix. (If the power =2m-d)
##
## First set up some constants
## and some values in the output list
##
x <- as.matrix(x)
y <- c(y) # make sure y is a vector!
if(!is.null(subset)) {
x <- x[subset, ]
y <- y[subset]
out$subset <- paste(substitute(subset))
}
out$x <- x
out$y <- y
N <- length(y)
out$N <- N
lambda.est <- NA
d <- ncol(x) ##
## make sure that 2m-d > 0
##
out$form <- T
with.constant <- T ## refers to weird constant for radial basis
if(missing(m)) {
m <- max(2, ceiling(d/2 + 0.10000000000000001))
}
if(missing(power)) {
power <- 2 * m - d
if(power < 1) {
power <- 1
out$form <- F
if(print.warning)
cat("Warning: Model is not a true thin plate spline",
fill = T)
}
}
## If not a true tps then do not find the weird constant for the basis
## functions
if(2 * m - d <= 0) {
with.constant <- F
}
if(2 * m - d != power) {
with.constant <- F
}
out$cost <- cost
out$m <- m
out$with.constant <- with.constant
out$trace <- NA
if(is.null(yname))
out$yname <- as.character(paste(substitute(y), collapse = ""))
else out$yname <- yname
out$weights <- weights ##
## Now find the estimate of sigma based on replicated points if this
## makes sense
rep.info <- cat.matrix(x) ## integer tags to indicate replications
if(verbose)
print(rep.info)
if(max(rep.info) == N | !is.na(link.matrix[1])) {
shat.rep <- NA
shat.pure.error <- NA
}
else {
##
## do a simple 1-way ANOVA to get the rep error
##
shat.pure.error <- sqrt(fast.1way(rep.info, y, weights)$MSE)
shat.rep <- shat.pure.error
out$shat.pure.error <- shat.pure.error
}
out$shat.rep <- shat.rep
out$shat.pure.error <- shat.pure.error
if(missing(knots))
knots <- x[!dup(rep.info), ]
knots <- as.matrix(knots)
out$knots <- knots ##
##
## scale the X's
x <- transformx(x, scale.type, x.center, x.scale)
transform <- attributes(x)
out$transform <- transform ## scale the knots int eh same way
knots <- scale(knots, center = transform$x.center, scale = transform$
x.scale) ##
####################### NOTE #############################
############ both the x and the knots must be scaled ################
################################################
##
just.solve <- (lambda[1] == 0)
if(is.na(just.solve))
just.solve <- F
out$power <- power ## make up the T and K matrices
## find the QR decopmposition of T matrix that spans null space with
## respect to the knots
qr.T <- qr(make.tmatrix(knots, m))
tmat <- make.tmatrix(x, m)
out$ptab <- attributes(tmat)$ptab
X <- cbind(tmat, qr.yq2(qr.T, make.rb(x, knots, power, with.constant =
with.constant)))
if(verbose) print(dim(X))
## transform the X evalution matrix by a linear transformation if
## the link matrix has been passed
##
if(!is.na(link.matrix[1])) X <- link.matrix %*% X ##
np <- ncol(X) ## the number of parameters
nr <- nrow(X)
N <- nr
nt <- qr.T$rank ## number of para. in NULL space
nk <- np - nt
out$np <- np
out$nt <- nt
## construct the roughness penalty matrix using radial basis
##functions and Qr decomposition of T
##
H <- matrix(0, ncol = np, nrow = np)
temp <- qr.yq2(qr.T, make.rb(knots, knots, power, with.constant =
with.constant))
temp <- qr.q2ty(qr.T, temp)
H[(nt + 1):np, (nt + 1):np] <- temp ##
## if lambda = 0 then just solve the system
if(just.solve) {
#
## just find the least squares fit using radial basis functions or
## the interpolation if knots are missing.
##
out$method <- "interpolation"
omega <- qr.coef(qr(X), y)
}
else {
##
## do all the heavy decompositions if lambda is not = 0
## or if it is omitted
##
##
## inverse symetric square root of X^T W X
##
temp <- svd(sqrt(weights) * X)[c("v", "d")] ##
if(max(temp$d)/min(temp$d) > 10000000000) {
if(verbose)
print(temp$d)
print("Must use a reduced set of\nknots because the radial basis functions are close to being singular"
)
out <- NULL
return(out)
}
##
##
B <- temp$v %*% diag(1/(temp$d)) %*% t(temp$v) ##
## eigenvalue eigenvector decomposition of BHB
##
temp <- svd(B %*% H %*% B)
U <- temp$u
D <- temp$d
if(verbose) print(D)
## We know that H has at least nt zero singular values ( see how H is
## filled)
## So make these identically zero.
## the singular values are returned from largest to smallest.
##
D[(1:nt) + (np - nt)] <- 0
G <- B %*% U ##
## with these these decompositions it now follows that
## b= B*U( I + lambda*D)^(-1) U^T * B * X^T*Y
## = G*( I + lambda*D)^(-1) G^T* X^T*Y
##
##
u <- t(X %*% G) %*% (y * weights) ##
## find the (weighted) pure error sums of squares by calculating
## predcited values when lambda=0
##
temp1 <- (X %*% G) %*% u
out$pure.ss <- sum(weights * (y - X %*% G %*% u)^2)
out$matrices <- list(D = D, G = G, u = u, qr.T = qr.T) ##
## find some estimates of lambda
##
gcv.out <- gcv.tps(out, cost = cost, nstep.cv = nstep.cv, rmse
= rmse, verbose = verbose, tol = tol) ##
out$gcv.grid <- gcv.out$gcv.grid ##
##
lambda.est <- gcv.out$lambda.est ##
if(verbose) print(lambda.est) ##
## find the one specified by the method but first fill in a
## possible user supplied value
##
##
if(!missing(lambda) | !missing(df)) {
method <- "user"
## is the df is supplied then find the corresponding lambda
if(!is.na(df)) {
lambda <- tps.df.to.lambda(df, D)
}
temp <- c(lambda, NA, NA, NA)
lab <- c(dimnames(lambda.est)[[1]], "user")
lambda.est <- rbind(lambda.est, temp)
row.names(lambda.est) <- lab
}
## find the best one.
##
lambda.best <- lambda.est[method, "lambda"]
if(verbose) print(lambda.best) ##
## To solve for the coefficients, recall: omega= G*( I + lambda*D)^(-1)*u
## predicted values are X%*% omega
omega <- G %*% ((1/(1 + lambda.best * D)) * u)
}
if(!just.solve) {
out$eff.df <- sum(1/(1 + lambda.best * D))
out$trA2 <- sum(1/(1 + lambda.best * D)^2)
temp <- X %*% out$matrices$G %*% sqrt(diag(1/(1 + lambda.best *
out$matrices$D)))
diagA <- c((temp^2) %*% rep(1, ncol(X))) * out$weights
out$diagA <- diagA
}
if(just.solve)
out$eff.df <- out$np
out$fitted.values <- c(X %*% omega)
out$residuals <- y - c(X %*% omega)
out$trace <- out$eff.df ##
if(verbose)
print(out$eff.df)
if(just.solve) {
out$lambda <- lambda
out$gcv.grid <- matrix(c(lambda, rep(NA, 4)), nrow = 1)
}
else {
out$lambda <- lambda.best
out$method <- method
}
out$best.model <- out$lambda
out$omega <- omega
out$d <- omega[1:nt]
## transform the omegas associated with the radial basis functions back
## into the c parameter vector.
##
##
temp <- omega
temp[1:nt] <- 0
out$c <- c(qr.qy(qr.T, temp))
out$coefficients <- c(omega[1:nt], out$c)
out$just.solve <- just.solve
res.df <- (N - out$trace) ##
##
## find an estimate of the residual standard deviation
## based on fitted spline
if(res.df > 0) {
out$GCV <- (sum(out$residuals^2 * weights)/N)/(1 - out$eff.df/N
)^2
out$shat <- sqrt(sum(out$residuals^2 * weights)/(res.df))
if(method == "user") {
## fill in the info for the lambda.est data frame
## for the user supplied value of lambda
lambda.est["user", 2] <- out$eff.df
lambda.est["user", 3] <- out$GCV
lambda.est["user", 4] <- out$shat
}
}
else {
out$shat <- 0
out$GCV <- NA
}
if(verbose) {
print("shat")
print(out$shat)
}
if(is.na(link.matrix[1])) {
r.square <- cor(out$fitted.values * sqrt(out$weights), (out$y) *
sqrt(out$weights))^2
out$r.square <- r.square ## calculate the q2 value
if(!just.solve) {
#
# don't do this if interplotating
#
cv.res <- out$residuals/(1 - diagA)
press <- sum((cv.res)^2)
rmse.press <- (press/length(cv.res))^0.5
ss.tot <- sum((y - mean(y))^2)
q2 <- (ss.tot - press)/ss.tot
out$q2 <- q2
out$press <- press
out$rmse.press <- rmse.press
}
}
else {
out$press <- NA
out$rmse.press <- NA
out$q2 <- NA
}
out$lambda.est <- lambda.est
out$best.model <- out$lambda #
#zap matrices if no return
#
if(!return.matrices)
out$matrices <- NA
out
}
|
9373e4d49fa3c22aa2a9c5b61d21a98cdfcc4c50
|
96b8bb4a1daa0ce00e4fdc8d0609d7df5f99f365
|
/03_Components/TableOfEachPrefectures/TableOfEachPrefectures.ui.R
|
b3ba3fbe96e7277240a44faa71b2fc3d46e932b8
|
[
"MIT"
] |
permissive
|
swsoyee/2019-ncov-japan
|
d7b8afab33c3a2869e1385e56239bb57efc0314d
|
c66a17ce4a40795f225cf02f0437906027a13e3e
|
refs/heads/master
| 2023-08-26T03:11:14.981392
| 2022-12-17T06:29:42
| 2022-12-17T06:29:42
| 237,152,814
| 433
| 104
|
MIT
| 2022-12-17T06:29:42
| 2020-01-30T06:26:57
|
R
|
UTF-8
|
R
| false
| false
| 489
|
r
|
TableOfEachPrefectures.ui.R
|
# UI fragment: a full-width box showing the per-prefecture situation table,
# with a sidebar switch toggling grouped display. Labels go through the
# i18n translator; the table spins while loading.
fluidRow(
  box(
    width = 12,
    title = tagList(
      icon("table"),
      i18n$t("各都道府県の状況")
    ),
    sidebar = boxSidebar(
      id = "tableOfEachPrefecturesBoxSidebar",
      width = 25,
      materialSwitch(
        inputId = "tableGrouping",
        status = "danger",
        value = TRUE,
        label = tagList(icon("object-group"), i18n$t("グルーピング表示"))
      )
    ),
    withSpinner(dataTableOutput("TableOfEachPrefectures"))
  )
)
|
a1330cda55e178dd915b0d312a9aa1b5102ae3bf
|
c76871b52892f19035cc793514e0efc16e3d20f2
|
/basics/Help_Training.R
|
093108b10b105346ce4d2156f18274e05677aa94
|
[] |
no_license
|
ptorres001/r_language
|
c8db51af9380fb5c8a70044508132ea152e7ab94
|
d95e0cf04dfef21a488b81bfdc34c65ea6afcf27
|
refs/heads/main
| 2023-02-22T17:05:24.971203
| 2021-01-26T16:40:01
| 2021-01-26T16:40:01
| 322,654,805
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 806
|
r
|
Help_Training.R
|
# Getting help in R: documentation can be pulled up with help(),
# the ?? shortcut, or help.search().
help('vector')
??vector
help.search('vector')

# Basics training exercises.

# 1. What is 2 to the power of 5?
2^5 # = 32

# 2. Create a vector called stock.prices with 23,27,23,21,34
stock.prices <- c(23, 27, 23, 21, 34)

# 3. Assign names to the stock prices
days <- c('Mon', 'Tues', 'Wed', 'Thurs', 'Fri')
names(stock.prices) <- days
stock.prices # all entries are now associated with their day

# 4. Average stock price
mean(stock.prices) # = 25.6

# 5. Create vector with logicals showing days prices were over 23
over.23 <- stock.prices > 23

# 6. Use over.23 to filter out days less than 23
high.prices <- stock.prices[over.23]

# 7. Use function to find highest day
high.day <- stock.prices == max(stock.prices)
week.high <- stock.prices[high.day]
|
b70e7db8f5f8eaf2081abac9a0ed71c6cf32b2ff
|
7e2195da126a5d1dac46025c3e912b546298241c
|
/G41_HW1/hw1.R
|
0c388f5b316b7b12f75a8afece81e0b583ebfda5
|
[] |
no_license
|
RJAIN-27/ALDA_CSC522_HW1
|
65f29974bbc0b2fb5a3d0efeb13c34db99c299c4
|
28743d596783ecb2d5aa54f82e95a657535cf09f
|
refs/heads/master
| 2022-06-25T09:02:44.665801
| 2020-05-07T05:29:45
| 2020-05-07T05:29:45
| 261,954,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,109
|
r
|
hw1.R
|
###########
# HW1
# Mention your team details here
# Srujana Rachakonda (srachak)
# Rajshree Jain (rjain27)
#
############
# You may use the following librarie(s):
require(plyr)
# If you get an error when running these lines,
# make sure to install the respective libraries
# read data matrix
# Read a data matrix from disk.
#
# Note 1: DO NOT change the function arguments.
# Args:
#   path: string path to a CSV file (default "./iris.csv").
# Returns:
#   A data frame containing the file's contents.
read_data <- function(path = "./iris.csv") {
  read.csv(path)
}
# Part 1: Distance Measurement
calculate_euclidean <- function(p, q) {
  # Input: p, q are numeric vectors of the same length.
  # Output: a single value of type double, containing the euclidean distance
  # between p and q.
  #
  # Computed directly as sqrt(sum((p - q)^2)). The previous rbind()/dist()
  # implementation returned a length-1 object of class "dist" (with
  # attributes), not the plain double the contract promises.
  sqrt(sum((p - q)^2))
}
calculate_cosine <- function(p, q) {
  # Input: p, q are numeric vectors of the same length
  # output: a single value of type double, labelled the "cosine distance" between p and q.
  #
  # NOTE(review): the value computed is sum(p*q) / (||p|| * ||q||), i.e. the
  # cosine SIMILARITY, not 1 - similarity. Confirm against the assignment
  # spec whether "cosine distance" here is deliberately defined as the
  # similarity before changing anything.
  # (The variable named dot_product below actually holds the product of the
  # two vector norms, not a dot product.)
  dot_product <- sqrt(sum(p^2)) * sqrt(sum(q^2))
  cosine_distance <- sum(p*q) / dot_product
  return(cosine_distance)
}
# Chebyshev (L-infinity) distance: the largest absolute coordinate-wise
# difference between the two vectors.
# Input: p, q are numeric vectors of the same length.
# Output: a single value of type double.
calculate_l_inf <- function(p, q) {
  abs_diffs <- abs(p - q)
  max(abs_diffs)
}
# Part 2: principal Component Analysis
# Principal component analysis of the Iris data.
# Input: data: the Iris dataframe, 4 numeric attributes plus a 5th nominal
#        class column (which is dropped before the PCA).
#        n: which principal component's weights to return (1 = first PC).
# Output: a length-4 double vector: the loadings (eigenvector) of the nth
#         principal component.
principal_component_analysis <- function(data, n){
  numeric_cols <- subset(data, select = -c(5))
  pca_fit <- prcomp(numeric_cols, scale = FALSE)
  # Column n of the rotation matrix holds the nth component's weights.
  as.numeric(pca_fit$rotation[, n])
}
# Project a sample onto a principal component.
# Input: p is a numeric vector of length n (e.g. a row of the dataset).
#        component_weights is a length-n weight vector for one principal
#        component (e.g. the output of principal_component_analysis).
# Output: a single double: the component score (dot product) of the sample.
principal_component_calculation <- function(p, component_weights){
  score <- sum(p * component_weights)
  score
}
# Distance between two samples after projection onto one principal component.
# Input: p, q are numeric vectors of length n (rows of the original dataset).
#        component_weights is a length-n principal-component weight vector
#        (e.g. the output of principal_component_analysis).
# Output: a single double, |PC_p - PC_q| along that component.
pc1_distance <- function(p, q, component_weights) {
  score_p <- principal_component_calculation(p, component_weights)
  score_q <- principal_component_calculation(q, component_weights)
  abs(score_p - score_q)
}
|
119b41c76575e794ed6423842af4caf8e2fdf5cd
|
6330d1c33fb6688e3f74b7ea5aa0c377bd852090
|
/plot2.R
|
2103562d49966c7bec289c012a0e0ee5f832c42a
|
[] |
no_license
|
d4ndo/ExData_Plotting1
|
8292ffe536938161ed67bc2409e7fcffa96a022c
|
14132783eff9cfd7888804e83adf8a1151e361da
|
refs/heads/master
| 2021-01-15T13:30:14.338543
| 2014-12-07T21:07:45
| 2014-12-07T21:07:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,525
|
r
|
plot2.R
|
# Usage:
#
# To download the dataset to your working directory call the function:
# getDataSet()
#
# plot2("household_power_consumption.txt")
# Plot the Global Active Power time series for 2007-02-01 .. 2007-02-02 and
# save it as plot2.png (480 x 480 px) in the working directory.
#
# Args:
#   file: character path to the unzipped household power consumption file
#         (semicolon-separated, "?" marks missing values, 9 columns).
# Side effects: writes plot2.png; prints "done" when finished.
plot2 <- function(file) {
    stopifnot(is.character(file))
    if(!file.exists(file)) {
        stop("File not found. Please call the function getDataSet() to download
the Dataset house_power_consumption.zip or alternative download it from here
https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
and try again.")
    }
    # Set locale to US for date handling, restoring the caller's locale on exit.
    old_locale <- Sys.getlocale('LC_TIME')
    on.exit(Sys.setlocale('LC_TIME', old_locale), add = TRUE)
    Sys.setlocale('LC_TIME', 'C')
    power_consumption <- read.csv(file, sep = ";",
                                  na.strings = "?",
                                  colClasses = c("character", "character",
                                                 "numeric", "numeric",
                                                 "numeric", "numeric",
                                                 "numeric", "numeric", "numeric"))
    # Coerce $Date (column 1) to Date.
    power_consumption[,1] <- as.Date(power_consumption[,1], "%d/%m/%Y")
    # Add new column combining date and time for the x axis.
    datetime <- strptime(paste(power_consumption[,1], power_consumption[,2]), "%Y-%m-%d %H:%M:%S")
    power_consumption <- cbind(datetime, power_consumption)
    # Get subset of data from 2007-02-01 to 2007-02-02.
    # BUG FIX: the lower bound used to be as.Date(" 2007-02-01", ...) with a
    # stray leading space; "%Y-%m-%d" does not match it, the parse returned
    # NA, and the subset silently dropped every row.
    power_consumption <- subset(power_consumption,
                                Date >= as.Date("2007-02-01", "%Y-%m-%d") &
                                Date <= as.Date("2007-02-02", "%Y-%m-%d"))
    # Clean up NA's: no NA's allowed in Global_active_power.
    power_consumption <- power_consumption[!is.na(power_consumption$Global_active_power),]
    # Open graphics device at 480 x 480 px.
    png(filename = "plot2.png", width = 480, height = 480, bg = "transparent")
    plot(power_consumption$datetime,
         power_consumption$Global_active_power,
         type = "l", xlab="", ylab = "Global Active Power (kilowatts)")
    dev.off()
    print("done")
}
# getDataSet
# Download and verify dataset
# Depends on package (tools) and (utils)
# Download the household power consumption dataset into the working
# directory, verify its MD5 checksum, and unzip it.
# Depends on package tools (md5sum) and utils (download.file, unzip).
getDataSet <- function() {
    library(tools)
    library(utils)
    zip_name <- "household_power_consumption.zip"
    download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                  zip_name)
    # Abort if the archive does not match the expected checksum.
    checksum <- md5sum(zip_name)
    stopifnot(identical(as.character(checksum), "41f51806846b6b567b8ae701a300a3de"))
    unzip(zip_name)
}
|
69da6c620279f06b695887b6fd69e08e31064c4b
|
8d107a4dac3240917aab93f589b8f2833db63c10
|
/hclust.R
|
3fa2a8385646a78e5db2577a57ea35b33e1f2790
|
[] |
no_license
|
hnthirima/hierarchical_clustering
|
cbd32ba34a373b784d09b06b99222fae38f91dfb
|
a6d7bf35698dddc17e05a37f8f40cab0f5eb675f
|
refs/heads/main
| 2023-08-21T22:26:26.402406
| 2021-05-14T04:18:47
| 2021-05-14T04:18:47
| 367,245,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,078
|
r
|
hclust.R
|
# Hierarchical clustering of RNA-seq CPM values for significantly
# differentially expressed (DE) protein-coding genes in HCT116 WT/Del/R
# samples, followed by a clustered heatmap.
#
# NOTE(review): heatmap.2() below comes from the gplots package, which is
# not attached in this script -- confirm it is loaded elsewhere before
# running. (colorRampPalette itself is in grDevices.)
library(dplyr)
library(ggplot2)
#### RNAseq clustering
# Read the protein-coding gene table and keep genes flagged DE (up, 1, or
# down, -1) in either the Del-vs-WT or the R-vs-WT comparison.
Hct_RNAseq_WTDelR_pc <- read.csv("./Hct116_RNAseq_WTDelR_pcgenes.csv")
Hct_RNAseq_WTDelR_pc_sig <- Hct_RNAseq_WTDelR_pc %>% filter(Del_vs_WT.is.DE == 1 | R_vs_WT.is.DE == 1 | Del_vs_WT.is.DE == -1 | R_vs_WT.is.DE == -1)
# Keep the gene name (column 1) plus the CPM columns (10:18).
Hct_RNAseq_WTDelR_pcsigCPM <- Hct_RNAseq_WTDelR_pc_sig %>% select(1, 10:18)
write.csv(Hct_RNAseq_WTDelR_pcsigCPM, file = "./Hct_RNAseq_WTDelR_pcsigCPM.csv")
# Remove the first column of the csv file and re-read.
# NOTE(review): this appears to expect a manual edit of the file between the
# write.csv above and the read.csv below -- TODO confirm the workflow.
Hct_RNAseq_WTDelR_pcsigCPM_edit <- read.csv("./Hct_RNAseq_WTDelR_pcsigCPM.csv")
#write.csv(Hct_RNAseq_WTDelR, file="./Hct_RNAsew_WTDelR.csv")
#Hct_RNAseq_WTDelR_rmdup <- read.csv("./Hct_RNAsew_WTDelR_edited.csv")
#Hct_RNAseq_WTDelR_edit1 <- Hct_RNAseq_WTDelR_rmdup[, -(9:11)]
#Hct_RNAseq_WTDelR_edit <- Hct_RNAseq_WTDelR_edit1[, -1]
#Reading in data
mydata <- data.frame(Hct_RNAseq_WTDelR_pcsigCPM_edit)
#Convert dataframe to a matrix
mydata_matrix <- mydata %>%
  dplyr::select(-Gene_name) %>%
  as.matrix()
#assign rownames
rownames(mydata_matrix) <- mydata$Gene_name
# Scale the data (to obtain per-gene z-scores).
mydata_matrix <- mydata_matrix %>%
  # transpose the matrix so genes are as columns
  t() %>%
  # apply scaling to each column of the matrix (genes)
  scale() %>%
  # transpose back so genes are as rows again
  t()
# Complete-linkage clustering on Euclidean distances.
hclustfunc <- function(x) hclust(x, method = "complete")
distfunc <- function(x) dist(x, method = "euclidean")
d <- distfunc(mydata_matrix)
fit <- hclustfunc(d)
#plot dendrogram only
plot(fit)
# Cut the tree into 6 clusters and save the gene -> cluster assignment.
groups <- cutree(fit, k=6)
write.csv(groups, file = "./Hct116_RNAseq_WTDelRpcsig_clusters.csv" )
#Add a rectangle to identify cluster
#rect.hclust(fit, k=4, border = "red")
#Plot heatmap with dendrogram, rows colored by cluster membership.
my_pallete <- colorRampPalette(c("blue", "white", "red"))(n=299)
heatmap.2(as.matrix(mydata_matrix), dendrogram ="both",trace="none",margin=c(8,9), hclust=hclustfunc, distfun = distfunc, RowSideColors =as.character(groups), col = my_pallete,
          key = TRUE, density.info = "none", Colv = FALSE)
#Export heatmap as PDF to preserve resolution.
dev.off()
|
e547d8817ec7c7e42bccdd3b265f9f33dc7ded61
|
27e2240acae4c110b4a853f67eaabfb317d8ec93
|
/Projekt1/KomKruKur/get_from_git.R
|
0d0f35f6a2e18dafa1c3d7cee44cb6c23ba27864
|
[] |
no_license
|
pbiecek/AdvancedR2018
|
97dbf5fc71d650b8b3f3097f5d82ccb01adc3043
|
26bfd79c0c9fd8fd95185a9afe210e9f7a9d4627
|
refs/heads/master
| 2018-09-19T10:54:58.171788
| 2018-06-13T10:44:23
| 2018-06-13T10:44:23
| 125,171,416
| 9
| 28
| null | 2018-06-13T10:44:24
| 2018-03-14T07:17:46
|
HTML
|
UTF-8
|
R
| false
| false
| 2,237
|
r
|
get_from_git.R
|
options(stringsAsFactors = FALSE)
library(rvest)
library(httr)
library(jsonlite)
library(dplyr)
library(stringr)
# Crawl GitHub for R repositories and download their .R files into `folder`.
#
# Args:
#   folder: local directory under which <user>/<repo> subdirectories are made.
#   downloaded_repos_limit: stop after this many repositories (default Inf).
#   git_login, git_password: credentials passed to the GitHub API.
#     NOTE(review): GitHub has deprecated password auth for the API --
#     confirm these are used as login + personal access token.
# Side effects: creates directories and downloads files under `folder`.
save_from_GIT <- function(folder, downloaded_repos_limit = Inf, git_login, git_password) {
  downloaded_repos <- 0
  # Build the list of users: 2 pages x 100 users having 50-100 repos each.
  users <- c()
  for (i in 1:2) {
    url <- paste0('https://api.github.com/search/users?q=type:user+language:r+repos:50..100&per_page=100&page=', i)
    res <- GET(url, authenticate(git_login, git_password))
    stop_for_status(res)
    r <- as.data.frame(fromJSON(content(res, 'text'), flatten = T))
    users <- c(users, as.vector(r$items.login))
  }
  # BUG FIX: previously iterated users[160:length(users)], silently skipping
  # the first 159 users (a leftover resume index). Process every user.
  for (user in users) {
    repos <- c()
    Sys.sleep(2) # stay under the GitHub API rate limit
    tryCatch({
      url <- paste0('https://api.github.com/search/repositories?q=user:', user, '+language:r&per_page=100')
      res <- GET(url, authenticate(git_login, git_password))
      if (status_code(res) == 200) {
        r <- as.data.frame(fromJSON(content(res, 'text'), flatten = T))
        repos <- c(repos, r$items.name)
        if (length(repos) > 0 && !dir.exists(file.path(folder, user))) {
          dir.create(file.path(folder, user))
        }
        for (repo in repos) {
          Sys.sleep(2)
          res1 <- GET(paste0('https://api.github.com/repos/', user, '/', repo, '/git/trees/master?'),
                      authenticate(git_login, git_password))
          # BUG FIX: this used to re-check status_code(res); check res1,
          # the response whose body is actually parsed below.
          if (status_code(res1) == 200) {
            files <- unlist(lapply(content(res1)$tree, "[", "path"), use.names = F)
            # BUG FIX: the pattern '.R$' matched ANY character before a
            # trailing R (e.g. "BAR"); escape the dot to match *.R only.
            files <- files[str_detect(files, '\\.R$')]
            if (length(files) > 0 && !dir.exists(file.path(folder, user, repo))) {
              dir.create(file.path(folder, user, repo))
            }
            for (fil in files) {
              tryCatch(
                download.file(url =
                                paste0('https://raw.githubusercontent.com/', user, '/', repo, '/master/', fil),
                              destfile = paste0(file.path(folder, user, repo), '/', fil), quiet = TRUE),
                error = function(e) {})
            }
            downloaded_repos <- downloaded_repos + 1
            # BUG FIX: stop once the limit is reached (was '>', which
            # downloaded one repository beyond the requested limit).
            if (downloaded_repos >= downloaded_repos_limit) {
              return()
            }
          }
        }
      }
    }, error = function(e) {})
  }
}
|
cf76bcda96d68c4b1fb3503b5bfc9bf98c544a1e
|
6abe25da7b2fb3975d4e42560677d7ab4d0c0d5f
|
/FactFinding_DemoFunctions/OptFitting.R
|
55b5a3a9ec220ab8dbb3898964c831bb791f60e1
|
[] |
no_license
|
hankdikeman/TESummerProj2020
|
c4eccb0719762ee61e49dd620c3ccf6af95c9f15
|
36fe9a3f36e52a88c4f7752961f66a648a06c1ba
|
refs/heads/master
| 2022-12-04T02:45:18.422787
| 2020-08-25T01:11:33
| 2020-08-25T01:11:33
| 275,434,489
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,024
|
r
|
OptFitting.R
|
# this is a simple driver file used to develop predicted models for yield
#
# Sweeps the initial alcohol concentration over 50 values, integrates the
# kinetic model with RK4 at each value, then fits linear and quadratic
# models of yield vs. alcohol and plots both fits against the simulation.
#
# NOTE(review): transmute()/mutate() are dplyr and ggplot()/geom_line()/aes()
# are ggplot2; neither package is attached in this file. Presumably RK4.R or
# dC.R loads them -- TODO confirm. RK4() is defined in the sourced RK4.R.
source("./RK4.R")
source("./dC.R")
#kvals dataframe: rate constants, converted from per-minute to per-second (/60)
kvals <- data.frame(matrix(c(5.95,2.98,9.399,7.311,15.18,0.698,0.0133,0.3456,0.7474,0.4654), nrow = 1))/60
colnames(kvals) <- c("k1","k1r","k2","k2r","k3","k3r","k4","k5","k6","k7")
# initial species concentration values
initialvals <- data.frame(matrix(c(0,1,0,0,0,0,0,0.08), nrow = 1, ncol = 8))
colnames(initialvals) <- c("Ester","TG","DG","MG","ROH","G","S","OH")
# scale factor used to nondimensionalize all concentrations
scale_factor <- 1.035
# total time and time step
time <- 500
t_step <- 10
# One row per alcohol level: initial value, final yield, remaining catalyst.
yield_by_alc <- data.frame(matrix(0, nrow = 50, ncol = 3))
colnames(yield_by_alc) <- c("Alc_IV","Yield","OH_Remain")
for(alc in 1:50){
	# calculate initial alcohol concentration (nondimensional), 2.1 .. 7.0
	initialAlc <- 2 + 0.1*alc
	yield_by_alc[alc,"Alc_IV"] <- initialAlc/3*100
	initialvals[1,"ROH"] <- initialAlc
	# integrate using RK4 to find yield; take the final time row
	final_conc <- RK4(kvals,initialvals,time,t_step,scale_factor)[(time/t_step),]
	# column 1 = Ester (yield), column 8 = OH (remaining base, as % of 0.08)
	yield_by_alc[alc,"Yield"] <- final_conc[1,1]/3*100
	yield_by_alc[alc,"OH_Remain"] <- final_conc[1,8]/0.08*100
}
# get coefficients for modelling
yield_for_model <- transmute(yield_by_alc, Yield = Yield, ROH = Alc_IV, ROHsq = Alc_IV^2, ROHcub = Alc_IV^3)
# build 1st model, linear
linearMod1 <- lm(Yield ~ ROH, data=yield_for_model)
mod1_coeff <- summary(linearMod1)$coefficients
# build 2nd model, quadratic
linearMod2 <- lm(Yield ~ ROH + ROHsq, data=yield_for_model)
mod2_coeff <- summary(linearMod2)$coefficients
# build in predicted values from each model
yield_for_model <- mutate(yield_for_model, mod1Pred = mod1_coeff[1,1] + mod1_coeff[2,1]*ROH, mod2Pred = mod2_coeff[1,1] + mod2_coeff[2,1]*ROH + mod2_coeff[3,1]*ROHsq)
# Overlay: simulated yield (blue), linear fit (red), quadratic fit (green).
ggplot(data = yield_for_model) +
	geom_line(mapping = aes(x = ROH, y = Yield), color = 'blue') +
	geom_line(mapping = aes(x = ROH, y = mod1Pred), color = 'red') +
	geom_line(mapping = aes(x = ROH, y = mod2Pred), color = 'green')
|
bcc28d88daff713f476e814b2d7d30d48e1e0bc4
|
df7bd78f5f6443d6b8357d4ff570839c58fb1985
|
/R/get_filereport.R
|
b27a111ac5102b89a50ad3260fd1b7cf53f9d782
|
[] |
no_license
|
uashogeschoolutrecht/structural_colours
|
1dcda70f4071fd2ea90e8996c79689a8dd3ed2dd
|
904037de54933abdaa47a18833f5d0c3021fa9a6
|
refs/heads/master
| 2022-10-21T19:54:11.554503
| 2020-06-15T09:42:47
| 2020-06-15T09:42:47
| 206,521,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,403
|
r
|
get_filereport.R
|
#' Download the filereport of a study and return it as a data frame.
#'
#' @param url Url of the filereport. The url of a filereport of a study can be
#'   gotten from the corresponding ebi metagenomic website by saving the link
#'   address of the 'TEXT' button (e.g. see https://www.ebi.ac.uk/ena/data/view/PRJEB8968)
#' @param acc The study accession, e.g. "PRJEB8968". Kept for backward
#'   compatibility; no longer needed to locate the downloaded file.
#'
#' @return The filereport as a data frame.
#' @export
#'
#' @examples get_filereport(url = "https://www.ebi.ac.uk/ena/data/warehouse/filereport?accession=PRJEB8968&result=read_run&fields=study_accession,sample_accession,secondary_sample_accession,experiment_accession,run_accession,tax_id,scientific_name,instrument_model,library_layout,fastq_ftp,fastq_galaxy,submitted_ftp,submitted_galaxy,sra_ftp,sra_galaxy,cram_index_ftp,cram_index_galaxy&download=txt",
#'  acc = "PRJEB8968")
get_filereport = function(url, acc) {
  # Download straight to a temp file with utils::download.file() instead of
  # shelling out to `wget` and scanning the working directory for a file
  # matching `acc` (non-portable; also errored with an undefined `filepath`
  # when no file matched). download.file() errors on failure.
  filepath = tempfile(fileext = ".txt")
  on.exit(unlink(filepath), add = TRUE)  # always remove the intermediate file
  utils::download.file(url, destfile = filepath, quiet = TRUE)
  filereport = read.csv(filepath, sep = "\t")
  return(filereport)
}
|
bb6c54284f772792307bad92af99cfbddc9b189c
|
d69e83586753456a6bb387a0122479ceb2bdc6e0
|
/R/odds.R
|
65bda9c6ed8f355872fb4e3e76730c2016efcd2a
|
[] |
no_license
|
cran/swfscMisc
|
6f1e85dba988db1e235f3e2f58ef0b831567736d
|
3644ab0ba65c3e24f11627865d00910e4d97c82f
|
refs/heads/master
| 2022-05-13T09:41:35.823940
| 2022-04-21T12:30:02
| 2022-04-21T12:30:02
| 22,867,576
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 944
|
r
|
odds.R
|
#' @name odds
#' @title Odds Conversion
#' @description \tabular{ll}{
#'   \code{odds} \tab converts probability to odds \cr
#'   \code{logOdds} \tab converts odds to log-odds \cr
#'   \code{invOdds} \tab converts odds to probability \cr
#'   \code{invLogOdds} \tab converts log-odds to odds \cr
#' }
#'
#' @param x a numeric vector of probabilities (0 to 1), odds (0 to Inf), or log.odds (-Inf to Inf).
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @examples
#' x <- sort(runif(10))
#' odds.df <- data.frame(x = x, odds = odds(x), logOdds = logOdds(x))
#' odds.df
#' invOdds(odds.df$odds)
#' invLogOdds(odds.df$logOdds)
#'
#' @export
#'
odds <- function(x) {
  # Probabilities outside [0, 1] map to NA; otherwise p / (1 - p).
  out.of.range <- x < 0 | x > 1
  ifelse(out.of.range, as.numeric(NA), x / (1 - x))
}

#' @rdname odds
#' @export
#'
logOdds <- function(x) {
  # Natural log of the odds.
  log(odds(x))
}

#' @rdname odds
#' @export
#'
invOdds <- function(x) {
  # Odds back to probability.
  x / (1 + x)
}

#' @rdname odds
#' @export
#'
invLogOdds <- function(x) {
  # NOTE(review): exp(x) / (1 + exp(x)) is the logistic function, mapping
  # log-odds to PROBABILITY; the @description table says "log-odds to odds"
  # -- confirm which is intended upstream.
  exp(x) / (1 + exp(x))
}
|
f521a2417c4275751d5973cd9232314adbb9a83d
|
7a7375245bc738fae50df9e8a950ee28e0e6ec00
|
/man/STE__Year_Sexunemployment.Rd
|
7e7d17bba275f8ca5c59f6b0a482b49fc4950c16
|
[] |
no_license
|
HughParsonage/Census2016.DataPack.TimeSeries
|
63e6d35c15c20b881d5b337da2f756a86a0153b5
|
171d9911e405b914987a1ebe4ed5bd5e5422481f
|
refs/heads/master
| 2021-09-02T11:42:27.015587
| 2018-01-02T09:01:39
| 2018-01-02T09:02:17
| 112,477,214
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 355
|
rd
|
STE__Year_Sexunemployment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STE__Year_Sexunemployment.R
\docType{data}
\name{STE__Year_Sexunemployment}
\alias{STE__Year_Sexunemployment}
\title{Sex by STE, Year}
\format{54 observations and 4 variables.}
\usage{
STE__Year_Sexunemployment
}
\description{
UnemploymentSex by STE, Year
}
\keyword{datasets}
|
074f96bd44b6a061931f8b13f8b5e98c00343871
|
d48a6be6d855db72443aa767d680e13596e2a180
|
/RMark/man/var.components.reml.Rd
|
7520e9d25db454d0b81f158e1ca523a400d1b4f9
|
[] |
no_license
|
jlaake/RMark
|
f77e79d6051f1abfd57832fd60f7b63540a42ab9
|
7505aefe594a24e8c5f2a9b0b8ac11ffbdb8a62d
|
refs/heads/master
| 2023-06-26T21:29:27.942346
| 2023-06-25T16:35:43
| 2023-06-25T16:35:43
| 2,009,580
| 17
| 15
| null | 2019-01-10T17:17:11
| 2011-07-06T23:44:02
|
R
|
UTF-8
|
R
| false
| true
| 3,372
|
rd
|
var.components.reml.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/var.components.reml.r
\name{var.components.reml}
\alias{var.components.reml}
\title{Variance components estimation using REML or maximum likelihood}
\usage{
var.components.reml(
theta,
design,
vcv = NULL,
rdesign = NULL,
initial = NULL,
interval = c(-25, 10),
REML = TRUE
)
}
\arguments{
\item{theta}{vector of parameter estimates}
\item{design}{design matrix for fixed effects combining parameter estimates}
\item{vcv}{estimated variance-covariance matrix for parameters}
\item{rdesign}{design matrix for random effect (do not use intercept form;
eg use ~-1+year instead of ~year); if NULL fits only iid error}
\item{initial}{initial values for variance components}
\item{interval}{interval bounds for log(sigma) to help optimization from
going awry}
\item{REML}{if TRUE uses reml else maximum likelihood}
}
\value{
A list with the following elements \item{neglnl}{negative
log-likelihood for fitted model} \item{AICc}{small sample corrected AIC for
model selection} \item{sigma}{variance component estimates; if rdesign=NULL,
only an iid error; otherwise, iid error and random effect error}
\item{beta}{dataframe with estimates and standard errors of betas for
design} \item{vcv.beta}{variance-covariance matrix for beta}
}
\description{
Computes estimated effects, standard errors and variance components for a
set of estimates
}
\details{
The function \code{\link{var.components}} uses method of moments to estimate
a single process variance but cannot fit a more complex example. It can
only estimate an iid process variance. However, if you have a more
complicated structure in which you have random year effects and want to
estimate a fixed age effect then \code{\link{var.components}} will not work
because it will assume an iid error rather than allowing a common error for
each year as well as an iid error. This function uses restricted maximum
likelihood (reml) or maximum likelihood to fit a fixed effects model with an
optional random effects structure. The example below provides an
illustration as to how this can be useful.
}
\examples{
\donttest{
# This example is excluded from testing to reduce package check time
# Use dipper data with an age (0,1+)/time model for Phi
data(dipper)
dipper.proc=process.data(dipper,model="CJS")
dipper.ddl=make.design.data(dipper.proc,
parameters=list(Phi=list(age.bins=c(0,.5,6))))
levels(dipper.ddl$Phi$age)=c("age0","age1+")
md=mark(dipper,model.parameters=list(Phi=list(formula=~time+age)),delete=TRUE)
# extract the estimates of Phi
zz=get.real(md,"Phi",vcv=TRUE)
# assign age to use same intervals as these are not copied
# across into the dataframe from get.real
zz$estimates$age=cut(zz$estimates$Age,c(0,.5,6),include=TRUE)
levels(zz$estimates$age)=c("age0","age1+")
z=zz$estimates
# Fit age fixed effects with random year component and an iid error
var.components.reml(z$estimate,design=model.matrix(~-1+age,z),
zz$vcv,rdesign=model.matrix(~-1+time,z))
# Fitted model assuming no covariance structure to compare to
# results with lme
xx=var.components.reml(z$estimate,design=model.matrix(~-1+age,z),
matrix(0,nrow=nrow(zz$vcv),ncol=ncol(zz$vcv)),
rdesign=model.matrix(~-1+time,z))
xx
sqrt(xx$sigmasq)
library(nlme)
nlme::lme(estimate~-1+age,data=z,random=~1|time)
}
}
\author{
Jeff Laake
}
|
ec3cb0fb9c3f6ddcf27848367682d420a91ccee4
|
55b247a9009dee1924c6d9095b84bd9a78845644
|
/R/FamiliasPosterior.R
|
c97a948d5ee82e7d50dadcc4469a813414d35fe5
|
[] |
no_license
|
thoree/Familias
|
b4f508e006426b72df31077f931711fd65b792b9
|
b92d41113a8af5740e1df7dd3ce75343cc286fe1
|
refs/heads/master
| 2022-12-21T11:42:26.796495
| 2022-12-15T12:17:34
| 2022-12-15T12:17:34
| 150,735,203
| 7
| 1
| null | 2022-12-15T12:17:36
| 2018-09-28T12:10:46
|
C++
|
UTF-8
|
R
| false
| false
| 12,269
|
r
|
FamiliasPosterior.R
|
# Compute posterior pedigree probabilities and likelihood ratios (LRs)
# for a set of candidate pedigrees given DNA marker data, delegating the
# likelihood computations to the compiled Familias core via .C().
#
# Arguments:
#   pedigrees  - a 'pedigree' or 'FamiliasPedigree' object, or a list of
#                such; persons shared across pedigrees must have the
#                same sex in every pedigree.
#   loci       - a 'FamiliasLocus' object or a list of such.
#   datamatrix - one row per person, two columns per locus holding the
#                observed alleles; rownames identify the persons.
#   prior      - prior probability per pedigree; defaults to uniform.
#   ref        - index of the reference pedigree (LR denominator).
#   kinship    - kinship (theta) correction coefficient; must be >= 0.
#   simplifyMutations - if TRUE, ask the core to simplify mutation
#                matrices.
#
# Returns a list with posterior, prior, LR, LRperMarker, likelihoods and
# likelihoodsPerSystem.
#
# NOTE(review): the `class(x) == "..."` / `class(x) != "..."` tests
# below error in R >= 4.2 when an object carries more than one class;
# inherits() would be the robust idiom. Left unchanged here to keep the
# code byte-identical.
FamiliasPosterior <- function (pedigrees, loci, datamatrix, prior, ref = 1, kinship = 0,
    simplifyMutations = FALSE)
{
    # ---- Validate pedigrees and normalize a single pedigree to a list ----
    if (missing(pedigrees) || length(pedigrees) < 1)
        stop("The pedigrees parameter must be an object of type 'pedigree' or 'FamiliasPedigree', or a list of such.")
    if (class(pedigrees) == "pedigree" | class(pedigrees) ==
        "FamiliasPedigree")
        pedigrees <- list(pedigrees)
    if (class(pedigrees) != "list")
        stop("The pedigrees parameter must be an object of type 'pedigree' or 'FamiliasPedigree', or a list of such.")
    for (i in pedigrees) {
        if (class(i) != "pedigree" && class(i) != "FamiliasPedigree")
            stop("The pedigrees parameter must be an object of type 'pedigree' or 'FamiliasPedigree', or a list of such.")
    }
    npeds <- length(pedigrees)
    if (ref < 1 | ref > npeds)
        stop("Impossible reference pedigree index.")
    # Default to a uniform prior over the pedigrees
    if (missing(prior))
        prior <- rep(1/npeds, npeds)
    if (length(prior) != npeds)
        stop("The prior argument must be a vector of the same length as the number of pedigrees, if it is not missing.")
    if (any(prior < 0) || round(sum(prior), 6) != 1)
        stop("The prior must consist of non-negative numbers summing to 1.")
    if (missing(datamatrix))
        stop("The datamatrix must be supplied.")
    if (length(rownames(datamatrix)) < 1)
        stop("The row names of the datamatrix must contain the names of the persons you have data for.")
    # ensure untyped persons in the pedigree are present in the datamatrix if kinship>0
    # because pruning those persons would affect the likelihood if mutations are possible
    npersDatamatrixOriginal <- nrow(datamatrix)
    if (kinship>0){
      # Add all-NA rows for pedigree members missing from the datamatrix
      pedigreePersons <- unique(unlist(lapply(pedigrees, function(x) x$id)))
      personsToAdd <- pedigreePersons[!pedigreePersons %in% rownames(datamatrix)]
      datamatrixBlanks <- matrix(NA, nrow = length(personsToAdd),
                                 ncol = ncol(datamatrix),
                                 dimnames = list(personsToAdd))
      datamatrix <- rbind(datamatrix, datamatrixBlanks)
    }
    personsDatamatrix <- rownames(datamatrix)
    npersDatamatrix <- length(personsDatamatrix)
    # pedPersonIndicesByDmPersonIndex[j, i]: position of datamatrix
    # person j inside pedigree i (0 when absent from that pedigree)
    pedPersonIndicesByDmPersonIndex <- matrix(0, npersDatamatrix, npeds)
    for (i in 1:npeds) for (j in 1:npersDatamatrix) {
        pedPersonIndicesByDmPersonIndex[j, i] <- match(personsDatamatrix[j], pedigrees[[i]]$id,
            nomatch = 0)
        # A typed person (any non-NA allele) must appear in every pedigree
        if (pedPersonIndicesByDmPersonIndex[j, i] == 0 &&
              (!all(is.na(datamatrix[j,]))))
        {
            stop(paste("Error: Person ", personsDatamatrix[j], "of the data matrix is typed and does not occur in pedigree",
                i))
        }
        if (i > 1) {
            # Cross-check sex against the first pedigree containing this person
            firstObservedInPed <- 1
            while(pedPersonIndicesByDmPersonIndex[j,firstObservedInPed]==0) firstObservedInPed <- firstObservedInPed + 1
            if ((pedigrees[[i]]$sex[pedPersonIndicesByDmPersonIndex[j, i]] > 0) &&
                (pedigrees[[i]]$sex[pedPersonIndicesByDmPersonIndex[j, i]] !=
                pedigrees[[firstObservedInPed]]$sex[pedPersonIndicesByDmPersonIndex[j, firstObservedInPed]]))
                stop("Persons common to all pedigrees must have the same sex in all pedigrees!")
        }
    }
    # ---- Reset the C-side state and register all datamatrix persons ----
    .C("NewFamilias")
    for (j in 1:npersDatamatrix) {
        # Take the person's sex from the first pedigree that contains them
        iPed <- 1
        while(pedPersonIndicesByDmPersonIndex[j,iPed]==0) iPed <- iPed + 1
        result <- .C("AddPerson", as.integer(!(pedigrees[[iPed]]$sex[pedPersonIndicesByDmPersonIndex[j,
            iPed]] == "female")), as.integer(-1), as.integer(FALSE),
            index = integer(1), error = integer(1))
        if (result$error > 0)
            stop("ERROR: Problems with common persons in pedigree.")
    }
    # ---- Register each pedigree and its parent-child relations ----
    for (i in pedigrees) {
        nPersonsPed <- length(i$sex)
        # neworder maps each pedigree member to the C-side person index:
        # datamatrix persons keep their row position; "extra" (untyped,
        # pedigree-only) persons are numbered after them, females first
        neworder <- rep(0, nPersonsPed)
        nExMales <- nExFemales <- 0
        for (j in 1:nPersonsPed) {
            mm <- match(i$id[j], personsDatamatrix, nomatch = 0)
            if (mm > 0)
                neworder[j] <- mm
            else if (i$sex[j] == "female") {
                nExFemales <- nExFemales + 1
                neworder[j] <- nExFemales
            }
            else {
                nExMales <- nExMales + 1
                neworder[j] <- nExMales
            }
        }
        for (j in 1:nPersonsPed) {
            if (!(i$id[j] %in% personsDatamatrix)) {
                if (i$sex[j] == "female")
                  neworder[j] <- neworder[j] + npersDatamatrix
                else neworder[j] <- neworder[j] + npersDatamatrix + nExFemales
            }
        }
        result <- .C("AddPedigree", as.integer(nExFemales), as.integer(nExMales),
            index = integer(1), error = integer(1))
        if (result$error > 0)
            stop("ERROR: Wrong input in pedigrees.")
        index <- result$index + 1
        # findex / mindex give each member's father / mother position
        for (j in 1:nPersonsPed) {
            if (i$findex[j] > 0) {
                result <- .C("AddRelation", as.integer(neworder[i$findex[j]] -
                  1), as.integer(neworder[j] - 1), as.integer(index -
                  1), error = integer(1))
                if (result$error == 1)
                  stop("ERROR: Wrong input.")
                else if (result$error == 2)
                  stop("ERROR: Illegal relation based on Year-of-birth or is-Child data.")
                else if (result$error == 3)
                  stop("ERROR: Cycle in the pedigree or duplicate parent.")
            }
            if (i$mindex[j] > 0) {
                result <- .C("AddRelation", as.integer(neworder[i$mindex[j]] -
                  1), as.integer(neworder[j] - 1), as.integer(index -
                  1), error = integer(1))
                if (result$error == 1)
                  stop("ERROR: Wrong input.")
                else if (result$error == 2)
                  stop("ERROR: Illegal relation based on Year-of-birth or is-Child data.")
                else if (result$error == 3)
                  stop("ERROR: Cycle in the pedigree or duplicate parent.")
            }
        }
    }
    # ---- Validate loci and push each allele system to the C core ----
    if (missing(loci) || length(loci) < 1)
        stop("The loci argument must be a FamiliasLocus object or a list of such.")
    if (class(loci) == "FamiliasLocus")
        loci <- list(loci)
    if (class(loci) != "list")
        stop("The loci argument must be a FamiliasLocus object or a list of such.")
    nloci <- length(loci)
    nms <- rep("", nloci)
    for (i in 1:nloci) {
        if (class(loci[[i]]) != "FamiliasLocus")
            stop("The loci argument must be a FamiliasLocus object or a list of such.")
        nms[i] <- loci[[i]]$locusname
    }
    if (anyDuplicated(nms))
        stop("There can be no duplicated names of the loci.")
    for (i in loci) {
        # Frequencies must be positive and sum to 1 (6-decimal tolerance)
        if (any(i$alleles <= 0))
            stop(paste("ERROR: Problems with allele frequencies in locus",
                i$locusname))
        if (round(sum(i$alleles), 6) != 1)
            stop(paste("ERROR: Allele frequencies must sum to 1 in locus",
                i$locusname))
        nAlleles <- length(i$alleles)
        # Mutation matrices must be nAlleles x nAlleles row-stochastic
        if (!is.matrix(i$femaleMutationMatrix) | dim(i$femaleMutationMatrix)[1] !=
            nAlleles | dim(i$femaleMutationMatrix)[2] != nAlleles)
            stop(paste("The female mutation matrix must be of a dimension corresponding to the vector of frequencies in locus",
                i$locusname))
        if (any(as.vector(i$femaleMutationMatrix) < 0))
            stop(paste("The female mutation matrix cannot have negative entries in locus",
                i$locusname))
        if (any(round(apply(i$femaleMutationMatrix, 1, sum),
            6) != 1))
            stop(paste("The rows in the female mutation matrix must sum to 1 in locus",
                i$locusname))
        if (!is.matrix(i$maleMutationMatrix) | dim(i$maleMutationMatrix)[1] !=
            nAlleles | dim(i$maleMutationMatrix)[2] != nAlleles)
            stop(paste("The male mutation matrix must be of a dimension corresponding to the vector of frequencies in locus",
                i$locusname))
        if (any(as.vector(i$maleMutationMatrix) < 0))
            stop(paste("The male mutation matrix cannot have negative entries in locus",
                i$locusname))
        if (any(round(apply(i$maleMutationMatrix, 1, sum), 6) !=
            1))
            stop(paste("The rows in the male mutation matrix must sum to 1 in locus",
                i$locusname))
        lOfArrays <- nAlleles * nAlleles
        simplifyMatrix <- i$simpleMutationMatrices | simplifyMutations
        # A silent allele, if present, must be the last and named
        # "silent"/"Silent"
        hasSilentAllele <- (names(i$alleles)[nAlleles] == "silent" |
            names(i$alleles)[nAlleles] == "Silent")
        result <- .C("AddAlleleSystem", as.integer(nAlleles),
            as.integer(lOfArrays), as.double(i$femaleMutationMatrix),
            as.double(i$maleMutationMatrix), as.integer(simplifyMatrix),
            as.integer(nAlleles), as.double(i$alleles), as.integer(hasSilentAllele),
            index = integer(1), error = integer(1))
        if (result$error > 0)
            stop(paste("ERROR: Problems with input of allele system.",
                i$locusname))
    }
    # ---- Feed the observed genotypes to the C core ----
    if (dim(datamatrix)[2] != 2 * nloci)
        stop("The datamatrix must have two columns for each locus.")
    for (i in 1:nloci) {
        for (j in 1:npersDatamatrix) {
            A1 <- datamatrix[j, 2 * i - 1]
            A2 <- datamatrix[j, 2 * i]
            if (!(is.na(A1) && is.na(A2))) {
                # A single observed allele is treated as homozygous
                if (is.na(A1))
                  A1 <- A2
                if (is.na(A2))
                  A2 <- A1
                M1 <- match(A1, names(loci[[i]]$alleles), nomatch = 0)
                if (M1 == 0)
                  stop(paste("Allele", A1, "is not found in locus",
                    loci[[i]]$locusname))
                M2 <- match(A2, names(loci[[i]]$alleles), nomatch = 0)
                if (M2 == 0)
                  stop(paste("Allele", A2, "is not found in locus",
                    loci[[i]]$locusname))
                result <- .C("AddDNAObservation", as.integer(j -
                  1), as.integer(i - 1), as.integer(M1 - 1),
                  as.integer(M2 - 1), error = integer(1))
                if (result$error > 0)
                  stop("ERROR: Problems with input of marker data.")
            }
        }
    }
    if (kinship < 0)
        stop("ERROR: Kinship cannot be negative.")
    # ---- Run the likelihood computation in the C core ----
    result <- .C("GetProbabilities", as.double(1), as.integer(-1),
        as.double(1), as.double(1), as.integer(TRUE), as.double(kinship),
        redundant = integer(npeds), probabilities = double(npeds),
        likelihoods = double(nloci * npeds), error = integer(1))
    if (result$error == 1)
        stop("ERROR: Problems computing probabilities.")
    if (result$error == 2)
        stop("ERROR: All pedigrees have probability zero.")
    pedigreeDuplicated <- as.logical(result$redundant)
    if (any(pedigreeDuplicated))
        stop(paste("ERROR: Some of the listed pedigrees were equivalent. Duplicated pedigrees are numbers",
            (1:npeds)[pedigreeDuplicated]))
    #prior <- result$probabilities
    # ---- Combine per-locus likelihoods into posterior and LRs ----
    likelihoodsPerSystem <- matrix(result$likelihoods, nloci,
        npeds)
    likelihoods <- apply(likelihoodsPerSystem, 2, prod)
    posterior <- prior * likelihoods
    posterior <- posterior/sum(posterior)
    LR <- likelihoods/likelihoods[ref]
    LRperMarker <- likelihoodsPerSystem/likelihoodsPerSystem[,
        ref]
    # Reset the C-side state again so no data leaks into the next call
    .C("NewFamilias")
    names(posterior) <- names(pedigrees)
    names(prior) <- names(pedigrees)
    names(LR) <- names(pedigrees)
    colnames(LRperMarker) <- names(pedigrees)
    locusnames <- rep("", nloci)
    for (i in 1:nloci) locusnames[i] <- loci[[i]]$locusname
    rownames(LRperMarker) <- locusnames
    names(likelihoods) <- names(pedigrees)
    colnames(likelihoodsPerSystem) <- names(pedigrees)
    rownames(likelihoodsPerSystem) <- locusnames
    list(posterior = posterior, prior = prior, LR = LR, LRperMarker = LRperMarker,
        likelihoods = likelihoods, likelihoodsPerSystem = likelihoodsPerSystem)
}
|
09088c71c72a3bf23d178adaa2d8043f39ae3729
|
7e0d63da5e0c6006f4c7afb396e71c568b5b9421
|
/man/getColors.Rd
|
34698e5ba75a84eb4899b8b794ca8c17105a8c3d
|
[] |
no_license
|
vsoch/genetime
|
2678527a9afa81a1fb185db2df83453c2db7f37f
|
d9cbdaa193aa034afef4978009335a5fd5b4c07d
|
refs/heads/master
| 2021-01-01T15:35:38.275276
| 2014-12-18T18:36:18
| 2014-12-18T18:36:18
| 28,060,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 351
|
rd
|
getColors.Rd
|
% Generated by roxygen2 (4.1.0.9000): do not edit by hand
% Please edit documentation in R/getColors.R
\name{getColors}
\alias{getColors}
\title{getColors}
\usage{
getColors()
}
\description{
Returns 16 colors for brain regions in developmental enrichment package
}
\examples{
colors = getColors()
}
\keyword{brain}
\keyword{colors}
\keyword{region}
|
51bee291ee2757b6eea6fd5b168ec8a64c766780
|
3c258c7fe3244f4a41dea7d264098ac614eef19a
|
/man/renderVDiagram.Rd
|
18648066833444408e50ee792d6957abe79407c0
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
USGS-R/repgen
|
379be8577f3effbe7067e2f3dc5b5481ca69999e
|
219615189fb054e3b421b6ffba4fdd9777494cfc
|
refs/heads/main
| 2023-04-19T05:51:15.008674
| 2021-04-06T20:29:38
| 2021-04-06T20:29:38
| 31,678,130
| 10
| 25
|
CC0-1.0
| 2023-04-07T23:10:19
| 2015-03-04T20:24:02
|
R
|
UTF-8
|
R
| false
| true
| 342
|
rd
|
renderVDiagram.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vdiagram-render.R
\name{renderVDiagram}
\alias{renderVDiagram}
\title{Called from V diagram R Markdown files.}
\usage{
renderVDiagram(reportObject)
}
\arguments{
\item{reportObject}{V diagram report data.}
}
\description{
Called from V diagram R Markdown files.
}
|
9e497dcf56d2db9b3b701c46ba156da88fb54564
|
983dabf418aac94d6b5fe2fb311c8c5c0c5b01de
|
/hw5/hw5_2.R
|
889f8ee0cbf5554cf48462759abf0cba0221d879
|
[] |
no_license
|
thomas861205/Linear_models
|
4c050cc2260d3dd9af128799a4991e2530db498c
|
c09197e2897c7e1960a83217904909c08fceccfd
|
refs/heads/master
| 2020-07-30T15:04:35.800530
| 2019-12-25T04:42:56
| 2019-12-25T04:42:56
| 210,271,157
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,223
|
r
|
hw5_2.R
|
# Homework 5, problem 2: weighted vs. ordinary least squares on reported
# travel times (exercise data set E6.10).
#
# Each row's response y is the average of n individual reports, so
# Var(y) is proportional to 1/n -- this motivates WLS with weights = n.
#
# NOTE(review): hard-coded absolute Windows path; not portable. Consider
# a relative path or file.path()/file.choose().
gala <- read.table("C:/Users/Thomas/Downloads/Linear_models/hw5/E6.10.txt", header=T)
# The variable x gives travel times which were computed from bus timetables augmented
# by walk times from zone centroids to bus-stops (assuming a walking speed of 3 m.p.h.)
# and expected waiting times for the bus (which were set at half the headway, i.e., the
# time between successive buses).
#
# The variable y was the average of travel times as reported to the U.S. Census Bureau by n travelers.
plot(gala$x, gala$y)
# Columns: 2 = n (group size), 3 = x (timetable time), 4 = y (mean reported time)
n <- gala[,2]
x <- gala[,3]
y <- gala[,4]
# Weighted least squares: weight each observation by its group size n
WLS <- lm(y ~ x, weights=n)
summary(WLS)
# plot(x, y)
# abline(WLS)
# Ordinary least squares for comparison (ignores the unequal variances)
OLS <- lm(y ~ x)
summary(OLS)
# plot(n, WLS$residuals, pch=1)
# legend("bottomleft", legend = c("WLS"), pch = c(1), lty = c(1))
# abline(a=0, b=0)
# Saturated ("pure error") model: one mean per distinct x value.
# Comparing it to the linear WLS fit gives a lack-of-fit test.
sm <- lm(y ~ factor(x), weights=n)
summary(sm)
anova(WLS, sm)
# plot(n, OLS$residuals, pch=4)
# legend("bottomleft", legend = c("WLS", "OLS"), pch = c(1, 4), lty = c(1, 1))
# abline(a=0, b=0)
# Overlay WLS (circles) and OLS (crosses) residuals against group size n
plot(n, WLS$residuals, pch=1, ylab="residuals")
points(n, OLS$residuals, pch=4)
legend("bottomleft", legend = c("WLS","OLS"), pch = c(1,4), lty = c(1,1))
abline(a=0, b=0)
# Residual sums of squares for the two fits (note: WLS residuals here
# are unweighted raw residuals)
WLS.RSS <- sum(WLS$residuals^2)
WLS.RSS
OLS.RSS <- sum(OLS$residuals^2)
OLS.RSS
|
3b251d905db16358a4bb29104f7085acdf28b5b0
|
5f93c27bf3ad41c2adbc4f2e6e56b3ff3b31cf89
|
/mmm_fit_code/mmm_fit_functions/tree_update_OLD.R
|
8c3430808a07dbaa2b1946c72415e3ce1eeed3af
|
[] |
no_license
|
jbischof/HPC_model
|
fd6726abf3130075e1354d2139212c00b04ee4b0
|
5dd2c6ae7e36e31a4a71751e38b9fe4ef88c0fcf
|
refs/heads/master
| 2021-01-01T19:07:31.094310
| 2014-05-06T04:18:26
| 2014-05-06T04:18:41
| 2,614,499
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,790
|
r
|
tree_update_OLD.R
|
# Functions to update mu vector in mixed membership model
# and other tree parameters
#require(Matrix,quietly=TRUE)
# Function to get params and data in useful format (anything that
# doesn't involve tree parameters and should not be recalculated)
# Package the per-feature data and fixed hyperparameters needed by the
# tree-parameter optimizer -- i.e. everything that does not depend on
# the tree parameters themselves, so it is computed once per feature.
#
# Arguments:
#   word.id            - feature (word) identifier; coerced to character
#                        to index feature.count.list.
#   current.param.list - list holding theta.param.vecs (D x K doc-topic
#                        memberships), K, and hyperparameters psi, gamma,
#                        nu, sigma2.
#   doc.length.vec     - named vector of document lengths.
#   doc.topic.list     - unused here; kept for interface compatibility.
#   feature.count.list - list keyed by feature id (as string) of named
#                        count vectors; the names are the active doc ids.
#   topic.address.book - unused here; kept for interface compatibility.
#
# Returns a list with active.docs, counts.feature, doc.length.vec, K, X,
# psi, gamma, nu, sigma2.
get.data.for.tree <- function(word.id,current.param.list,
                              doc.length.vec,doc.topic.list,
                              feature.count.list,topic.address.book){
  # Grab needed parameters from current.param.list
  theta.param.vecs <- current.param.list$theta.param.vecs
  K <- current.param.list$K
  psi <- current.param.list$psi
  gamma <- current.param.list$gamma
  nu <- current.param.list$nu
  sigma2 <- current.param.list$sigma2
  # feature.count.list is keyed by the string form of the feature id
  word.id.str <- as.character(word.id)
  # Doc-specific counts for this feature; the names identify the
  # documents in which the feature actually occurs
  counts.feature <- feature.count.list[[word.id.str]]
  active.docs <- names(counts.feature)
  # theta.param.vecs is DxK; sparsification is now done by the master
  # node, so use the matrix as-is
  X <- theta.param.vecs
  list(active.docs=active.docs,
       counts.feature=counts.feature,
       doc.length.vec=doc.length.vec,K=K,X=X,
       psi=psi,gamma=gamma,nu=nu,sigma2=sigma2)
}
# Function to get prior params for each mu
# Collect the prior parameters relevant to one topic's mu: its own
# value, its parent's value and the parent's variance, and (for
# gradient computations) the values of its children.
#
# Arguments:
#   topic              - topic identifier; coerced to a string.
#   topic.address.book - table with "topic" and "parent" columns.
#   mu.0.f             - corpus-level mu for this feature.
#   mu.f               - named vector of per-topic mus for this feature.
#   tau2f.vec          - named vector of per-parent variances.
#   grad               - if TRUE, also collect the child mus (and the
#                        topic's own variance for non-terminal nodes).
#   parent.child.list  - required when grad=TRUE; list mapping each
#                        topic to its children. NOTE: the *names* of a
#                        child entry are the children's numeric
#                        positions within mu.f.
#   corpus.topic       - label of the corpus-level root topic.
#
# Returns a list with mu.self, mu.parent, tau2.parent and, when
# grad=TRUE, mu.children (NULL for terminal nodes) and tau2.self (only
# for non-terminal nodes).
get.mu.prior.params <- function(topic,topic.address.book,mu.0.f,mu.f,tau2f.vec,
                                grad=FALSE,parent.child.list=NULL,
                                corpus.topic){
  # Make sure topic is a string
  topic <- toString(topic)
  # Figure out parent topic from the address book
  pos.topic.address.book <- which(topic.address.book[,"topic"]==topic)
  parent <- topic.address.book[pos.topic.address.book,"parent"]
  # Topics directly under the corpus root take the corpus-level mu as
  # their parent value
  if(parent==corpus.topic){
    mu.parent <- mu.0.f
    tau2.parent <- tau2f.vec[corpus.topic]
  }
  # Mus below the highest level take their parent topic's mu
  else{
    mu.parent <- mu.f[parent]
    tau2.parent <- tau2f.vec[parent]
  }
  # Child mus are only needed for the gradient
  if(grad){
    if(is.null(parent.child.list)){
      stop("Must supply parent.child.list to get params for gradient")}
    child.topics <- parent.child.list[[topic]]
    # Terminal nodes have no children
    is.terminal <- is.null(child.topics)
    if(is.terminal){mu.children <- NULL}
    # Non-terminal nodes: the names of child.topics are the children's
    # numeric positions in mu.f
    else{pos.children <- as.numeric(names(child.topics))
      mu.children <- mu.f[pos.children]
      tau2.self <- tau2f.vec[topic]}
  }
  mu.self <- mu.f[topic]
  mu.prior.list <- list(mu.self=mu.self,mu.parent=mu.parent,
                        tau2.parent=tau2.parent)
  # Add extra items to output list if gradient calculation
  if(grad){mu.prior.list[["mu.children"]] <- mu.children
    if(!is.terminal){mu.prior.list[["tau2.self"]] <- tau2.self}}
  return(mu.prior.list)
}
# Function to evaluate prior for each mu.kf
# Log (unnormalized) Gaussian prior density for one topic's mu given
# its parent: log N(mu_self | mu_parent, tau2_parent), dropping the
# constant term.
eval.mu.prior <- function(topic,topic.address.book,mu.0.f,
                          mu.f,tau2f.vec,corpus.topic){
  p <- get.mu.prior.params(topic=topic,
                           topic.address.book=topic.address.book,
                           corpus.topic=corpus.topic,
                           mu.0.f=mu.0.f,mu.f=mu.f,
                           tau2f.vec=tau2f.vec)
  dev <- p$mu.self - p$mu.parent
  # -(1/2) log(tau2) - dev^2 / (2 tau2)
  -0.5 * log(p$tau2.parent) - dev^2 / (2 * p$tau2.parent)
}
# Function to evaluate prior gradient for each mu.kf
# Gradient of the log prior with respect to one topic's mu. The
# gradient has two pulls: toward the parent's mu (prior on self) and,
# for non-terminal nodes, toward the children's mus (self appears as
# the prior mean of each child).
eval.mu.prior.grad <- function(topic,topic.address.book,
                               mu.0.f,mu.f,tau2f.vec,
                               parent.child.list,corpus.topic){
  # grad=TRUE so child mus (and tau2.self) are returned as well
  mu.prior.list <- get.mu.prior.params(topic=topic,
                                       topic.address.book=topic.address.book,
                                       mu.0.f=mu.0.f,mu.f=mu.f,
                                       tau2f.vec=tau2f.vec,grad=TRUE,
                                       parent.child.list=parent.child.list,
                                       corpus.topic=corpus.topic)
  mu.self <- mu.prior.list$mu.self
  mu.parent <- mu.prior.list$mu.parent
  tau2.parent <- mu.prior.list$tau2.parent
  # tau2.self is NULL for terminal nodes (unused in that branch)
  tau2.self <- mu.prior.list$tau2.self
  mu.children <- mu.prior.list$mu.children
  # d/d mu.self of -(mu.self - mu.parent)^2 / (2 tau2.parent)
  mu.prior.grad.self <- -tau2.parent^(-1)*(mu.self-mu.parent)
  # Terminal nodes contribute only the parent term; non-terminal nodes
  # add the pull from each child's prior
  if(is.null(mu.children)){mu.prior.grad <- mu.prior.grad.self}
  else{mu.prior.grad.child <- sum(tau2.self^(-1)*(mu.children-mu.self))
    mu.prior.grad <- mu.prior.grad.self + mu.prior.grad.child}
  return(mu.prior.grad)
}
# Functions to evaluate prior and grad for corpus-level mu
# Log prior (up to a constant) for the corpus-level mu:
# log N(mu.0.f | psi, gamma^2) with the normalizing term dropped.
eval.mu.0.prior <- function(mu.0.f,psi,gamma){
  -((mu.0.f - psi)^2) / (2 * gamma^2)
}
# Gradient of the log posterior with respect to the corpus-level mu:
# pull from its children's priors plus the Gaussian hyperprior pull.
eval.mu.0.grad <- function(mu.0.f,tau2f.vec,mu.f,psi,gamma,
                           parent.child.list,corpus.topic){
  # Children of the corpus root; their positions within mu.f are stored
  # as the *names* of the parent.child.list entry
  kids <- parent.child.list[[corpus.topic]]
  mu.kids <- mu.f[as.numeric(names(kids))]
  tau2.root <- tau2f.vec[corpus.topic]
  # Likelihood pull from the children plus the prior pull toward psi
  sum(mu.kids - mu.0.f) / tau2.root - (mu.0.f - psi) / gamma^2
}
# Functions to evaluate prior and gradient for discrimination parameters
## # These versions for inverse chi-sq dist
## # Log prior for tau2f vector
## eval.tau2f.prior <- function(tau2f.vec,nu,sigma2){
## tau2f.prior.sum <- -(1+0.5*nu)*sum(log(tau2f.vec)) -
## 0.5*nu*sigma2*sum((tau2f.vec)^(-1))
## return(tau2f.prior.sum)
## }
## eval.tau2f.k.prior.grad <- function(tau2.self,nu,sigma2){
## prior.grad <- -(1 + 0.5*nu)*(tau2.self)^(-1) + 0.5*nu*sigma2*(tau2.self)^(-2)
## return(prior.grad)
## }
# Log prior for tau2f vector
# Log prior (up to constants) for the vector of discrimination
# parameters tau2f. Two families are supported:
#   "inv.chisq"  - scaled inverse chi-squared with df nu and scale sigma2
#   "log.normal" - log-normal with log-mean nu and log-variance sigma2
eval.tau2f.prior <- function(tau2f.vec,nu,sigma2,dist="inv.chisq"){
  log.tau2 <- log(tau2f.vec)
  if (dist == "inv.chisq") {
    out <- -(1 + 0.5 * nu) * sum(log.tau2) -
      0.5 * nu * sigma2 * sum(1 / tau2f.vec)
  } else if (dist == "log.normal") {
    out <- -sum(log.tau2) - sum((log.tau2 - nu)^2) / (2 * sigma2)
  }
  # Any other `dist` value leaves `out` undefined and errors here,
  # matching the original behavior.
  return(out)
}
# Derivative of the log prior with respect to a single tau2f,k, for the
# inverse chi-squared ("inv.chisq") or log-normal ("log.normal") prior.
eval.tau2f.k.prior.grad <- function(tau2.self,nu,sigma2,dist="inv.chisq"){
  if (dist == "inv.chisq") {
    # -(1 + nu/2)/t + (nu * sigma2 / 2)/t^2, factored over 1/t
    out <- (0.5 * nu * sigma2 / tau2.self - (1 + 0.5 * nu)) / tau2.self
  } else if (dist == "log.normal") {
    out <- -(1 + (log(tau2.self) - nu) / sigma2) / tau2.self
  }
  return(out)
}
# Gradient for individual tau2f,k
# Gradient of the log posterior with respect to a single tau2f,k:
# likelihood term from the node's children plus the prior gradient.
#
# Arguments:
#   n.child     - number of child topics under this node.
#   tau2.self   - current value of tau2f,k.
#   mu.children - child mus; mu.self - this node's mu (their prior mean).
#   nu, sigma2  - prior hyperparameters.
#   dist        - prior family, "inv.chisq" or "log.normal".
#
# BUG FIX: `dist` was accepted but never forwarded to
# eval.tau2f.k.prior.grad, so the inverse chi-squared prior gradient was
# always used even when dist="log.normal".
eval.tau2f.k.grad <- function(n.child,tau2.self,mu.children,mu.self,nu,sigma2,dist="inv.chisq"){
  prior.grad <- eval.tau2f.k.prior.grad(tau2.self=tau2.self,nu=nu,sigma2=sigma2,
                                        dist=dist)
  # d/d tau2 of sum_j log N(mu_child_j | mu_self, tau2)
  grad <- -(n.child/2)*tau2.self^(-1) +
    0.5*sum((mu.children-mu.self)^2)*tau2.self^(-2) + prior.grad
  return(grad)
}
# Gradient of the log posterior with respect to tau2f for one topic:
# looks up the topic's children and its own mu, then delegates to
# eval.tau2f.k.grad.
#
# Arguments:
#   topic             - topic whose tau2 gradient is wanted.
#   tau2f.vec         - named vector of tau2 values.
#   mu.f, mu.0.f      - per-topic mus and the corpus-level mu.
#   nu, sigma2        - prior hyperparameters.
#   parent.child.list - list mapping each topic to its children; entry
#                       names are the children's positions within mu.f.
#   corpus.topic      - label of the corpus-level root topic.
#   dist              - prior family, "inv.chisq" or "log.normal".
#
# BUG FIX: the inner eval.tau2f.k.grad call hardcoded dist="inv.chisq"
# instead of forwarding this function's own `dist` argument, silently
# ignoring a log-normal prior choice.
eval.tau2f.grad <- function(topic,tau2f.vec,mu.f,mu.0.f,nu,sigma2,
                            parent.child.list,corpus.topic,dist="inv.chisq"){
  # Children of this topic; entry names give positions within mu.f
  child.topics <- parent.child.list[[topic]]
  pos.children <- as.numeric(names(child.topics))
  mu.children <- mu.f[pos.children]
  # The corpus root's mu lives in mu.0.f, not mu.f
  if (topic==corpus.topic) {mu.self <- mu.0.f
  } else {mu.self <- mu.f[topic]}
  tau2.self <- tau2f.vec[topic]
  n.child <- length(mu.children)
  # Evaluate gradient (forwarding the requested prior family)
  tau2f.k.grad <- eval.tau2f.k.grad(n.child=n.child,tau2.self=tau2.self,mu.children=mu.children,
                                    mu.self=mu.self,nu=nu,sigma2=sigma2,dist=dist)
  return(tau2f.k.grad)
}
# Function to evaluate log conditional posterior of tree parameters
# for feature f
# Log conditional posterior of the tree parameters for one feature f.
# `par` packs, in order: the K per-topic mus, the corpus-level mu, and
# the log of the tau2 (discrimination) parameters -- the optimizer works
# in log space for the variances.
#
# The likelihood is a Poisson-rate form: document d contributes
# -l_d * (x_d . beta) + count_d * log(x_d . beta), with beta = exp(mu).
tree.log.posterior <- function(par,tree.data.list,topic.address.book,
                               parent.child.list,corpus.topic,
                               dist.tau2="inv.chisq"){
  # Get vector of topics
  topics <- topic.address.book[,"topic"]
  # Unpack needed data (prepared once per feature by get.data.for.tree)
  active.docs <- tree.data.list$active.docs
  counts.feature <- tree.data.list$counts.feature
  X <- tree.data.list$X
  K <- tree.data.list$K
  psi <- tree.data.list$psi
  gamma <- tree.data.list$gamma
  nu <- tree.data.list$nu
  sigma2 <- tree.data.list$sigma2
  doc.length.vec <- tree.data.list$doc.length.vec
  # Extract parameters from par (tau2s are stored in log space)
  mu.f <- par[1:K]
  beta.f <- exp(mu.f)
  mu.0.f <- par[K+1]
  tau2f.vec <- exp(par[(K+2):length(par)])
  # Linear predictor per document, and its length-weighted version
  x.beta.vec <- as.vector(X%*%beta.f)
  l.x.beta.vec <- doc.length.vec*x.beta.vec
  names(x.beta.vec) <- names(l.x.beta.vec) <- names(doc.length.vec)
  # Log prior of the mu vector (one Gaussian term per topic)
  log.mu.prior.vec <- sapply(topics,eval.mu.prior,
                             topic.address.book=topic.address.book,
                             corpus.topic=corpus.topic,
                             mu.0.f=mu.0.f,mu.f=mu.f,
                             tau2f.vec=tau2f.vec)
  # Log prior of the corpus-level mu
  log.mu.0.prior <- eval.mu.0.prior(mu.0.f=mu.0.f,psi=psi,gamma=gamma)
  # Log prior for the discrimination (tau2) parameters
  log.tau2f.prior.sum <- eval.tau2f.prior(tau2f.vec=tau2f.vec,nu=nu,sigma2=sigma2,
                                          dist=dist.tau2)
  # Likelihood over all docs plus the three prior contributions
  log.posterior <- -sum(l.x.beta.vec) +
    sum(counts.feature*log(x.beta.vec[active.docs])) +
    sum(log.mu.prior.vec) + log.mu.0.prior + log.tau2f.prior.sum
  #if(is.na(log.posterior)){browser()}
  return(as.numeric(log.posterior))
}
# Function to evaluate gradient of log conditional posterior of mu.f
# Gradient of the log conditional posterior of the tree parameters for
# one feature, in the same parameter order as tree.log.posterior's
# `par`: K per-topic mus, the corpus-level mu, then log(tau2).
# Because the optimizer works in log space for tau2, the chain rule
# factor tau2f.vec is applied to the tau2 gradient at the end.
tree.log.post.gradient <- function(par,tree.data.list,topic.address.book,
                                   parent.child.list,corpus.topic,
                                   dist.tau2="inv.chisq"){
  # Get vector of topics
  topics <- topic.address.book[,"topic"]
  # Unpack needed data
  active.docs <- tree.data.list$active.docs
  counts.feature <- tree.data.list$counts.feature
  X <- tree.data.list$X
  K <- tree.data.list$K
  psi <- tree.data.list$psi
  gamma <- tree.data.list$gamma
  nu <- tree.data.list$nu
  sigma2 <- tree.data.list$sigma2
  doc.length.vec <- tree.data.list$doc.length.vec
  # Extract parameters from par (tau2s stored in log space)
  mu.f <- par[1:K]
  beta.f <- exp(mu.f)
  mu.0.f <- par[K+1]
  tau2f.vec <- exp(par[(K+2):length(par)])
  # Linear predictor per document
  x.beta.vec <- as.vector(X%*%beta.f)
  names(x.beta.vec) <- rownames(X)
  # count_d / (x_d . beta) for active docs (derivative of the log term)
  count.ratio <- counts.feature/x.beta.vec[active.docs]
  # Column sums of the length-weighted design matrix (derivative of the
  # -sum(l * x.beta) term)
  l.X.col.sums <- colSums(doc.length.vec*X)
  # Log prior gradient of the mu vector
  log.mu.prior.grad <- sapply(topics,eval.mu.prior.grad,
                              topic.address.book=topic.address.book,
                              mu.0.f=mu.0.f,mu.f=mu.f,
                              tau2f.vec=tau2f.vec,
                              parent.child.list=parent.child.list,
                              corpus.topic=corpus.topic)
  # Log prior gradient of the corpus-level mu
  log.mu.0.prior.grad <- eval.mu.0.grad(mu.0.f=mu.0.f,
                                        tau2f.vec=tau2f.vec,
                                        mu.f=mu.f,psi=psi,gamma=gamma,
                                        parent.child.list=parent.child.list,
                                        corpus.topic=corpus.topic)
  # Log prior gradient for the tau2 vector (one entry per parent topic)
  parent.topics <- names(tau2f.vec)
  log.tau2.prior.grad <- sapply(parent.topics,eval.tau2f.grad,
                                tau2f.vec=tau2f.vec,
                                mu.f=mu.f,mu.0.f=mu.0.f,
                                nu=nu,sigma2=sigma2,
                                parent.child.list=parent.child.list,
                                corpus.topic=corpus.topic,
                                dist=dist.tau2)
  # Evaluate log posterior gradient
  ## # Need to make exception for words active in only one doc
  ## if(length(active.docs)==1){X.active <- matrix(X[active.docs,],nrow=1)}
  ## else{X.active <- X[active.docs,]}
  # drop=FALSE keeps a one-row matrix when only one doc is active
  X.active <- X[active.docs,,drop=FALSE]
  gradient.likelihood <- -l.X.col.sums + as.vector(count.ratio%*%X.active)
  # Convert gradient wrt beta to mu space (d beta/d mu = beta) and add
  # the prior gradient
  gradient.mu <- gradient.likelihood*beta.f + log.mu.prior.grad
  # Assemble the gradient for the entire tree of parameters; the
  # tau2f.vec factor is the chain rule for optimizing log(tau2)
  gradient.tree <- c(gradient.mu,log.mu.0.prior.grad,
                     log.tau2.prior.grad*tau2f.vec)
  names(gradient.tree) <- names(par)
  return(gradient.tree)
}
# Optimize the tree parameters (per-topic mus, corpus-level mu, and
# tau2 discrimination parameters) for one feature via L-BFGS-B, warm
# starting from the previous parameter values.
#
# Arguments:
#   job.id             - feature id (row index into the parameter tables).
#   current.param.list - current global parameter state (mu.param.vecs,
#                        mu.corpus.vec, tau2.param.vecs,
#                        parent.child.list, plus what
#                        get.data.for.tree needs).
#   doc.length.vec, doc.topic.list, feature.count.list,
#   topic.address.book - data passed through to get.data.for.tree.
#   corpus.topic       - label of the corpus-level root topic.
#   hessian            - if TRUE, also return the Hessian at the optimum.
#   tree.data.out      - if TRUE, also return the assembled data list.
#   dist.tau2          - prior family for the tau2 parameters.
#
# Returns a list with mu.f, mu.0.f, tau2f.vec, global.conv, tree.post,
# tree.post.old, and optionally hessian.tree / tree.data.list.
#
# BUG FIX: the tree.log.posterior calls used for post.old/post.new (and
# hence the convergence check) omitted dist.tau2, so with
# dist.tau2="log.normal" they evaluated a different objective than the
# one being optimized. Both calls now forward dist.tau2.
optim.tree <- function(job.id,current.param.list,doc.length.vec,
                       doc.topic.list,feature.count.list,topic.address.book,
                       corpus.topic="CORPUS",hessian=FALSE,tree.data.out=FALSE,
                       dist.tau2="inv.chisq"){
  # Get old parameter values from last update
  mu.f.old <- current.param.list$mu.param.vecs[job.id,]
  mu.0.f.old <- current.param.list$mu.corpus.vec[job.id]
  tau2f.vec.old <- current.param.list$tau2.param.vecs[job.id,]
  old.param.vec <- c(mu.f.old,mu.0.f.old,tau2f.vec.old)
  # Start optimizer at old parameter values; tau2s are optimized in log
  # space, hence the log() here and exp() below
  par <- c(mu.f.old,mu.0.f.old,log(tau2f.vec.old))
  tree.data.list <- get.data.for.tree(word.id=job.id,
                                      current.param.list=current.param.list,
                                      doc.length.vec=doc.length.vec,
                                      doc.topic.list=doc.topic.list,
                                      feature.count.list=feature.count.list,
                                      topic.address.book=topic.address.book)
  # Starting value of the posterior (same objective as the optimizer)
  post.old <- tree.log.posterior(par=par,tree.data.list=tree.data.list,
                                 topic.address.book=topic.address.book,
                                 parent.child.list=
                                 current.param.list$parent.child.list,
                                 corpus.topic=corpus.topic,
                                 dist.tau2=dist.tau2)
  optim.out.bfgs <- optim(par=par,fn=tree.log.posterior,
                          gr=tree.log.post.gradient,
                          # fnscale=-1 turns minimization into
                          # maximization of the log posterior
                          control=list(fnscale=-1),
                          tree.data.list=tree.data.list,
                          topic.address.book=topic.address.book,
                          parent.child.list=
                          current.param.list$parent.child.list,
                          corpus.topic=corpus.topic,
                          method="L-BFGS-B",
                          # Need lower bound on params to ensure stability
                          # of convergence check
                          # Note that optim is guessing everything in log space
                          lower=0.5*log(.Machine$double.eps),
                          hessian=hessian,
                          dist.tau2=dist.tau2)
  optim.bfgs.par <- optim.out.bfgs$par
  # Unpack the optimum: K mus, corpus mu, then log(tau2) -> tau2
  K <- tree.data.list$K
  mu.f.new <- optim.bfgs.par[1:K]
  mu.0.f.new <- optim.bfgs.par[K+1]
  tau2f.vec.new <- exp(optim.bfgs.par[(K+2):length(optim.bfgs.par)])
  new.param.vec <- c(mu.f.new,mu.0.f.new,tau2f.vec.new)
  if(hessian){hessian.tree <- optim.out.bfgs$hessian}
  # New value of the posterior at the optimum (same objective again)
  post.new <- tree.log.posterior(par=optim.bfgs.par,
                                 tree.data.list=tree.data.list,
                                 topic.address.book=topic.address.book,
                                 parent.child.list=
                                 current.param.list$parent.child.list,
                                 corpus.topic=corpus.topic,
                                 dist.tau2=dist.tau2)
  # Check global convergence via relative change in the posterior value
  global.conv <- check.conv(old.param.vec=post.old,
                            new.param.vec=post.new,
                            reltol=1e-6)
  out.list <- list(mu.f=mu.f.new,mu.0.f=mu.0.f.new,
                   tau2f.vec=tau2f.vec.new,global.conv=global.conv,
                   tree.post=post.new,tree.post.old=post.old)
  if(hessian){out.list$hessian.tree <- hessian.tree}
  if(tree.data.out){out.list$tree.data.list <- tree.data.list}
  return(out.list)
}
|
e5c677675f8d095ac6c54093392f4c4282de44df
|
007a5459cc41c25d6450fdb2d6ac331587b6b439
|
/man/APLA_Map.Rd
|
2405c3ef1db7400ef545548c28676fb3ee9c192e
|
[] |
no_license
|
Edouard-Legoupil/APLA_Dataset
|
7637fa1f6b29def80ee3b9e9a8c28478a7106575
|
338c79e20ceb9be38b77de685ba76bfbe90658f4
|
refs/heads/main
| 2022-12-31T12:11:27.220906
| 2020-10-12T20:29:28
| 2020-10-12T20:29:28
| 303,468,494
| 0
| 0
| null | 2020-10-12T17:44:37
| 2020-10-12T17:44:36
| null |
UTF-8
|
R
| false
| true
| 2,272
|
rd
|
APLA_Map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{APLA_Map}
\alias{APLA_Map}
\title{APLA_Map}
\format{
A data frame with 667 rows and 34 variables:
\describe{
\item{\code{Country}}{character COLUMN_DESCRIPTION}
\item{\code{Year}}{integer COLUMN_DESCRIPTION}
\item{\code{Art}}{integer COLUMN_DESCRIPTION}
\item{\code{Total}}{integer COLUMN_DESCRIPTION}
\item{\code{Regulatory_Complexity}}{integer COLUMN_DESCRIPTION}
\item{\code{Polity2}}{integer COLUMN_DESCRIPTION}
\item{\code{IntMigStock}}{double COLUMN_DESCRIPTION}
\item{\code{Left1_Other0}}{integer COLUMN_DESCRIPTION}
\item{\code{Liberalisation}}{double COLUMN_DESCRIPTION}
\item{\code{MigSpain}}{integer COLUMN_DESCRIPTION}
\item{\code{MigUS}}{integer COLUMN_DESCRIPTION}
\item{\code{MigSpainUS}}{integer COLUMN_DESCRIPTION}
\item{\code{TotalPopinY}}{integer COLUMN_DESCRIPTION}
\item{\code{MigSpainUSPerc}}{double COLUMN_DESCRIPTION}
\item{\code{GrowthGDPperCap}}{double COLUMN_DESCRIPTION}
\item{\code{Trade_Perc_GDP}}{integer COLUMN_DESCRIPTION}
\item{\code{RefugeeAndLikeSit}}{integer COLUMN_DESCRIPTION}
\item{\code{T}}{integer COLUMN_DESCRIPTION}
\item{\code{RefAsPerc}}{double COLUMN_DESCRIPTION}
\item{\code{GDPperCapPPP}}{double COLUMN_DESCRIPTION}
\item{\code{Lib100}}{integer COLUMN_DESCRIPTION}
\item{\code{x}}{double COLUMN_DESCRIPTION}
\item{\code{y}}{double COLUMN_DESCRIPTION}
\item{\code{CountryID}}{character COLUMN_DESCRIPTION}
\item{\code{South_America}}{integer COLUMN_DESCRIPTION}
\item{\code{MigSpainUSLog}}{double COLUMN_DESCRIPTION}
\item{\code{lag1}}{integer COLUMN_DESCRIPTION}
\item{\code{lag2}}{integer COLUMN_DESCRIPTION}
\item{\code{X_est_est1}}{integer COLUMN_DESCRIPTION}
\item{\code{X_est_est2}}{integer COLUMN_DESCRIPTION}
\item{\code{X_est_est3}}{integer COLUMN_DESCRIPTION}
\item{\code{X_est_est4}}{integer COLUMN_DESCRIPTION}
\item{\code{VDEM_Polyarchy}}{double COLUMN_DESCRIPTION}
\item{\code{Codified}}{integer COLUMN_DESCRIPTION}
}
}
\source{
\url{http://somewhere.important.com/}
}
\usage{
APLA_Map
}
\description{
APLA_Map is slightly modified versions of the dataset Data_APLA,
which allows to better map certain variables.
}
\keyword{datasets}
|
78c996f1e18feecf7fc7de6db6095690e4a6c525
|
2d277476733ba48dee4bec8bacc6c8dfbb86717b
|
/man/BuyseTTEM.Rd
|
9b16c6ac3a575fa0866cf547dc0018c68b56cf7d
|
[] |
no_license
|
cran/BuyseTest
|
ef75b3c7f93a476b35786e485ae4ab2e56c8d90f
|
a3dfe49778c8d5e2f0b987dd2e9cfbd6f01cb479
|
refs/heads/master
| 2023-04-14T00:41:38.778354
| 2023-03-20T21:30:02
| 2023-03-20T21:30:02
| 135,258,510
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,584
|
rd
|
BuyseTTEM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BuyseTTEM.R
\name{BuyseTTEM}
\alias{BuyseTTEM}
\alias{BuyseTTEM.formula}
\alias{BuyseTTEM.prodlim}
\alias{BuyseTTEM.survreg}
\alias{BuyseTTEM.BuyseTTEM}
\title{Time to Event Model}
\usage{
BuyseTTEM(object, ...)
\method{BuyseTTEM}{formula}(object, treatment, iid, iid.surv = "exp", ...)
\method{BuyseTTEM}{prodlim}(object, treatment, iid, iid.surv = "exp", ...)
\method{BuyseTTEM}{survreg}(object, treatment, n.grid = 1000, iid, ...)
\method{BuyseTTEM}{BuyseTTEM}(object, ...)
}
\arguments{
\item{object}{time to event model.}
\item{...}{additional arguments passed to lower lever methods.}
\item{treatment}{[character] Name of the treatment variable.}
\item{iid}{[logical] Should the iid decomposition of the predictions be output.}
\item{iid.surv}{[character] Estimator of the survival used when computing the influence function.
Can be the product limit estimator (\code{"prodlim"}) or an exponential approximation (\code{"exp"}, same as in \code{riskRegression::predictCoxPL}).}
\item{n.grid}{[integer, >0] Number of timepoints used to discretize the time scale. Not relevant for prodlim objects.}
}
\description{
Pre-compute quantities of a time to event model useful for predictions.
Only does something for prodlim objects.
}
\examples{
library(prodlim)
library(data.table)
tau <- seq(0,3,length.out=10)
#### survival case ####
set.seed(10)
df.data <- simBuyseTest(1e2, n.strata = 2)
e.prodlim <- prodlim(Hist(eventtime,status)~treatment+strata, data = df.data)
## plot(e.prodlim)
e.prodlim2 <- BuyseTTEM(e.prodlim, treatment = "treatment", iid = TRUE)
predict(e.prodlim2, time = tau, treatment = "T", strata = "a")
predict(e.prodlim, times = tau, newdata = data.frame(treatment = "T", strata = "a"))
predict(e.prodlim2, time = tau, treatment = "C", strata = "a")
predict(e.prodlim, times = tau, newdata = data.frame(treatment = "C", strata = "a"))
#### competing risk case ####
df.dataCR <- copy(df.data)
df.dataCR$status <- rbinom(NROW(df.dataCR), prob = 0.5, size = 2)
e.prodlimCR <- prodlim(Hist(eventtime,status)~treatment+strata, data = df.dataCR)
## plot(e.prodlimCR)
e.prodlimCR2 <- BuyseTTEM(e.prodlimCR, treatment = "treatment", iid = TRUE)
predict(e.prodlimCR2, time = tau, treatment = "T", strata = "a")
predict(e.prodlimCR, times = tau, newdata = data.frame(treatment = "T", strata = "a"), cause = 1)
predict(e.prodlimCR2, time = tau, treatment = "C", strata = "a")
predict(e.prodlimCR, times = tau, newdata = data.frame(treatment = "C", strata = "a"), cause = 1)
}
|
c2caa9dce8e712e1a93ca3b569796b2cc1e7f913
|
d9485ffc49f5b8309bf5ce34abd76e86abc721ff
|
/R-libraries/myUtilities/R/dbdt.R
|
e8cbe37661919e43425e932b74330fe31d1fd388
|
[] |
no_license
|
jyqalan/myUtilities
|
a42944cd6e370c23ce8d232017f6db4949f09cca
|
73de6f8defc617c1fdf5537f4d159d653d876a0d
|
refs/heads/master
| 2020-12-10T23:35:22.603147
| 2018-04-20T13:50:58
| 2018-04-20T13:50:58
| 55,338,987
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36
|
r
|
dbdt.R
|
dbdt <- function(b) {
  # Rate of change of biomass b under a generalized surplus-production
  # (Pella-Tomlinson-like) curve.
  # NOTE(review): relies on r (growth rate), K (carrying capacity) and
  # p (shape) being defined in the enclosing environment -- confirm they
  # are set before this is called.
  r * b * (1 - (b / K)^p)
}
|
8c59d85119aa994098a265e93714f096326784f3
|
2bf844c54a9a433aacd7384a4fcb3dd3f71b5469
|
/R/trash/Clipping_script.R
|
eebc2f2af7bccdb92ce1139d6ae58ed317e75acb
|
[
"MIT"
] |
permissive
|
JamesDMSpeed/LidarMoose
|
a144f6a33eac141d2dd1ee15ddf4443544083b36
|
7e9c10785e9c86a359acd58087c5fb45d98cdcca
|
refs/heads/master
| 2021-06-10T12:02:32.192615
| 2021-04-29T19:35:47
| 2021-04-29T19:35:47
| 146,406,974
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 47,970
|
r
|
Clipping_script.R
|
#Clipping_script
# Clips airborne-laser (.las) point clouds to the SustHerb 20x20 m study
# plots and writes the clipped clouds to <region>/clipped_las/.
# Use library() rather than require(): require() only returns FALSE when a
# package is missing, so the script would fail later with a confusing error
# instead of stopping immediately here.
library(lidR)      # readLAS / lasclip / writeLAS / plot for point clouds
library(raster)
library(rasterVis)
library(rgeos)     # gBuffer
############TRONDELAG##################################################################
#BRATSBERG
# Bratsberg: read the plot corner coordinates and the raw point cloud, clip
# the 'Brb' and 'Brub' 20x20 m plots, and write the clipped clouds.
#Import plot coords (semicolon-separated, decimal comma)
plotcoords<-read.csv('Troendelag_20m_flater_pkt.csv',header=TRUE,sep=';',dec=',')
#Import las file
bratsberglas <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/bratsberg.las')
bratsberglas
plot(bratsberglas)
#chull() gives the order of the corner points that forms a closed convex ring
bratsberg_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Brb',4:5]))
#This makes a polygon using the Brb coordinates, ordered by the convex hull
bratsberg_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Brb',4:5][bratsberg_b_order,]))
#Expand plot polygon by 6 m to include trees overhanging the plot edge.
#NOTE(review): bratsberg_b_outerpoly is only plotted below and never used for
#clipping -- the clip uses the unbuffered polygon. Confirm this is intended.
bbpl<-Polygons(list(bratsberg_b_poly),1)
bbpsp<-SpatialPolygons(list(bbpl))
bratsberg_b_outerpoly<-gBuffer(bbpsp,width=6)
plot(bratsberg_b_outerpoly)
#Clip the las to the (unbuffered) plot polygon
bratsberg_b_plot<-lasclip(bratsberglas,bratsberg_b_poly)
plot(bratsberg_b_plot)
bratsberg_b_plot
bratsberg_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Brub',4:5]))
bratsberg_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Brub',4:5][bratsberg_ub_order,]))
# BUG FIX: the original line had an unbalanced extra ')' which made the whole
# file a parse error.
bratsberg_ub_plot<-lasclip(bratsberglas,bratsberg_ub_poly)
plot(bratsberg_ub_plot)
bratsberg_ub_plot
writeLAS(bratsberg_b_plot,'Trondelag/clipped_las/bratsberg_b.las')
writeLAS(bratsberg_ub_plot,'Trondelag/clipped_las/bratsberg_ub.las')
#############################################################################
# Each section below repeats the same recipe for one Trondelag site:
#   1) read the site's raw .las point cloud,
#   2) build convex-hull polygons from the 20x20 m plot corner points in
#      `plotcoords` (rows matched on plotcoords$Name, coords in cols 4:5 --
#      presumably easting/northing; confirm against the CSV),
#   3) clip the cloud to the '<site>b' and '<site>ub' plots (presumably a
#      browsed/unbrowsed treatment pair -- confirm against the naming key),
#   4) write each clipped cloud to Trondelag/clipped_las/.
# Bare variable names print a las summary and plot() opens a 3D viewer when
# the script is run interactively.
#Hi_Tydal
hi_tydal_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Hi_tydal.las')
hi_tydal_las
plot(hi_tydal_las)
hi_tydal_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Hib',4:5]))
hi_tydal_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Hib',4:5][hi_tydal_b_order,]))
hi_tydal_b_cut<-lasclip(hi_tydal_las,hi_tydal_b_poly)
hi_tydal_b_cut
plot(hi_tydal_b_cut)
hi_tydal_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Hiub',4:5]))
hi_tydal_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Hiub',4:5][hi_tydal_ub_order,]))
hi_tydal_ub_cut<-lasclip(hi_tydal_las,hi_tydal_ub_poly)
hi_tydal_ub_cut
plot(hi_tydal_ub_cut)
writeLAS(hi_tydal_b_cut,'Trondelag/clipped_las/hi_tydal_b.las')
writeLAS(hi_tydal_ub_cut,'Trondelag/clipped_las/hi_tydal_ub.las')
####################################################################
#Malvik
malvik_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Malvik.las')
malvik_las
plot(malvik_las)
malvik_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Mab',4:5]))
malvik_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Mab',4:5][malvik_b_order,]))
malvik_b_cut<-lasclip(malvik_las,malvik_b_poly)
malvik_b_cut
plot(malvik_b_cut)
malvik_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Maub',4:5]))
malvik_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Maub',4:5][malvik_ub_order,]))
malvik_ub_cut<-lasclip(malvik_las,malvik_ub_poly)
malvik_ub_cut
plot(malvik_ub_cut)
writeLAS(malvik_b_cut,'Trondelag/clipped_las/malvik_b.las')
writeLAS(malvik_ub_cut,'Trondelag/clipped_las/malvik_ub.las')
##########################################################################
#Namdalseid_1kub
namdalseid_1kub_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/namdalseid_1kub.las')
namdalseid_1kub_las
plot(namdalseid_1kub_las)
namdalseid_1kub_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Kb',4:5]))
namdalseid_1kub_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Kb',4:5][namdalseid_1kub_b_order,]))
namdalseid_1kub_b_cut<-lasclip(namdalseid_1kub_las,namdalseid_1kub_b_poly)
namdalseid_1kub_b_cut
plot(namdalseid_1kub_b_cut)
namdalseid_1kub_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Kub',4:5]))
namdalseid_1kub_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Kub',4:5][namdalseid_1kub_ub_order,]))
#Make it a spatial polygon, and then expand polygon to include overhanging trees
namdalseid_1kub_ub_pl <- Polygons(list(namdalseid_1kub_ub_poly),1)
namdalseid_1kub_ub_sp <- SpatialPolygons(list(namdalseid_1kub_ub_pl))
namdalseid_1kub_ub_polybuf <- gBuffer(namdalseid_1kub_ub_sp, width=6)
#The polygon is now a spatial polygon, need to make it a SpatialPolygonsDataFrame
df1<-data.frame(ID=1)
rownames(df1)<-'buffer'
namdalseid_1kub_ub_spdf <- SpatialPolygonsDataFrame(namdalseid_1kub_ub_polybuf,data=df1,match.ID = TRUE)
#test <- (namdalseid_1kub_ub_outerpoly)
# Clipping with a SpatialPolygonsDataFrame returns a list (one element per
# polygon); keep the single clipped cloud stored under name '1'.
namdalseid_1kub_ub_outerpoly<-lasclip(namdalseid_1kub_las,namdalseid_1kub_ub_spdf)
namdalseid_1kub_ub_outerpoly<-namdalseid_1kub_ub_outerpoly$`1`
plot(namdalseid_1kub_ub_outerpoly)
writeLAS(namdalseid_1kub_b_cut,'Trondelag/clipped_las/namdalseid_1kub_b.las')
writeLAS(namdalseid_1kub_ub_outerpoly,'Trondelag/clipped_las/namdalseid_1kub_ub.las')
###################################################################################
#Nsb_Verdal
nsb_verdal_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Nsb_Verdal.las')
nsb_verdal_las
plot(nsb_verdal_las)
nsb_verdal_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Nsb',4:5]))
nsb_verdal_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Nsb',4:5][nsb_verdal_b_order,]))
nsb_verdal_b_cut<-lasclip(nsb_verdal_las,nsb_verdal_b_poly)
nsb_verdal_b_cut
plot(nsb_verdal_b_cut)
# NOTE: the leading space in ' 1Nsub' matches how the name is stored in the
# CSV -- do not "fix" it without checking the data.
nsb_verdal_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name==' 1Nsub',4:5]))
nsb_verdal_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name==' 1Nsub',4:5][nsb_verdal_ub_order,]))
nsb_verdal_ub_cut<-lasclip(nsb_verdal_las,nsb_verdal_ub_poly)
nsb_verdal_ub_cut
plot(nsb_verdal_ub_cut)
writeLAS(nsb_verdal_b_cut,'Trondelag/clipped_las/nsb_verdal_b.las')
writeLAS(nsb_verdal_ub_cut,'Trondelag/clipped_las/nsb_verdal_ub.las')
###########################################################################
#Selbu_flub
selbu_flub_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Selbu_flub.las')
selbu_flub_las
plot(selbu_flub_las)
selbu_flub_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Flb',4:5]))
selbu_flub_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Flb',4:5][selbu_flub_b_order,]))
selbu_flub_b_cut<-lasclip(selbu_flub_las,selbu_flub_b_poly)
selbu_flub_b_cut
plot(selbu_flub_b_cut)
selbu_flub_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Flub',4:5]))
selbu_flub_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Flub',4:5][selbu_flub_ub_order,]))
selbu_flub_ub_cut<-lasclip(selbu_flub_las,selbu_flub_ub_poly)
selbu_flub_ub_cut
plot(selbu_flub_ub_cut)
writeLAS(selbu_flub_b_cut,'Trondelag/clipped_las/selbu_flub_b.las')
writeLAS(selbu_flub_ub_cut,'Trondelag/clipped_las/selbu_flub_ub.las')
########################################################################################
#Selbu_kl
# Selbu_kl: clip the 'Klb' and 'Klub' plots. The 'Klb' plot polygon is
# buffered by 6 m before clipping so that crowns of edge trees overhanging
# the plot are included.
selbu_kl_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Selbu_kl.las')
selbu_kl_las
plot(selbu_kl_las)
selbu_kl_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Klb',4:5]))
selbu_kl_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Klb',4:5][selbu_kl_b_order,]))
#Make it a spatial polygon, and then expand polygon to include overhanging trees
selbu_kl_b_pl <- Polygons(list(selbu_kl_b_poly),1)
selbu_kl_b_sp <- SpatialPolygons(list(selbu_kl_b_pl))
selbu_kl_b_polybuf <- gBuffer(selbu_kl_b_sp, width=6)
#The polygon is now a spatial polygon, need to make it a SpatialPolygonsDataFrame
df1_klb<-data.frame(ID=1)
rownames(df1_klb)<-'buffer'
# BUG FIX: this previously passed data=df1 (the data frame created in the
# Namdalseid section), so this section only worked when that unrelated
# section had been run first. Use this section's own df1_klb.
selbu_kl_b_spdf <- SpatialPolygonsDataFrame(selbu_kl_b_polybuf,data=df1_klb,match.ID = TRUE)
#test <- (namdalseid_1kub_ub_outerpoly)
#selbu_kl_b_cut<-lasclip(selbu_kl_las,selbu_kl_b_poly)
#selbu_kl_b_cut
#plot(selbu_kl_b_cut)
# Clipping with a SpatialPolygonsDataFrame returns a list (one element per
# polygon); keep the single clipped cloud stored under name '1'.
selbu_kl_b_outerpoly<-lasclip(selbu_kl_las,selbu_kl_b_spdf)
selbu_kl_b_outerpoly<-selbu_kl_b_outerpoly$`1`
plot(selbu_kl_b_outerpoly)
selbu_kl_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Klub',4:5]))
selbu_kl_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Klub',4:5][selbu_kl_ub_order,]))
selbu_kl_ub_cut<-lasclip(selbu_kl_las,selbu_kl_ub_poly)
selbu_kl_ub_cut
plot(selbu_kl_ub_cut)
writeLAS(selbu_kl_b_outerpoly,'Trondelag/clipped_las/selbu_kl_b.las')
writeLAS(selbu_kl_ub_cut,'Trondelag/clipped_las/selbu_kl_ub.las')
############################################################################
# Per-site recipe (same as the other Trondelag sections): read the raw .las,
# clip it to the convex hull of the 'b' and 'ub' plot corner points
# (plotcoords cols 4:5, rows matched on plotcoords$Name), and write the
# clipped clouds to Trondelag/clipped_las/.
#Selbu_sl
selbu_sl_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Selbu_sl.las')
selbu_sl_las
plot(selbu_sl_las)
selbu_sl_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Slb',4:5]))
selbu_sl_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Slb',4:5][selbu_sl_b_order,]))
selbu_sl_b_cut<-lasclip(selbu_sl_las,selbu_sl_b_poly)
selbu_sl_b_cut
plot(selbu_sl_b_cut)
selbu_sl_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Slub',4:5]))
selbu_sl_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Slub',4:5][selbu_sl_ub_order,]))
selbu_sl_ub_cut<-lasclip(selbu_sl_las,selbu_sl_ub_poly)
selbu_sl_ub_cut
plot(selbu_sl_ub_cut)
writeLAS(selbu_sl_b_cut,'Trondelag/clipped_las/selbu_sl_b.las')
writeLAS(selbu_sl_ub_cut,'Trondelag/clipped_las/selbu_sl_ub.las')
#############################################################################
#Singsaas
# NOTE: the Singsaas plots are named 'Lab'/'Laub' in the coordinate file.
singsaas_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Singsaas.las')
singsaas_las
plot(singsaas_las)
singsaas_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Lab',4:5]))
singsaas_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Lab',4:5][singsaas_b_order,]))
singsaas_b_cut<-lasclip(singsaas_las,singsaas_b_poly)
singsaas_b_cut
plot(singsaas_b_cut)
singsaas_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Laub',4:5]))
singsaas_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Laub',4:5][singsaas_ub_order,]))
singsaas_ub_cut<-lasclip(singsaas_las,singsaas_ub_poly)
singsaas_ub_cut
plot(singsaas_ub_cut)
writeLAS(singsaas_b_cut,'Trondelag/clipped_las/singsaas_b.las')
writeLAS(singsaas_ub_cut,'Trondelag/clipped_las/singsaas_ub.las')
#################################################################################
#Sl_Tydal
sl_tydal_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Sl_Tydal.las')
sl_tydal_las
plot(sl_tydal_las) # visual check: cloud rendered all blue?? -- inspect data
sl_tydal_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Seb',4:5]))
sl_tydal_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Seb',4:5][sl_tydal_b_order,]))
sl_tydal_b_cut<-lasclip(sl_tydal_las,sl_tydal_b_poly)
sl_tydal_b_cut
plot(sl_tydal_b_cut)
sl_tydal_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='Seub',4:5]))
sl_tydal_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='Seub',4:5][sl_tydal_ub_order,]))
sl_tydal_ub_cut<-lasclip(sl_tydal_las,sl_tydal_ub_poly)
sl_tydal_ub_cut
plot(sl_tydal_ub_cut)
writeLAS(sl_tydal_b_cut,'Trondelag/clipped_las/sl_tydal_b.las')
writeLAS(sl_tydal_ub_cut,'Trondelag/clipped_las/sl_tydal_ub.las')
#######################################################################
#Steinkjer_1BBb
steinkjer_1BBb_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Steinkjer_1BBb.las')
steinkjer_1BBb_las
plot(steinkjer_1BBb_las)
steinkjer_1BBb_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Bbb',4:5]))
steinkjer_1BBb_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Bbb',4:5][steinkjer_1BBb_b_order,]))
steinkjer_1BBb_b_cut<-lasclip(steinkjer_1BBb_las,steinkjer_1BBb_b_poly)
steinkjer_1BBb_b_cut
plot(steinkjer_1BBb_b_cut)
steinkjer_1BBb_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Bbub',4:5]))
steinkjer_1BBb_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Bbub',4:5][steinkjer_1BBb_ub_order,]))
steinkjer_1BBb_ub_cut<-lasclip(steinkjer_1BBb_las,steinkjer_1BBb_ub_poly)
steinkjer_1BBb_ub_cut
plot(steinkjer_1BBb_ub_cut)
writeLAS(steinkjer_1BBb_b_cut,'Trondelag/clipped_las/steinkjer_1BBb_b.las')
writeLAS(steinkjer_1BBb_ub_cut,'Trondelag/clipped_las/steinkjer_1BBb_ub.las')
#########################################################################################################
#Steinkjer_2BBb
steinkjer_2BBb_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Steinkjer_2BBb.las')
steinkjer_2BBb_las
plot(steinkjer_2BBb_las)
steinkjer_2BBb_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='2Bbb',4:5]))
steinkjer_2BBb_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='2Bbb',4:5][steinkjer_2BBb_b_order,]))
steinkjer_2BBb_b_cut<-lasclip(steinkjer_2BBb_las,steinkjer_2BBb_b_poly)
steinkjer_2BBb_b_cut
plot(steinkjer_2BBb_b_cut)
steinkjer_2BBb_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='2Bbub',4:5]))
steinkjer_2BBb_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='2Bbub',4:5][steinkjer_2BBb_ub_order,]))
steinkjer_2BBb_ub_cut<-lasclip(steinkjer_2BBb_las,steinkjer_2BBb_ub_poly)
steinkjer_2BBb_ub_cut
plot(steinkjer_2BBb_ub_cut)
writeLAS(steinkjer_2BBb_b_cut,'Trondelag/clipped_las/steinkjer_2BBb_b.las')
writeLAS(steinkjer_2BBb_ub_cut,'Trondelag/clipped_las/steinkjer_2BBb_ub.las')
##########################################################################################
# Sub_Namdalseid
sub_namdalseid_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Sub_Namdalseid.las')
sub_namdalseid_las
plot(sub_namdalseid_las)
sub_namdalseid_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Sb',4:5]))
sub_namdalseid_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Sb',4:5][sub_namdalseid_b_order,]))
sub_namdalseid_b_cut<-lasclip(sub_namdalseid_las,sub_namdalseid_b_poly)
sub_namdalseid_b_cut
plot(sub_namdalseid_b_cut)
sub_namdalseid_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Sub',4:5]))
sub_namdalseid_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Sub',4:5][sub_namdalseid_ub_order,]))
sub_namdalseid_ub_cut<-lasclip(sub_namdalseid_las,sub_namdalseid_ub_poly)
sub_namdalseid_ub_cut
plot(sub_namdalseid_ub_cut)
writeLAS(sub_namdalseid_b_cut,'Trondelag/clipped_las/sub_namdalseid_b.las')
writeLAS(sub_namdalseid_ub_cut,'Trondelag/clipped_las/sub_namdalseid_ub.las')
########################################################################################
#Verdal_1vb
verdal_1vb_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Verdal_1vb.las')
verdal_1vb_las
plot(verdal_1vb_las)
verdal_1vb_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Vbb',4:5]))
verdal_1vb_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Vbb',4:5][verdal_1vb_b_order,]))
verdal_1vb_b_cut<-lasclip(verdal_1vb_las,verdal_1vb_b_poly)
verdal_1vb_b_cut
plot(verdal_1vb_b_cut)
verdal_1vb_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='1Vbub',4:5]))
verdal_1vb_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='1Vbub',4:5][verdal_1vb_ub_order,]))
verdal_1vb_ub_cut<-lasclip(verdal_1vb_las,verdal_1vb_ub_poly)
verdal_1vb_ub_cut
plot(verdal_1vb_ub_cut)
writeLAS(verdal_1vb_b_cut,'Trondelag/clipped_las/verdal_1vb_b.las')
writeLAS(verdal_1vb_ub_cut,'Trondelag/clipped_las/verdal_1vb_ub.las')
#########################################################################################
#Verdal_2vb
verdal_2vb_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Trondelag/Verdal_2vb.las')
verdal_2vb_las
plot(verdal_2vb_las)
verdal_2vb_b_order<-chull(as.matrix(plotcoords[plotcoords$Name=='2Vbb',4:5]))
verdal_2vb_b_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='2Vbb',4:5][verdal_2vb_b_order,]))
verdal_2vb_b_cut<-lasclip(verdal_2vb_las,verdal_2vb_b_poly)
verdal_2vb_b_cut
plot(verdal_2vb_b_cut)
verdal_2vb_ub_order<-chull(as.matrix(plotcoords[plotcoords$Name=='2Vbub',4:5]))
verdal_2vb_ub_poly<-Polygon(as.matrix(plotcoords[plotcoords$Name=='2Vbub',4:5][verdal_2vb_ub_order,]))
verdal_2vb_ub_cut<-lasclip(verdal_2vb_las,verdal_2vb_ub_poly)
verdal_2vb_ub_cut
plot(verdal_2vb_ub_cut)
writeLAS(verdal_2vb_b_cut,'Trondelag/clipped_las/verdal_2vb_b.las')
writeLAS(verdal_2vb_ub_cut,'Trondelag/clipped_las/verdal_2vb_ub.las')
##############################TELEMARK#################################################################
#Import plot coords
# Telemark coordinate file: rows matched on `flatenavn`, coordinates taken
# from columns 10:9 (note the reversed column order vs the Trondelag file --
# presumably x then y; confirm against the CSV header).
plotcoords_telemark<-read.csv('Koordinater_20x20_Telemark.csv',header=T,sep=';',dec=',')
#Fritsoe1
# NOTE: 'fritsoel_*' (with a lowercase 'l') is the spelling used
# consistently for the plot variables below.
fritsoe1_1FR_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Fritsoe1.las')
fritsoe1_1FR_las
plot(fritsoe1_1FR_las)
fritsoel_1FR_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fritzøe 1 B',10:9]))
fritsoel_1FR_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fritzøe 1 B',10:9][fritsoel_1FR_b_order,]))
fritsoel_1FR_b_cut<-lasclip(fritsoe1_1FR_las,fritsoel_1FR_b_poly)
fritsoel_1FR_b_cut
plot(fritsoel_1FR_b_cut)
fritsoel_1FR_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fritzøe 1 UB',10:9]))
fritsoel_1FR_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fritzøe 1 UB',10:9][fritsoel_1FR_ub_order,]))
fritsoel_1FR_ub_cut<-lasclip(fritsoe1_1FR_las,fritsoel_1FR_ub_poly)
fritsoel_1FR_ub_cut
plot(fritsoel_1FR_ub_cut)
writeLAS(fritsoel_1FR_b_cut,'Telemark/clipped_las/fritsoel_1FR_b.las')
writeLAS(fritsoel_1FR_ub_cut,'Telemark/clipped_las/fritsoel_1FR_ub.las')
######################################################################################
#Fritsoe2
fritsoe2_2FR_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Fritsoe2.las')
fritsoe2_2FR_las
plot(fritsoe2_2FR_las)
fritsoe2_2FR_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fritzøe 2 B',10:9]))
fritsoe2_2FR_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fritzøe 2 B',10:9][fritsoe2_2FR_b_order,]))
fritsoe2_2FR_b_cut<-lasclip(fritsoe2_2FR_las,fritsoe2_2FR_b_poly)
fritsoe2_2FR_b_cut
plot(fritsoe2_2FR_b_cut)
fritsoe2_2FR_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fritzøe 2 UB',10:9]))
fritsoe2_2FR_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fritzøe 2 UB',10:9][fritsoe2_2FR_ub_order,]))
fritsoe2_2FR_ub_cut<-lasclip(fritsoe2_2FR_las,fritsoe2_2FR_ub_poly)
fritsoe2_2FR_ub_cut
plot(fritsoe2_2FR_ub_cut)
writeLAS(fritsoe2_2FR_b_cut,'Telemark/clipped_las/fritsoe2_2FR_b.las')
writeLAS(fritsoe2_2FR_ub_cut,'Telemark/clipped_las/fritsoe2_2FR_ub.las')
##############################################################################
#Nome_Cappelen_1
nome_cappelen_1_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Nome_Cappelen1.las')
nome_cappelen_1_las
plot(nome_cappelen_1_las)
nome_cappelen_1_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Cappelen 1 B',10:9]))
nome_cappelen_1_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Cappelen 1 B',10:9][nome_cappelen_1_b_order,]))
nome_cappelen_1_b_cut<-lasclip(nome_cappelen_1_las,nome_cappelen_1_b_poly)
nome_cappelen_1_b_cut
plot(nome_cappelen_1_b_cut)
nome_cappelen_1_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Cappelen 1 UB',10:9]))
nome_cappelen_1_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Cappelen 1 UB',10:9][nome_cappelen_1_ub_order,]))
nome_cappelen_1_ub_cut<-lasclip(nome_cappelen_1_las,nome_cappelen_1_ub_poly)
nome_cappelen_1_ub_cut
plot(nome_cappelen_1_ub_cut)
writeLAS(nome_cappelen_1_b_cut,'Telemark/clipped_las/nome_cappelen_1_b.las')
writeLAS(nome_cappelen_1_ub_cut,'Telemark/clipped_las/nome_cappelen_1_ub.las')
#####################################################################################################################
#Notodden 1 Something wrong with the las file
#notodden1_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Notodden1_new.las')
#notodden1_las
#plot(notodden1_las)
#notodden1_las@data$Z[notodden1_las@data$Z<300]<-NA
#notodden1_las@data$Z[notodden1_las@data$Z>600]<-NA
#notodden1_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 1 B',10:9]))
#notodden1_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 1 B',10:9][notodden1_b_order,]))
#notodden1_b_cut<-lasclip(notodden1_las,notodden1_b_poly)
#notodden1_b_cut
#plot(notodden1_b_cut)
#notodden1_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 1 UB',10:9]))
#notodden1_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 1 UB',10:9][notodden1_ub_order,]))
#notodden1_ub_cut<-lasclip(notodden1_las,notodden1_ub_poly)
#notodden1_ub_cut
#plot(notodden1_ub_cut)
#writeLAS(notodden1_b_cut,'Telemark/clipped_las/notodden1_b.las')
#writeLAS(notodden1_ub_cut,'Telemark/clipped_las/notodden1_ub.las')
#########################################################################################################
#Notodden3
# Notodden 3: clip the 'Notodden 3 B' and 'Notodden 3 UB' plots (convex hull
# of the corner points in plotcoords_telemark, coords from columns 10:9) and
# write the clipped clouds to Telemark/clipped_las/.
notodden3_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Notodden3.las')
notodden3_las
plot(notodden3_las)
notodden3_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 3 B',10:9]))
notodden3_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 3 B',10:9][notodden3_b_order,]))
notodden3_b_cut<-lasclip(notodden3_las,notodden3_b_poly)
notodden3_b_cut
plot(notodden3_b_cut)
notodden3_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 3 UB',10:9]))
notodden3_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 3 UB',10:9][notodden3_ub_order,]))
notodden3_ub_cut<-lasclip(notodden3_las,notodden3_ub_poly)
notodden3_ub_cut
plot(notodden3_ub_cut)
writeLAS(notodden3_b_cut,'Telemark/clipped_las/notodden3_b.las')
writeLAS(notodden3_ub_cut,'Telemark/clipped_las/notodden3_ub.las')
############################################################################################
#Notodden 4
#notodden4_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Notodden4_new.las')
#notodden4_las
#plot(notodden4_las) # all blue, with some red dots high up
#notodden4_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 4 B',10:9]))
#notodden4_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 4 B',10:9][notodden4_b_order,]))
#notodden4_b_cut<-lasclip(notodden4_las,notodden4_b_poly)
#notodden4_b_cut
#plot(notodden4_b_cut) # looks normal, normal plot area
#notodden4_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 4 UB',10:9]))
#notodden4_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 4 UB',10:9][notodden4_ub_order,]))
#notodden4_ub_cut<-lasclip(notodden4_las,notodden4_ub_poly)
#notodden4_ub_cut
#plot(notodden4_ub_cut) # area 182 m^2?
#writeLAS(notodden4_b_cut,'Telemark/clipped_las/notodden4_b.las')
#writeLAS(notodden4_ub_cut,'Telemark/clipped_las/notodden4_ub.las')
###############################################################################################
# Per-site recipe (same as the other Telemark sections): read the raw .las,
# clip it to the convex hull of the 'B' and 'UB' plot corner points
# (plotcoords_telemark, flatenavn match, coords from columns 10:9), write
# the clipped clouds to Telemark/clipped_las/.
#Notodden 5
notodden5_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Notodden5.las')
notodden5_las
plot(notodden5_las)
notodden5_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 5 B',10:9]))
notodden5_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 5 B',10:9][notodden5_b_order,]))
notodden5_b_cut<-lasclip(notodden5_las,notodden5_b_poly)
notodden5_b_cut
plot(notodden5_b_cut)
notodden5_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 5 UB',10:9]))
notodden5_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 5 UB',10:9][notodden5_ub_order,]))
notodden5_ub_cut<-lasclip(notodden5_las,notodden5_ub_poly)
notodden5_ub_cut
plot(notodden5_ub_cut)
writeLAS(notodden5_b_cut,'Telemark/clipped_las/notodden5_b.las')
writeLAS(notodden5_ub_cut,'Telemark/clipped_las/notodden5_ub.las')
####################################################################################
#Notodden 6
notodden6_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Notodden6.las')
notodden6_las
plot(notodden6_las)
notodden6_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 6 B',10:9]))
notodden6_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 6 B',10:9][notodden6_b_order,]))
notodden6_b_cut<-lasclip(notodden6_las,notodden6_b_poly)
notodden6_b_cut
plot(notodden6_b_cut)
notodden6_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 6 UB',10:9]))
notodden6_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Notodden 6 UB',10:9][notodden6_ub_order,]))
notodden6_ub_cut<-lasclip(notodden6_las,notodden6_ub_poly)
notodden6_ub_cut
plot(notodden6_ub_cut)
writeLAS(notodden6_b_cut,'Telemark/clipped_las/notodden6_b.las')
writeLAS(notodden6_ub_cut,'Telemark/clipped_las/notodden6_ub.las')
##################################################################################
#Drangedal1
drangedal1_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Drangedal1.las')
drangedal1_las
plot(drangedal1_las)
drangedal1_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 1 B',10:9]))
drangedal1_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 1 B',10:9][drangedal1_b_order,]))
drangedal1_b_cut<-lasclip(drangedal1_las,drangedal1_b_poly)
drangedal1_b_cut
plot(drangedal1_b_cut)
drangedal1_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 1 UB',10:9]))
drangedal1_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 1 UB',10:9][drangedal1_ub_order,]))
drangedal1_ub_cut<-lasclip(drangedal1_las,drangedal1_ub_poly)
drangedal1_ub_cut
plot(drangedal1_ub_cut)
writeLAS(drangedal1_b_cut,'Telemark/clipped_las/drangedal1_b.las')
writeLAS(drangedal1_ub_cut,'Telemark/clipped_las/drangedal1_ub.las')
#####################################################################################
#Drangedal 3
drangedal3_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Drangedal2.las') #lagret med annet navn på disk, men vi har ikke noe Drangedal 2 egentlig
drangedal3_las
plot(drangedal3_las)
drangedal3_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 3 B',10:9]))
drangedal3_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 3 B',10:9][drangedal3_b_order,]))
drangedal3_b_cut<-lasclip(drangedal3_las,drangedal3_b_poly)
drangedal3_b_cut
plot(drangedal3_b_cut)
drangedal3_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 3 UB',10:9]))
drangedal3_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 3 UB',10:9][drangedal3_ub_order,]))
drangedal3_ub_cut<-lasclip(drangedal3_las,drangedal3_ub_poly)
drangedal3_ub_cut
plot(drangedal3_ub_cut)
writeLAS(drangedal3_b_cut,'Telemark/clipped_las/drangedal3_b.las')
writeLAS(drangedal3_ub_cut,'Telemark/clipped_las/drangedal3_ub.las')
############################################################################################
#Drangedal 4
drangedal4_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Drangedal4.las') #lagret med annet navn på disk, men vi har ikke noe Drangedal 2 egentlig
drangedal4_las
plot(drangedal4_las)
drangedal4_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 4 B',10:9]))
drangedal4_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 4 B',10:9][drangedal4_b_order,]))
drangedal4_b_cut<-lasclip(drangedal4_las,drangedal4_b_poly)
drangedal4_b_cut
plot(drangedal4_b_cut)
drangedal4_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 4 UB',10:9]))
drangedal4_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Drangedal 4 UB',10:9][drangedal4_ub_order,]))
drangedal4_ub_cut<-lasclip(drangedal4_las,drangedal4_ub_poly)
drangedal4_ub_cut
plot(drangedal4_ub_cut)
writeLAS(drangedal4_b_cut,'Telemark/clipped_las/drangedal4_b.las')
writeLAS(drangedal4_ub_cut,'Telemark/clipped_las/drangedal4_ub.las')
############################################################################################
#Nome_Cappelen_2
nome_cappelen_2_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Nome_Cappelen2.las')
nome_cappelen_2_las
plot(nome_cappelen_2_las)
nome_cappelen_2_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Cappelen 2 B',10:9]))
nome_cappelen_2_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Cappelen 2 B',10:9][nome_cappelen_2_b_order,]))
nome_cappelen_2_b_cut<-lasclip(nome_cappelen_2_las,nome_cappelen_2_b_poly)
nome_cappelen_2_b_cut
plot(nome_cappelen_2_b_cut)
nome_cappelen_2_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Cappelen 2 UB',10:9]))
nome_cappelen_2_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Cappelen 2 UB',10:9][nome_cappelen_2_ub_order,]))
nome_cappelen_2_ub_cut<-lasclip(nome_cappelen_2_las,nome_cappelen_2_ub_poly)
nome_cappelen_2_ub_cut
plot(nome_cappelen_2_ub_cut)
writeLAS(nome_cappelen_2_b_cut,'Telemark/clipped_las/nome_cappelen_2_b.las')
writeLAS(nome_cappelen_2_ub_cut,'Telemark/clipped_las/nome_cappelen_2_ub.las')
###########################################################################################
#Kviteseid1
kviteseid1_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Kviteseid1.las')
kviteseid1_las
plot(kviteseid1_las)
kviteseid1_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 1 B',10:9]))
kviteseid1_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 1 B',10:9][kviteseid1_b_order,]))
kviteseid1_b_cut<-lasclip(kviteseid1_las,kviteseid1_b_poly)
kviteseid1_b_cut
plot(kviteseid1_b_cut)
kviteseid1_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 1 UB',10:9]))
kviteseid1_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 1 UB',10:9][kviteseid1_ub_order,]))
kviteseid1_ub_cut<-lasclip(kviteseid1_las,kviteseid1_ub_poly)
kviteseid1_ub_cut
plot(kviteseid1_ub_cut)
writeLAS(kviteseid1_b_cut,'Telemark/clipped_las/kviteseid1_b.las')
writeLAS(kviteseid1_ub_cut,'Telemark/clipped_las/kviteseid1_ub.las')
#####################################################################################
#Kviteseid2
kviteseid2_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Kviteseid2.las')
kviteseid2_las
plot(kviteseid2_las)
kviteseid2_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 2 B',10:9]))
kviteseid2_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 2 B',10:9][kviteseid2_b_order,]))
kviteseid2_b_cut<-lasclip(kviteseid2_las,kviteseid2_b_poly)
kviteseid2_b_cut
plot(kviteseid2_b_cut)
kviteseid2_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 2 UB',10:9]))
kviteseid2_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 2 UB',10:9][kviteseid2_ub_order,]))
kviteseid2_ub_cut<-lasclip(kviteseid2_las,kviteseid2_ub_poly)
kviteseid2_ub_cut
plot(kviteseid2_ub_cut)
writeLAS(kviteseid2_b_cut,'Telemark/clipped_las/kviteseid2_b.las')
writeLAS(kviteseid2_ub_cut,'Telemark/clipped_las/kviteseid2_ub.las')
#####################################################################################
#Kviteseid3
kviteseid3_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Kviteseid3.las')
kviteseid3_las
plot(kviteseid3_las)
kviteseid3_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 3 B',10:9]))
kviteseid3_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 3 B',10:9][kviteseid3_b_order,]))
kviteseid3_b_cut<-lasclip(kviteseid3_las,kviteseid3_b_poly)
kviteseid3_b_cut
plot(kviteseid3_b_cut)
kviteseid3_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 3 UB',10:9]))
kviteseid3_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Kviteseid 3 UB',10:9][kviteseid3_ub_order,]))
kviteseid3_ub_cut<-lasclip(kviteseid3_las,kviteseid3_ub_poly)
kviteseid3_ub_cut
plot(kviteseid3_ub_cut)
writeLAS(kviteseid3_b_cut,'Telemark/clipped_las/kviteseid3_b.las')
writeLAS(kviteseid3_ub_cut,'Telemark/clipped_las/kviteseid3_ub.las')
########################################################################################
#Furesdal
Furesdal_las <- readLAS('C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Telemark/Furesdal.las')
Furesdal_las
plot(Furesdal_las)
Furesdal_b_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fyresdal 1 B',10:9]))
Furesdal_b_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fyresdal 1 B',10:9][Furesdal_b_order,]))
Furesdal_b_cut<-lasclip(Furesdal_las,Furesdal_b_poly)
Furesdal_b_cut
plot(Furesdal_b_cut)
Furesdal_ub_order<-chull(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fyresdal 1 UB',10:9]))
Furesdal_ub_poly<-Polygon(as.matrix(plotcoords_telemark[plotcoords_telemark$flatenavn=='Fyresdal 1 UB',10:9][Furesdal_ub_order,]))
Furesdal_ub_cut<-lasclip(Furesdal_las,Furesdal_ub_poly)
Furesdal_ub_cut
plot(Furesdal_ub_cut)
writeLAS(Furesdal_b_cut,'Telemark/clipped_las/Furesdal_b.las')
writeLAS(Furesdal_ub_cut,'Telemark/clipped_las/Furesdal_ub.las')
#########################HEDMARK_AKERSHUS#############################################################
# Clip the Hedmark/Akershus LiDAR point clouds to the browsed (B) and
# unbrowsed (UB) exclosure plots and write one LAS file per plot.
#
# Plot corners are looked up by the `Uthegningi` plot code (columns 15:14 of
# the coordinate table).  One helper replaces the five steps that were
# copy-pasted per site: read LAS, build convex-hull polygon, clip, plot for
# a visual check, write to Hedmark_Akershus/clipped_las/.
#
# Field notes kept from the original script:
#  - Fet3: terrain rendered all blue, some red points high up (birds?);
#    the plots under study themselves looked OK.
#  - Eidskog and Stangeskovene Eidskog: completely blue renderings.
#  - Halvard Pramhus UB: one very high outlying point (like a bird?);
#    deliberately left in here — to be fixed in the canopy/terrain step.
#Import plot coords
plotcoords_hedmark_akershus<-read.csv('data/Koordinater_20x20_Hedmark_Akershus.csv',header=T,sep=';',dec=',')
hedmark_las_dir <- 'C:/Users/Ingrid/Documents/Master - Sustherb/orginale_las/Hedmark_Akershus'

# Clip one plot (identified by its Uthegningi code) out of a site LAS and
# write it to `out_file`.
clip_hedmark_plot <- function(las, plot_code, out_file) {
  coords <- as.matrix(
    plotcoords_hedmark_akershus[plotcoords_hedmark_akershus$Uthegningi == plot_code, 15:14])
  poly <- Polygon(coords[chull(coords), ])  # corners in convex-hull order
  clipped <- lasclip(las, poly)
  print(clipped)                            # point-count summary, as before
  plot(clipped)                             # visual sanity check
  writeLAS(clipped, out_file)
  invisible(clipped)
}

# One row per site: LAS file, B-plot code, UB-plot code, output prefix.
# Note the B/UB code numbering is NOT consistent across sites (e.g. DH2 is
# the browsed plot at Didrik Holmsen, but SSA1 at Stangeskovene Aurskog).
hedmark_sites <- data.frame(
  las_file   = c('Didrik_Holmsen.las', 'Stangeskovene_Aurskog.las',
                 'Stig_Dahlen.las', 'Truls_Holm.las', 'Fet3.las',
                 'Eidskog.las', 'Halvard_Pramhus.las',
                 'Stangeskovene_Eidskog.las'),
  b_code     = c('DH2', 'SSA1', 'SD2', 'TH1', 'FK2', 'STSKN2', 'HP1', 'SSB1'),
  ub_code    = c('DH1', 'SSA2', 'SD1', 'TH2', 'FK1', 'STSKN1', 'HP2', 'SSB2'),
  out_prefix = c('didrik_holmsen', 'stangeskovene_aurskog', 'stig_dahlen',
                 'truls_holm', 'fet3', 'eidskog', 'halvard_pramhus',
                 'stangeskovene_eidskog'),
  stringsAsFactors = FALSE)

for (i in seq_len(nrow(hedmark_sites))) {
  site <- hedmark_sites[i, ]
  site_las <- readLAS(file.path(hedmark_las_dir, site$las_file))
  print(site_las)
  plot(site_las)
  clip_hedmark_plot(site_las, site$b_code,
                    paste0('Hedmark_Akershus/clipped_las/', site$out_prefix, '_b.las'))
  clip_hedmark_plot(site_las, site$ub_code,
                    paste0('Hedmark_Akershus/clipped_las/', site$out_prefix, '_ub.las'))
}
################################################################################################
#New sites added Oct 2019###
################################################################################################
# Same clipping workflow as the Hedmark/Akershus sites above, but the source
# point clouds are .laz exports on the T: network drive and the clipped plots
# are written to data/clipped_las/.  Plot corners again come from the
# `Uthegningi` codes in `plotcoords_hedmark_akershus` (columns 15:14).

# Clip one plot (identified by its Uthegningi code) out of a site LAS and
# write it to `out_file`.
clip_2019_plot <- function(las, plot_code, out_file) {
  coords <- as.matrix(
    plotcoords_hedmark_akershus[plotcoords_hedmark_akershus$Uthegningi == plot_code, 15:14])
  poly <- Polygon(coords[chull(coords), ])  # corners in convex-hull order
  clipped <- lasclip(las, poly)
  print(clipped)                            # point-count summary, as before
  plot(clipped)                             # visual sanity check
  writeLAS(clipped, out_file)
  invisible(clipped)
}

# Sorum 1, Nes 1 and Nes 2 (out_prefix sorem1/nes1/nes2 kept as in the
# original output file names).
new_sites_2019 <- data.frame(
  laz_path   = c('T:\\vm\\inh\\botanisk\\Bruker\\James\\Ingrid LAS files\\hedmark_new_las_version\\SKB_2013_2p_0_5m\\121\\data\\eksport_193575_121_1.laz',
                 'T:\\vm\\inh\\botanisk\\Bruker\\James\\Ingrid LAS files\\hedmark_new_las_version\\DDB_2019_5p_0_25m\\1097\\data\\eksport_193592_1097_1.laz',
                 'T:\\vm\\inh\\botanisk\\Bruker\\James\\Ingrid LAS files\\hedmark_new_las_version\\OLB_2019_5p_0_25m\\1097\\data\\eksport_193604_1097_1.laz'),
  b_code     = c('SK2', 'DD2', 'OL2'),
  ub_code    = c('SK1', 'DD1', 'OL1'),
  out_prefix = c('sorem1', 'nes1', 'nes2'),
  stringsAsFactors = FALSE)

for (i in seq_len(nrow(new_sites_2019))) {
  site <- new_sites_2019[i, ]
  site_las <- readLAS(site$laz_path)
  print(site_las)
  plot(site_las)
  clip_2019_plot(site_las, site$b_code,
                 paste0('data/clipped_las/', site$out_prefix, '_b.las'))
  clip_2019_plot(site_las, site$ub_code,
                 paste0('data/clipped_las/', site$out_prefix, '_ub.las'))
}
|
07d1fcaf9f4c2404a165c2bfb95a7fd416de8213
|
9a48ef3292c5a550effd3b7e9557f6db7a607678
|
/sclust-smc-het/sclust_reader_snv_mutect.R
|
35d14c6447a78b412116b173ab373e2975c16c9e
|
[] |
no_license
|
smc-het-challenge/6181088
|
1133f0c973e3077243e1240e67ebd273f502895e
|
e927205287cbc2681922fb662308fe29a2d1598b
|
refs/heads/main
| 2023-04-24T13:21:16.428952
| 2021-05-06T23:56:18
| 2021-05-06T23:56:18
| 365,068,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,432
|
r
|
sclust_reader_snv_mutect.R
|
# =============================================================================
# Title: Sclust (for the SMC-Het Challenge)
# Name: sclust_reader_snv_mutect.R
# Author: Tsun-Po Yang (tyang2@uni-koeln.de)
# Last Modified: 15/06/16
# =============================================================================
# -----------------------------------------------------------------------------
# Sclust - 1. SNV reader - Preprocessing SMC-Het format
# -----------------------------------------------------------------------------
# Build a skeleton VCF body (the 8 standard columns) from the parsed MuTect
# input.  CHROM gets a "chr" prefix, QUAL is fixed at 255 and FILTER at
# "PASS"; INFO is left as "." and filled in later by the main script.
#
# Constructing the data.frame directly (instead of via a character matrix)
# avoids the old stringsAsFactors pitfall and an extra copy.
#
# sample.snv : data.frame with at least CHROM, POS, ID, REF and ALT columns
# returns    : data.frame with columns #CHROM, POS, ID, REF, ALT, QUAL,
#              FILTER, INFO (one row per input row)
initSNV <- function(sample.snv) {
  snv <- data.frame(
    paste0("chr", sample.snv$CHROM),
    sample.snv$POS,
    sample.snv$ID,
    sample.snv$REF,
    sample.snv$ALT,
    255,        # fixed placeholder quality
    "PASS",
    ".",        # INFO placeholder, filled in by the caller
    stringsAsFactors = FALSE)
  names(snv) <- c("#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO")
  return(snv)
}
# Total read depth at a site: the sum of the REF and ALT read counts taken
# from the second, comma-separated (AD) field of a colon-delimited VCF
# sample column such as "0/1:10,5:...".
snvInfoDP <- function(format) {
  ad_field <- strsplit(format, ":", fixed = TRUE)[[1]][2]
  counts <- as.numeric(strsplit(ad_field, ",", fixed = TRUE)[[1]])
  counts[1] + counts[2]
}
# Variant allele fraction: ALT / (REF + ALT), rounded to 7 decimal places,
# using the second, comma-separated (AD) field of a colon-delimited VCF
# sample column such as "0/1:10,5:...".
snvInfoAF <- function(format) {
  ad_field <- strsplit(format, ":", fixed = TRUE)[[1]][2]
  counts <- as.numeric(strsplit(ad_field, ",", fixed = TRUE)[[1]])
  round(counts[2] / (counts[1] + counts[2]), 7)
}
##
## Main: convert a MuTect call file (VCF-like, tab-separated, first CLI
## argument) into the Sclust input files `sclust.vcf` and
## `sclust_position.txt` in the working directory.
args <- commandArgs(T)
sample <- args[1]                       # path to the MuTect output file
if (length(readLines(sample)) != 0) {   # skip completely empty call sets
  sample.snv <- read.table(sample, header=F, sep="\t", fill=T, as.is=T, comment.char="#")
  names(sample.snv) <- c("CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "NORMAL", "TUMOR")
  snv <- initSNV(sample.snv)
  ## Tumour sample column (11) carries the AD field used for depth and AF.
  ## vapply replaces the original `mapply(v = ..., function(v) ...)` call,
  ## which only worked because the unnamed function happened to match FUN.
  tumor_fmt <- sample.snv[[11]]
  dp <- vapply(tumor_fmt, snvInfoDP, numeric(1), USE.NAMES = FALSE)
  af <- vapply(tumor_fmt, snvInfoAF, numeric(1), USE.NAMES = FALSE)
  ## Assemble the INFO column; normal-sample depth/AF (DP_N, AF_N) are not
  ## available from this input, and FR/TG are placeholders Sclust expects.
  snv$INFO <- paste0("DP=", dp, ";DP_N=.", ";AF=", af, ";AF_N=.", ";FR=.", ";TG=.")
  write.table(snv[,1:8], "sclust.vcf", col.names=names(snv[,1:8]), row.names=F, quote=F, sep="\t")
  ## chr:pos list used by Sclust to locate the variants.
  write.table(paste(snv[,1], snv[,2], sep=":"), "sclust_position.txt", col.names=F, row.names=F, quote=F, sep="\t")
}
|
78ea0ea2f0291cac3aadc9b6a40464c8128d45b6
|
29f8f3ee59c366ea408633d183614bc39b49b26d
|
/Duke_DGHI/br_zikamicrogir_code.R
|
364eee24d0bb4db7cdace82089a5be40d54c47b2
|
[] |
no_license
|
souzajvp/analytical_codes
|
92db345dc75f128c2f25fb7b28f0891139ffea98
|
dcc49662253ba1dbd4f54b8c4caea40232632783
|
refs/heads/master
| 2023-05-23T06:06:12.058469
| 2021-06-07T18:11:00
| 2021-06-07T18:11:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,197
|
r
|
br_zikamicrogir_code.R
|
# Packages: plyr for ddply aggregation, lubridate for date handling.
library("plyr")
library("lubridate")
#WEEKS
# Weekly analysis: Zika cases, microcephaly cases and live births per
# Brazilian municipality, joined on the 6-digit IBGE code ("X...IBGE_6").
# NOTE(review): absolute local paths — rerunning requires these files.
data_zika1<-read.csv("/Users/joaovissoci/Box Sync/Home Folder jnv4/Data/Global EM/Brazil/zika_gis/br_zikaepi_data.csv")
data_micro<-read.csv("/Users/joaovissoci/Downloads/microcefalia.csv")
data_nascvivos<-read.csv("/Users/joaovissoci/Desktop/nascvivos.csv")
# Left-join microcephaly counts onto the Zika data (keep all Zika rows).
data_zika_weeks_merged0<-merge(x = data_zika1,
	y = data_micro,
	by = "X...IBGE_6",
	all.x = TRUE)
# Then left-join live births (denominator for microcephaly incidence).
data_zika_weeks_merged<-merge(x = data_zika_weeks_merged0,
	y = data_nascvivos,
	by = "X...IBGE_6",
	all.x = TRUE)
#adjusting ZIka data
# Drop identifier and summary columns so only the weekly count columns
# remain; REGIAO is re-attached later from data_zika_weeks_merged.
# remove.vars() comes from the gdata package.
data_zika_weeks_clean<-remove.vars(data_zika_weeks_merged,
	c("X...IBGE_6",
	"REGIAO",
	"UF",
	"MUNICIPIO",
	"ACUM_ZIKA_TRIM1",
	"ACUM_ZIKA_TRIM2",
	"ACUM_ZIKA_TRIM3",
	"ACUM_ZIKA_TRIM4",
	"POP_ZIKA",
	"INCI_ZIKA",
	"nascvivos"))
#transforming NAs to 0
# Replace missing values with zero (municipalities with no reported cases
# in a given week come out of the merges as NA).  Base-R equivalent of the
# original car::recode(x, "NA=0") — same result, no extra dependency.
NAto0<-function(x){
	replace(x, is.na(x), 0)
}
#applying function
# lapply returns a list of columns; convert back to a data.frame afterwards.
data_zika_weeks_clean<-lapply(data_zika_weeks_clean,NAto0)
data_zika_weeks_clean<-as.data.frame(data_zika_weeks_clean) #return to data,frame format
#normalize by population
# Columns 1:52 = weekly Zika counts, 53:96 = weekly microcephaly counts.
# NOTE(review): the multiplier is 10000 in both cases but the inline
# comments claim per-100,000 (Zika) and per-1,000,000 (microcephaly) —
# confirm which scale was intended.
normalized_zika<-lapply(data_zika_weeks_clean[,-c(53:96)],
	function(i) (i/data_zika_weeks_merged$POP_ZIKA)*10000) #zika by 100,000
normalized_zika<-as.data.frame(normalized_zika) #micro by 1,000,000
normalized_micro<-lapply(data_zika_weeks_clean[,-c(1:52)],
	function(i) (i/data_zika_weeks_merged$nascvivos)*10000)
normalized_micro<-as.data.frame(normalized_micro)
# Re-attach the region label (it was dropped by remove.vars above).
data_zikamicro<-data.frame(
	REGIAO=data_zika_weeks_merged$REGIAO,
	normalized_zika,
	normalized_micro)
# Long format: one row per (region, week-column) pair; melt() is from
# the reshape/reshape2 package.
data_zikamicro_melted <- melt(data_zikamicro,
	id=c("REGIAO"))
# Mean municipal incidence per region and week-column.
time_series_data<-ddply(data_zikamicro_melted,
	c("REGIAO", "variable"),
	summarise,
	cases = mean(value)
	# micro_cases = mean(Microcefalia)
	)
# NOTE(review): this block looks broken — data_zika_weeks_clean has no
# REGIAO column (removed earlier) and no `value` column, so this ddply
# call should error; `microdata` is also never used afterwards. Verify.
microdata<-ddply(data_zika_weeks_clean[,-c(1:52)],
	c("REGIAO"),
	summarise,
	cases = sum(value)
	# micro_cases = mean(Microcefalia)
	)
# NOTE(review): exact duplicate of the ddply above — recomputes the same
# time_series_data (harmless but redundant).
time_series_data<-ddply(data_zikamicro_melted,
	c("REGIAO", "variable"),
	summarise,
	cases = mean(value)
	# micro_cases = mean(Microcefalia)
	)
# Label each row by source: the first 52 week-columns per region are Zika,
# the next 44 are microcephaly.
time_series_data$source<-c(rep("Zika cases/100,000",52),
	rep("Microcephaly cases/1,000,000",44))
# Time axis: surveillance weeks 1-52 (Zika) and 6-49 (microcephaly),
# repeated for the 5 regions.
time_series_data$time<-rep(c(1:52,6:49),5)
#Option 1 - with facets for columns and lines
#plotting time series by month
# Facet grid: rows = region, columns = data source (Zika vs microcephaly),
# one line per panel.
# NOTE(review): theme_bw() is applied AFTER theme(), so it overrides the
# bold-title setting — reorder if the custom title styling is wanted.
ggplot(time_series_data,aes(time,
	cases)) +
geom_line(aes(group=1)) +
# geom_line(data=data2,aes(color="Speeding")) +
# labs(color="Legend") +
# scale_colour_manual("", breaks = c("Distracted Driving", "Speeding"),
#                      values = c("blue", "brown")) +
#ggtitle("Closing Stock Prices: IBM & Linkedin") +
# scale_x_discrete(limits=) +
facet_grid(REGIAO ~ source, scales="free_x") +
theme(plot.title = element_text(lineheight=.7,
	face="bold")) +
xlab("Surveillance Week") +
ylab("Average municipality incidences by region") +
theme_bw()
#Option 2 - mergin lines
#plotting time series by month
# Same data, but both sources drawn as coloured lines in one panel per
# region (rows = region only).
ggplot(time_series_data,aes(time,
	cases)) +
geom_line(aes(group=source,color=source)) +
# geom_line(data=data2,aes(color="Speeding")) +
# labs(color="Legend") +
# scale_colour_manual("", breaks = c("Distracted Driving", "Speeding"),
#                      values = c("blue", "brown")) +
#ggtitle("Closing Stock Prices: IBM & Linkedin") +
# scale_x_discrete(limits=) +
facet_grid(REGIAO ~ ., scales="free_x") +
theme(plot.title = element_text(lineheight=.7,
	face="bold")) +
xlab("Surveillance Week") +
ylab("Average municipality incidences by region") +
theme_bw()
#BIMESTERS
# Bimester analysis: pre-stacked file with one row per municipality and
# bimester, containing Zika and Microcefalia counts plus region labels.
# NOTE(review): absolute local path — rerunning requires this file.
data_zika2<-read.csv("/Users/joaovissoci/Downloads/zika_stacked.csv")
# data_nascvivos<-read.csv("/Users/joaovissoci/Desktop/nascvivos.csv")
# data_zika3<-merge(x = data_zika2,
# 	y = data_nascvivos,
# 	by = "X...IBGE_6",
# 	all.x = TRUE)
# data_zika<-merge(x = data_zika1,
# 	y = data_micro,
# 	by = "X...IBGE_6",
# 	all.x = TRUE)
# time_series_month_O01<-ddply(data_zika,
# 	"REGIAO",
# 	# summarise,
# 	sem_01 = sum(SEM_01)
# 	)
#adjusting ZIka data
# Drop the identifier columns; the remaining columns feed the ddply below.
data_zika<-remove.vars(data_zika2,
	c("X...ibge6",
	"estado",
	"income_level"))
# "ACUM_ZIKA_TRIM1",
# "ACUM_ZIKA_TRIM2",
# "ACUM_ZIKA_TRIM3",
# "ACUM_ZIKA_TRIM4",
# "POP_ZIKA",
# "INCI_ZIKA",
# "Total.Geral"))
# NAto0<-function(x){
# car::recode(x,"NA=0")
# }
# data_zika_weeks<-lapply(data_zika_weeks,NAto0)
# data_zika_weeks<-as.data.frame(data_zika_weeks)
# # x <- list(1:3, 4:6)
# normalized_zika<-lapply(data_zika_weeks[,-c(1,54:95)],
# function(i) (i/data_zika$POP_ZIKA)*100000)
# normalized_zika<-as.data.frame(normalized_zika)
# normalized_micro<-lapply(data_zika_weeks[,-c(1,2:53)],
# function(i) (i/data_zika$POP_ZIKA)*1000000)
# normalized_micro<-as.data.frame(normalized_micro)
# data_zikamicro<-data.frame(
# REGIAO=data_zika_weeks$REGIAO,
# normalized_zika,
# normalized_micro)
# Mean municipal counts per region and bimester for both conditions.
time_series_data<-ddply(data_zika,
	c("regiao","bimester"),
	summarise,
	zika_cases = mean(Zika),
	micro_cases = mean(Microcefalia)
	)
# Rescale Zika so both series fit on one axis.
# NOTE(review): dividing by 100 and labelling "per 1,000" below assumes the
# input counts were already per 100,000 — confirm the source units.
time_series_data$zika_cases<-time_series_data$zika_cases/100
# Long format for plotting, then human-readable legend labels via
# car::recode (multi-line recode specification string).
time_series_data2 <- melt(time_series_data, id=c("regiao","bimester"))
time_series_data2$variable<-car::recode(time_series_data2$variable,"
'zika_cases'='Zika cases/1,000';
'micro_cases'='Microcephaly cases/100,000'")
#Option 1 - with facets for columns and lines
#plotting time series by month
# Facet grid: rows = region, columns = condition; bimesters ordered
# explicitly on the x axis.
# NOTE(review): theme_bw() is applied AFTER theme(), so it overrides the
# bold-title setting (Option 2 below has the correct order).
ggplot(time_series_data2,aes(bimester,
	value)) +
geom_line(aes(group=1)) +
# geom_line(data=data2,aes(color="Speeding")) +
# labs(color="Legend") +
# scale_colour_manual("", breaks = c("Distracted Driving", "Speeding"),
#                      values = c("blue", "brown")) +
#ggtitle("Closing Stock Prices: IBM & Linkedin") +
scale_x_discrete(limits=c("First",
	"Second",
	"Third",
	"Fourth",
	"Fifth",
	"Sixth")) +
facet_grid(regiao ~ variable, scales="free_x") +
theme(plot.title = element_text(lineheight=.7,
	face="bold")) +
xlab("Bimesters") +
ylab("Average municipality incidences by region") +
theme_bw()
# scales::alpha() is used for the translucent legend background below.
library(scales)
#Option 2 - mergin lines
#plotting time series by month
# Both conditions as coloured lines in one panel per region, with the
# legend placed inside the plot area on a translucent background.
ggplot(time_series_data2,aes(bimester,
	value)) +
geom_line(aes(group=variable, color=variable)) +
# geom_line(data=data2,aes(color="Speeding")) +
# labs(color="Legend") +
# scale_colour_manual("", breaks = c("Distracted Driving", "Speeding"),
#                      values = c("blue", "brown")) +
#ggtitle("Closing Stock Prices: IBM & Linkedin") +
scale_x_discrete(limits=c("First",
	"Second",
	"Third",
	"Fourth",
	"Fifth",
	"Sixth")) +
facet_grid(regiao ~ ., scales="free_x") +
theme_bw() +
theme(plot.title = element_text(lineheight=.7,
	face="bold"),
	legend.position=c(.7,.95),
	legend.title=element_blank(),
	legend.background = element_rect(fill=alpha('white',0.1))) +
xlab("Bimester") +
ylab("Average municipality incidences by region")
|
e2ea2a91e9649f03ec05d9f105b040774cdd4b0f
|
6242962bfa0e8022cebc8822def9c611eea02132
|
/2021/2021_40.R
|
52ebd6ee4af7ac114060a042fa9710388bbd784c
|
[] |
no_license
|
nickopotamus/preppin_data
|
d2c12800252792a96e5c6d3ec311eab40064a058
|
fc19d5ed55e659bed65ecb2da580039846345032
|
refs/heads/main
| 2023-09-01T04:54:07.096793
| 2021-10-07T05:19:22
| 2021-10-07T05:19:22
| 414,340,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,150
|
r
|
2021_40.R
|
# 2021, Week 40
# Animal adoptions
# Downloads the Austin Animal Center outcomes extract at run time (network
# required) and tabulates, per species, the percentage of outcomes that were
# adoption / return-to-owner / transfer vs everything else.
# NOTE(review): `source` here shadows base::source for the rest of the session.
library(tidyverse) # Data handling
library(curl) # Better import
temp <- tempfile()
source <- "https://data.austintexas.gov/resource/9t4d-g238.csv"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
cats_and_dogs <-
  read_csv(temp)[-4] %>% # Remove duplicated field (4th column)
  # Filter to cats and dogs (and remove the NA)
  filter(animal_type %in% c("Cat", "Dog") & !is.na(outcome_type)) %>%
  mutate(outcome_type = fct_collapse(outcome_type, # Group outcome type
                                     "Adopted, Returned to Owner or Transferred" =
                                       c("Adoption", "Return to Owner", "Transfer"),
                                     other_level = "Other")) %>%
  group_by(animal_type, outcome_type) %>% # Summarize
  summarise(count = n()) %>%
  # After summarise() the result is still grouped by animal_type, so
  # prop.table(count) yields within-species percentages.
  mutate(percentage = round(prop.table(count) * 100, 1)) %>%
  select(-count) %>%
  pivot_wider(names_from = outcome_type, # Neaten table
              values_from = percentage) %>%
  arrange(desc(animal_type)) %>% # Order as per example
  rename("Animal Type" = animal_type) # Sort final name
|
dd834908041f5b03cc7c52048ea1e697d8c50708
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gamlss.countKinf/examples/gamlss.countKinf-package.Rd.R
|
acd20c140d6d54385985900e35a988538b3003a4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 454
|
r
|
gamlss.countKinf-package.Rd.R
|
# Auto-extracted example script for the gamlss.countKinf package:
# gen.Kinf() generates a K-inflated version of a discrete gamlss.family.
library(gamlss.countKinf)
### Name: gamlss.countKinf-package
### Title: Generating and Fitting K-Inflated 'discrete gamlss.family'
###   Distributions
### Aliases: gamlss.countKinf-package gamlss.countKinf
### Keywords: package distribution regression
### ** Examples
# generating one inflated distribution from SICHEL model
gen.Kinf(family=SICHEL, kinf=1)
# generating two inflated distribution from Delaporte model
# NOTE(review): the comment above says "two inflated" but kinf=1 below also
# builds a one-inflated family; the package example likely intended kinf=2 --
# confirm against the gamlss.countKinf Rd documentation.
gen.Kinf(family=DEL, kinf=1)
|
259cc09c3936db0673eae8abb1f43d141b029e95
|
d4a2668077fe1c2561e4fac54a1f3b36523fec3d
|
/R/ORF_to_UniIds.R
|
a45dc139023672ac3fc58049c798f71dbc3fbcf5
|
[] |
no_license
|
saha-shyamasree/Proteomics
|
23c58cc00b812140e85638911f603b1737599151
|
3c07a069c87dcc1c09f2665da0ac29e055e40da2
|
refs/heads/master
| 2020-12-06T20:36:11.158420
| 2016-05-27T12:53:56
| 2016-05-27T12:53:56
| 24,229,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,195
|
r
|
ORF_to_UniIds.R
|
#### Conrad's proteomics course.
### This code reads identified ORFs and BLAST result and write a list of uniprot proteins that were identified.
# Helper functions (proteinGroup, blast, blastFilter*, replaceIds, ...) come
# from the sourced RLib.R; they are not defined in this file.
source("D:/Code/Proteomics/R/RLib.R")
# Filter settings passed to proteinGroupFiltered() below.
# NOTE(review): `rev` shadows base::rev for this session.
rev=1
peptide=1
pepThreshold=1
# Upper cutoff passed to blastFilterEval() -- presumably a BLAST e-value
# threshold (1e-30); confirm against RLib.R.
upper=0.000000000000000000000000000001
# f1: identified protein groups CSV; f2: BLAST result CSV;
# d1/d2: directories containing them (d1 is also the output directory).
f1="trinity_PITORF+fdr+th+grouping+prt.csv"
f2="trinity_PITORF_human_adeno_blast2.csv"
d1="D:/data/Results/Human-Adeno/GIOPaperResults/trinity/"
d2="D:/data/blast/blastCSV/"
readMat <- function(f1, f2, d1, d2, rev = 1, peptide = 1, pepThreshold = 1, upper = 0.1) {
  # Read and filter the identified protein groups (f1 in d1), read and filter
  # the BLAST hits (f2 in d2, e-value cutoff `upper`), then replace the
  # protein accessions with their BLAST-mapped UniProt ids.
  # Returns list(Mat1 = filtered/mapped protein matrix, blast = BLAST table).
  # All helpers (proteinGroupFiltered, replaceComma, blast, ...) are defined
  # in the sourced RLib.R.
  prot <- proteinGroupFiltered(proteinGroup(f1, d1), rev, peptide, pepThreshold)
  prot <- replaceComma(prot, 3)
  # SwissProt ids use the "sw" prefix in the search output but "sp" in BLAST;
  # switch to "sp" for matching, then back to "sw" after the id replacement.
  prot[, "protein.accession"] <- sub("^sw", "sp", prot[, "protein.accession"])
  hits <- blastFilterEval(blastFilterMatch(blast(f2, d2), 1), upper)
  print("Mat1")
  print(dim(prot))
  print("BLAST")
  print(dim(hits))
  prot[, "protein.accession"] <- replaceIds(prot, hits)
  prot[, "protein.accession"] <- sub("^sp", "sw", prot[, "protein.accession"])
  list(Mat1 = prot, blast = hits)
}
write.table(Mats$Mat1[,'protein.accession'],file=paste(d1,"IdentifiedORFsUniprotIds.tsv"),col.names=F,row.names=F,quote=F)
|
13ca2962f7ae3be39f265c92bfd8a4eddf0b7733
|
bd72f4093a04c5abe7c54fa0dbcf7de0f0b39ed3
|
/testes/leitura_cadUnico.r
|
906cc268621c242458dbf06eb98f290be6deeab8
|
[] |
no_license
|
nataliabueno/microdadosBrasil
|
fdd8b53489468015a663776182342f4dafa03931
|
0a638fa5a718bf6c759a4a81873ae4c5770b2f32
|
refs/heads/master
| 2021-06-21T22:53:06.164782
| 2017-08-10T19:23:00
| 2017-08-10T19:23:00
| 100,613,774
| 5
| 0
| null | 2017-08-17T14:42:21
| 2017-08-17T14:42:21
| null |
UTF-8
|
R
| false
| false
| 321
|
r
|
leitura_cadUnico.r
|
# Load the in-development microdadosBrasil package from a network share and
# read the 2013 CadUnico household ("domicilios") microdata.
library(dplyr);library(devtools)
# wdMB: root of the microdadosBrasil package source; wdCadUnico: raw data root.
wdMB <- "//Sbsb2/cadastro_unico_novo/microdadosBrasil-RA/microdadosBrasil-RA"
wdCadUnico<- "//Sbsb2/cadastro_unico_novo/CadUnico_V7"
# Side effect: changes the working directory for the whole session.
# load_all() with no argument loads the package rooted at the current dir.
setwd(wdMB)
load_all()
cadUnico_192013_01<- read_data("CadUnico",i = 2013, "domicilios", metadata = read_metadata("CadUnico"),root_path = wdCadUnico)
|
84c19506ad93097139dcdd58ca781a8f5d78bc36
|
22020d281cbb4e33716830d75bda413e0bdaa0ff
|
/run_analysis.R
|
b26a506d8a8bea2aab059179beda454cbb7d14ea
|
[] |
no_license
|
Monica1104/Getting-and-Cleaning-Data-Course-Project
|
c7ab493c7cd33ac9d8a7ecc83884b83a421975e6
|
b5cf93e7b117855648aa6fb890727c6a7efea85a
|
refs/heads/master
| 2021-01-01T16:41:19.914205
| 2017-07-21T03:35:49
| 2017-07-21T03:35:49
| 97,889,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,315
|
r
|
run_analysis.R
|
# 1.Download and unzipping dataset
# (UCI Human Activity Recognition dataset; network required on first run)
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# NOTE(review): method = "curl" requires the curl binary on PATH (may fail on
# plain Windows installs) -- method = "auto" is more portable; confirm target OS.
download.file(fileUrl, destfile = "./data/Dataset.zip",method = "curl")
unzip(zipfile = "./data/Dataset.zip",exdir = "./data")
#2.Merges the training and the test data sets to create one data set
#Reading files
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
features <- read.table("./data/UCI HAR Dataset/features.txt")
activitylabels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
#Assign column names (features.txt column 2 holds the measurement names)
colnames(x_train) <- features[,2]
colnames(y_train) <- "activityID"
colnames(subject_train) <- "subjectID"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityID"
colnames(subject_test) <- "subjectID"
colnames(activitylabels) <- c("activityID","activityType")
#Merge data in a data set
m_train <- cbind(y_train,subject_train,x_train)
m_test <- cbind(y_test,subject_test,x_test)
alltogether <- rbind(m_train,m_test)
#3.Extracts only the measurements on the mean and standard deviation for each measurement.
#read all the column names
colNames <- colnames(alltogether)
#use the regular expressions to define mean and std,and make subset
# NOTE(review): in "mean.." / "std.." the dots are regex wildcards, so this
# also matches names like "meanFreq"; escape the parens ("mean\\(\\)") if only
# mean()/std() variables are intended.
mean_and_std <- (grepl("activityID",colNames)|grepl("subjectID",colNames)|grepl("mean..",colNames)|grepl("std..",colNames))
onlyforMeanAndStd <- alltogether[,mean_and_std==TRUE]
#4.Uses descriptive activity names to name the activities in the data set
changeactivitynames<- merge(onlyforMeanAndStd,activitylabels,by="activityID",all.x=TRUE)
#5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
secTidySet <- aggregate(.~subjectID + activityID,changeactivitynames,mean)
secTidySet <- secTidySet[order(secTidySet$subjectID,secTidySet$activityID),]
write.table(secTidySet,"secTidySet.txt",row.name = FALSE)
|
dec1d68ac501928cb1fdb547fabc4d80197ead3b
|
e060a2fdce1ff7a7c8627fa5d8e18b80f3929de8
|
/Code/Plots/PIED_plots_resid.R
|
f7d113a35c6934f7a47f03fd840ace6b9479c44c
|
[] |
no_license
|
emilylschultz/DemographicRangeModel
|
0836a76ec86a643b5014d04d954b3ce0d06c7aa6
|
ccdb717c36b21f18a968a9a5ffed9577f9d31df0
|
refs/heads/master
| 2021-09-26T13:11:12.991896
| 2021-09-16T18:50:35
| 2021-09-16T18:50:35
| 164,713,091
| 2
| 1
| null | 2019-01-08T18:54:05
| 2019-01-08T18:54:05
| null |
UTF-8
|
R
| false
| false
| 73,029
|
r
|
PIED_plots_resid.R
|
library(raster)
library(rgdal)
library(rgeos)
library(dplyr)
library(glmmTMB)
library(coefplot)
library(ggplot2)
library(ggeffects)
library(cowplot)
library(effects)
library(DHARMa)
library(lme4)
library(grid)
library(MuMIn)
library(car)
load("./Code/IPM/GrRescaling.Rdata")
load("./Code/IPM/SurvRescaling.Rdata")
load("./Code/IPM/RecruitRescaling.Rdata")
load("./Code/IPM/recrstats.rda")
# Data prep
# Growth data: FIA remeasured trees; only survivors contribute growth records.
grdata <- read.csv("./Processed/Survival/SurvivalData.csv", header = T, stringsAsFactors = F)
# Only keep trees that didn't die
grdata <- subset(grdata, STATUSCD == 1) #18204
# Create increment columns (annualized by dividing by the census interval)
# note that growth increments need to be moved to the positive realm (by adding a constant)
# IF log transform is used
grdata$AGB_INCR <- grdata$DRYBIO_AG_DIFF / grdata$CENSUS_INTERVAL
grdata$DIA_INCR <- grdata$DIA_DIFF / grdata$CENSUS_INTERVAL
grdata$BA_INCR <- grdata$BA_DIFF / grdata$CENSUS_INTERVAL
# Standardize all covariates except ids, years, intervals, and responses
grdata.scaled <- grdata %>% mutate_at(scale, .vars = vars(-CN, -PREV_TRE_CN, -PLT_CN, -PREV_PLT_CN, -CONDID,
                                                          -STATUSCD, -MEASYEAR, -PREV_MEASYEAR,
                                                          -CENSUS_INTERVAL,
                                                          -AGB_INCR, -DIA_INCR, -BA_INCR))
# Survival data: same file, but dead trees are retained
survData <- read.csv("./Processed/Survival/SurvivalData.csv", header = T, stringsAsFactors = F)
# Create increment columns
# not needed for survival/mort analysis
survData$AGB_INCR <- survData$DRYBIO_AG_DIFF / survData$CENSUS_INTERVAL
survData$DIA_INCR <- survData$DIA_DIFF / survData$CENSUS_INTERVAL
survData$BA_INCR <- survData$BA_DIFF / survData$CENSUS_INTERVAL
survData$log.size <- log(survData$PREVDIA)
survData$log.BALIVE <- log(survData$BALIVE)
# Recode status (STATUSCD 1 = alive, 2 = dead); surv and mort are complements
survData$surv <- ifelse(survData$STATUSCD == 2, 0, 1)
survData$mort <- ifelse(survData$STATUSCD == 1, 0, 1)
# remove cases where BALIVE at time 1 = zero (should be impossible)
# survData <- subset(survData, log.BALIVE > 0)
survData.2 <- subset(survData, BALIVE > 0) # goes from 20329 to 20161
# remove conditions where fire or harvest occurred
survData.3 <- survData[!(survData$DSTRBCD1 %in% c(30, 31, 32, 80)), ] # goes from 20329 to 19867
# standardize covariates
survData.scaled <- survData %>% mutate_at(scale, .vars = vars(-CN, -PREV_TRE_CN, -PLT_CN, -PREV_PLT_CN, -CONDID,
                                                              -STATUSCD, -MEASYEAR, -PREV_MEASYEAR,
                                                              -CENSUS_INTERVAL,
                                                              -AGENTCD, -DSTRBCD1, -DSTRBCD2, -DSTRBCD3,
                                                              -AGB_INCR, -DIA_INCR, -BA_INCR,
                                                              -surv, -mort))
survData2.scaled <- survData.2 %>% mutate_at(scale, .vars = vars(-CN, -PREV_TRE_CN, -PLT_CN, -PREV_PLT_CN, -CONDID,
                                                                 -STATUSCD, -MEASYEAR, -PREV_MEASYEAR,
                                                                 -CENSUS_INTERVAL,
                                                                 -AGENTCD, -DSTRBCD1, -DSTRBCD2, -DSTRBCD3,
                                                                 -AGB_INCR, -DIA_INCR, -BA_INCR,
                                                                 -surv, -mort))
survData3.scaled <- survData.3 %>% mutate_at(scale, .vars = vars(-CN, -PREV_TRE_CN, -PLT_CN, -PREV_PLT_CN, -CONDID,
                                                                 -STATUSCD, -MEASYEAR, -PREV_MEASYEAR,
                                                                 -CENSUS_INTERVAL,
                                                                 -AGENTCD, -DSTRBCD1, -DSTRBCD2, -DSTRBCD3,
                                                                 -AGB_INCR, -DIA_INCR, -BA_INCR,
                                                                 -surv, -mort))
# Keep only trees whose plot survives the fire/harvest filter above
# (the !is.na(match(...)) construct is equivalent to %in%)
survData<-survData[which(!is.na(match(survData$PLT_CN,survData3.scaled$PLT_CN))),]
# Recruitment data: plot-level counts; require at least one PIED adult
rdata <- read.csv("./Processed/Recruitment/RecruitData.csv", header = T, stringsAsFactors = F)
rdata <- subset(rdata, PIEDadults1 > 0)
rdata$BA.all <- rdata$BA.PIED + rdata$BA.notPIED
rdata.scaled <- rdata %>% mutate_at(scale, .vars = vars(-plot, -lat, -lon, -elev, -PApied,
                                                        -state, -county, -plotID, -CONDID,
                                                        -measYear, -plotPrev, -PREV_MEASYEAR,
                                                        -CENSUS_INTERVAL, -recruits1, -recruits12,
                                                        -AGB_intra, -BA.PIED, -PIEDadults1,
                                                        -PIEDadults4, -PIEDadults8, -cumDIA.PIED))
# PRISM climate normals and predicted live basal area rasters (used for the
# prediction ranges of the effect plots below)
ppt_yr_raster <- raster("./ClimateData/PRISM/Normals/PPT_year.tif")
t_yr_raster <- raster("./ClimateData/PRISM/Normals/T_year.tif")
ba_raster <- raster("./BA/balive_RF.tif")
#Growth
# NOTE(review): drops a handful of specific plots by PLT_CN -- presumably
# known problem/outlier plots; confirm the selection criterion.
grdata<-grdata[-which(grdata$PLT_CN==40383858010690|grdata$PLT_CN==40423861010690|
                        grdata$PLT_CN==40423872010690|grdata$PLT_CN==40424710010690|
                        grdata$PLT_CN==186092190020004|
                        grdata$PLT_CN==188784045020004|grdata$PLT_CN==188784634020004),]
g_fun <- function(dia, ba, ppt, t, plot, model, clampba = F, clampt = F) {
  # Plot-level growth prediction from `model` for new covariate values,
  # applying the centering/scaling used at fit time (gr.scaling, loaded from
  # GrRescaling.Rdata). clampba/clampt cap BALIVE at 190 and temperature at
  # 10.6 -- presumably the limits of the fitted data, to avoid extrapolation.
  newdat <- data.frame(
    PREVDIA = dia,
    BALIVE = if (isTRUE(clampba)) pmin(ba, 190) else ba,
    PPT_yr_norm = ppt,
    T_yr_norm = if (isTRUE(clampt)) pmin(t, 10.6) else t
  )
  vars <- names(newdat)
  # Name-indexing orders the center/scale vectors to match the columns,
  # since scale() matches them positionally.
  newdat.std <- data.frame(scale(newdat,
                                 center = gr.scaling$center[vars],
                                 scale = gr.scaling$scale[vars]))
  newdat.std$PLT_CN <- plot  # plot id for the random effect
  predict(model, newdata = newdat.std)
}
g_fun_mean <- function(dia, ba, ppt, t, model, clampba = F, clampt = F) {
  # Population-level growth prediction: same as g_fun() but without a plot
  # identifier and with re.form = NA, so the plot random effect is dropped
  # (fixed effects only).
  newdat <- data.frame(
    PREVDIA = dia,
    BALIVE = if (isTRUE(clampba)) pmin(ba, 190) else ba,
    PPT_yr_norm = ppt,
    T_yr_norm = if (isTRUE(clampt)) pmin(t, 10.6) else t
  )
  vars <- names(newdat)
  newdat.std <- data.frame(scale(newdat,
                                 center = gr.scaling$center[vars],
                                 scale = gr.scaling$scale[vars]))
  predict(model, newdata = newdat.std, re.form = NA)
}
# Reference covariate values (data means) used when holding predictors fixed
means<-c(mean(grdata$PREVDIA,na.rm=T),mean(grdata$BALIVE,na.rm=T),
         mean(grdata$PPT_yr_norm,na.rm=T),mean(grdata$T_yr_norm,na.rm=T))
# Prediction grids spanning the raster/data range of each covariate
seq<-data.frame(dia=seq(0.5*min(grdata$PREVDIA),1.2*max(grdata$PREVDIA),length=50),
                ba=seq(cellStats(ba_raster,stat="min",na.rm=T),
                       max(grdata$BALIVE,na.rm=T),length=50),
                ppt=seq(cellStats(ppt_yr_raster,stat="min",na.rm=T),
                        cellStats(ppt_yr_raster,stat="max",na.rm=T),length=50),
                t=seq(cellStats(t_yr_raster,stat="min",na.rm=T),
                      cellStats(t_yr_raster,stat="max",na.rm=T),length=50))
#Calculate residuals
# Observed annual diameter increment minus plot-level model prediction, for
# each candidate model. Suffixes: _c = gmodel.clim, _cc = gmodel.clim.comp,
# _i = gmodel.int (see GrRescaling.Rdata for what each model contains).
grdata$resid_c<-grdata$DIA_INCR-g_fun(grdata$PREVDIA,grdata$BALIVE,grdata$PPT_yr_norm,grdata$T_yr_norm,
                                      grdata$PLT_CN,gmodel.clim)
grdata$resid_cc<-grdata$DIA_INCR-g_fun(grdata$PREVDIA,grdata$BALIVE,grdata$PPT_yr_norm,grdata$T_yr_norm,
                                       grdata$PLT_CN,gmodel.clim.comp)
grdata$resid_i<-grdata$DIA_INCR-g_fun(grdata$PREVDIA,grdata$BALIVE,grdata$PPT_yr_norm,grdata$T_yr_norm,
                                      grdata$PLT_CN,gmodel.int)
# Bin each covariate into ncuts intervals; a "binned growth" value is the mean
# residual within the bin PLUS the fixed-effect prediction at the bin mean
# (other covariates held at their means) -- i.e. partial-residual bin means.
ncuts=30
chopsize_dia<-cut(grdata$PREVDIA,ncuts)
chopsize_ba<-cut(grdata$BALIVE,ncuts)
chopsize_PPT<-cut(grdata$PPT_yr_norm,ncuts)
chopsize_T<-cut(grdata$T_yr_norm,ncuts)
count_binned_dia<-as.vector(sapply(split(grdata$resid_c,chopsize_dia),length))
dia_binned<-as.vector(sapply(split(grdata$PREVDIA,chopsize_dia),mean,na.rm=T))
grow_binned_dia_c<-as.vector(sapply(split(grdata$resid_c,chopsize_dia),mean,na.rm=T))+
  as.vector(g_fun_mean(dia_binned,means[2],means[3],means[4],gmodel.clim))
grow_binned_dia_cc<-as.vector(sapply(split(grdata$resid_cc,chopsize_dia),mean,na.rm=T))+
  as.vector(g_fun_mean(dia_binned,means[2],means[3],means[4],gmodel.clim.comp))
grow_binned_dia_i<-as.vector(sapply(split(grdata$resid_i,chopsize_dia),mean,na.rm=T))+
  as.vector(g_fun_mean(dia_binned,means[2],means[3],means[4],gmodel.int))
count_binned_ba<-as.vector(sapply(split(grdata$resid_c,chopsize_ba),length))
ba_binned<-as.vector(sapply(split(grdata$BALIVE,chopsize_ba),mean,na.rm=T))
grow_binned_ba_c<-as.vector(sapply(split(grdata$resid_c,chopsize_ba),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],ba_binned,means[3],means[4],gmodel.clim))
grow_binned_ba_cc<-as.vector(sapply(split(grdata$resid_cc,chopsize_ba),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],ba_binned,means[3],means[4],gmodel.clim.comp))
grow_binned_ba_i<-as.vector(sapply(split(grdata$resid_i,chopsize_ba),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],ba_binned,means[3],means[4],gmodel.int))
count_binned_PPT<-as.vector(sapply(split(grdata$resid_c,chopsize_PPT),length))
PPT_binned<-as.vector(sapply(split(grdata$PPT_yr_norm,chopsize_PPT),mean,na.rm=T))
grow_binned_PPT_c<-as.vector(sapply(split(grdata$resid_c,chopsize_PPT),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],means[2],PPT_binned,means[4],gmodel.clim))
grow_binned_PPT_cc<-as.vector(sapply(split(grdata$resid_cc,chopsize_PPT),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],means[2],PPT_binned,means[4],gmodel.clim.comp))
grow_binned_PPT_i<-as.vector(sapply(split(grdata$resid_i,chopsize_PPT),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],means[2],PPT_binned,means[4],gmodel.int))
count_binned_T<-as.vector(sapply(split(grdata$resid_c,chopsize_T),length))
T_binned<-as.vector(sapply(split(grdata$T_yr_norm,chopsize_T),mean,na.rm=T))
grow_binned_T_c<-as.vector(sapply(split(grdata$resid_c,chopsize_T),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],means[2],means[3],T_binned,gmodel.clim))
grow_binned_T_cc<-as.vector(sapply(split(grdata$resid_cc,chopsize_T),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],means[2],means[3],T_binned,gmodel.clim.comp))
grow_binned_T_i<-as.vector(sapply(split(grdata$resid_i,chopsize_T),mean,na.rm=T))+
  as.vector(g_fun_mean(means[1],means[2],means[3],T_binned,gmodel.int))
# Assemble all binned values (plus bin means and bin counts) for plotting
g_binned<-as.data.frame(cbind(grow_binned_dia_c,grow_binned_ba_c,grow_binned_PPT_c,grow_binned_T_c,
                              grow_binned_dia_cc,grow_binned_ba_cc,grow_binned_PPT_cc,grow_binned_T_cc,
                              grow_binned_dia_i,grow_binned_ba_i,grow_binned_PPT_i,grow_binned_T_i,
                              dia_binned,ba_binned,PPT_binned,T_binned,
                              count_binned_dia,count_binned_ba,count_binned_PPT,count_binned_T))
names(g_binned)<-c("grow_dia_c","grow_ba_c","grow_PPT_c","grow_T_c",
                   "grow_dia_cc","grow_ba_cc","grow_PPT_cc","grow_T_cc",
                   "grow_dia_i","grow_ba_i","grow_PPT_i","grow_T_i",
                   "PREVDIA","BALIVE","PPT","T",
                   "count_dia","count_ba","count_PPT","count_T")
# Fixed-effect prediction curves along each covariate grid, with (_c suffix)
# and without clamping of BALIVE/temperature
grplot_data_clim<-cbind(data.frame(dia_pred=g_fun_mean(seq$dia,means[2],means[3],means[4],gmodel.clim),
                                   ppt_pred=g_fun_mean(means[1],means[2],seq$ppt,means[4],gmodel.clim),
                                   t_pred=g_fun_mean(means[1],means[2],means[3],seq$t,gmodel.clim),
                                   dia_pred_c=g_fun_mean(seq$dia,means[2],means[3],means[4],gmodel.clim,
                                                         clampba=T,clampt=T),
                                   ppt_pred_c=g_fun_mean(means[1],means[2],seq$ppt,means[4],gmodel.clim,
                                                         clampba=T,clampt=T),
                                   t_pred_c=g_fun_mean(means[1],means[2],means[3],seq$t,gmodel.clim,
                                                       clampba=T,clampt=T)),seq)
grplot_data_climcomp<-cbind(data.frame(dia_pred=g_fun_mean(seq$dia,means[2],means[3],means[4],
                                                           gmodel.clim.comp),
                                       ba_pred=g_fun_mean(means[1],seq$ba,means[3],means[4],gmodel.clim.comp),
                                       ppt_pred=g_fun_mean(means[1],means[2],seq$ppt,means[4],gmodel.clim.comp),
                                       t_pred=g_fun_mean(means[1],means[2],means[3],seq$t,gmodel.clim.comp),
                                       dia_pred_c=g_fun_mean(seq$dia,means[2],means[3],means[4],gmodel.clim.comp,
                                                             clampba=T,clampt=F),
                                       ba_pred_c=g_fun_mean(means[1],seq$ba,means[3],means[4],gmodel.clim.comp,
                                                            clampba=T,clampt=F),
                                       ppt_pred_c=g_fun_mean(means[1],means[2],seq$ppt,means[4],gmodel.clim.comp,
                                                             clampba=T,clampt=F),
                                       t_pred_c=g_fun_mean(means[1],means[2],means[3],seq$t,gmodel.clim.comp,
                                                           clampba=T,clampt=F)),seq)
grplot_data_int<-cbind(data.frame(dia_pred=g_fun_mean(seq$dia,means[2],means[3],means[4],
                                                      gmodel.int),
                                  ba_pred=g_fun_mean(means[1],seq$ba,means[3],means[4],gmodel.int),
                                  ppt_pred=g_fun_mean(means[1],means[2],seq$ppt,means[4],gmodel.int),
                                  t_pred=g_fun_mean(means[1],means[2],means[3],seq$t,gmodel.int),
                                  dia_pred_c=g_fun_mean(seq$dia,means[2],means[3],means[4],gmodel.int,
                                                        clampba=T,clampt=F),
                                  ba_pred_c=g_fun_mean(means[1],seq$ba,means[3],means[4],gmodel.int,
                                                       clampba=T,clampt=F),
                                  ppt_pred_c=g_fun_mean(means[1],means[2],seq$ppt,means[4],gmodel.int,
                                                        clampba=T,clampt=F),
                                  t_pred_c=g_fun_mean(means[1],means[2],means[3],seq$t,gmodel.int,
                                                      clampba=T,clampt=F)),seq)
#Survival
s_fun <- function(dia, ba, ppt, t, ci, plot, model, clampt = F) {
  # Plot-level survival-model prediction on the response scale, using the
  # fit-time scaling in surv.scaling (SurvRescaling.Rdata). `ci` is the
  # census interval (passed unscaled); clampt caps temperature at 12.5 --
  # presumably the fitted-data maximum -- to avoid extrapolation.
  newdat <- data.frame(
    PREVDIA = dia,
    BALIVE = ba,
    PPT_yr_norm = ppt,
    T_yr_norm = if (isTRUE(clampt)) pmin(t, 12.5) else t
  )
  vars <- names(newdat)
  # Order center/scale vectors to the data-frame columns (scale() is positional)
  newdat.std <- data.frame(scale(newdat,
                                 center = surv.scaling$center[vars],
                                 scale = surv.scaling$scale[vars]))
  newdat.std$CENSUS_INTERVAL <- ci
  newdat.std$PLT_CN <- plot  # plot id for the random effect
  predict(model, newdata = newdat.std, type = "response")
}
s_fun_mean <- function(dia, ba, ppt, t, ci, model, clampt = F) {
  # Population-level survival-model prediction: same as s_fun() but without a
  # plot identifier and with re.form = NA (fixed effects only).
  newdat <- data.frame(
    PREVDIA = dia,
    BALIVE = ba,
    PPT_yr_norm = ppt,
    T_yr_norm = if (isTRUE(clampt)) pmin(t, 12.5) else t
  )
  vars <- names(newdat)
  newdat.std <- data.frame(scale(newdat,
                                 center = surv.scaling$center[vars],
                                 scale = surv.scaling$scale[vars]))
  newdat.std$CENSUS_INTERVAL <- ci
  predict(model, newdata = newdat.std, type = "response", re.form = NA)
}
# Reference covariate values (data means; element 5 = mean census interval)
means<-c(mean(survData$PREVDIA,na.rm=T),mean(survData$BALIVE,na.rm=T),
         mean(survData$PPT_yr_norm,na.rm=T),mean(survData$T_yr_norm,na.rm=T),
         mean(survData$CENSUS_INTERVAL))
# Prediction grids spanning the raster/data range of each covariate
seq<-data.frame(dia=seq(0.5*min(survData$PREVDIA),1.2*max(survData$PREVDIA),length=50),
                ba=seq(cellStats(ba_raster,stat="min",na.rm=T),
                       max(survData$BALIVE,na.rm=T),length=50),
                ppt=seq(cellStats(ppt_yr_raster,stat="min",na.rm=T),
                        cellStats(ppt_yr_raster,stat="max",na.rm=T),length=50),
                t=seq(cellStats(t_yr_raster,stat="min",na.rm=T),
                      cellStats(t_yr_raster,stat="max",na.rm=T),length=50))
#Calculate residuals
# Observed mortality (0/1) minus predicted probability, per candidate model.
# Suffixes: _c = clim, _cc = clim+comp, _ccf = clim+comp+fire, _i = int model.
survData$resid_c<-survData$mort-s_fun(survData$PREVDIA,survData$BALIVE,survData$PPT_yr_norm,
                                      survData$T_yr_norm,survData$CENSUS_INTERVAL,
                                      survData$PLT_CN,smodel.clim)
survData$resid_cc<-survData$mort-s_fun(survData$PREVDIA,survData$BALIVE,survData$PPT_yr_norm,
                                       survData$T_yr_norm,survData$CENSUS_INTERVAL,
                                       survData$PLT_CN,smodel.clim.comp)
survData$resid_ccf<-survData$mort-s_fun(survData$PREVDIA,survData$BALIVE,survData$PPT_yr_norm,
                                        survData$T_yr_norm,survData$CENSUS_INTERVAL,
                                        survData$PLT_CN,smodel.clim.comp.fire)
survData$resid_i<-survData$mort-s_fun(survData$PREVDIA,survData$BALIVE,survData$PPT_yr_norm,
                                      survData$T_yr_norm,survData$CENSUS_INTERVAL,
                                      survData$PLT_CN,smodel.int)
# Partial-residual bin means, as in the growth section: mean residual in the
# bin plus the fixed-effect prediction at the bin mean (others at their means)
ncuts=50
chopsize_dia<-cut(survData$PREVDIA,ncuts)
chopsize_ba<-cut(survData$BALIVE,ncuts)
chopsize_PPT<-cut(survData$PPT_yr_norm,ncuts)
chopsize_T<-cut(survData$T_yr_norm,ncuts)
dia_binned<-as.vector(sapply(split(survData$PREVDIA,chopsize_dia),mean,na.rm=T))
count_binned_dia<-as.vector(sapply(split(survData$mort,chopsize_dia),length))
surv_binned_dia_c<-as.vector(sapply(split(survData$resid_c,chopsize_dia),mean,na.rm=T))+
  as.vector(s_fun_mean(dia_binned,means[2],means[3],means[4],means[5],smodel.clim))
surv_binned_dia_cc<-as.vector(sapply(split(survData$resid_cc,chopsize_dia),mean,na.rm=T))+
  as.vector(s_fun_mean(dia_binned,means[2],means[3],means[4],means[5],smodel.clim.comp))
surv_binned_dia_ccf<-as.vector(sapply(split(survData$resid_ccf,chopsize_dia),mean,na.rm=T))+
  as.vector(s_fun_mean(dia_binned,means[2],means[3],means[4],means[5],smodel.clim.comp.fire))
surv_binned_dia_i<-as.vector(sapply(split(survData$resid_i,chopsize_dia),mean,na.rm=T))+
  as.vector(s_fun_mean(dia_binned,means[2],means[3],means[4],means[5],smodel.int))
ba_binned<-as.vector(sapply(split(survData$BALIVE,chopsize_ba),mean,na.rm=T))
count_binned_ba<-as.vector(sapply(split(survData$mort,chopsize_ba),length))
surv_binned_ba_c<-as.vector(sapply(split(survData$resid_c,chopsize_ba),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],ba_binned,means[3],means[4],means[5],smodel.clim))
surv_binned_ba_cc<-as.vector(sapply(split(survData$resid_cc,chopsize_ba),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],ba_binned,means[3],means[4],means[5],smodel.clim.comp))
surv_binned_ba_ccf<-as.vector(sapply(split(survData$resid_ccf,chopsize_ba),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],ba_binned,means[3],means[4],means[5],smodel.clim.comp.fire))
surv_binned_ba_i<-as.vector(sapply(split(survData$resid_i,chopsize_ba),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],ba_binned,means[3],means[4],means[5],smodel.int))
PPT_binned<-as.vector(sapply(split(survData$PPT_yr_norm,chopsize_PPT),mean,na.rm=T))
count_binned_PPT<-as.vector(sapply(split(survData$mort,chopsize_PPT),length))
surv_binned_PPT_c<-as.vector(sapply(split(survData$resid_c,chopsize_PPT),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],means[2],PPT_binned,means[4],means[5],smodel.clim))
surv_binned_PPT_cc<-as.vector(sapply(split(survData$resid_cc,chopsize_PPT),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],means[2],PPT_binned,means[4],means[5],smodel.clim.comp))
surv_binned_PPT_ccf<-as.vector(sapply(split(survData$resid_ccf,chopsize_PPT),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],means[2],PPT_binned,means[4],means[5],smodel.clim.comp.fire))
surv_binned_PPT_i<-as.vector(sapply(split(survData$resid_i,chopsize_PPT),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],means[2],PPT_binned,means[4],means[5],smodel.int))
T_binned<-as.vector(sapply(split(survData$T_yr_norm,chopsize_T),mean,na.rm=T))
count_binned_T<-as.vector(sapply(split(survData$mort,chopsize_T),length))
surv_binned_T_c<-as.vector(sapply(split(survData$resid_c,chopsize_T),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],means[2],means[3],T_binned,means[5],smodel.clim))
surv_binned_T_cc<-as.vector(sapply(split(survData$resid_cc,chopsize_T),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],means[2],means[3],T_binned,means[5],smodel.clim.comp))
surv_binned_T_ccf<-as.vector(sapply(split(survData$resid_ccf,chopsize_T),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],means[2],means[3],T_binned,means[5],smodel.clim.comp.fire))
surv_binned_T_i<-as.vector(sapply(split(survData$resid_i,chopsize_T),mean,na.rm=T))+
  as.vector(s_fun_mean(means[1],means[2],means[3],T_binned,means[5],smodel.int))
# Assemble binned values for plotting. The "mort_*" names reflect that the
# residuals above are built from `mort`, so these are mortality probabilities.
s_binned<-as.data.frame(cbind(surv_binned_dia_c,surv_binned_ba_c,surv_binned_PPT_c,surv_binned_T_c,
                              surv_binned_dia_cc,surv_binned_ba_cc,surv_binned_PPT_cc,surv_binned_T_cc,
                              surv_binned_dia_ccf,surv_binned_ba_ccf,surv_binned_PPT_ccf,surv_binned_T_ccf,
                              surv_binned_dia_i,surv_binned_ba_i,surv_binned_PPT_i,surv_binned_T_i,
                              dia_binned,ba_binned,PPT_binned,T_binned,
                              count_binned_dia,count_binned_ba,count_binned_PPT,count_binned_T))
names(s_binned)<-c("mort_dia_c","mort_ba_c","mort_PPT_c","mort_T_c",
                   "mort_dia_cc","mort_ba_cc","mort_PPT_cc","mort_T_cc",
                   "mort_dia_ccf","mort_ba_ccf","mort_PPT_ccf","mort_T_ccf",
                   "mort_dia_i","mort_ba_i","mort_PPT_i","mort_T_i",
                   "PREVDIA","BALIVE","PPT","T",
                   "count_dia","count_ba","count_PPT","count_T")
# Fixed-effect prediction curves along each covariate grid, with (_c suffix)
# and without clamping of temperature
splot_data_clim<-cbind(data.frame(dia_pred=s_fun_mean(seq$dia,means[2],means[3],means[4],means[5],
                                                      smodel.clim),
                                  ppt_pred=s_fun_mean(means[1],means[2],seq$ppt,means[4],means[5],smodel.clim),
                                  t_pred=s_fun_mean(means[1],means[2],means[3],seq$t,means[5],smodel.clim),
                                  dia_pred_c=s_fun_mean(seq$dia,means[2],means[3],means[4],means[5],
                                                        smodel.clim,clampt=T),
                                  ppt_pred_c=s_fun_mean(means[1],means[2],seq$ppt,means[4],means[5],
                                                        smodel.clim,clampt=T),
                                  t_pred_c=s_fun_mean(means[1],means[2],means[3],seq$t,means[5],smodel.clim,
                                                      clampt=T)),seq)
splot_data_climcomp<-cbind(data.frame(dia_pred=s_fun_mean(seq$dia,means[2],means[3],means[4],means[5],
                                                          smodel.clim.comp),
                                      ba_pred=s_fun_mean(means[1],seq$ba,means[3],means[4],means[5],
                                                         smodel.clim.comp),
                                      ppt_pred=s_fun_mean(means[1],means[2],seq$ppt,means[4],means[5],
                                                          smodel.clim.comp),
                                      t_pred=s_fun_mean(means[1],means[2],means[3],seq$t,means[5],
                                                        smodel.clim.comp),
                                      dia_pred_c=s_fun_mean(seq$dia,means[2],means[3],means[4],means[5],
                                                            smodel.clim.comp,clampt=T),
                                      ba_pred_c=s_fun_mean(means[1],seq$ba,means[3],means[4],means[5],
                                                           smodel.clim.comp,clampt=T),
                                      ppt_pred_c=s_fun_mean(means[1],means[2],seq$ppt,means[4],means[5],
                                                            smodel.clim.comp,clampt=T),
                                      t_pred_c=s_fun_mean(means[1],means[2],means[3],seq$t,means[5],
                                                          smodel.clim.comp,clampt=T)),seq)
splot_data_fire<-cbind(data.frame(dia_pred=s_fun_mean(seq$dia,means[2],means[3],means[4],means[5],
                                                      smodel.clim.comp.fire),
                                  ba_pred=s_fun_mean(means[1],seq$ba,means[3],means[4],means[5],
                                                     smodel.clim.comp.fire),
                                  ppt_pred=s_fun_mean(means[1],means[2],seq$ppt,means[4],means[5],
                                                      smodel.clim.comp.fire),
                                  t_pred=s_fun_mean(means[1],means[2],means[3],seq$t,means[5],
                                                    smodel.clim.comp.fire),
                                  dia_pred_c=s_fun_mean(seq$dia,means[2],means[3],means[4],means[5],
                                                        smodel.clim.comp.fire,clampt=T),
                                  ba_pred_c=s_fun_mean(means[1],seq$ba,means[3],means[4],means[5],
                                                       smodel.clim.comp.fire,clampt=T),
                                  ppt_pred_c=s_fun_mean(means[1],means[2],seq$ppt,means[4],means[5],
                                                        smodel.clim.comp.fire,clampt=T),
                                  t_pred_c=s_fun_mean(means[1],means[2],means[3],seq$t,means[5],
                                                      smodel.clim.comp.fire,clampt=T)),seq)
splot_data_int<-cbind(data.frame(dia_pred=s_fun_mean(seq$dia,means[2],means[3],means[4],means[5],
                                                     smodel.int),
                                 ba_pred=s_fun_mean(means[1],seq$ba,means[3],means[4],means[5],smodel.int),
                                 ppt_pred=s_fun_mean(means[1],means[2],seq$ppt,means[4],means[5],smodel.int),
                                 t_pred=s_fun_mean(means[1],means[2],means[3],seq$t,means[5],smodel.int),
                                 dia_pred_c=s_fun_mean(seq$dia,means[2],means[3],means[4],means[5],smodel.int,
                                                       clampt=T),
                                 ba_pred_c=s_fun_mean(means[1],seq$ba,means[3],means[4],means[5],smodel.int,
                                                      clampt=T),
                                 ppt_pred_c=s_fun_mean(means[1],means[2],seq$ppt,means[4],means[5],smodel.int,
                                                       clampt=T),
                                 t_pred_c=s_fun_mean(means[1],means[2],means[3],seq$t,means[5],smodel.int,
                                                     clampt=T)),seq)
#Recruitment
r_fun <- function(ba, ppt, t, pa, ci, model, clampba = F) {
  # Recruitment-model prediction on the response scale, using the fit-time
  # scaling in r.scaling (RecruitRescaling.Rdata). `pa` = PIEDadults1 count,
  # `ci` = census interval (both passed unscaled). clampba restricts BALIVE
  # to [93, 204] -- presumably the fitted-data range -- to avoid extrapolation.
  # (Local frame renamed so it no longer shadows the global `rdata`.)
  newdat <- data.frame(
    BALIVE = if (isTRUE(clampba)) pmin(pmax(ba, 93), 204) else ba,
    PPT_yr_norm = ppt,
    T_yr_norm = t
  )
  vars <- names(newdat)
  newdat.std <- data.frame(scale(newdat,
                                 center = r.scaling$center[vars],
                                 scale = r.scaling$scale[vars]))
  newdat.std$CENSUS_INTERVAL <- ci
  newdat.std$PIEDadults1 <- pa
  predict(model, newdata = newdat.std, type = "response")
}
means<-c(mean(rdata$BALIVE,na.rm=T),
mean(rdata$PPT_yr_norm,na.rm=T),mean(rdata$T_yr_norm,na.rm=T),
mean(rdata$PIEDadults1),mean(rdata$CENSUS_INTERVAL))
seq<-data.frame(ba=seq(cellStats(ba_raster,stat="min",na.rm=T),
max(rdata$BALIVE,na.rm=T),length=50),
ppt=seq(cellStats(ppt_yr_raster,stat="min",na.rm=T),
cellStats(ppt_yr_raster,stat="max",na.rm=T),length=50),
t=seq(cellStats(t_yr_raster,stat="min",na.rm=T),
cellStats(t_yr_raster,stat="max",na.rm=T),length=50))
#Calculate residuals
# Raw residuals (observed recruits minus fitted) for each recruitment model:
# _c = climate-only, _cc = climate+competition, _i = interactions.
rdata$resid_c<-rdata$recruits1-r_fun(rdata$BALIVE,rdata$PPT_yr_norm,rdata$T_yr_norm,
                                     rdata$PIEDadults1,rdata$CENSUS_INTERVAL,rmodel.clim)
rdata$resid_cc<-rdata$recruits1-r_fun(rdata$BALIVE,rdata$PPT_yr_norm,rdata$T_yr_norm,
                                      rdata$PIEDadults1,rdata$CENSUS_INTERVAL,rmodel.clim.comp)
rdata$resid_i<-rdata$recruits1-r_fun(rdata$BALIVE,rdata$PPT_yr_norm,rdata$T_yr_norm,
                                     rdata$PIEDadults1,rdata$CENSUS_INTERVAL,rmodel.int)
# Bin each covariate into ncuts equal-width classes for partial-residual plots.
# (style note: `=` used for assignment here, `<-` elsewhere in the file)
ncuts=30
chopsize_ba<-cut(rdata$BALIVE,ncuts)
chopsize_PPT<-cut(rdata$PPT_yr_norm,ncuts)
chopsize_T<-cut(rdata$T_yr_norm,ncuts)
# Per-BA-bin: mean covariate value, observation count, and mean residual plus
# the model prediction at the bin's BA (other covariates at their means) --
# i.e. a binned partial-residual recruitment estimate per model.
ba_binned<-as.vector(sapply(split(rdata$BALIVE,chopsize_ba),mean,na.rm=T))
count_binned_ba<-as.vector(sapply(split(rdata$recruits1,chopsize_ba),length))
recr_binned_ba_c<-as.vector(sapply(split(rdata$resid_c,chopsize_ba),mean,na.rm=T))+
  as.vector(r_fun(ba_binned,means[2],means[3],means[4],means[5],rmodel.clim))
recr_binned_ba_cc<-as.vector(sapply(split(rdata$resid_cc,chopsize_ba),mean,na.rm=T))+
  as.vector(r_fun(ba_binned,means[2],means[3],means[4],means[5],rmodel.clim.comp))
recr_binned_ba_i<-as.vector(sapply(split(rdata$resid_i,chopsize_ba),mean,na.rm=T))+
  as.vector(r_fun(ba_binned,means[2],means[3],means[4],means[5],rmodel.int))
# Per-precipitation-bin partial-residual recruitment: mean residual per bin
# plus the model prediction at the bin's PPT (other covariates at their means),
# for each model variant (_c climate, _cc climate+competition, _i interactions).
PPT_binned<-as.vector(sapply(split(rdata$PPT_yr_norm,chopsize_PPT),mean,na.rm=T))
count_binned_PPT<-as.vector(sapply(split(rdata$recruits1,chopsize_PPT),length))
recr_binned_PPT_c<-as.vector(sapply(split(rdata$resid_c,chopsize_PPT),mean,na.rm=T))+
  as.vector(r_fun(means[1],PPT_binned,means[3],means[4],means[5],rmodel.clim))
recr_binned_PPT_cc<-as.vector(sapply(split(rdata$resid_cc,chopsize_PPT),mean,na.rm=T))+
  as.vector(r_fun(means[1],PPT_binned,means[3],means[4],means[5],rmodel.clim.comp))
recr_binned_PPT_i<-as.vector(sapply(split(rdata$resid_i,chopsize_PPT),mean,na.rm=T))+
  as.vector(r_fun(means[1],PPT_binned,means[3],means[4],means[5],rmodel.int))
# NOTE(review): removed a dangling `as.vector(r_fun(..., rmodel.int.lin))` line
# whose result was discarded; `rmodel.int.lin` is not defined or used anywhere
# else in this script and the stray call would error if that object is absent.
# Per-temperature-bin partial-residual recruitment, same construction as the
# BA and PPT bins above, for each model variant.
T_binned<-as.vector(sapply(split(rdata$T_yr_norm,chopsize_T),mean,na.rm=T))
count_binned_T<-as.vector(sapply(split(rdata$recruits1,chopsize_T),length))
recr_binned_T_c<-as.vector(sapply(split(rdata$resid_c,chopsize_T),mean,na.rm=T))+
  as.vector(r_fun(means[1],means[2],T_binned,means[4],means[5],rmodel.clim))
recr_binned_T_cc<-as.vector(sapply(split(rdata$resid_cc,chopsize_T),mean,na.rm=T))+
  as.vector(r_fun(means[1],means[2],T_binned,means[4],means[5],rmodel.clim.comp))
recr_binned_T_i<-as.vector(sapply(split(rdata$resid_i,chopsize_T),mean,na.rm=T))+
  as.vector(r_fun(means[1],means[2],T_binned,means[4],means[5],rmodel.int))
# Assemble all binned recruitment estimates, bin midpoints, and counts into one
# data frame for plotting; columns renamed to short model/variable labels.
r_binned<-as.data.frame(cbind(recr_binned_ba_c,recr_binned_PPT_c,recr_binned_T_c,
                              recr_binned_ba_cc,recr_binned_PPT_cc,recr_binned_T_cc,
                              recr_binned_ba_i,recr_binned_PPT_i,recr_binned_T_i,
                              ba_binned,PPT_binned,T_binned,
                              count_binned_ba,count_binned_PPT,count_binned_T))
names(r_binned)<-c("recr_ba_c","recr_PPT_c","recr_T_c",
                   "recr_ba_cc","recr_PPT_cc","recr_T_cc",
                   "recr_ba_i","recr_PPT_i","recr_T_i",
                   "BALIVE","PPT","T","count_ba","count_PPT","count_T")
# Predicted recruitment curves along each covariate grid (others at means) for
# the three recruitment models; *_c columns use clampba=T (BA clamped to the
# well-sampled range inside r_fun).
rplot_data_clim<-cbind(data.frame(ppt_pred=r_fun(means[1],seq$ppt,means[3],means[4],means[5],
                                                 rmodel.clim),
                  t_pred=r_fun(means[1],means[2],seq$t,means[4],means[5],rmodel.clim),
                  ppt_pred_c=r_fun(means[1],seq$ppt,means[3],means[4],means[5],
                                   rmodel.clim,clampba=T),
                  t_pred_c=r_fun(means[1],means[2],seq$t,means[4],means[5],rmodel.clim,
                                 clampba=T)),seq)
rplot_data_climcomp<-cbind(data.frame(ba_pred=r_fun(seq$ba,means[2],means[3],means[4],means[5],
                                                    rmodel.clim.comp),
                      ppt_pred=r_fun(means[1],seq$ppt,means[3],means[4],means[5],
                                     rmodel.clim.comp),
                      t_pred=r_fun(means[1],means[2],seq$t,means[4],means[5],
                                   rmodel.clim.comp),
                      ba_pred_c=r_fun(seq$ba,means[2],means[3],means[4],means[5],
                                      rmodel.clim.comp,clampba=T),
                      ppt_pred_c=r_fun(means[1],seq$ppt,means[3],means[4],means[5],
                                       rmodel.clim.comp,clampba=T),
                      t_pred_c=r_fun(means[1],means[2],seq$t,means[4],means[5],
                                     rmodel.clim.comp,clampba=T)),seq)
rplot_data_int<-cbind(data.frame(ba_pred=r_fun(seq$ba,means[2],means[3],means[4],means[5],
                                               rmodel.int),
                 ppt_pred=r_fun(means[1],seq$ppt,means[3],means[4],means[5],rmodel.int),
                 t_pred=r_fun(means[1],means[2],seq$t,means[4],means[5],rmodel.int),
                 ba_pred_c=r_fun(seq$ba,means[2],means[3],means[4],means[5],
                                 rmodel.int,clampba=T),
                 ppt_pred_c=r_fun(means[1],seq$ppt,means[3],means[4],means[5],rmodel.int,
                                  clampba=T),
                 t_pred_c=r_fun(means[1],means[2],seq$t,means[4],means[5],rmodel.int,
                                clampba=T)),seq)
# Extract the legend ("guide-box") grob from a ggplot so it can be drawn
# separately (e.g. as a shared legend in a multi-panel layout).
g_legend <- function(a.gplot) {
  built <- ggplot_gtable(ggplot_build(a.gplot))
  grob_names <- vapply(built$grobs, function(g) g$name, character(1))
  idx <- which(grob_names == "guide-box")
  built$grobs[[idx]]
}
# Load precomputed lambda effects and FIA survival data.
load("./Output/lambda_effects.rda")
FIA <- read.csv("./Processed/Survival/SurvivalData.csv", header = T, stringsAsFactors = F)
# when running IPM without trees killed by fire, technically should filter out those trees
# prolly makes ~0 difference
# Drop plots with fire/harvest disturbance codes (30-32 fire, 80 human).
FIA <- FIA[!(FIA$DSTRBCD1 %in% c(30, 31, 32, 80)), ]
FIA_lambda<-read.csv("./Output/FIA_lambda.csv")
# IPM lambda rasters per model variant: _c climate, _ccl climate clamped,
# _cc climate+competition, _ccf climate+competition+fire, _i interactions.
lambda_c<-raster("./Output/tifs/PIED.clim_lambda.tif")
lambda_ccl<-raster("./Output/tifs/PIED.climclamp_lambda.tif")
lambda_cc<-raster("./Output/tifs/PIED.climcomp_lambda.tif")
lambda_ccf<-raster("./Output/tifs/PIED.climcompfire_lambda.tif")
lambda_i<-raster("./Output/tifs/PIED.int_lambda.tif")
# Shared color-scale limits across all lambda maps.
lambda_min<-min(c(minValue(lambda_c),minValue(lambda_ccl),minValue(lambda_cc),minValue(lambda_ccf)
                  ,minValue(lambda_i)))
lambda_max<-max(c(maxValue(lambda_c),maxValue(lambda_ccl),maxValue(lambda_cc),maxValue(lambda_ccf),
                  maxValue(lambda_i)))
# Growth rasters and shared limits, same model-variant suffixes.
grow_c<-raster("./Output/tifs/PIED.clim_growth.tif")
grow_ccl<-raster("./Output/tifs/PIED.climclamp_growth.tif")
grow_cc<-raster("./Output/tifs/PIED.climcomp_growth.tif")
grow_ccf<-raster("./Output/tifs/PIED.climcompfire_growth.tif")
grow_i<-raster("./Output/tifs/PIED.int_growth.tif")
grow_min<-min(c(minValue(grow_c),minValue(grow_ccl),minValue(grow_cc),minValue(grow_ccf)
                  ,minValue(grow_i)))
grow_max<-max(c(maxValue(grow_c),maxValue(grow_ccl),maxValue(grow_cc),maxValue(grow_ccf),
                  maxValue(grow_i)))
# Survival rasters and shared limits.
surv_c<-raster("./Output/tifs/PIED.clim_survival.tif")
surv_ccl<-raster("./Output/tifs/PIED.climclamp_survival.tif")
surv_cc<-raster("./Output/tifs/PIED.climcomp_survival.tif")
surv_ccf<-raster("./Output/tifs/PIED.climcompfire_survival.tif")
surv_i<-raster("./Output/tifs/PIED.int_survival.tif")
surv_min<-min(c(minValue(surv_c),minValue(surv_ccl),minValue(surv_cc),minValue(surv_ccf)
                  ,minValue(surv_i)))
surv_max<-max(c(maxValue(surv_c),maxValue(surv_ccl),maxValue(surv_cc),maxValue(surv_ccf),
                  maxValue(surv_i)))
# Reproduction rasters and shared limits.
repr_c<-raster("./Output/tifs/PIED.clim_reproduction.tif")
repr_ccl<-raster("./Output/tifs/PIED.climclamp_reproduction.tif")
repr_cc<-raster("./Output/tifs/PIED.climcomp_reproduction.tif")
repr_ccf<-raster("./Output/tifs/PIED.climcompfire_reproduction.tif")
repr_i<-raster("./Output/tifs/PIED.int_reproduction.tif")
repr_min<-min(c(minValue(repr_c),minValue(repr_ccl),minValue(repr_cc),minValue(repr_ccf)
                  ,minValue(repr_i)))
repr_max<-max(c(maxValue(repr_c),maxValue(repr_ccl),maxValue(repr_cc),maxValue(repr_ccf),
                  maxValue(repr_i)))
# Extrapolation mask raster (areas outside the fitted covariate range).
extrap<-raster("./Output/tifs/extrap.tif")
FIA_pa <- read.csv("./Processed/Recruitment/RecruitData.csv", header = T, stringsAsFactors = F)
# Collapse to one row per plot with mean coordinates and PIED presence/absence.
FIA_pa.plot<-FIA_pa %>%
  group_by(plot) %>%
  summarise(lat=mean(lat),lon=mean(lon),PApied=mean(PApied))
FIA_pa.plot$PApied<-as.factor(FIA_pa.plot$PApied)
#Make points spatial and extract lambda for each point
Points <- SpatialPoints(coords = cbind(FIA_pa.plot$lon, FIA_pa.plot$lat),
                        proj4string = CRS("+proj=longlat +datum=NAD83"))
# Lambda at each plot location for every model variant.
FIA_pa.plot$lambda_c <- raster::extract(lambda_c, Points)
FIA_pa.plot$lambda_ccl <- raster::extract(lambda_ccl, Points)
FIA_pa.plot$lambda_cc <- raster::extract(lambda_cc, Points)
FIA_pa.plot$lambda_ccf <- raster::extract(lambda_ccf, Points)
FIA_pa.plot$lambda_i <- raster::extract(lambda_i, Points)
# Median lambda by presence/absence class, per model variant.
l_means<-FIA_pa.plot %>%
  group_by(PApied) %>%
  summarise(lambda_c=median(lambda_c),lambda_ccl=median(lambda_ccl),lambda_cc=median(lambda_cc),
            lambda_ccf=median(lambda_ccf),lambda_i=median(lambda_i))
# Shared ggplot theme for all manuscript figures.
# NOTE(review): element_line(size=) is deprecated in newer ggplot2 in favor of
# linewidth= -- confirm the ggplot2 version pinned for this project.
mytheme<-theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
               panel.background = element_blank(), axis.line = element_line(colour = "black"),
               legend.text=element_text(size=11),legend.title=element_text(size=12),
               legend.key = element_rect(fill = "white"),axis.text=element_text(size=12),
               axis.title.x=element_text(size=14),axis.title.y=element_text(size=14),
               axis.line.x = element_line(color="black", size = 0.3),
               axis.line.y = element_line(color="black", size = 0.3))
## Climate only
# Partial-effect figures for the climate-only models. Each panel shows: grey
# rectangles marking the prediction range beyond the observed data (model
# extrapolation), binned partial-residual points sized by bin count, and the
# fitted prediction curve (green).
# Growth
# Growth vs previous diameter.
grow_c_d <- ggplot(data=grdata,aes(x=PREVDIA))+
  geom_rect(aes(xmin=min(grplot_data_clim$dia),xmax=min(grdata$PREVDIA),ymin=-1.5,ymax=0.1),
            fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_clim$dia),xmin=max(grdata$PREVDIA),ymin=-1.5,ymax=0.1),
            fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=-1.6),size=0.1)+
  geom_point(data=g_binned,aes(x=PREVDIA,y=grow_dia_c,size=count_dia))+
  geom_line(data=grplot_data_clim,aes(x=dia,y=dia_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_clim,aes(x=dia,y=dia_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  labs(x="Previous diameter", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_c_d.png", plot=grow_c_d,dpi=400)
# Growth vs precipitation norm.
grow_c_p <- ggplot(data=grdata,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(grplot_data_clim$ppt),xmax=min(grdata$PPT_yr_norm),
                ymin=0.015,ymax=max(grplot_data_clim$ppt_pred)),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_clim$ppt),xmin=max(grdata$PPT_yr_norm),
                ymin=0.005,ymax=max(grplot_data_clim$ppt_pred)),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=0),size=0.1)+
  geom_point(data=g_binned,aes(x=PPT,y=grow_PPT_c,size=count_PPT))+
  geom_line(data=grplot_data_clim,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_clim,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year precipitation norm", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_c_p.png", plot=grow_c_p,dpi=400)
# Growth vs temperature norm.
grow_c_t<-ggplot(data=grdata,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(grplot_data_clim$t),xmax=min(grdata$T_yr_norm),
                ymin=-0.002,ymax=max(grplot_data_clim$t_pred)),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_clim$t),xmin=max(grdata$T_yr_norm),
                ymin=-0.002,ymax=max(grplot_data_clim$t_pred)),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=-0.005),size=0.1)+
  geom_point(data=g_binned,aes(x=T,y=grow_T_c,size=count_T))+
  geom_line(data=grplot_data_clim,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_clim,aes(x=t,y=t_pred_c),linetype="dotted",col="#1b9e77",size=1.25)+
  labs(x="30-year temperature norm", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_c_t.png", plot=grow_c_t,dpi=400)
# Survival
# Mortality vs previous diameter.
surv_c_d <- ggplot(data=survData,aes(x=PREVDIA))+
  geom_rect(aes(xmin=min(splot_data_clim$dia),xmax=min(survData$PREVDIA),ymin=-0.02,ymax=0.7),
            fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_clim$dia),xmin=max(survData$PREVDIA),ymin=-0.02,ymax=0.7),
            fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=PREVDIA,y=-0.05),size=0.1)+
  geom_point(data=s_binned,aes(x=PREVDIA,y=mort_dia_c,size=count_dia))+
  geom_line(data=splot_data_clim,aes(x=dia,y=dia_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_clim,aes(x=dia,y=dia_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=splot_data_clim.lin,aes(x=dia,y=dia_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_clim.lin,aes(x=dia,y=dia_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="Previous diameter", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_c_d.png", plot=surv_c_d,dpi=400)
# Mortality vs precipitation norm.
surv_c_p <- ggplot(data=survData,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(splot_data_clim$ppt),xmax=min(survData$PPT_yr_norm),
                ymin=-0.02,ymax=0.35),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_clim$ppt),xmin=max(survData$PPT_yr_norm),
                ymin=-0.02,ymax=0.35),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=PPT_yr_norm,y=-0.05),size=0.1)+
  geom_point(data=s_binned,aes(x=PPT,y=mort_PPT_c,size=count_PPT))+
  geom_line(data=splot_data_clim,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_clim,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=splot_data_clim.lin,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_clim.lin,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year precipitation norm", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_c_p.png", plot=surv_c_p,dpi=400)
# Mortality vs temperature norm.
surv_c_t <- ggplot(data=survData,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(splot_data_clim$t),xmax=min(survData$T_yr_norm),
                ymin=-0.02,ymax=0.45),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_clim$t),xmin=max(survData$T_yr_norm),
                ymin=-0.02,ymax=0.45),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=T_yr_norm,y=-0.05),size=0.1)+
  geom_point(data=s_binned,aes(x=T,y=mort_T_c,size=count_T))+
  geom_line(data=splot_data_clim,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_clim,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=splot_data_clim.lin,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_clim.lin,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year temperature norm", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_c_t.png", plot=surv_c_t,dpi=400)
# Recruit
# Recruitment vs precipitation norm.
# NOTE(review): data=rdata.scaled here, but every other recruitment plot uses
# rdata -- confirm rdata.scaled exists and is intended (only the x-axis data
# source differs; points/lines come from r_binned and rplot_data_clim).
recr_c_p <- ggplot(data=rdata.scaled,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(rplot_data_clim$ppt),xmax=min(rdata$PPT_yr_norm),
                ymin=-0.05,ymax=max(rplot_data_clim$ppt_pred)),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(rplot_data_clim$ppt),xmin=max(rdata$PPT_yr_norm),
                ymin=-0.05,ymax=max(rplot_data_clim$ppt_pred)),fill="grey80",col="grey80",alpha=0.1)+
  geom_point(data=r_binned,aes(x=PPT,y=recr_PPT_c,size=count_PPT))+
  #geom_point(aes(y=-0.15),size=0.1)+
  geom_line(data=rplot_data_clim,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_clim,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=rplot_data_clim.lin,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_clim.lin,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year precipitation norm", y="Number recruits")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_recr_c_p.png", plot=recr_c_p,dpi=400)
# Recruitment vs temperature norm (manuscript panel C).
recr_c_t <- ggplot(data=rdata,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(rplot_data_clim$t),xmax=min(rdata$T_yr_norm),
                ymin=-0.05,ymax=max(rplot_data_clim$t_pred)),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(rplot_data_clim$t),xmin=max(rdata$T_yr_norm),
                ymin=-0.05,ymax=max(rplot_data_clim$t_pred)),fill="grey80",col="grey80",alpha=0.1)+
  geom_point(data=r_binned,aes(x=T,y=recr_T_c,size=count_T),alpha=0.5)+
  #geom_point(aes(y=-0.1),size=0.1)+
  geom_line(data=rplot_data_clim,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_clim,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=rplot_data_clim.lin,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_clim.lin,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x=expression(paste("MAT (",degree,"C)")), y="Number recruits",tag="C")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_recr_c_t.png", plot=recr_c_t,
       width=4,height=3,units="in",dpi=600)
## Climate + competition
# Same partial-effect figure layout as the climate-only section, now for the
# climate+competition models (adds basal-area panels).
# Growth
# Growth vs previous diameter.
grow_cc_d <- ggplot(data=grdata,aes(x=PREVDIA))+
  geom_rect(aes(xmin=min(grplot_data_climcomp$dia),xmax=min(grdata$PREVDIA),ymin=-1.5,ymax=0.1),
            fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_climcomp$dia),xmin=max(grdata$PREVDIA),ymin=-1.5,ymax=0.1),
            fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=-1.6),size=0.1)+
  geom_point(data=g_binned,aes(x=PREVDIA,y=grow_dia_cc,size=count_dia))+
  geom_line(data=grplot_data_climcomp,aes(x=dia,y=dia_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_climcomp,aes(x=dia,y=dia_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  labs(x="Previous diameter", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_cc_d.png", plot=grow_cc_d,dpi=400)
# Growth vs live basal area.
grow_cc_b <- ggplot(data=grdata,aes(x=BALIVE))+
  geom_rect(aes(xmin=min(grplot_data_climcomp$ba),xmax=min(grdata$BALIVE),
                ymin=-0.002,ymax=0.1),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=-0.005),size=0.1)+
  geom_point(data=g_binned,aes(x=BALIVE,y=grow_ba_cc,size=count_ba))+
  geom_line(data=grplot_data_climcomp,aes(x=ba,y=ba_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_climcomp,aes(x=ba,y=ba_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  labs(x="Live basal area", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_cc_b.png", plot=grow_cc_b,dpi=400)
# Growth vs precipitation norm.
grow_cc_p <- ggplot(data=grdata,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(grplot_data_climcomp$ppt),xmax=min(grdata$PPT_yr_norm),
                ymin=0.015,ymax=max(grplot_data_climcomp$ppt_pred)),
            fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_climcomp$ppt),xmin=max(grdata$PPT_yr_norm),
                ymin=0.015,ymax=max(grplot_data_climcomp$ppt_pred)),
            fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=0.01),size=0.1)+
  geom_point(data=g_binned,aes(x=PPT,y=grow_PPT_cc,size=count_PPT))+
  geom_line(data=grplot_data_climcomp,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_climcomp,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  labs(x="30-year precipitation norm", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_cc_p.png", plot=grow_cc_p,dpi=400)
# Growth vs temperature norm.
grow_cc_t <- ggplot(data=grdata,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(grplot_data_climcomp$t),xmax=min(grdata$T_yr_norm),
                ymin=-0.002,ymax=max(grplot_data_climcomp$t_pred)),
            fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_climcomp$t),xmin=max(grdata$T_yr_norm),
                ymin=-0.002,ymax=max(grplot_data_climcomp$t_pred)),
            fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=-0.005),size=0.1)+
  geom_point(data=g_binned,aes(x=T,y=grow_T_cc,size=count_T))+
  geom_line(data=grplot_data_climcomp,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_climcomp,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year temperature norm", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_cc_t.png", plot=grow_cc_t,dpi=400)
# Survival
# Mortality vs previous diameter.
surv_cc_d <- ggplot(data=survData,aes(x=PREVDIA))+
  geom_rect(aes(xmin=min(splot_data_climcomp$dia),xmax=min(survData$PREVDIA),
                ymin=-0.02,ymax=0.7),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_climcomp$dia),xmin=max(survData$PREVDIA),
                ymin=-0.02,ymax=0.7),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=PREVDIA,y=-0.05),size=0.1)+
  geom_point(data=s_binned,aes(x=PREVDIA,y=mort_dia_cc,size=count_dia))+
  geom_line(data=splot_data_climcomp,aes(x=dia,y=dia_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_climcomp,aes(x=dia,y=dia_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  #geom_line(data=splot_data_fire,aes(x=dia,y=dia_pred),col="#d95f02",size=1.25)+
  #geom_line(data=splot_data_fire,aes(x=dia,y=dia_pred_c),col="#d95f02",linetype="dotted",size=1.25)+
  labs(x="Previous diameter", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_cc_d.png", plot=surv_cc_d,dpi=400)
# Mortality vs live basal area.
surv_cc_b <- ggplot(data=survData,aes(x=BALIVE))+
  geom_rect(aes(xmin=min(splot_data_climcomp$ba),xmax=min(survData$BALIVE),ymin=-0.02,ymax=1),
            fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=BALIVE,y=-0.05),size=0.1)+
  geom_point(data=s_binned,aes(x=BALIVE,y=mort_ba_cc,size=count_ba))+
  geom_line(data=splot_data_climcomp,aes(x=ba,y=ba_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_climcomp,aes(x=ba,y=ba_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  #geom_line(data=splot_data_fire,aes(x=ba,y=ba_pred),col="#d95f02",size=1.25)+
  #geom_line(data=splot_data_fire,aes(x=ba,y=ba_pred_c),col="#d95f02",linetype="dotted",size=1.25)+
  labs(x="Live basal area", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_cc_b.png", plot=surv_cc_b,dpi=400)
# Mortality vs precipitation norm.
surv_cc_p <- ggplot(data=survData,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(splot_data_climcomp$ppt),xmax=min(survData$PPT_yr_norm),
                ymin=-0.02,ymax=0.35),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_climcomp$ppt),xmin=max(survData$PPT_yr_norm),
                ymin=-0.02,ymax=0.35),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=PPT_yr_norm,y=-0.05),size=0.1)+
  geom_point(data=s_binned,aes(x=PPT,y=mort_PPT_cc,size=count_PPT))+
  geom_line(data=splot_data_climcomp,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_climcomp,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  #geom_line(data=splot_data_climcomp.lin,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_climcomp.lin,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  #geom_line(data=splot_data_fire,aes(x=ppt,y=ppt_pred),col="#d95f02",size=1.25)+
  #geom_line(data=splot_data_fire,aes(x=ppt,y=ppt_pred_c),col="#d95f02",linetype="dotted",size=1.25)+
  #geom_line(data=splot_data_fire.lin,aes(x=ppt,y=ppt_pred),col="#d95f02",size=1.25)+
  #geom_line(data=splot_data_fire.lin,aes(x=ppt,y=ppt_pred_c),col="#d95f02",linetype="dotted",size=1.25)+
  labs(x="30-year precipitation norm", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_cc_p.png", plot=surv_cc_p,dpi=400)
# Mortality vs temperature norm.
surv_cc_t <- ggplot(data=survData,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(splot_data_climcomp$t),xmax=min(survData$T_yr_norm),
                ymin=-0.02,ymax=0.45),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_climcomp$t),xmin=max(survData$T_yr_norm),
                ymin=-0.02,ymax=0.45),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=T_yr_norm,y=-0.05),size=0.1)+
  geom_point(data=s_binned,aes(x=T,y=mort_T_cc,size=count_T))+
  geom_line(data=splot_data_climcomp,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_climcomp,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=splot_data_climcomp.lin,aes(x=t,y=t_pred,linetype="No clamp",col="No fire"),size=1.25)+
  #geom_line(data=splot_data_climcomp.lin,aes(x=t,y=t_pred_c,linetype="Clamp",col="No fire"),size=1.25)+
  #geom_line(data=splot_data_fire,aes(x=t,y=t_pred),col="#d95f02",size=1.25)+
  #geom_line(data=splot_data_fire,aes(x=t,y=t_pred_c),col="#d95f02",linetype="dotted",size=1.25)+
  #geom_line(data=splot_data_fire.lin,aes(x=t,y=t_pred,linetype="No clamp",col="Fire"),size=1.25)+
  #geom_line(data=splot_data_fire.lin,aes(x=t,y=t_pred_c,linetype="Clamp",col="Fire"),size=1.25)+
  labs(x="30-year temperature norm", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_cc_t.png", plot=surv_cc_t,dpi=400)
# Recruit
# Recruitment vs live basal area.
recr_cc_b <- ggplot(data=rdata,aes(x=BALIVE))+
  geom_rect(aes(xmin=min(rplot_data_climcomp$ba),xmax=min(rdata$BALIVE),
                ymin=-0.05,ymax=2),fill="grey80",col="grey80",alpha=0.1)+
  geom_point(data=r_binned,aes(x=BALIVE,y=recr_ba_cc,size=count_ba))+
  #geom_point(aes(y=-0.2),size=0.1)+
  geom_line(data=rplot_data_climcomp,aes(x=ba,y=ba_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_climcomp,aes(x=ba,y=ba_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  #geom_line(data=rplot_data_climcomp.lin,aes(x=ba,y=ba_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_climcomp.lin,aes(x=ba,y=ba_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  labs(x="Live basal area", y="Number recruits")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_recr_cc_b.png", plot=recr_cc_b,dpi=400)
# Recruitment vs precipitation norm (manuscript panel D).
recr_cc_p <- ggplot(data=rdata,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(rplot_data_climcomp$ppt),xmax=min(rdata$PPT_yr_norm),
                ymin=-0.05,ymax=2),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(rplot_data_climcomp$ppt),xmin=max(rdata$PPT_yr_norm),
                ymin=-0.05,ymax=2),fill="grey80",col="grey80",alpha=0.1)+
  geom_point(data=r_binned,aes(x=PPT,y=recr_PPT_cc,size=count_PPT),alpha=0.5)+
  #geom_point(aes(y=-0.15),size=0.1)+
  geom_line(data=rplot_data_climcomp,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_climcomp,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  #geom_line(data=rplot_data_climcomp.lin,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_climcomp.lin,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",
  #          size=1.25)+
  labs(x="MAP (mm)", y="Number recruits", tag="D")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_recr_cc_p.png", plot=recr_cc_p,
       width=4,height=3,units="in",dpi=600)
# Recruitment vs temperature norm.
recr_cc_t <- ggplot(data=rdata,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(rplot_data_climcomp$t),xmax=min(rdata$T_yr_norm),
                ymin=-0.05,ymax=0.8),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(rplot_data_climcomp$t),xmin=max(rdata$T_yr_norm),
                ymin=-0.05,ymax=0.8),fill="grey80",col="grey80",alpha=0.1)+
  geom_point(data=r_binned,aes(x=T,y=recr_T_cc,size=count_T))+
  #geom_point(aes(y=-0.1),size=0.1)+
  geom_line(data=rplot_data_climcomp,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_climcomp,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=rplot_data_climcomp.lin,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_climcomp.lin,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year temperature norm", y="Number recruits")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_recr_cc_t.png", plot=recr_cc_t,dpi=400)
## Climate + competition, interactions
# Partial-effect figures for the interaction models (climate x competition),
# same layout as the previous sections.
# Growth
# Growth vs previous diameter.
grow_i_d <- ggplot(data=grdata,aes(x=PREVDIA))+
  geom_rect(aes(xmin=min(grplot_data_int$dia),xmax=min(grdata$PREVDIA),ymin=-1.53,ymax=0.1),
            fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_int$dia),xmin=max(grdata$PREVDIA),ymin=-1.53,ymax=0.1),
            fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=-1.6),size=0.1)+
  geom_point(data=g_binned,aes(x=PREVDIA,y=grow_dia_i,size=count_dia))+
  geom_line(data=grplot_data_int,aes(x=dia,y=dia_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_int,aes(x=dia,y=dia_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="Previous diameter", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_i_d.png", plot=grow_i_d,dpi=400)
# Growth vs live basal area.
grow_i_b <- ggplot(data=grdata,aes(x=BALIVE))+
  geom_rect(aes(xmin=min(grplot_data_int$ba),xmax=min(grdata$BALIVE),
                ymin=-0.002,ymax=0.1),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=-0.005),size=0.1)+
  geom_point(data=g_binned,aes(x=BALIVE,y=grow_ba_i,size=count_ba))+
  geom_line(data=grplot_data_int,aes(x=ba,y=ba_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_int,aes(x=ba,y=ba_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="Live basal area", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_i_b.png", plot=grow_i_b,dpi=400)
# Growth vs precipitation norm.
grow_i_p <- ggplot(data=grdata,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(grplot_data_int$ppt),xmax=min(grdata$PPT_yr_norm),
                ymin=0.015,ymax=max(grplot_data_int$ppt_pred)),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_int$ppt),xmin=max(grdata$PPT_yr_norm),
                ymin=0.015,ymax=max(grplot_data_int$ppt_pred)),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=0.01),size=0.1)+
  geom_point(data=g_binned,aes(x=PPT,y=grow_PPT_i,size=count_PPT))+
  geom_line(data=grplot_data_int,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_int,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year precipitation norm", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_i_p.png", plot=grow_i_p,dpi=400)
# Growth vs temperature norm.
grow_i_t <- ggplot(data=grdata,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(grplot_data_int$t),xmax=min(grdata$T_yr_norm),
                ymin=-0.002,ymax=max(grplot_data_int$t_pred)),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(grplot_data_int$t),xmin=max(grdata$T_yr_norm),
                ymin=-0.002,ymax=max(grplot_data_int$t_pred)),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(aes(y=-0.005),size=0.1)+
  geom_point(data=g_binned,aes(x=T,y=grow_T_i,size=count_T))+
  geom_line(data=grplot_data_int,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=grplot_data_int,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year temperature norm", y="Diameter increment")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_grow_i_t.png", plot=grow_i_t,dpi=400)
# Survival
# Mortality vs previous diameter.
surv_i_d <- ggplot(data=survData,aes(x=PREVDIA))+
  geom_rect(aes(xmin=min(splot_data_int$dia),xmax=min(survData$PREVDIA),
                ymin=-0.02,ymax=0.7),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_int$dia),xmin=max(survData$PREVDIA),
                ymin=-0.02,ymax=0.7),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=PREVDIA,y=-0.05),size=0.1)+
  geom_point(data=s_binned,aes(x=PREVDIA,y=mort_dia_i,size=count_dia))+
  geom_line(data=splot_data_int,aes(x=dia,y=dia_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_int,aes(x=dia,y=dia_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="Previous diameter", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_i_d.png", plot=surv_i_d,dpi=400)
# Mortality vs live basal area.
surv_i_b <- ggplot(data=survData,aes(x=BALIVE))+
  geom_rect(aes(xmin=min(splot_data_int$ba),xmax=min(survData$BALIVE),ymin=-0.02,ymax=1),
            fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=BALIVE,y=-0.08),size=0.1)+
  geom_point(data=s_binned,aes(x=BALIVE,y=mort_ba_i,size=count_ba))+
  geom_line(data=splot_data_int,aes(x=ba,y=ba_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_int,aes(x=ba,y=ba_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="Live basal area", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_i_b.png", plot=surv_i_b,dpi=400)
# Mortality vs precipitation norm.
surv_i_p <- ggplot(data=survData,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(splot_data_int$ppt),xmax=min(survData$PPT_yr_norm),
                ymin=-0.02,ymax=0.35),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_int$ppt),xmin=max(survData$PPT_yr_norm),
                ymin=-0.02,ymax=0.35),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=PPT_yr_norm,y=-0.08),size=0.1)+
  geom_point(data=s_binned,aes(x=PPT,y=mort_PPT_i,size=count_PPT))+
  geom_line(data=splot_data_int,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_int,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=splot_data_int.lin,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_int.lin,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year precipitation norm", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_i_p.png", plot=surv_i_p,dpi=400)
# Mortality vs temperature norm.
surv_i_t <- ggplot(data=survData,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(splot_data_int$t),xmax=min(survData$T_yr_norm),
                ymin=-0.02,ymax=0.45),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(splot_data_int$t),xmin=max(survData$T_yr_norm),
                ymin=-0.02,ymax=0.45),fill="grey80",col="grey80",alpha=0.1)+
  #geom_point(data=survData,aes(x=T_yr_norm,y=-0.07),size=0.1)+
  geom_point(data=s_binned,aes(x=T,y=mort_T_i,size=count_T))+
  geom_line(data=splot_data_int,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_int,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=splot_data_int.lin,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=splot_data_int.lin,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year temperature norm", y="Mortality")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_surv_i_t.png", plot=surv_i_t,dpi=400)
# Recruitment: binned recruit counts (points, sized by bin count) against
# fitted curves from the interaction model. Grey rectangles again mark
# extrapolation beyond the observed data range.
recr_i_b <- ggplot(data=rdata,aes(x=BALIVE))+
  geom_rect(aes(xmin=min(rplot_data_int$ba),xmax=min(rdata$BALIVE),
                ymin=-0.4,ymax=3),fill="grey80",col="grey80",alpha=0.1)+
  geom_point(data=r_binned,aes(x=BALIVE,y=recr_ba_i,size=count_ba))+
  #geom_point(aes(y=-1),size=0.1)+
  geom_line(data=rplot_data_int,aes(x=ba,y=ba_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_int,aes(x=ba,y=ba_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=rplot_data_int.lin,aes(x=ba,y=ba_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_int.lin,aes(x=ba,y=ba_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="Live basal area", y="Number recruits")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_recr_i_b.png", plot=recr_i_b,dpi=400)
# Recruitment vs. 30-year precipitation norm.
recr_i_p <- ggplot(data=rdata,aes(x=PPT_yr_norm))+
  geom_rect(aes(xmin=min(rplot_data_int$ppt),xmax=min(rdata$PPT_yr_norm),
                ymin=-0.05,ymax=2),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(rplot_data_int$ppt),xmin=max(rdata$PPT_yr_norm),
                ymin=-0.05,ymax=2),fill="grey80",col="grey80",alpha=0.1)+
  geom_point(data=r_binned,aes(x=PPT,y=recr_PPT_i,size=count_PPT))+
  #geom_point(aes(y=-0.15),size=0.1)+
  geom_line(data=rplot_data_int,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_int,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=rplot_data_int.lin,aes(x=ppt,y=ppt_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_int.lin,aes(x=ppt,y=ppt_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year precipitation norm", y="Number recruits")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_recr_i_p.png", plot=recr_i_p,dpi=400)
# Recruitment vs. 30-year temperature norm.
recr_i_t <- ggplot(data=rdata,aes(x=T_yr_norm))+
  geom_rect(aes(xmin=min(rplot_data_int$t),xmax=min(rdata$T_yr_norm),
                ymin=-0.05,ymax=0.8),fill="grey80",col="grey80",alpha=0.1)+
  geom_rect(aes(xmax=max(rplot_data_int$t),xmin=max(rdata$T_yr_norm),
                ymin=-0.05,ymax=0.8),fill="grey80",col="grey80",alpha=0.1)+
  geom_point(data=r_binned,aes(x=T,y=recr_T_i,size=count_T))+
  #geom_point(aes(y=-0.1),size=0.1)+
  geom_line(data=rplot_data_int,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_int,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  #geom_line(data=rplot_data_int.lin,aes(x=t,y=t_pred),col="#1b9e77",size=1.25)+
  #geom_line(data=rplot_data_int.lin,aes(x=t,y=t_pred_c),col="#1b9e77",linetype="dotted",size=1.25)+
  labs(x="30-year temperature norm", y="Number recruits")+
  guides(size=guide_legend(title="Count")) +
  theme(legend.position="top")+mytheme
ggsave(file="PIED_manuscript_recr_i_t.png", plot=recr_i_t,dpi=400)
## Partial residual plots
# effects::Effect() with partial.residuals=TRUE produces partial-residual
# diagnostics for each focal predictor of the fitted vital-rate models.
# Naming convention: {grow|surv|recr}_{c|cc}_{d|b|p|t}_resid =
# {growth|survival|recruitment} model, {climate-only|climate+competition},
# predictor {diameter|basal area|precipitation|temperature}.
# Climate only
est<-Effect("PREVDIA", partial.residuals=T, gmodel.clim)
grow_c_d_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, gmodel.clim)
grow_c_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, gmodel.clim)
grow_c_t_resid<-plot(est)
est<-Effect("PREVDIA", partial.residuals=T, smodel.clim)
surv_c_d_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, smodel.clim)
surv_c_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, smodel.clim)
surv_c_t_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, rmodel.clim)
recr_c_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, rmodel.clim)
recr_c_t_resid<-plot(est)
# Climate + comp, no interactions
est<-Effect("PREVDIA", partial.residuals=T, gmodel.clim.comp)
grow_cc_d_resid<-plot(est)
est<-Effect("BALIVE", partial.residuals=T, gmodel.clim.comp)
grow_cc_b_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, gmodel.clim.comp)
grow_cc_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, gmodel.clim.comp)
grow_cc_t_resid<-plot(est)
est<-Effect("PREVDIA", partial.residuals=T, smodel.clim.comp)
surv_cc_d_resid<-plot(est)
est<-Effect("BALIVE", partial.residuals=T, smodel.clim.comp)
surv_cc_b_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, smodel.clim.comp)
surv_cc_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, smodel.clim.comp)
surv_cc_t_resid<-plot(est)
est<-Effect("BALIVE", partial.residuals=T, rmodel.clim.comp)
recr_cc_b_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, rmodel.clim.comp)
recr_cc_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, rmodel.clim.comp)
recr_cc_t_resid<-plot(est)
# Climate + comp, no interactions, fire
# Partial-residual plots for the survival model that adds a fire covariate.
est<-Effect("PREVDIA", partial.residuals=TRUE, smodel.clim.comp.fire)
surv_ccf_d_resid<-plot(est)
est<-Effect("BALIVE", partial.residuals=TRUE, smodel.clim.comp.fire)
# FIX: the BALIVE plot previously overwrote surv_ccf_d_resid (copy-paste
# error); store it under its own name, matching the *_b_* convention used
# for every other model's basal-area plot.
surv_ccf_b_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=TRUE, smodel.clim.comp.fire)
surv_ccf_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=TRUE, smodel.clim.comp.fire)
surv_ccf_t_resid<-plot(est)
# Climate + comp, interactions
# Partial-residual plots for the models with climate x competition
# interaction terms (suffix _i_).
est<-Effect("PREVDIA", partial.residuals=T, gmodel.int)
grow_i_d_resid<-plot(est)
est<-Effect("BALIVE", partial.residuals=T, gmodel.int)
grow_i_b_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, gmodel.int)
grow_i_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, gmodel.int)
grow_i_t_resid<-plot(est)
est<-Effect("PREVDIA", partial.residuals=T, smodel.int)
surv_i_d_resid<-plot(est)
est<-Effect("BALIVE", partial.residuals=T, smodel.int)
surv_i_b_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, smodel.int)
surv_i_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, smodel.int)
surv_i_t_resid<-plot(est)
est<-Effect("BALIVE", partial.residuals=T, rmodel.int)
recr_i_b_resid<-plot(est)
est<-Effect("PPT_yr_norm", partial.residuals=T, rmodel.int)
recr_i_p_resid<-plot(est)
est<-Effect("T_yr_norm", partial.residuals=T, rmodel.int)
recr_i_t_resid<-plot(est)
# Best
# Effect plots (no partial residuals here) for the best-fitting survival and
# recruitment models, which use seasonal climate covariates; two panels are
# also written to PNG for the manuscript.
est<-Effect("PREVDIA", smodel.best)
surv_b_d_resid<-plot(est,xlab="Tree diameter",ylab="Mortality",
                     main=FALSE)
est<-Effect("BALIVE", smodel.best)
surv_b_b_resid<-plot(est)
est<-Effect("PPT_fs_norm", smodel.best)
png(file="./PIED_best_surv.png",4,3,units="in",type="cairo",res=600)
plot(est,xlab="Pre-monsoon precipitation (scaled)",ylab="Mortality",
     main=FALSE)
dev.off()
est<-Effect("T_c_norm", smodel.best)
# FIX: this was assigned to surv_i_t_resid, silently clobbering the
# interaction-model object created above; the best-model plots use the
# _b_ (best) prefix like the rest of this section.
surv_b_t_resid<-plot(est)
est<-Effect("BALIVE", rmodel.best)
recr_b_b_resid<-plot(est)
est<-Effect("PPT_c_norm", rmodel.best)
recr_b_pc_resid<-plot(est)
est<-Effect("PPT_wd_norm", rmodel.best)
recr_b_pwd_resid<-plot(est)
est<-Effect("PPT_m_norm", rmodel.best)
recr_b_pm_resid<-plot(est)
est<-Effect("T_c_norm", rmodel.best)
png(file="./PIED_best_recr.png",4,3,units="in",type="cairo",res=600)
plot(est,xlab="Cool season temperature (scaled)",ylab="Number recruits",
     main=FALSE,axes=list(y=list(ticks=list(at=c(1,10,100,1000,10000,100000)))))
dev.off()
est<-Effect("T_wd_norm", rmodel.best)
recr_b_twd_resid<-plot(est)
est<-Effect("T_m_norm", rmodel.best)
recr_b_tm_resid<-plot(est)
|
6f284d867344e84c4140e350f7e0e965a6482c2f
|
39770b7fdb336b49e1f46d3dec3928549b5aa37c
|
/R/makePolicyTechnologyChange.R
|
db9e1cd5f9d2586f2c24d8f3eee3141b3bd36ef0
|
[] |
no_license
|
cran/GE
|
43930f03e55c49b7ac5dec166b65f26a337022af
|
da036424c04569660da21d899f879749343ba145
|
refs/heads/master
| 2023-09-01T05:09:07.662756
| 2023-08-17T07:52:45
| 2023-08-17T10:30:33
| 252,856,081
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,663
|
r
|
makePolicyTechnologyChange.R
|
#' @export
#' @title Make a Policy of Technology Change
#' @aliases makePolicyTechnologyChange
#' @description Create a policy function that rescales the technology
#' attributes alpha and a in the demand structure trees of the specified
#' agents. The attribute alpha is usually a parameter of a CES or CD
#' function; the attribute a is usually a parameter of a Leontief function.
#' Demand structure trees containing neither attribute are left unchanged.
#' @param adjumentment.ratio a scalar. Each alpha attribute is multiplied by
#' adjumentment.ratio, and each a attribute is divided by it. (The argument
#' spelling is part of the exported interface and is kept as is.)
#' @param agent a vector specifying the indices or names of agents.
#' @param time.win the time window vector, i.e. a 2-vector specifying the
#' start time and end time of policy implementation.
#' @return A policy function, which is often used as an argument of the
#' function sdm2.
#' @seealso \code{\link{sdm2}}
#' @examples
#' \donttest{
#' dst.firm <- node_new("output",
#'   type = "CD", alpha = 1, beta = c(0.5, 0.5),
#'   "prod", "lab"
#' )
#'
#' dst.consumer <- node_new("utility",
#'   type = "Leontief", a = 1, "prod"
#' )
#'
#' B <- matrix(c(
#'   1, 0,
#'   0, 0
#' ), 2, 2, TRUE)
#' S0Exg <- matrix(c(
#'   NA, NA,
#'   NA, 100
#' ), 2, 2, TRUE)
#'
#' ge <- sdm2(
#'   A = list(dst.firm, dst.consumer), B = B, S0Exg = S0Exg,
#'   names.commodity = c("prod", "lab"),
#'   names.agent = c("firm", "consumer"),
#'   priceAdjustmentVelocity = 0,
#'   policy = list(
#'     makePolicyTechnologyChange(agent = "firm"),
#'     makePolicyStickyPrice(stickiness = 0, time.win = c(1, 20)),
#'     makePolicyStickyPrice(stickiness = 0.9, time.win = c(20, Inf))
#'   ),
#'   ts = TRUE,
#'   maxIteration = 1,
#'   numberOfPeriods = 40
#' )
#'
#' par(mfrow = c(1, 2))
#' matplot(ge$ts.z, type = "o", pch = 20)
#' matplot(ge$ts.p, type = "o", pch = 20)
#' }
#'
makePolicyTechnologyChange <- function(adjumentment.ratio = 1.1,
                                       agent = 1,
                                       time.win = c(20, 20)) {
  function(time, A, state) {
    # Outside the implementation window the policy is a no-op.
    if (time < time.win[1] || time > time.win[2]) {
      return(invisible(NULL))
    }
    # Agents may be given by name or by index; names are resolved against
    # the agent names carried in the current state.
    idx <- if (is.character(agent)) match(agent, state$names.agent) else agent
    for (k in idx) {
      # data.tree nodes have reference semantics, so the demand structure
      # tree is updated in place; only non-leaf nodes are visited.
      A[[k]]$Do(function(node) {
        if (!is.null(node$alpha)) node$alpha <- node$alpha * adjumentment.ratio
        if (!is.null(node$a)) node$a <- node$a / adjumentment.ratio
      },
      filterFun = isNotLeaf
      )
    }
  }
}
|
b961faf359549405f55af4e45edef4fbdccd7ac9
|
cfa61c03961746223b4b4aab8434b8ee80a0df90
|
/Modeling.R
|
7910f03df35685a6c5578a9daad93418e2c8fd0d
|
[] |
no_license
|
crump1/Modeling
|
cf5310802c1cfda183e1a408cd60fe72b9287217
|
0a09998e3c2deb107b50a1daee63cd3056b26e0c
|
refs/heads/master
| 2020-04-14T22:25:22.818399
| 2019-01-06T20:43:48
| 2019-01-06T20:43:48
| 164,159,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,249
|
r
|
Modeling.R
|
#Linear regression with one predictor
#Load data from 2011 MLB season
library(statsr)
data(mlb11)
ml<- mlb11
head(ml)
#Using runs as the response variable - the obective of the game, after all,
#is to score more runs than the other team.
#But what is the key driver of runs scored?
#Let's first look at some traditional stats - at bats, and homeruns
#Build scatterplot - runs versus at bats
ggplot(ml, aes(x= at_bats, y= runs)) +
  geom_point() + stat_smooth(method= "lm", se= FALSE)
#Correlation coefficient?
cor(ml$runs, ml$at_bats)
# 0.610627
#Build linear regression model with at bats as the single predictor
m1<- lm(runs ~ at_bats, data= ml)
summary(m1)
#p-value says at bats is a significant predictor or runs scored, however
# the R-squared only explains away ~ 37% of the variability in runs scored.
#Homeruns?
ggplot(ml, aes(x= homeruns, y= runs)) +
  geom_point() + stat_smooth(method= "lm", se= FALSE)
m2<- lm(runs ~ homeruns, data= ml)
summary(m2)
#Better all around - stronger, positive, linear relationship, higher R-squared, and more...
#Let's do some inference on runs versus homeruns
# Hand-computed two-sided p-value and 95% CI for the slope (t on n-2 = 28 df),
# followed by confint() as a check on the manual arithmetic.
pt(6.854, df= 28, lower.tail= FALSE) * 2
qt(0.025, df= 28)
1.8345 + 2.048407 * 0.2677
1.8345 - 2.048407 * 0.2677
confint(m2)
#So we are 95% confident the slope estimate falls between 1.29 and 2.38, on average.
#But all of the SABR talk is around getting on base and slugging, so good thing
#we have a variable - reflecting both of those - to test.
ggplot(ml, aes(x= new_obs, y= runs)) +
  geom_point() + stat_smooth(method= "lm")
m3<- lm(runs ~ new_obs, data= ml)
# NOTE(review): the comment below quotes summary() output, but summary(m3)
# is never called here — presumably inspected interactively.
#Oh my...Adjusted R-squared: 0.9326, and look how "linear" that relationship is.
#Partition the variability
anova(m3)
#Diagnostics - linearity, normality, constant variance
#Test conditions for the model using a mixed bag
library(car)
#Linearity
plot(m3$residuals ~ ml$new_obs)
crPlots(m3)
#Normality
hist(m3$residuals)
qqnorm(m3$residuals)
qqline(m3$residuals)
qqPlot(m3)
#Constant variance - homoscedasticity
plot(m3$residuals ~ m3$fitted.values)
spreadLevelPlot(m3)
#Everything checks out, looks good.
#So the SABR folks are right, runs are clearly a function of
#getting on base and slugging.
#Regression with multiple predictors
cognitive<- read.csv("http://bit.ly/dasi_cognitive")
#Dataset is cognitive test scores of 3-4 year-olds and the
#characteristics of their mothers.
#Two categorical variables - mom_hs and mom_work each have two levels
head(cognitive)
#build a full model
cog_fit<- lm(kid_score ~ ., data= cognitive)
summary(cog_fit)
#Model selection
#We want the most predictive model, so I will choose variables based on adjusted R-squared
# FIX: cog_fit_final was referenced below but never defined, so the script
# errored here. Backward selection on adjusted R-squared drops mom_age from
# the full model.
# NOTE(review): confirm this matches the intended final model.
cog_fit_final<- lm(kid_score ~ mom_hs + mom_iq + mom_work, data= cognitive)
summary(cog_fit_final)
#Interesting...whether mom works or not is not significant.
#However, it adds value to the model. And in keeping with the
#original objective of building a predictive model, the variable stays
#If I was selecting only signficant variables, mom_work would not be a part of the model.
#Now let's do some diagnostics
#mom_iq is the only numerical variable in the dataset, so
#that is what we will focus on for the linearity condition
#It's all about the residuals. To validate the model, we want random scatter around zero.
plot(cog_fit_final$residuals ~ cognitive$mom_iq)
#Normality
hist(cog_fit_final$residuals)
qqnorm(cog_fit_final$residuals)
qqline(cog_fit_final$residuals)
#Constant variance
#Independence - independent residuals
#Here we are looking for a time series structure, or patterns in the data.
#The model, if valid, should capture all of the pattern and just leave random scatter
plot(cog_fit_final$residuals)
durbinWatsonTest(cog_fit_final)
#It's a go.
# Regression with binary response, or "dependent" variables
#We'll dig into some econometrics, a probit model
library(AER)
data("SwissLabor")
#This is a well-worn econometrics dataset considering labor force
#participation for 872 Swiss women.
#use GLM - generalized linear model - instead of LM, to extend the linear model to categorical outcomes
#binary dependent variables are decidedly non-normal - do not follow a normal distribution
#don't forget the arguments for selecting response distribution and link function.
# Probit model: all covariates plus a quadratic age term.
swiss_prob<- glm(participation ~ . + I(age^2),
                 data= SwissLabor, family= binomial(link= "probit"))
summary(swiss_prob)
#All variables except education are significant
#We can't visualize the binary outcome with a scatterplot, but
#we can use a spine plot to visualize participation versus other continuous variables in the dataset
plot(participation ~ age, data = SwissLabor, ylevels = 2:1)
plot(participation ~ income, data = SwissLabor, ylevels = 2:1)
plot(participation ~ education, data = SwissLabor, ylevels = 2:1)
#GLMs don't have a go-to R-squared, so you have to improvise a bit
# to see how good of a fit we have
#1.) Confusion matrix
# Rounding the fitted probabilities classifies each observation at a 0.5 cutoff.
table(true = SwissLabor$participation,
      pred = round(fitted(swiss_prob)))
#2.) Pseudo R-squared
# McFadden's pseudo R-squared: 1 - logLik(full) / logLik(intercept-only).
swiss_prob0 <- update(swiss_prob, formula = . ~ 1)
1 - as.vector(logLik(swiss_prob)/logLik(swiss_prob0))
# .155?
#This model is not a great fit, but if we are less concerned with prediction
#and more concerned with identifying significant predictors, then we are in better shape.
#Diagnostics
#Two ways of calculating residuals
deviance(swiss_prob)
anova(swiss_prob)
#Poisson Regression
#Another GLM model, Poisson is the standard model for count data.
#This dataset is from a survey of 2,000 leisure boat owners in eastern Texas.
#The dependent variable - trips - is the count of trips made by each survey participant
data("RecreationDemand")
rd<- RecreationDemand
dim(rd)
str(rd)
summary(rd)
#Let's look at the variable
ggplot(rd, aes(x= trips)) + geom_histogram()
#Need to pluck out some of those long tail values
ggplot(rd, aes(x= trips)) + geom_histogram() + coord_cartesian(ylim= c(0, 80))
ggplot(rd, aes(x= ski, y= trips)) + geom_boxplot()
max(rd$trips)
range(rd$trips)
#Wow! Somebody made 88 trips to the lake in one year.
#So we have a highly skewed distribution and an apparent outlier
#Fit a model
rd_pois <- glm(trips ~ ., data = RecreationDemand,
               family = poisson)
summary(rd_pois)
#Need to check for overdispersion, a problem with Poisson models
#Overdispersion - more variance than the model will allow
#the AER package offers a test
dispersiontest(rd_pois)
dispersiontest(rd_pois, trafo = 2)
#What's the problem?
rd %>% filter(trips== "0") %>% summarise(n())
417/659
#~ 63% of trips were 0 - reported no trips to the lake
#count data regressions don't play well with lots of zeros.
#Let's try a ZIP model - zero-inflated Poisson
library(pscl)
# Count component uses all covariates; the zero-inflation component uses
# quality and income. dist = "negbin" makes this zero-inflated negative
# binomial rather than plain ZIP.
rd_zinb <- zeroinfl(trips ~ . | quality + income,
                    data = rd, dist = "negbin")
summary(rd_zinb)
# Compare expected counts 0-9 under the fitted model to the data.
round(colSums(predict(rd_zinb, type = "prob")[,1:10]))
#This model captures the zeros much better than the other model
# Bayesian HLM
conjoint.df <- read.csv("http://goo.gl/G8knGV")
dim(conjoint.df)
conjoint.df$speed <- factor(conjoint.df$speed)
conjoint.df$height <- factor(conjoint.df$height)
summary(conjoint.df)
set.seed(1234)
# First fit non-hierarchical model (lm)
ride.mc1 <- MCMCregress(rating ~ speed + height + const + theme,
                        data=conjoint.df)
summary(ride.mc1)
set.seed(1234)
# Build Bayesian HLM model, and wait. The sampler prints:
# "Running the Gibbs sampler. It may be long, keep cool :)"
ride.mc2 <- MCMChregress(
  fixed = rating ~ speed +height + const + theme,
  random = ~ speed + height + const + theme,
  group="resp.id", data=conjoint.df, r=8, R=diag(8) )
summary(ride.mc2$mcmc[ , 1:8])
# Find an indvidual in the columns - what columns match.
cols <- grepl(".196", colnames(ride.mc2$mcmc), fixed=TRUE)
summary(ride.mc2$mcmc[ , cols])
# Select one effect - wood construction - and check variance
# Per-respondent total effect = population effect + respondent deviation.
cols <- grepl("b.constWood", colnames(ride.mc2$mcmc))
ride.constWood <- summary(ride.mc2$mcmc[ , cols]
                          + ride.mc2$mcmc[ , "beta.constWood"])
ride.constWood$statistics
# Plot variance
hist(ride.constWood$statistics[ , 1],
     main="Preference for Wood vs. Steel",
     xlab="Rating points", ylab="Count of respondents", xlim=c(-4,4))
|
1fe31b1ffa1fa205b54eb83d26a1a27673bfb821
|
5e486203c665acf112e514fe1642c1f16ba1b8dc
|
/Oct_02_2014/Ebola2014/MultLocTest.R
|
53cebb53eebeb6ec2cdd168958ebedc28f093d5b
|
[] |
no_license
|
grantbrown/Ebola-2014-Analysis-Archive
|
c8b2b3afbe0e24d63f998b22cbdd947ab428b5d6
|
591bab6a95b9595b25674bd2c1a7bb0c3b8364b2
|
refs/heads/master
| 2021-01-19T17:13:43.734207
| 2014-10-27T21:45:14
| 2014-10-27T21:45:14
| 22,686,537
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,793
|
r
|
MultLocTest.R
|
# MCMC analysis of the 2014 West Africa Ebola outbreak with spatialSEIR,
# using case-count tables scraped from Wikipedia.
library(knitr)
library(coda) # Load the coda library for MCMC convergence diagnosis
library(spatialSEIR) # Load the spatialSEIR library to perform the modeling.
library(XML) # Load the XML library to read in data from Wikipedia
library(parallel) # Load the parallel library to enable multiple chains to be run simultaneously.
## Define Document Compilation Parameters
# "release" runs full-length chains; "debug" runs short chains as a smoke test.
#documentCompilationMode = "release"
documentCompilationMode = "debug"
modelDF = 6
pred.days = 14
## Compute number of samples/batches
numBurnInBatches = ifelse(documentCompilationMode == "release", 1000, 1)
numConvergenceBatches = ifelse(documentCompilationMode == "release", 1000, 10)
convergenceBatchSize = ifelse(documentCompilationMode == "release", 1000, 50)
extraR0Iterations = ifelse(documentCompilationMode == "release", 500, 10)
iterationStride = ifelse(documentCompilationMode == "release", 1000, 50)
## Read in the data
url = 'http://en.wikipedia.org/wiki/West_Africa_Ebola_virus_outbreak'
tbls = readHTMLTable(url)
dat = tbls[[5]] # This line changes depending on the page formatting.
# One date is now duplicated on the Wikipedia page, due to using different sources.
# Clean that up first.
dup.indices = which(as.Date(dat[,1], "%d %b %Y") == as.Date("2014-06-05"))
dat[dup.indices[1],]$V2 = dat[dup.indices[2],]$V2
dat = rbind(dat[1:dup.indices[1],], dat[(dup.indices[2]+1):nrow(dat),])
charDate = as.character(dat[2:nrow(dat),1])
# Normalize "Sept" to "Sep" so as.Date's %b parser accepts the month.
for (i in 1:length(charDate))
{
    charDate[i] = gsub("Sept", "Sep", charDate[i])
}
rptDate = as.Date(charDate, "%d %b %Y")
numDays = max(rptDate) - min(rptDate) + 1
numDays.pred = numDays + pred.days
original.rptDate = rptDate
# The page lists reports newest-first; reorder chronologically. rptDate
# drops the first (earliest) report to align with differenced counts later.
ascendingOrder = order(rptDate)
rptDate = rptDate[ascendingOrder][2:length(rptDate)]
original.rptDate = original.rptDate[ascendingOrder]
# Clean one cumulative-count column scraped from the Wikipedia table:
# strip thousands separators, reorder chronologically, treat em dashes
# ("no report") as missing, and carry the last observation forward.
cleanData <- function(dataColumn, ascendingOrder)
{
    # Remove thousands separators so "1,234" parses as 1234.
    stripped <- gsub(",", "", dataColumn, fixed = TRUE)
    # Reorder chronologically, then keep only the text before any footnote
    # line break ("+1"/"-1" annotations); empty cells become an em dash.
    tokens <- as.character(lapply(
        strsplit(as.character(stripped)[ascendingOrder], "\n"),
        function(parts) if (length(parts) == 0) "—" else parts[[1]]
    ))
    # A missing first observation is treated as zero cases.
    if (is.na(tokens[1]) || tokens[1] == "—") {
        tokens[1] <- "0"
    }
    # Em dashes mark unreported days; map them to NA via empty strings.
    values <- as.numeric(ifelse(tokens == "—", "", tokens))
    # Last observation carried forward for unreported days.
    for (k in 2:length(values)) {
        if (is.na(values[k])) {
            values[k] <- values[k - 1]
        }
    }
    values
}
# Cumulative case-count series per country (columns V4/V6/V8/V10 of the
# scraped table), cleaned and chronologically ordered.
Guinea = cleanData(dat$V4[2:nrow(dat)], ascendingOrder)
Liberia = cleanData(dat$V6[2:nrow(dat)], ascendingOrder)
Sierra.Leone = cleanData(dat$V8[2:nrow(dat)], ascendingOrder)
Nigeria = cleanData(dat$V10[2:nrow(dat)], ascendingOrder)
# Define the plot for the next section
# Shared y-axis limits across all four countries.
ylim = c(min(c(Guinea, Sierra.Leone, Liberia, Nigeria)),
         max(c(Guinea, Sierra.Leone, Liberia, Nigeria)))
# Figure 1: raw cumulative case counts, one line per country.
figure1 = function()
{
    plot(original.rptDate, Guinea, type = "l",
         main = "Raw Data: Case Counts From Wikipedia",
         xlab = "Date",
         ylab = "Total Cases",
         ylim = ylim, lwd = 3)
    abline(h = seq(0,100000, 100), lty = 2, col = "lightgrey")
    lines(original.rptDate, Liberia, lwd = 3, col = "blue", lty = 2)
    lines(original.rptDate, Sierra.Leone, lwd = 3, col = "red", lty = 3)
    lines(original.rptDate, Nigeria, lwd = 3, col = "green", lty = 4)
    # NOTE(review): four series are plotted (lty 1-4) but the legend uses
    # lty = 1:3, which recycles — Nigeria's legend line type may not match.
    legend(x = original.rptDate[1], y = max(ylim), legend =
           c("Guinea", "Liberia", "Sierra Leone", "Nigeria"),
           lty = 1:3, col = c("black", "blue","red", "green"), bg="white", cex = 1.1)
}
# First differences of a cumulative series, with negative increments
# (downward revisions in the cumulative counts) clamped to zero.
# ifelse() is used deliberately: it strips class attributes, so Date
# inputs yield a plain numeric result rather than a difftime.
uncumulate <- function(x)
{
    n <- length(x)
    step <- c(x[2:n] - x[1:(n - 1)])
    ifelse(step >= 0, step, 0)
}
# The "I_star" name will make more sense in a bit
# (it is spatialSEIR's name for the observed new-infection counts).
I_star = cbind(uncumulate(Guinea),
               uncumulate(Liberia),
               uncumulate(Sierra.Leone),
               uncumulate(Nigeria))
maxIdx = nrow(I_star)
# Define the temporal offset vector to be the number of days reflected in each
# aggregated record (time between reports).
offsets = uncumulate(original.rptDate)
# Sanity check: dates must be strictly increasing; otherwise the scrape broke.
if (any(offsets <= 0))
{
    cat("Invalid Date Information. The data source has likely changed.\n")
    stop(-1)
}
# Crude daily incidence: new cases divided by the length of each interval.
InfectionPerDay = I_star/(cbind(offsets, offsets, offsets, offsets))
# Define figure 2 for next section
ylim = c(0,max(InfectionPerDay)*1.2)
# Figure 2: approximate daily new-case rates with a side legend panel.
figure2 = function()
{
    layout(matrix(c(1,2), nrow = 1),
           widths = c(8,4), heights = c(4,4))
    plot(rptDate, InfectionPerDay[,1], main = "Crude Guess at New Case Counts Per Day",
         xlab = "Date",
         ylab = "New Cases",
         lty=1, lwd = 2,
         ylim = ylim, type = "l"
         )
    abline(h = seq(0, 1000, 5), lty = 2, col = "lightgrey")
    lines(rptDate, InfectionPerDay[,2], col = "blue",lty=2, lwd = 2)
    lines(rptDate, InfectionPerDay[,3], col = "red", lty = 3, lwd = 2)
    lines(rptDate, InfectionPerDay[,4], col = "green", lty = 4, lwd = 2)
    # Suppress axes/box in the legend panel, then restore par settings.
    par(xaxt="n")
    par(yaxt="n")
    par(bty="n")
    par(xpd=TRUE)
    plot(c(0,10),c(0,10), type = "n", main ="",xlab="",ylab="")
    legend(x=-2,y=10, legend = c("Guinea", "Liberia", "Sierra Leone", "Nigeria"), lty = 1:4,lwd=2,
           col = c("black", "blue", "red", "green"))
    par(xpd=FALSE)
    par(xaxt="s")
    par(yaxt="s")
    par(bty="o")
}
# Build per-day fill keys for an rCharts choropleth: interpolate each
# country's daily incidence onto a daily grid, bin it, and map each bin to
# a color in a yellow-to-red palette keyed by ISO3 country code.
library(rCharts)
library(stats)
x = rptDate - min(rptDate)
# Linear interpolation onto a complete daily grid.
guinea.interp = approx(x,InfectionPerDay[,1],xout = 0:max(x))
liberia.interp = approx(x,InfectionPerDay[,2],xout = 0:max(x))
sierraleone.interp = approx(x,InfectionPerDay[,3],xout = 0:max(x))
nigeria.interp = approx(x,InfectionPerDay[,4],xout = 0:max(x))
interpMatrix = cbind(guinea.interp$y, liberia.interp$y,sierraleone.interp$y, nigeria.interp$y)
# Bin all interpolated values into 9 equal-width intervals.
cutvals = cut(interpMatrix, breaks = 9)
interpMatrix.cut = matrix(as.numeric(cutvals), nrow = nrow(interpMatrix))
# Upper bound of each observed bin, parsed out of cut()'s interval labels.
# NOTE(review): upperVals is built from the *observed* unique bins; if fewer
# than 9 bins occur, indexing upperVals by the cut level number below could
# mislabel fills — verify.
upperVals = as.numeric(lapply(strsplit(c(gsub("[(]", "", gsub("]", "", unique(as.character(cutvals))))), ","), function(x){return(x[2])}))
upperVals = round(upperVals[order(upperVals)],0)
hcol = c("#ffffef", "#fff7bf", "#fee39f", "#fec45f", "#fe993f", "#ec702f", "#cc4c1f", "#993402", "#662520")
color.palette = c(hcol[1],hcol)
fills = setNames(color.palette, c("defaultFill", paste("lt", upperVals, sep = "")))
# GIN, LBR, SLE,
# One list entry per day, mapping each ISO3 code to its fill key.
outList = list()
for (tpt in min(x):max(x))
{
    outList[[as.character(tpt+1)]] = list("GIN" = list("fillKey"=factor(paste("lt", upperVals[interpMatrix.cut[tpt+1,1]], sep =""),
                                                       levels = names(fills))),
                                          "LBR" = list("fillKey"=factor(paste("lt", upperVals[interpMatrix.cut[tpt+1,2]], sep = ""),
                                                       levels = names(fills))),
                                          "SLE" = list("fillKey"=factor(paste("lt",upperVals[interpMatrix.cut[tpt+1,3]], sep = ""),
                                                       levels = names(fills))),
                                          "NGA" = list("fillKey"=factor(paste("lt",upperVals[interpMatrix.cut[tpt+1,4]], sep = ""),
                                                       levels = names(fills))))
}
cat(numDays)
library(splines)
# Temporal covariates: a natural-spline basis in days since Jan 1, 2014,
# plus the same basis evaluated at future dates for prediction.
daysSinceJan = as.numeric(rptDate - as.Date("2014-01-01"))
daysSinceJan.predict = c(max(daysSinceJan) + 1, max(daysSinceJan)
                         + seq(2,pred.days-2,2))
splineBasis = ns(daysSinceJan, df = modelDF)
splineBasis.predict = predict(splineBasis, daysSinceJan.predict)
# Guinea, Liberia, Sierra Leone, Nigeria
# Population sizes (rows repeated over time points).
N = matrix(c(10057975, 4128572, 6190280,174507539), nrow = nrow(I_star),ncol = 4,
           byrow=TRUE)
# X: location-specific intercepts; Z: shared temporal spline basis.
X = diag(ncol(N))
X.predict = X
Z = splineBasis
Z.predict = splineBasis.predict
# These co-variates are the same for each spatial location,
# so duplicate them row-wise.
Z = Z[rep(1:nrow(Z), nrow(X)),]
Z.predict = Z.predict[rep(1:nrow(Z.predict), nrow(X)),]
# For convenience, let's combine X and Z for prediction.
X.pred = cbind(X.predict[rep(1:nrow(X.predict),
                             each = nrow(Z.predict)/nrow(X)),], Z.predict)
# Define the simple "distance" matrices. There are 4 countries, three of which
# share borders. Let all of the nations share an overall correlation term, and
# let the three neighboring nations share an additional parameter
DM1 = (1-diag(4))
DM2 = rbind(cbind((1-diag(3)), rep(0, 3)), rep(0,4))
# Make row stochastic:
DM1 = DM1/matrix(apply(DM1, 1, sum), nrow = 4, ncol = 4)
DM2 = DM2/matrix(apply(DM2, 1, sum), nrow = 4, ncol = 4)
# Nigeria's row in DM2 is all zeros, so the division gives NaN; zero it out.
DM2 = ifelse(is.na(DM2), 0, DM2)
# Define population sizes for the three countries of interest. This data also
# from Wikipedia.
# Define prediction offsets.
offset.pred = c(1,seq(2,pred.days-2,2))
# There's no reinfection process for Ebola, but we still need to provide dummy
# values for the reinfection terms. This will be changed (along with most of
# the R level API) Dummy covariate matrix:
X_p_rs = matrix(0)
# Dummy covariate matrix dimension. Why, exactly, am I not just grabbing this
# kind of thing from Rcpp? No good reason at all: this will be fixed.
xPrsDim = dim(X_p_rs)
# Dummy value for reinfection params
beta_p_rs = rep(0, ncol(X_p_rs))
# Dummy value for reinfection params prior precision
betaPrsPriorPrecision = 0.5
# Get object dimensions. Again, this will be done automatically in the future
compMatDim = dim(I_star)
xDim = dim(X)
zDim = dim(Z)
# Declare prior parameters for the E to I and I to R probabilities.
priorAlpha_gammaEI = 25;
priorBeta_gammaEI = 100;
priorAlpha_gammaIR = 14;
priorBeta_gammaIR = 100;
# Declare prior parameters for the overdispersion precision
priorAlpha_phi = 1
priorBeta_phi = 1
# Declare prior precision for exposure model parameters
betaPriorPrecision = 0.1
# Declare a function which can come up with several different starting values
# for the model parameters. This will allow us to assess convergence.
# Returns the spatialSEIR model components (data/exposure/reinfection models,
# sampling control, initial values, distance model, transition priors) plus a
# per-chain output file name. Randomness is seeded per chain so that different
# chains start from overdispersed initial values.
proposeParameters = function(seedVal, chainNumber)
{
    set.seed(seedVal)
    # 2 to 21 day incubation period according to who
    p_ei = 0.25 + rnorm(1, 0, 0.02)
    # Up to 7 weeks even after recovery
    p_ir = 0.14 + rnorm(1, 0, 0.01)
    # Convert daily transition probabilities to hazard-rate parameters.
    gamma_ei=-log(1-p_ei)
    gamma_ir=-log(1-p_ir)
    # Starting value for exposure regression parameters
    beta = rep(0, ncol(X) + ncol(Z))
    beta[1] = 2.5 + rnorm(1,0,0.5)
    rho = 0.1 + rnorm(1,0,0.01) # spatial dependence parameter
    phi = 0.01 # Overdispersion precision
    outFileName = paste("./chain_output_ebola_", chainNumber ,".txt", sep = "")
    # Make a crude guess as to the true compartments:
    # S_star, E_star, R_star, and thus S,E,I and R
    DataModel = buildDataModel(I_star, type = "overdispersion",
                               params=c(priorAlpha_phi,priorBeta_phi))
    ExposureModel = buildExposureModel(X, Z, beta, betaPriorPrecision)
    ReinfectionModel = buildReinfectionModel("SEIR")
    # Slice-sampler widths, one per sampled quantity (see inline labels).
    SamplingControl = buildSamplingControl(iterationStride=iterationStride,
                                           sliceWidths=c(1, # S_star
                                                         1, # E_star
                                                         1, # R_star
                                                         1, # S_0
                                                         1, # I_0
                                                         0.05, # beta
                                                         0.0, # beta_p_rs, fixed in this case
                                                         0.01, # rho
                                                         0.01, # gamma_ei
                                                         0.01, # gamma_ir
                                                         0.01)) # phi)
    # Initial compartments: 86 initial infectives in Guinea, none elsewhere.
    InitContainer = buildInitialValueContainer(I_star, N,
                                               S0 = N[1,]-I_star[1,] - c(86,0,0,0),
                                               I0 = c(86,0,0,0),
                                               p_ir = 0.5,
                                               p_rs = 0.00)
    DistanceModel = buildDistanceModel(list(DM1,DM2))
    TransitionPriors = buildTransitionPriorsManually(priorAlpha_gammaEI,priorBeta_gammaEI,
                                                     priorAlpha_gammaIR,priorBeta_gammaIR)
    return(list(DataModel=DataModel,
                ExposureModel=ExposureModel,
                ReinfectionModel=ReinfectionModel,
                SamplingControl=SamplingControl,
                InitContainer=InitContainer,
                DistanceModel=DistanceModel,
                TransitionPriors=TransitionPriors,
                outFileName=outFileName))
}
# Chain configuration: estimate R0, record compartment traces, seed, chain id.
params =list("estimateR0"=TRUE, "traceCompartments"=TRUE, "seedVal"=12312334,"chainNumber"=4)
# Construct a spatialSEIR model from the proposal, tune the samplers during
# burn-in, run the main chain, and (optionally) draw extra iterations to
# estimate R0 per time point and location. Returns the chain output read
# from disk plus R0 summaries.
buildAndRunModel = function(params)
{
    library(spatialSEIR)
    proposal = proposeParameters(params[["seedVal"]], params[["chainNumber"]])
    SEIRmodel = buildSEIRModel(proposal$outFileName,
                               proposal$DataModel,
                               proposal$ExposureModel,
                               proposal$ReinfectionModel,
                               proposal$DistanceModel,
                               proposal$TransitionPriors,
                               proposal$InitContainer,
                               proposal$SamplingControl)
    SEIRmodel$setRandomSeed(params[["seedVal"]])
    # Do we need to keep track of compartment values for prediction?
    # No sense doing this for all of the chains.
    if (params[["traceCompartments"]])
    {
        SEIRmodel$setTrace(0) #Guinea
        SEIRmodel$setTrace(1) #Liberia
        SEIRmodel$setTrace(2) #Sierra Leone
        SEIRmodel$setTrace(3) #Nigeria
    }
    # Make a helper function to run each chain, as well as update the metropolis
    # tuning parameters.
    runSimulation = function(modelObject,
                             numBatches=500,
                             batchSize=20,
                             targetAcceptanceRatio=0.2,
                             tolerance=0.05,
                             proportionChange = 0.1
                             )
    {
        for (batch in 1:numBatches)
        {
            modelObject$simulate(batchSize)
            modelObject$updateSamplingParameters(targetAcceptanceRatio,
                                                 tolerance,
                                                 proportionChange)
        }
    }
    # Burn in tuning parameters
    runSimulation(SEIRmodel, numBatches = numBurnInBatches)
    # Switch the compartment sampler mode and enable decorrelation steps
    # before the main run (spatialSEIR sampler configuration codes).
    SEIRmodel$compartmentSamplingMode = 14
    SEIRmodel$useDecorrelation = 25
    # Run Simulation
    cat(paste("Running chain ", params[["chainNumber"]], "\n", sep =""))
    tm = 0
    tm = tm + system.time(runSimulation(SEIRmodel,
                                        numBatches=numConvergenceBatches,
                                        batchSize=convergenceBatchSize,
                                        targetAcceptanceRatio=0.2,
                                        tolerance=0.025,
                                        proportionChange = 0.05))
    cat(paste("Time elapsed: ", round(tm[3]/60,3),
              " minutes\n", sep = ""))
    dat = read.csv(proposal$outFileName)
    ## Do we need to estimate R0 for this chain?
    if (params[["estimateR0"]])
    {
        # Extra posterior draws: for each, column sums of the integrated
        # generation matrix give R0 by location at each time point.
        R0 = array(0, dim = c(nrow(I_star), ncol(I_star), extraR0Iterations))
        for (i in 1:extraR0Iterations)
        {
            SEIRmodel$simulate(iterationStride)
            for (j in 0:(nrow(I_star)-1))
            {
                R0[j,,i] = apply(SEIRmodel$getIntegratedGenerationMatrix(j), 2, sum)
            }
        }
        # Posterior mean and 90% interval across the extra draws.
        R0Mean = apply(R0, 1:2, mean)
        R0LB = apply(R0, 1:2, quantile, probs = 0.05)
        R0UB = apply(R0, 1:2, quantile, probs = 0.95)
        orig.R0 = R0
        R0 = list("mean"=R0Mean, "LB" = R0LB, "UB" = R0UB)
    } else
    {
        R0 = NULL
        orig.R0 = NULL
    }
    return(list("chainOutput" = dat, "R0" = R0, "rawSamples" = orig.R0))
}
mod = buildAndRunModel(params)
|
107879be8fb4da0f6ea5d7b4fb11297571db1be8
|
2d66350fc95b2d2eeeb6ffcf0a26827c43d95a4d
|
/R/config.R
|
adb47b2feae3f88f98690321a4b00226e325a804
|
[] |
no_license
|
sangddn/baseball-salaries
|
345c0eb4d115f99a512659d27049c652afa773b5
|
054c2c0c6eeeb329a85ed0962dbb1ac8cae582e5
|
refs/heads/main
| 2023-05-05T10:00:14.942086
| 2021-05-29T13:48:28
| 2021-05-29T13:48:28
| 371,976,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 125
|
r
|
config.R
|
source('data.R')
source('analysis.R')
library(tidyverse)
library(magrittr)
library(viridis)
library(ggrepel)
library(ggpubr)
|
3caa5e9251049ae604aee9d69ebab30b1a963e86
|
ae02c39397298b50526648887ac620ef26951314
|
/myprojects/Kunnat/kunnat.R
|
fe57fe3df3355e058b612708b8a46371d3c341c1
|
[] |
no_license
|
juhaeljas/datasciencecoursera
|
d289cd27486300cfc947b31e122dabc01c440255
|
a04ead8f57d25bad3a6eaebf0c20ba47383b52be
|
refs/heads/master
| 2022-05-05T17:31:21.784744
| 2022-04-05T10:23:41
| 2022-04-05T10:23:41
| 69,235,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,846
|
r
|
kunnat.R
|
# kunnat.R — merge Finnish municipality population counts (asukasluvut.txt)
# with land-area statistics (mml_vuositilasto_2014.csv) into one table.
setwd("C:/Users/Jussi/Documents/datasciencecoursera/myprojects/Kunnat")
asukkaat<-read.csv(file="asukasluvut.txt",encoding="UTF-8", header=FALSE)
colnames(asukkaat)<-c("Kunta","asukasluku")
pintaalat<-read.csv(file="mml_vuositilasto_2014.csv",encoding="UTF-8")
# Harmonise municipality names between the two sources before merging:
# add the target spelling as a factor level, then rewrite mismatched rows
# (spelling variants and Swedish vs. Finnish names).
levels(asukkaat$Kunta) <- c(levels(asukkaat$Kunta), "Koski Tl")
asukkaat[asukkaat$Kunta=="KoskiTl",1]<-"Koski Tl"
levels(pintaalat$Kunta) <- c(levels(pintaalat$Kunta), "Pedersören kunta")
pintaalat[pintaalat$Kunta=="Pedersöre",1]<-"Pedersören kunta"
levels(asukkaat$Kunta) <- c(levels(asukkaat$Kunta), "Mariehamn")
asukkaat[asukkaat$Kunta=="Maarianhamina",1]<-"Mariehamn"
levels(pintaalat$Kunta) <- c(levels(pintaalat$Kunta), "Inkoo")
pintaalat[pintaalat$Kunta=="Ingå",1]<-"Inkoo"
levels(pintaalat$Kunta) <- c(levels(pintaalat$Kunta), "Pietarsaari")
pintaalat[pintaalat$Kunta=="Jakobstad",1]<-"Pietarsaari"
levels(asukkaat$Kunta) <- c(levels(asukkaat$Kunta), "Pedersören kunta")
asukkaat[asukkaat$Kunta=="Pedersörenkunta",1]<-"Pedersören kunta"
# Full outer join on municipality name; unmatched rows get NAs.
kuntatiedot <- merge(asukkaat,pintaalat,by="Kunta", all=TRUE)
# Coerce columns to usable types; Parainen's area is patched by hand first.
kuntatiedot[,5]<-as.vector(kuntatiedot[,5])
kuntatiedot[kuntatiedot$Kunta=="Parainen",5]<-"4658.28"
kuntatiedot[,5]<-as.numeric(kuntatiedot[,5])
# NOTE(review): if columns 2-4 were read as factors, as.numeric() returns the
# internal level codes rather than the printed numbers — verify against the
# raw input files.
kuntatiedot[,4]<-as.numeric(kuntatiedot[,4])
kuntatiedot[,3]<-as.numeric(kuntatiedot[,3])
kuntatiedot[,2]<-as.numeric(kuntatiedot[,2])
kuntatiedot[,1]<-as.character(kuntatiedot[,1])
# Municipal mergers: fold the absorbed municipality's statistics (columns
# 3-6) into the absorbing municipality by summing the two rows.
kuntatiedot[kuntatiedot$Kunta=="Hollola",3]<-kuntatiedot[kuntatiedot$Kunta=="Hollola",3] + kuntatiedot[kuntatiedot$Kunta=="Hämeenkoski",3]
kuntatiedot[kuntatiedot$Kunta=="Hollola",4]<-kuntatiedot[kuntatiedot$Kunta=="Hollola",4] + kuntatiedot[kuntatiedot$Kunta=="Hämeenkoski",4]
kuntatiedot[kuntatiedot$Kunta=="Hollola",5]<-kuntatiedot[kuntatiedot$Kunta=="Hollola",5] + kuntatiedot[kuntatiedot$Kunta=="Hämeenkoski",5]
kuntatiedot[kuntatiedot$Kunta=="Hollola",6]<-kuntatiedot[kuntatiedot$Kunta=="Hollola",6] + kuntatiedot[kuntatiedot$Kunta=="Hämeenkoski",6]
kuntatiedot[kuntatiedot$Kunta=="Kurikka",3]<-kuntatiedot[kuntatiedot$Kunta=="Kurikka",3] + kuntatiedot[kuntatiedot$Kunta=="Jalasjärvi",3]
kuntatiedot[kuntatiedot$Kunta=="Kurikka",4]<-kuntatiedot[kuntatiedot$Kunta=="Kurikka",4] + kuntatiedot[kuntatiedot$Kunta=="Jalasjärvi",4]
kuntatiedot[kuntatiedot$Kunta=="Kurikka",5]<-kuntatiedot[kuntatiedot$Kunta=="Kurikka",5] + kuntatiedot[kuntatiedot$Kunta=="Jalasjärvi",5]
kuntatiedot[kuntatiedot$Kunta=="Kurikka",6]<-kuntatiedot[kuntatiedot$Kunta=="Kurikka",6] + kuntatiedot[kuntatiedot$Kunta=="Jalasjärvi",6]
# NOTE(review): Kemiönsaari is first summed with Kimitoön (next 4 lines) and
# then immediately overwritten with Kimitoön's plain values (the 4 lines
# after that), so the summation is dead code — confirm which was intended.
kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",3]<-kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",3] + kuntatiedot[kuntatiedot$Kunta=="Kimitoön",3]
kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",4]<-kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",4] + kuntatiedot[kuntatiedot$Kunta=="Kimitoön",4]
kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",5]<-kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",5] + kuntatiedot[kuntatiedot$Kunta=="Kimitoön",5]
kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",6]<-kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",6] + kuntatiedot[kuntatiedot$Kunta=="Kimitoön",6]
kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",3]<-kuntatiedot[kuntatiedot$Kunta=="Kimitoön",3]
kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",4]<-kuntatiedot[kuntatiedot$Kunta=="Kimitoön",4]
kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",5]<-kuntatiedot[kuntatiedot$Kunta=="Kimitoön",5]
kuntatiedot[kuntatiedot$Kunta=="Kemiönsaari",6]<-kuntatiedot[kuntatiedot$Kunta=="Kimitoön",6]
# Copy statistics from the Swedish-named source row to the Finnish-named row.
kuntatiedot[kuntatiedot$Kunta=="Mustasaari",3]<-kuntatiedot[kuntatiedot$Kunta=="Korsholm",3]
kuntatiedot[kuntatiedot$Kunta=="Mustasaari",4]<-kuntatiedot[kuntatiedot$Kunta=="Korsholm",4]
kuntatiedot[kuntatiedot$Kunta=="Mustasaari",5]<-kuntatiedot[kuntatiedot$Kunta=="Korsholm",5]
kuntatiedot[kuntatiedot$Kunta=="Mustasaari",6]<-kuntatiedot[kuntatiedot$Kunta=="Korsholm",6]
# Säkylä absorbed Köyliö: sum, as for the mergers above.
kuntatiedot[kuntatiedot$Kunta=="Säkylä",3]<-kuntatiedot[kuntatiedot$Kunta=="Säkylä",3] + kuntatiedot[kuntatiedot$Kunta=="Köyliö",3]
kuntatiedot[kuntatiedot$Kunta=="Säkylä",4]<-kuntatiedot[kuntatiedot$Kunta=="Säkylä",4] + kuntatiedot[kuntatiedot$Kunta=="Köyliö",4]
kuntatiedot[kuntatiedot$Kunta=="Säkylä",5]<-kuntatiedot[kuntatiedot$Kunta=="Säkylä",5] + kuntatiedot[kuntatiedot$Kunta=="Köyliö",5]
kuntatiedot[kuntatiedot$Kunta=="Säkylä",6]<-kuntatiedot[kuntatiedot$Kunta=="Säkylä",6] + kuntatiedot[kuntatiedot$Kunta=="Köyliö",6]
kuntatiedot[kuntatiedot$Kunta=="Kristiinankaupunki",3]<-kuntatiedot[kuntatiedot$Kunta=="Kristinestad",3]
kuntatiedot[kuntatiedot$Kunta=="Kristiinankaupunki",5]<-kuntatiedot[kuntatiedot$Kunta=="Kristinestad",5]
kuntatiedot[kuntatiedot$Kunta=="Kristiinankaupunki",4]<-kuntatiedot[kuntatiedot$Kunta=="Kristinestad",4]
kuntatiedot[kuntatiedot$Kunta=="Kristiinankaupunki",6]<-kuntatiedot[kuntatiedot$Kunta=="Kristinestad",6]
kuntatiedot[kuntatiedot$Kunta=="Kruunupyy",3]<-kuntatiedot[kuntatiedot$Kunta=="Kronoby",3]
kuntatiedot[kuntatiedot$Kunta=="Kruunupyy",4]<-kuntatiedot[kuntatiedot$Kunta=="Kronoby",4]
kuntatiedot[kuntatiedot$Kunta=="Kruunupyy",5]<-kuntatiedot[kuntatiedot$Kunta=="Kronoby",5]
kuntatiedot[kuntatiedot$Kunta=="Kruunupyy",6]<-kuntatiedot[kuntatiedot$Kunta=="Kronoby",6]
# A second source row exists with a trailing space ("Kronoby "); copy from it
# as well so whichever variant the file contains wins.
kuntatiedot[kuntatiedot$Kunta=="Kruunupyy",3]<-kuntatiedot[kuntatiedot$Kunta=="Kronoby ",3]
kuntatiedot[kuntatiedot$Kunta=="Kruunupyy",4]<-kuntatiedot[kuntatiedot$Kunta=="Kronoby ",4]
kuntatiedot[kuntatiedot$Kunta=="Kruunupyy",5]<-kuntatiedot[kuntatiedot$Kunta=="Kronoby ",5]
kuntatiedot[kuntatiedot$Kunta=="Kruunupyy",6]<-kuntatiedot[kuntatiedot$Kunta=="Kronoby ",6]
kuntatiedot[kuntatiedot$Kunta=="Luoto",3]<-kuntatiedot[kuntatiedot$Kunta=="Larsmo",3]
kuntatiedot[kuntatiedot$Kunta=="Luoto",4]<-kuntatiedot[kuntatiedot$Kunta=="Larsmo",4]
kuntatiedot[kuntatiedot$Kunta=="Luoto",5]<-kuntatiedot[kuntatiedot$Kunta=="Larsmo",5]
kuntatiedot[kuntatiedot$Kunta=="Luoto",6]<-kuntatiedot[kuntatiedot$Kunta=="Larsmo",6]
# Same copy pattern, parameterised through two scalars: kkunta is the
# Finnish-named target row, rkunta the source row; columns 3-6 are copied.
kkunta="Maalahti"
rkunta="Malax"
kuntatiedot[kuntatiedot$Kunta==kkunta,3]<-kuntatiedot[kuntatiedot$Kunta==rkunta,3]
kuntatiedot[kuntatiedot$Kunta==kkunta,4]<-kuntatiedot[kuntatiedot$Kunta==rkunta,4]
kuntatiedot[kuntatiedot$Kunta==kkunta,5]<-kuntatiedot[kuntatiedot$Kunta==rkunta,5]
kuntatiedot[kuntatiedot$Kunta==kkunta,6]<-kuntatiedot[kuntatiedot$Kunta==rkunta,6]
kkunta="Maarianhamina"
rkunta="Mariehamn"
kuntatiedot[kuntatiedot$Kunta==kkunta,3]<-kuntatiedot[kuntatiedot$Kunta==rkunta,3]
kuntatiedot[kuntatiedot$Kunta==kkunta,4]<-kuntatiedot[kuntatiedot$Kunta==rkunta,4]
kuntatiedot[kuntatiedot$Kunta==kkunta,5]<-kuntatiedot[kuntatiedot$Kunta==rkunta,5]
kuntatiedot[kuntatiedot$Kunta==kkunta,6]<-kuntatiedot[kuntatiedot$Kunta==rkunta,6]
# "Myrskylä " (trailing space) is a duplicate row from one of the sources.
kkunta="Myrskylä"
rkunta="Myrskylä "
kuntatiedot[kuntatiedot$Kunta==kkunta,3]<-kuntatiedot[kuntatiedot$Kunta==rkunta,3]
kuntatiedot[kuntatiedot$Kunta==kkunta,4]<-kuntatiedot[kuntatiedot$Kunta==rkunta,4]
kuntatiedot[kuntatiedot$Kunta==kkunta,5]<-kuntatiedot[kuntatiedot$Kunta==rkunta,5]
kuntatiedot[kuntatiedot$Kunta==kkunta,6]<-kuntatiedot[kuntatiedot$Kunta==rkunta,6]
kkunta="Närpiö"
rkunta="Närpes"
kuntatiedot[kuntatiedot$Kunta==kkunta,3]<-kuntatiedot[kuntatiedot$Kunta==rkunta,3]
kuntatiedot[kuntatiedot$Kunta==kkunta,4]<-kuntatiedot[kuntatiedot$Kunta==rkunta,4]
kuntatiedot[kuntatiedot$Kunta==kkunta,5]<-kuntatiedot[kuntatiedot$Kunta==rkunta,5]
kuntatiedot[kuntatiedot$Kunta==kkunta,6]<-kuntatiedot[kuntatiedot$Kunta==rkunta,6]
# Lahti absorbed Nastola: here columns 4-6 are *added*, not copied.
kkunta="Lahti"
rkunta="Nastola"
kuntatiedot[kuntatiedot$Kunta==kkunta,4]<-kuntatiedot[kuntatiedot$Kunta==kkunta,4]+kuntatiedot[kuntatiedot$Kunta==rkunta,4]
kuntatiedot[kuntatiedot$Kunta==kkunta,5]<-kuntatiedot[kuntatiedot$Kunta==kkunta,5]+kuntatiedot[kuntatiedot$Kunta==rkunta,5]
kuntatiedot[kuntatiedot$Kunta==kkunta,6]<-kuntatiedot[kuntatiedot$Kunta==kkunta,6]+kuntatiedot[kuntatiedot$Kunta==rkunta,6]
kkunta="Uusikaarlepyy"
rkunta="Nykarleby"
kuntatiedot[kuntatiedot$Kunta==kkunta,3]<-kuntatiedot[kuntatiedot$Kunta==rkunta,3]
kuntatiedot[kuntatiedot$Kunta==kkunta,4]<-kuntatiedot[kuntatiedot$Kunta==rkunta,4]
kuntatiedot[kuntatiedot$Kunta==kkunta,5]<-kuntatiedot[kuntatiedot$Kunta==rkunta,5]
kuntatiedot[kuntatiedot$Kunta==kkunta,6]<-kuntatiedot[kuntatiedot$Kunta==rkunta,6]
kkunta="Raasepori"
rkunta="Raseborg"
kuntatiedot[kuntatiedot$Kunta==kkunta,3]<-kuntatiedot[kuntatiedot$Kunta==rkunta,3]
kuntatiedot[kuntatiedot$Kunta==kkunta,4]<-kuntatiedot[kuntatiedot$Kunta==rkunta,4]
kuntatiedot[kuntatiedot$Kunta==kkunta,5]<-kuntatiedot[kuntatiedot$Kunta==rkunta,5]
kuntatiedot[kuntatiedot$Kunta==kkunta,6]<-kuntatiedot[kuntatiedot$Kunta==rkunta,6]
# Drop the now-redundant source rows (Swedish spellings / absorbed
# municipalities). NOTE(review): hard-coded positions 314-327 are fragile —
# they silently break if either input file changes.
kuntatiedot<-kuntatiedot[-c(314:327),]
# Zero column 5 for the summed-merger rows — presumably because the summing
# double-counted that statistic; confirm against the source data.
kuntatiedot[kuntatiedot$Kunta=="Säkylä",5]<-0
kuntatiedot[kuntatiedot$Kunta=="Hollola",5]<-0
kuntatiedot[kuntatiedot$Kunta=="Kurikka",5]<-0
kuntatiedot[kuntatiedot$Kunta=="Lahti",5]<-0
# Final table, ordered by population (ascending).
kuntatiedot<-kuntatiedot[order(kuntatiedot$asukasluku),]
|
7828d061cfe4fa64012c3717b24e5ff1a6fbd623
|
d8f7bfbe482d98ead30ac58b9c7ae4c254e93579
|
/R Programming/practice8.R
|
86ac03b860f27bc1157621d7cfb9acfa1b565a5d
|
[] |
no_license
|
Leoberium/Rmisc
|
1aa29ab9883f319bdf6b684f78f738079869efee
|
7f48c97dd81ae3fcdcbeb364e69c588754b07946
|
refs/heads/master
| 2020-09-25T05:49:40.574778
| 2019-12-04T19:50:27
| 2019-12-04T19:50:27
| 225,931,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,250
|
r
|
practice8.R
|
# Simulated multiple-testing experiment: 10,000 "genes" x 5 replicates in two
# conditions. The first 500 rows of the "case" matrix carry a true mean shift
# of +3; the rest are null. Raw p-values are compared with BH (FDR) and
# Bonferroni (FWER) adjustments, then the experiment is replicated once.
genN <- matrix(rnorm(50000), ncol = 5)
genC <- rbind(matrix(rnorm(2500, 3), ncol = 5), matrix(rnorm(47500), ncol = 5))
n_genes <- nrow(genN)
# Row-wise two-sample t-tests; vapply guarantees a plain numeric vector.
pvals <- vapply(seq_len(n_genes),
                function(i) t.test(genN[i, ], genC[i, ])$p.value,
                numeric(1))
alpha <- 0.05
sn <- sum(pvals < alpha)
hist(pvals)
# Expected false positives (alpha * n_genes) over observed discoveries.
falspositiverate <- alpha * n_genes / sn
# Benjamini-Hochberg adjustment (controls the false discovery rate).
pvalsBH <- p.adjust(pvals, method = "BH")
(snBH <- sum(pvalsBH < 0.05))
(alphaBH <- 0.05 * snBH / n_genes)
max(pvals[pvalsBH < 0.05])
(falspositiverateBH <- alphaBH * n_genes / snBH)
# Bonferroni adjustment (controls the family-wise error rate). Spelled out in
# full: the original passed method = "bo", which only works through fragile
# partial argument matching inside match.arg().
pvalsBO <- p.adjust(pvals, method = "bonferroni")
(snBO <- sum(pvalsBO < 0.05))
(alphaBO <- 0.05 * snBO / n_genes)
max(pvals[pvalsBO < 0.05])
(falspositiverateBO <- alphaBO * n_genes / snBO)
# Effect sizes of the BH-significant rows.
sigrows <- pvalsBH < 0.05
effsize <- rowMeans(genC[sigrows, ]) - rowMeans(genN[sigrows, ])
hist(effsize, 30)
# Independent replication of the whole experiment.
genN2 <- matrix(rnorm(50000), ncol = 5)
genC2 <- rbind(matrix(rnorm(2500, 3), ncol = 5), matrix(rnorm(47500), ncol = 5))
pvals2 <- vapply(seq_len(n_genes),
                 function(i) t.test(genN2[i, ], genC2[i, ])$p.value,
                 numeric(1))
pvalsBH2 <- p.adjust(pvals2, method = "BH")
sigrows2 <- pvalsBH2 < 0.05
# Overlap of BH discoveries between the two replications.
sum(sigrows2)
sum(sigrows)
sum(sigrows & sigrows2)
# Effect sizes at the *unadjusted* threshold, for comparison.
badrows <- pvals < 0.05
badrows2 <- pvals2 < 0.05
effsize2 <- rowMeans(genC[badrows, ]) - rowMeans(genN[badrows, ])
hist(effsize, 30)
hist(effsize2, 30)
# How many of the 500 truly shifted rows were recovered by BH.
sum(sigrows[1:500])
|
77cb765a4f6846fb1108a30b5e1e64557992b0eb
|
143cb871f22f85400e585ee01e6b5376ca210e45
|
/cluster_number_estimation.R
|
e8693ed82016a4426cf5dbf0f3f57bb8447b644d
|
[] |
no_license
|
NeyoShinado/DMMG
|
7ae2fed39847dc05f171fec436dfba1474c6a7b8
|
210a2c4cbb6e2d889b640d112026ba4a8bbd1a7a
|
refs/heads/master
| 2022-11-19T21:01:48.666922
| 2020-07-06T15:45:56
| 2020-07-06T15:45:56
| 277,583,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
cluster_number_estimation.R
|
# Estimate the number of clusters K for data matrix X.
# NOTE(review): stub — K is never assigned inside the function, so a call
# errors with "object 'K' not found" unless a global K happens to exist
# (R's lexical scoping would then silently return it). Needs a real
# implementation before use.
cluster_number_estimation <- function(X){
  return(K)
}
|
8d812bf60b3815ddb2ecda41abeca1d086c7c2ed
|
0f8d0117207253ccb94ed0362e8b13c526d3df4b
|
/man/rename_pacfin.Rd
|
9e3bf006224379f0a37e5807ff17194333a61abb
|
[] |
no_license
|
brianlangseth-NOAA/dataModerate_2021
|
4718f2183130c2015e7f08b6164432dbf7a1d294
|
281368f0b500e4b0d2fe482f5838bb7c87ce51dc
|
refs/heads/master
| 2023-04-26T20:09:42.255323
| 2020-09-22T17:59:02
| 2020-09-22T17:59:02
| 285,872,139
| 0
| 0
| null | 2020-10-06T20:40:05
| 2020-08-07T16:17:09
|
R
|
UTF-8
|
R
| false
| true
| 640
|
rd
|
rename_pacfin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rename_pacfin.R
\name{rename_pacfin}
\alias{rename_pacfin}
\title{Create standardized cleaning and renaming of PacFIN data.
This should be folded into the cleanPacFIN function.}
\usage{
rename_pacfin(data, area_grouping = NULL, area_names = NULL)
}
\arguments{
\item{data}{read in pacfin data file}
\item{area_grouping}{list of area names in data source}
\item{area_names}{user area names}
}
\value{
A data frame
}
\description{
Create standardized cleaning and renaming of PacFIN data.
This should be folded into the cleanPacFIN function.
}
\author{
Chantel Wetzel
}
|
ecb52c2fad238d2c1ff3b0b42ca460fd3498f36d
|
dc5673ce188e1702e1870bbcdbdec6da34ab55b5
|
/R_repository/acousaR/R/OneChannel.R
|
4b12467e4b58b0ee92610c2c6964166e7f24d9a9
|
[] |
no_license
|
saschafassler/acousa
|
56c04d442e35ae15f19bf0032f89c8ec8e19e5a9
|
a7d30e66c4aff802643a49d72a41558a4a5efa0c
|
refs/heads/master
| 2021-01-10T04:03:18.302152
| 2015-12-10T14:51:57
| 2015-12-10T14:51:57
| 43,646,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,061
|
r
|
OneChannel.R
|
### OneChannel
# Collapse the multi-channel PGNAPES "AcousticValues" table to one merged
# depth channel per log interval by summing over channels.
AcousticValues <- read.csv(file="D:/Sascha/Projects/WKEVAL/Dutch HERAS data/PGNAPES format/2014TRI2HERAS_AcousticValues.csv",header=TRUE)
# Sum every column within each Log. (Bug fix: the original also preallocated
# an AV_channel data frame that was overwritten later anyway, and set
# colnames on an undefined object `station_recs`, which errored at runtime;
# both dead lines are removed.)
temp <- aggregate(. ~ Log, data=AcousticValues, FUN=sum)
# Channels per log: the summed Year divided by the (constant) survey year.
temp$chans <- temp$Year / unique(AcousticValues$Year)
# Restore the identifier columns destroyed by the summing.
temp$Country <- unique(AcousticValues$Country)
temp$Vessel <- unique(AcousticValues$Vessel)
temp$Cruise <- unique(AcousticValues$Cruise)
temp$Year <- unique(AcousticValues$Year)
temp$Month <- temp$Month/temp$chans
temp$Day <- temp$Day/temp$chans
temp$Species <- "HER"
# One merged channel spanning the full sampled water column.
temp$ChUppDepth <- 0
# NOTE(review): assumes all channels share one constant thickness, i.e.
# unique(ChLowDepth - ChUppDepth) has length 1 — verify for each input file.
temp$ChLowDepth <- seq(50,500,unique(AcousticValues$ChLowDepth-AcousticValues$ChUppDepth))[temp$chans]
# Reorder to the original column layout and write out.
AV_channel <- temp[c(colnames(AcousticValues))]
write.csv(AV_channel, file="D:/Sascha/Projects/WKEVAL/Dutch HERAS data/PGNAPES format/2014TRI2HERAS_AcousticValuesONECHANNEL.csv")
|
fe463fe377e07e1cfc6dbb66b380b57c796e2cd7
|
b5425d6e042fb343ee2f343f75c6aa8e8d2a64de
|
/Control_vs_Sub_Behaviour_script.R
|
6c1e991fadedae166292c31670fa97fa45a0c77a
|
[] |
no_license
|
cls83211/dreaetal2021
|
6359de4c36b2887a9124e7d5ae8edb15478b19d2
|
425041e024fded84d7a2dc2aff6aafc10f219f68
|
refs/heads/main
| 2023-04-17T06:21:45.460375
| 2021-11-30T16:59:11
| 2021-11-30T16:59:11
| 327,763,712
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,147
|
r
|
Control_vs_Sub_Behaviour_script.R
|
# Charli Davies
# Meerkat Preg behaviour analysis for C vs S - over 3 pregancy stages (MP, LP, PP) - all locations, burrow then forage
# last modified 29/05/2019
# clear R of all objects
rm(list=ls())
# Load libraries####
library("car")
library("bbmle")
library("glmmADMB")
library("lsmeans")
library("multcomp")
library("predictmeans")
library("ggplot2")
library("broom")
library("tidyverse")
# First set directory as downloaded zipped folder ####
# Load data####
# Read the focal-observation dataset with explicit column types so factor
# level order (e.g. Preg.stage MP -> LP -> PP) is fixed at read time.
library(readr)
Allbehaviour <- read_csv("Behaviour_dataset.csv",
                         col_types = cols(AM.PM = col_factor(levels = c("AM",
                                                                        "PM")), Age = col_number(), Fcomp.I = col_number(),
                                          Fcomp.R = col_number(), Focal.Length.Hour = col_number(),
                                          Group.Size = col_number(), HIA.I.All = col_number(),
                                          HIA.R.All = col_number(), Location = col_factor(levels = c("Forage",
                                                                                                     "Burrow")), Olfactory = col_number(),
                                          Preg.stage = col_factor(levels = c("MP",
                                                                             "LP", "PP")), Prosocial.I = col_number(),
                                          Prosocial.R = col_number(), Rank = col_factor(levels = c("D",
                                                                                                   "S")), Sub.I.All = col_number(),
                                          Sub.R.All = col_number(), Total.Monthly.Rainfall = col_number(),
                                          Treatment.4way = col_factor(levels = c("Control",
                                                                                 "Flutamide", "Subordinate", "SubordinateFlut"))),
                         na = "NA")
# Quick interactive sanity checks on the imported data.
View(Allbehaviour)
str(Allbehaviour)
head(Allbehaviour)
print.data.frame(Allbehaviour)
################################################# All locations ####
# Create a datset for Control Dominant versus subordinate dams- all locations ####
CS_All <- Allbehaviour %>% filter(DF.Treatment == "Control")%>% droplevels()
View(CS_All)
# Fix pregnancy-stage level order (a no-op re-ordering, kept for clarity) and
# coerce the grouping variables to factors for glmmADMB.
CS_All$Preg.stage = factor(CS_All$Preg.stage, levels(CS_All$Preg.stage)[c(1,2,3)])
# NOTE(review): this *creates* KPM.ID (the random-effect ID used by every
# model below) from the CSV column KMP.ID — presumably a deliberate rename of
# a misspelled source column; verify against the dataset.
CS_All$KPM.ID=as.factor(CS_All$KMP.ID)
CS_All$Litter.Code=as.factor(CS_All$Litter.Code)
CS_All$Treatment=as.factor(CS_All$Treatment.4way)
CS_All$Location=as.factor(CS_All$Location)
CS_All$AM.PM=as.factor(CS_All$AM.PM)
CS_All$Rank=as.factor(CS_All$Rank)
# check for overall colinearity
library("psych")
library(car)
scatterplotMatrix(~Age+Total.Monthly.Rainfall+Rank+Preg.stage+AM.PM+Location+Group.Size, data=CS_All)
#age and rank might be correlated
age_rank<-lm(Age~Rank, data=CS_All)
Anova(age_rank)
# age and rank are correlated
# Start with HIA.I all locations ####
# Initiated high-intensity aggression (HIA.I), all focal locations.
#check data is zero-inflated
z=summary(CS_All$Rank[CS_All$HIA.I.All==0])
nz=summary(CS_All$Rank[CS_All$HIA.I.All>0])
nz/z
dotchart(CS_All$HIA.I.All/CS_All$Focal.Length.Hour)
boxplot(CS_All$HIA.I.All/CS_All$Focal.Length.Hour~CS_All$Rank)
boxplot(CS_All$HIA.I.All/CS_All$Focal.Length.Hour~CS_All$Preg.stage)
# start models - first check which distribution is best
# NOTE(review): the Poisson candidate fits Rank*Preg.stage while the two
# negative-binomial candidates fit Rank+Preg.stage, so this AIC comparison is
# not across identical fixed-effect structures — confirm this was intended.
I_HIA_p1<-glmmadmb(HIA.I.All~Rank*Preg.stage+Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data = CS_All)
I_HIA_n1<-glmmadmb(HIA.I.All~Rank+Preg.stage+Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data=CS_All )
I_HIA_b1<-glmmadmb(HIA.I.All~Rank+Preg.stage+Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data=CS_All )
AICtab(I_HIA_p1,I_HIA_n1, I_HIA_b1)
Anova(I_HIA_b1)
# I_HIA_b1 has lowest AIC
# Backward elimination from the nbinom1 fit: drop the least significant term
# at each step.
I_HIA_b2<-glmmadmb(HIA.I.All~Rank+Preg.stage+Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1",data=CS_All )
Anova(I_HIA_b2)
I_HIA_b3<-glmmadmb(HIA.I.All~Rank+Preg.stage+Location+AM.PM+Group.Size+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1",data=CS_All )
Anova(I_HIA_b3)
I_HIA_b4<-glmmadmb(HIA.I.All~Rank+Location+AM.PM+Group.Size+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1",data=CS_All )
Anova(I_HIA_b4)
# Bug fix: the original AICtab() also listed I_HIA_b5, which is never fitted
# in this script and made the call error ("object 'I_HIA_b5' not found").
AICtab(I_HIA_b1,I_HIA_b2,I_HIA_b3,I_HIA_b4,I_HIA_p1)
# Best model is b4 = CS.HIA.I.All ####
# confirm model by adding dropped terms back in
CS.HIA.I.All<-glmmadmb(HIA.I.All~Rank+Location+AM.PM+Group.Size+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data=CS_All)
Anova(CS.HIA.I.All)
summary(CS.HIA.I.All)
coef(summary(CS.HIA.I.All))
write.csv((coef(summary(CS.HIA.I.All))), "CS.HIA.I.All.csv")
# Post-hoc comparisons (Tukey) for each retained categorical predictor.
plot(cld(summary(glht(CS.HIA.I.All, linfct=mcp(Rank="Tukey")))))
plot(cld(summary(glht(CS.HIA.I.All, linfct=mcp(AM.PM="Tukey")))))
plot(cld(summary(glht(CS.HIA.I.All, linfct=mcp(Location="Tukey")))))
# Start with HIA.R all locations ####
# Received high-intensity aggression (HIA.R), all focal locations.
#check data is zero-inflated
z=summary(CS_All$Rank[CS_All$HIA.R.All==0])
nz=summary(CS_All$Rank[CS_All$HIA.R.All>0])
nz/z
dotchart(CS_All$HIA.R.All/CS_All$Focal.Length.Hour)
boxplot(CS_All$HIA.R.All/CS_All$Focal.Length.Hour~CS_All$Rank)
boxplot(CS_All$HIA.R.All/CS_All$Focal.Length.Hour~CS_All$Preg.stage)
# start models - first check which distribution is best
p1<-glmmadmb(HIA.R.All~Rank*Preg.stage*Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data=CS_All )
n1<-glmmadmb(HIA.R.All~Rank*Preg.stage*Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data=CS_All )
b1<-glmmadmb(HIA.R.All~Rank*Preg.stage*Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data=CS_All )
AICtab(p1,n1,b1)
Anova(p1)
summary(p1)
# p1 looks best
# Backward elimination. Note: p1 is re-used as the name for the model with
# all two-way interactions; the three-way fit above is discarded.
p1<-glmmadmb(HIA.R.All~Rank*Preg.stage+Rank*Location+Preg.stage*Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data=CS_All)
Anova(p1)
p2<-glmmadmb(HIA.R.All~Rank*Preg.stage+Rank*Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data=CS_All )
Anova(p2)
p3<-glmmadmb(HIA.R.All~Rank*Preg.stage+Rank*Location+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data=CS_All )
Anova(p3)
AICtab(p1,p2,p3)
# Best model is p3= CS.HIA.R.All ####
# confirm model by adding dropped terms back in
CS.HIA.R.All<-glmmadmb(HIA.R.All~Rank*Preg.stage+Rank*Location+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data=CS_All )
Anova(CS.HIA.R.All)
# Bug fix: the original printed coef(summary(CS.HIA.I.All)) here — a
# copy-paste slip showing the *HIA.I* model's coefficients in the HIA.R
# section; it now prints the CS.HIA.R.All coefficients.
coef(summary(CS.HIA.R.All))
write.csv((coef(summary(CS.HIA.R.All))), "CS.HIA.R.All.csv")
summary(CS.HIA.R.All)
# Post-hoc contrasts for the two retained interactions.
ph <- lsmeans(CS.HIA.R.All, ~ Preg.stage|Rank)
summary(as.glht(pairs(ph), by = NULL))
ph <- lsmeans(CS.HIA.R.All, ~ Rank| Location)
summary(as.glht(pairs(ph), by = NULL))
plot(ph)
# looks like there is no difference between ranks during foraging but is at the burrow
# also looks like subordinate behaviour doesn't change across preg stage but control does
# Start with Olfactory.All locations ####
# Olfactory behaviour, all focal locations.
# NOTE(review): the read_csv() column spec earlier declares "Olfactory", but
# this section uses "Olfactory.All" — presumably both columns exist in the
# CSV; verify.
#check data is zero-inflated
z=summary(CS_All$Rank[CS_All$Olfactory.All==0])
nz=summary(CS_All$Rank[CS_All$Olfactory.All>0])
nz/z
dotchart(CS_All$Olfactory.All/CS_All$Focal.Length.Hour)
boxplot(CS_All$Olfactory.All/CS_All$Focal.Length.Hour~CS_All$Rank)
boxplot(CS_All$Olfactory.All/CS_All$Focal.Length.Hour~CS_All$Preg.stage)
# One big outlier in control
# start models - first check which distribution is best
p1<-glmmadmb(Olfactory.All~Rank*Preg.stage*Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data= CS_All)
n1<-glmmadmb(Olfactory.All~Rank*Preg.stage*Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data= CS_All )
b1<-glmmadmb(Olfactory.All~Rank*Preg.stage*Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
AICtab(p1,n1,b1)
# try without 3 way interaction
p2<-glmmadmb(Olfactory.All~Rank*Preg.stage+Rank*Location+Location*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data= CS_All)
n2<-glmmadmb(Olfactory.All~Rank*Preg.stage+Rank*Location+Location*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data= CS_All )
b2<-glmmadmb(Olfactory.All~Rank*Preg.stage+Rank*Location+Location*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
# try without interaction
# NOTE(review): p2/n2/b2 are reassigned here, silently discarding the
# two-way-interaction fits just above; only the no-interaction versions
# survive into the AICtab comparison below.
p2<-glmmadmb(Olfactory.All~Rank+Location+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data= CS_All)
n2<-glmmadmb(Olfactory.All~Rank+Preg.stage+Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data= CS_All )
b2<-glmmadmb(Olfactory.All~Rank+Preg.stage+Location+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
# only b2 works....
# Backward elimination from the nbinom1 fit.
b3<-glmmadmb(Olfactory.All~Rank+Preg.stage+AM.PM+Location+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
Anova(b3)
b4<-glmmadmb(Olfactory.All~Rank+Preg.stage+AM.PM+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
Anova(b4)
b5<-glmmadmb(Olfactory.All~Rank+Preg.stage+AM.PM+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
Anova(b5)
b6<-glmmadmb(Olfactory.All~Rank+Preg.stage+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
Anova(b6)
b7<-glmmadmb(Olfactory.All~Rank+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
Anova(b7)
AICtab(b1,b2,b3,b4,b5,b6,b7)
# Best model is b7 - CS.Olfactory.All ####
# confirm model by adding dropped terms back in
CS.Olfactory.All<-glmmadmb(Olfactory.All~Rank+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data= CS_All )
Anova(CS.Olfactory.All)
summary(CS.Olfactory.All)
coef(summary(CS.Olfactory.All))
plot(cld(summary(glht(CS.Olfactory.All, linfct=mcp(Rank="Tukey")))))
################################################## Burrow focals only ####
# seperate out focals taken at the burrow only
# Create a datset for C vs S - burrow only ####
# NOTE(review): unlike the CS_All subset, no droplevels() is applied here, so
# the unused "Forage" factor level is retained in CS_Burrow$Location.
CS_Burrow <- CS_All %>% filter(Location != "Forage")
View(CS_Burrow)
str(CS_Burrow)
# check for overall colinearity
scatterplotMatrix(~Age+Total.Monthly.Rainfall+Rank+Preg.stage+AM.PM+Group.Size, data=CS_Burrow)
#age and rank might be correlated
age_rank<-lm(Age~Rank, data=CS_Burrow)
Anova(age_rank)
# age and rank are correlated
# Start with Prosocial.I.All - from the burrow only ####
#check data is zero-inflated
z=summary(CS_Burrow$Preg.stage[CS_Burrow$Prosocial.I.All==0])
nz=summary(CS_Burrow$Preg.stage[CS_Burrow$Prosocial.I.All>0])
nz/z
dotchart(CS_Burrow$Prosocial.I.All/CS_Burrow$Focal.Length.Hour)
boxplot(CS_Burrow$Prosocial.I.All/CS_Burrow$Focal.Length.Hour~CS_Burrow$Rank)
boxplot(CS_Burrow$Prosocial.I.All/CS_Burrow$Focal.Length.Hour~CS_Burrow$Preg.stage)
# start models - first check which distribution is best
p1<-glmmadmb(Prosocial.I.All~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data=CS_Burrow )
n1<-glmmadmb(Prosocial.I.All~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data=CS_Burrow )
b1<-glmmadmb(Prosocial.I.All~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data=CS_Burrow )
AICtab(p1,n1,b1)
Anova(n1)
# n1 is the best
# Backward elimination from the nbinom fit.
# NOTE(review): n2 and n3 below fit the identical formula twice — one of them
# was presumably meant to drop a term; confirm against the analysis plan.
n2<-glmmadmb(Prosocial.I.All~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data=CS_Burrow )
Anova(n2)
n3<-glmmadmb(Prosocial.I.All~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data=CS_Burrow )
Anova(n3)
n4<-glmmadmb(Prosocial.I.All~Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data=CS_Burrow )
Anova(n4)
n5<-glmmadmb(Prosocial.I.All~Preg.stage+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data=CS_Burrow )
Anova(n5)
n6<-glmmadmb(Prosocial.I.All~Preg.stage+Group.Size+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data=CS_Burrow )
Anova(n6)
n7<-glmmadmb(Prosocial.I.All~Group.Size+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data=CS_Burrow )
Anova(n7)
AICtab(n1,n2,n3,n4,n5,n6,n7)
# Best model is n7 = CS.Prosocial.I.Burrow ####
# confirm model by adding dropped terms back in
CS.Prosocial.I.Burrow<-glmmadmb(Prosocial.I.All~Group.Size+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data=CS_Burrow )
Anova(CS.Prosocial.I.Burrow)
summary(CS.Prosocial.I.Burrow)
write.csv((coef(summary(CS.Prosocial.I.Burrow))), "CS.Prosocial.I.Burrow.csv")
# Start with Prosocial.R.All - from the burrow only ####
# Check zero inflation: ratio of non-zero to zero observations per pregnancy stage.
z=summary(CS_Burrow$Preg.stage[CS_Burrow$Prosocial.R.All==0])
nz=summary(CS_Burrow$Preg.stage[CS_Burrow$Prosocial.R.All>0])
nz/z
# Exploratory plots of the hourly rate of the response.
dotchart(CS_Burrow$Prosocial.R.All/CS_Burrow$Focal.Length.Hour)
boxplot(CS_Burrow$Prosocial.R.All/CS_Burrow$Focal.Length.Hour~CS_Burrow$Treatment)
boxplot(CS_Burrow$Prosocial.R.All/CS_Burrow$Focal.Length.Hour~CS_Burrow$Rank)
# one big outlier in control
# start models - first check which distribution is best
p1<-glmmadmb(Prosocial.R.All~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Burrow)
n1<-glmmadmb(Prosocial.R.All~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Burrow)
b1<-glmmadmb(Prosocial.R.All~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data = CS_Burrow )
AICtab(p1,n1,b1)
Anova(p1)
# p1 only model which will run
#try without interaction - as does not seem to be significant
# NOTE(review): the three models below use AM.PMf and (1|FKPM.ID/FLitter.Code),
# while the rest of this section uses AM.PM and (1|KPM.ID/Litter.Code).
# Presumably the F-prefixed objects are factor versions created earlier in the
# script - confirm they exist and are equivalent, otherwise this is a typo.
p2<-glmmadmb(Prosocial.R.All~Rank+Preg.stage+AM.PMf+Group.Size+Total.Monthly.Rainfall+(1|FKPM.ID/FLitter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data = CS_Burrow )
n2<-glmmadmb(Prosocial.R.All~Rank+Preg.stage+AM.PMf+Group.Size+Total.Monthly.Rainfall+(1|FKPM.ID/FLitter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Burrow )
b2<-glmmadmb(Prosocial.R.All~Rank+Preg.stage+AM.PMf+Group.Size+Total.Monthly.Rainfall+(1|FKPM.ID/FLitter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1" , data = CS_Burrow )
AICtab(p2,n2,b2, p1)
# looks like n2 is best - b still doesnt run
Anova(n2)
# Stepwise simplification of the negative-binomial model, dropping the
# least-significant term at each step.
n3<-glmmadmb(Prosocial.R.All~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Burrow )
Anova(n3)
n4<-glmmadmb(Prosocial.R.All~Rank+Preg.stage+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Burrow )
Anova(n4)
n5<-glmmadmb(Prosocial.R.All~Rank+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Burrow )
Anova(n5)
n6<-glmmadmb(Prosocial.R.All~Rank+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Burrow )
Anova(n6)
n7<-glmmadmb(Prosocial.R.All~Rank+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Burrow )
Anova(n7)
AICtab(n2,n3,n4,n5,n6,n7)
# Best model is n6 = CS.Prosocial.R.Burrow ####
# confirm model by adding dropped terms back in
CS.Prosocial.R.Burrow<-glmmadmb(Prosocial.R.All~Rank+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Burrow )
Anova(CS.Prosocial.R.Burrow)
summary(CS.Prosocial.R.Burrow)
# Export fixed-effect coefficient table for the final model.
write.csv((coef(summary(CS.Prosocial.R.Burrow))), "CS.Prosocial.R.Burrow.csv")
################################################## Forage focals only ####
# Create a datset for C vs S - forage only ####
# Subset the full dataset to focal observations recorded away from the burrow.
CS_Forage <- CS_All %>% filter(Location != "Burrow")
View(CS_Forage)
str(CS_Forage)
# check for overall colinearity
scatterplotMatrix(~Age+Total.Monthly.Rainfall+Rank+Preg.stage+AM.PM+Group.Size, data=CS_Forage)
#age and rank might be correlated
# Formal check: regress Age on Rank and test the effect.
age_rank<-lm(Age~Rank, data=CS_Forage)
Anova(age_rank)
# age and rank are correlated
# Start with Fcomp.R.All - from foraging only ####
#check data is zero-inflated
# Ratio of non-zero to zero observations per pregnancy stage.
z=summary(CS_Forage$Preg.stage[CS_Forage$Fcomp.R==0])
nz=summary(CS_Forage$Preg.stage[CS_Forage$Fcomp.R>0])
nz/z
# Exploratory plots of the hourly rate of the response.
dotchart(CS_Forage$Fcomp.R/CS_Forage$Focal.Length.Hour)
boxplot(CS_Forage$Fcomp.R/CS_Forage$Focal.Length.Hour~CS_Forage$Treatment)
boxplot(CS_Forage$Fcomp.R/CS_Forage$Focal.Length.Hour~CS_Forage$Preg.stage)
# start models - first check which distribution is best
p1<-glmmadmb(Fcomp.R~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage)
# FIX: formula previously used lowercase 'rank', which is not a column of
# CS_Forage (R is case-sensitive), so this model could not be fitted against
# the intended predictor - likely why the author noted "only p1 works" below.
n1<-glmmadmb(Fcomp.R~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Forage )
b1<-glmmadmb(Fcomp.R~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data = CS_Forage )
Anova(p1)
# only p1 works - try without interaction
p2<-glmmadmb(Fcomp.R~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage )
n1<-glmmadmb(Fcomp.R~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Forage )
b1<-glmmadmb(Fcomp.R~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data = CS_Forage )
AICtab(p1,n1,b1,p2)
# looks like p2 is best of ones that run
AICtab(p1,p2)
Anova(p2)
summary(p2)
# Stepwise simplification of the Poisson model.
p3<-glmmadmb(Fcomp.R~Rank+Preg.stage+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data = CS_Forage)
Anova(p3)
# doesnt work well try removing next factor as well
p4<-glmmadmb(Fcomp.R~Rank+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage )
Anova(p4)
p5<-glmmadmb(Fcomp.R~Rank+Group.Size+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage )
Anova(p5)
p6<-glmmadmb(Fcomp.R~Group.Size+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage )
# does not work with just group size - looking at summary looks like need to remove KMPID as random as random factor explains very little variance
p8<-glmmadmb(Fcomp.R~Group.Size+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data = CS_Forage)
Anova(p8)
# FIX: the AIC comparison previously listed p7 (never created) and p6 (whose
# fit failed per the note above), which made AICtab error out; compare only
# the models that were successfully fitted.
AICtab(p1,p2,p3,p4,p5,p8)
# Best model is p8 = CS.Fcomp.R.Forage ####
# confirm model by adding dropped terms back in
CS.Fcomp.R.Forage<-glmmadmb(Fcomp.R~Group.Size+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage )
Anova(CS.Fcomp.R.Forage)
summary(CS.Fcomp.R.Forage)
# Export fixed-effect coefficient table for the final model.
write.csv((coef(summary(CS.Fcomp.R.Forage))), "CS.Fcomp.R.Forage.csv")
# Start with Fcomp.I.All - from foraging only ####
#check data is zero-inflated
# Ratio of non-zero to zero observations per rank.
z=summary(CS_Forage$Rank[CS_Forage$Fcomp.I==0])
nz=summary(CS_Forage$Rank[CS_Forage$Fcomp.I>0])
nz/z
# Exploratory plots of the hourly rate of the response.
dotchart(CS_Forage$Fcomp.I/CS_Forage$Focal.Length.Hour)
boxplot(CS_Forage$Fcomp.I/CS_Forage$Focal.Length.Hour~CS_Forage$Rank)
boxplot(CS_Forage$Fcomp.I/CS_Forage$Focal.Length.Hour~CS_Forage$Preg.stage)
# start models - first check which distribution is best
p1<-glmmadmb(Fcomp.I~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage)
n1<-glmmadmb(Fcomp.I~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Forage )
b1<-glmmadmb(Fcomp.I~Rank*Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1" , data = CS_Forage)
AICtab(p1,n1,b1)
# none work- lets try without interaction
p1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage )
n1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Forage)
b1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1" , data = CS_Forage)
AICtab(p1,n1,b1)
# still none work - try removing KMPID as a random factor
p1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage )
n1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Forage)
b1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data = CS_Forage )
AICtab(n1,p1)
# hmm so NOTHING works - TRY WITHOUT KMP ID
# FIX: removed stray doubled '+' before offset() in the p1 and b1 formulas
# (harmless unary plus, but clearly a typo).
p1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = CS_Forage )
n1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = CS_Forage )
b1<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+Total.Monthly.Rainfall+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1" , data = CS_Forage)
AICtab(n1,p1)
#n1 looks best
Anova(n1)
# Stepwise simplification; n3b checks the same structure without the random
# intercept (NOTE(review): n3b is fitted but never compared below).
n2<-glmmadmb(Fcomp.I~Rank+Preg.stage+AM.PM+Group.Size+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Forage)
Anova(n2)
n3b<-glmmadmb(Fcomp.I~Rank+Preg.stage+Group.Size+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Forage)
n3<-glmmadmb(Fcomp.I~Rank+Preg.stage+Group.Size+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Forage)
Anova(n3)
n4<-glmmadmb(Fcomp.I~Rank+Group.Size+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Forage)
Anova(n4)
AICtab(n1,p1, n2, n3, n4)
# have been able to add litter ID back in as a random factor
# Best model is n4 = CS.Fcomp.I.Forage ####
# confirm model by adding dropped terms back in
CS.Fcomp.I.Forage<-glmmadmb(Fcomp.I~Rank+Group.Size+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = CS_Forage)
Anova(CS.Fcomp.I.Forage)
summary(CS.Fcomp.I.Forage)
# Export fixed-effect coefficient table for the final model.
write.csv((coef(summary(CS.Fcomp.I.Forage))), "CS.Fcomp.I.Forage.csv")
################################################## Only dominant females ####
# Create a dataset only including dominant dams while foraging ####
C_Forage <- CS_Forage %>% filter(Rank != "S")
View(C_Forage)
str(C_Forage)
# check for overall colinearity
scatterplotMatrix(~Age+Total.Monthly.Rainfall+Preg.stage+AM.PM+Group.Size, data=C_Forage)
# Start with Fcomp.I - dominant dam only ####
#check data is zero-inflated
z=summary(C_Forage$Preg.stage[C_Forage$Fcomp.I==0])
nz=summary(C_Forage$Preg.stage[C_Forage$Fcomp.I>0])
nz/z
# FIX: column name was misspelled 'Focal.length.Hour' (lowercase l); R is
# case-sensitive, so the denominator was NULL and the dotchart failed.
dotchart(C_Forage$Fcomp.I/C_Forage$Focal.Length.Hour)
boxplot(C_Forage$Fcomp.I/C_Forage$Focal.Length.Hour~C_Forage$Preg.stage)
# quite alot of zeros
# start models - first check which distribution is best
p1<-glmmadmb(Fcomp.I~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson" , data = C_Forage)
n1<-glmmadmb(Fcomp.I~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Forage )
b1<-glmmadmb(Fcomp.I~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1" , data = C_Forage)
AICtab(p1,n1,b1)
# none work - try removing KMPID
p1<-glmmadmb(Fcomp.I~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = C_Forage )
n1<-glmmadmb(Fcomp.I~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = C_Forage)
b1<-glmmadmb(Fcomp.I~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1" , data = C_Forage)
AICtab(p1,n1,b1)
Anova(n1)
# n1 looks best
# Stepwise simplification of the negative-binomial model.
n2<-glmmadmb(Fcomp.I~Preg.stage+AM.PM+Age+Total.Monthly.Rainfall+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Forage )
Anova(n2)
n3<-glmmadmb(Fcomp.I~Preg.stage+Age+Total.Monthly.Rainfall+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Forage )
Anova(n3)
n4<-glmmadmb(Fcomp.I~Preg.stage+Age+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = C_Forage)
# model will not work again..... maybe try with KMP.ID instead of Litter code
n5<-glmmadmb(Fcomp.I~Preg.stage+Age+(1|KPM.ID)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = C_Forage)
# model now works including both random factors....
Anova(n5)
# NOTE(review): n6 has no data= argument (it resolves FKPM.ID/FLitter.Code and
# the response from the global environment) and uses F-prefixed random-effect
# names unlike the rest of this section - presumably factor versions created
# earlier in the script; confirm this is intentional.
n6<-glmmadmb(Fcomp.I~Preg.stage+Age+(1|FKPM.ID/FLitter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" )
AICtab(n1,n2,n3,n4, n5, n6)
Anova(n6)
# Best model is n6 = C.Fcomp.I.Forage ####
# confirm model by adding dropped terms back in
C.Fcomp.I.Forage<-glmmadmb(Fcomp.I~Preg.stage+Age+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom" , data = C_Forage)
Anova(C.Fcomp.I.Forage)
summary(C.Fcomp.I.Forage)
# Post-hoc pairwise comparisons of pregnancy stages (Tukey-adjusted).
ph <- lsmeans(C.Fcomp.I.Forage, ~ Preg.stage)
summary(as.glht(pairs(ph), by = NULL))
plot(cld(summary(glht(C.Fcomp.I.Forage, linfct=mcp(Preg.stage="Tukey")))))
summary(glht(C.Fcomp.I.Forage, linfct=mcp(Preg.stage="Tukey")))
# Export fixed-effect coefficient table for the final model.
write.csv((coef(summary(C.Fcomp.I.Forage))), "C.Fcomp.I.Forage.csv")
# Create a dataset only including dominant dams at the burrow ####
C_Burrow <- CS_Burrow %>% filter(Rank != "S")
View(C_Burrow)
str(C_Burrow)
# check for overall colinearity
scatterplotMatrix(~Age+Total.Monthly.Rainfall+Preg.stage+AM.PM+Group.Size, data=C_Burrow)
# Start with Prosocial.R.All ####
#check data is zero-inflated
# FIX: the exploration below previously inspected Prosocial.I.All, but this
# section's header and every model fit Prosocial.R.All - a copy-paste slip,
# so the zero-inflation check was done on the wrong response variable.
z=summary(C_Burrow$Preg.stage[C_Burrow$Prosocial.R.All==0])
nz=summary(C_Burrow$Preg.stage[C_Burrow$Prosocial.R.All>0])
nz/z
dotchart(C_Burrow$Prosocial.R.All/C_Burrow$Focal.Length.Hour)
boxplot(C_Burrow$Prosocial.R.All/C_Burrow$Focal.Length.Hour~C_Burrow$Preg.stage)
# start models - first check which distribution is best
p1<-glmmadmb(Prosocial.R.All~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="poisson", data = C_Burrow )
n1<-glmmadmb(Prosocial.R.All~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Burrow )
b1<-glmmadmb(Prosocial.R.All~Preg.stage+AM.PM+Group.Size+Age+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom1", data = C_Burrow )
AICtab(p1,n1,b1)
Anova(n1)
# n1 is best
# Stepwise simplification of the negative-binomial model.
n2<-glmmadmb(Prosocial.R.All~Preg.stage+Group.Size+Age+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Burrow )
Anova(n2)
n3<-glmmadmb(Prosocial.R.All~Preg.stage+Group.Size+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Burrow )
Anova(n3)
n4<-glmmadmb(Prosocial.R.All~Preg.stage+Total.Monthly.Rainfall+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Burrow )
Anova(n4)
n5<-glmmadmb(Prosocial.R.All~Preg.stage+(1|KPM.ID/Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Burrow )
AICtab(n1,n2,n3,n4,n5,p1)
# Best model is n5 - C.Prosocial.R.burrow ####
# confirm model by adding dropped terms back in
# NOTE(review): n5 used (1|KPM.ID/Litter.Code) but the confirmed model below
# keeps only (1|Litter.Code) - confirm the dropped KPM.ID level is intentional.
C.Prosocial.R.burrow<-glmmadmb(Prosocial.R.All~Preg.stage+(1|Litter.Code)+offset(log(Focal.Length.Hour)),zeroInflation=TRUE, family="nbinom", data = C_Burrow )
Anova(C.Prosocial.R.burrow)
summary(C.Prosocial.R.burrow)
# Post-hoc pairwise comparisons of pregnancy stages (Tukey-adjusted).
summary(glht(C.Prosocial.R.burrow, linfct=mcp(Preg.stage="Tukey")))
coef(summary(C.Prosocial.R.burrow))
# Export fixed-effect coefficient table for the final model.
write.csv((coef(summary(C.Prosocial.R.burrow))), "C.Prosocial.R.burrow.csv")
|
5b3534f38354f1203e7629fefec6e70c74eda56a
|
599232817a2b3f186c3a8c1637dbc3f344c2be90
|
/tests/testthat/test-geocode.R
|
cbdab441db5c34cda81cdaa299a21607ab8a8dfd
|
[
"MIT"
] |
permissive
|
cjtexas/citygeocoder
|
01dc42d919228cc291ebbca28274b9785e22136d
|
32b548a84def36f2ad6bad196a786f9e9dcc752b
|
refs/heads/master
| 2021-07-06T23:19:11.132151
| 2021-01-14T17:15:36
| 2021-01-14T17:15:36
| 222,630,201
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 394
|
r
|
test-geocode.R
|
# Unit tests for geocode(): default call, return class, a known
# coordinate pair, and NA input handling.
test_that("basic geocode works", {
  default_result <- geocode()
  expect_true(all(!is.na(default_result)))
})
test_that("geocode returns data.frame", {
  default_result <- geocode()
  expect_true("data.frame" %in% class(default_result))
})
test_that("geocode returns known value", {
  boerne <- geocode("Boerne", "TX")
  expect_equivalent(boerne, data.frame(lon = -98.7319702, lat = 29.7946641))
})
test_that("geocode handles NA values", {
  na_result <- geocode(NA)
  expect_true(nrow(na_result) == 1)
})
|
9273242603a5761a2ae76fda11c2d30e11aa6900
|
b74d21a6bcf7593b838c0a710e4a6a557d4a90f1
|
/man/internal-functions.Rd
|
72265f017f1f835013fd69d408f1f26ede1e99cc
|
[] |
no_license
|
amirunpri2018/spaceNet
|
cbd2c212b7768c5ff0acba0de3f19a510a9ad4f3
|
d6877715f7873c1ebc110535655141630cb05f68
|
refs/heads/master
| 2020-04-18T08:26:54.475740
| 2018-03-26T22:57:20
| 2018-03-26T22:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 435
|
rd
|
internal-functions.Rd
|
\name{internal-functions}
\alias{startZ}
\alias{startLogit}
\alias{startSend}
\alias{startRec}
\alias{startLambda}
\alias{showTrace}
\alias{list2array}
\alias{sigmaFC}
\alias{muFc}
\alias{alphaBetaProp_k}
\alias{zProp_i}
\alias{lambdaProp_l}
\alias{gammaThetaProp_i}
\alias{logPosterior}
\alias{loglik}
\title{
Internal \code{spaceNet} functions
}
\description{
Internal functions not to be called by the user.
}
\keyword{internal}
|
5ba6b92550cea2b0201cc88a59053601bc2b2948
|
2ed70e68a904ddb39e489b79b7569a847919a8ae
|
/explain_change_in_predictions.R
|
fad0f8cd21d0e325c77a045aeb4b3c1c2771be90
|
[] |
no_license
|
abbylute/Rock_glacier_modeling
|
90fea79dfd9de11243ccfe251bbad06bfde4430e
|
316aa762e17fb5d5f47d9b1fc7f837b08bee99ed
|
refs/heads/master
| 2023-06-02T03:49:17.588779
| 2021-06-25T22:07:46
| 2021-06-25T22:07:46
| 308,749,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,507
|
r
|
explain_change_in_predictions.R
|
# Explain changes in probability of rock glacier presence
# as written, this may not be telling me what I think it is since the places with increases
# could still have relatively low probabilities in the future scenario. Maybe look at the three part
# plotting (persist, disappear, enhance) first, and then come back to this.
library(tidyverse)
library(raster)
# load predictions
# Compare preindustrial (ctrl) vs pseudo-global-warming (pgw) Maxent predictions.
dir1 <- 'WUS/Data/Maxent_outputs/May-13-2021_wrain_notschange/' #Apr-21-2021_LQH/'
pred_ctrl <- raster(paste0(dir1,'preindustrial_predictions.tif'))
pred_pgw <- raster(paste0(dir1,'pgw_predictions.tif'))
xy <- coordinates(pred_ctrl)
pred_ctrl <- values(pred_ctrl)
pred_pgw <- values(pred_pgw)
# dpred = change in predicted probability (future minus preindustrial) per cell.
pred_df <- data.frame('lon' = xy[,1],
                      'lat' = xy[,2],
                      'dpred' = pred_pgw-pred_ctrl)
pred_df <- pred_df %>% filter(!is.na(dpred))
rm(pred_ctrl, pred_pgw, xy); gc();
# create prediction groups
# Cells with |dpred| <= 0.2 fall in neither group and are dropped below.
pred_df <- pred_df %>% mutate('grp' = case_when((dpred < -.2) ~ 'decrease',
                                                (dpred > 0.2) ~ 'increase'))
pred_df <- pred_df %>% filter(!is.na(grp))
# Round coordinates so the raster cells join cleanly to the covariate tables.
pred_df$lon <- round(pred_df$lon, 6)
pred_df$lat <- round(pred_df$lat, 6)
# load covariate data
dir <- 'WUS/Data/Maxent_tables/'
cov_remove <- c('Id','hw3','tmin','tmax','maxswe','ppt','tschange')
ctrl <- read_csv(paste0(dir,'background.txt'))
#ctrl <- ctrl[1:100000,]
# Derive liquid precipitation as total precipitation minus snowfall equivalent.
ctrl$rain <- ctrl$ppt - ctrl$sfe
ctrl <- ctrl %>% dplyr::select(-all_of(cov_remove))
ctrl <- ctrl %>% mutate('period'='ctrl')
ctrl$lon <- round(ctrl$lon, 6)
ctrl$lat <- round(ctrl$lat, 6)
# Attach covariates only for cells in the increase/decrease groups.
ctrl <- left_join(pred_df, ctrl, by = c('lon','lat'))
ctrl <- ctrl %>% pivot_longer(cols = 'aspect':'rain',
                              names_to = 'covariate',
                              values_to = 'value')
pgw <- read_csv(paste0(dir,'background_PGW.txt'))
#pgw <- pgw[1:100000,]
pgw$rain <- pgw$ppt - pgw$sfe
pgw <- pgw %>% dplyr::select(-all_of(cov_remove))
pgw <- pgw %>% mutate('period'='pgw')
pgw$lon <- round(pgw$lon, 6)
pgw$lat <- round(pgw$lat, 6)
pgw <- left_join(pred_df, pgw, by = c('lon','lat'))
pgw <- pgw %>% pivot_longer(cols = 'aspect':'rain',
                            names_to = 'covariate',
                            values_to = 'value')
# Stack both periods into one long table for faceted plotting.
tab <- rbind(ctrl,pgw)
rm(ctrl,pgw);gc();
#tab$lon <- round(tab$lon, 6)
#tab$lat <- round(tab$lat, 6)
#tab <- left_join(tab,pred_df)
#tab <- tab %>%
#  filter(!is.na(grp))
# per_grp orders the violin positions: decrease pair first, then increase pair.
tab <- tab %>%
  mutate(per_grp = paste0(period,grp),
         per_grp = factor(per_grp,
                          levels =c("ctrldecrease","pgwdecrease","ctrlincrease","pgwincrease")))
# how many data points are there in the increase and decrease groups?
tab %>% filter(covariate=='aspect', period=='ctrl') %>% group_by(grp) %>% count()
# One figure looking at the distribution of time varying variables:
p1 <- tab %>%
  filter(!covariate %in% c('aspect','hw5','lith','slope')) %>%
  ggplot() +
  geom_violin(aes(x = per_grp, y = value, color = period)) +
  facet_wrap(~covariate, scales = 'free_y') +
  scale_x_discrete(limits=c('ctrldecrease','pgwdecrease','ctrlincrease','pgwincrease'),
                   labels=c('decrease','','increase',''), name = NULL) +
  theme_bw() + theme(panel.grid = element_blank())
jpeg(paste0(dir1,'covariate_distributions_by_prediction_timevarying.jpeg'),units='in',width = 6,height=4,res=300)
p1
dev.off()
# locations that increased in RG probability were more likely to
# - have longer snow duration to begin with
# - have a few more no snow days
# - have much greater SFE
# - have much more freeze-thaw oscillations
# Temperature distributions between the two groups were hard to distinguish, which is interesting!
# One looking at static variables
# Static covariates don't change between periods, so plot the ctrl period only.
p2 <- tab %>%
  filter(covariate %in% c('aspect','hw5','lith','slope'),
         period == 'ctrl') %>%
  ggplot() +
  geom_violin(aes(x = per_grp, y = value, color = period), show.legend = F) +
  facet_wrap(~covariate, scales = 'free_y') +
  scale_x_discrete(limits = c('ctrldecrease','ctrlincrease'),
                   labels = c('decrease','increase'),
                   name = NULL) +
  theme_bw() + theme(panel.grid = element_blank())
jpeg(paste0(dir1,'covariate_distributions_by_prediction_timeconstant.jpeg'),units='in',width = 4,height=4,res=300)
p2
dev.off()
# locations that increased in RG probability were more likely to be on NE aspects with slightly greater slopes,
# whereas those that saw decreased probability were more centered on N aspect with slightly lower slopes.
|
ecc69c2ae415435fd2e3bbdc590035e74f361efe
|
a66516b58d48b83a889d575b702eaafd1c961d9e
|
/R/get_liking_users.R
|
aae6d53465e3d6be272d51f1821b3d7bd3b50640
|
[
"MIT"
] |
permissive
|
TIDealHub/academictwitteR
|
2fff7a77804add1e3b5b430011eac41a94d195a1
|
e48f28e982e78199f1dd12a72028b91249c15437
|
refs/heads/master
| 2023-05-15T01:34:36.283414
| 2021-06-12T05:06:54
| 2021-06-12T05:06:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,442
|
r
|
get_liking_users.R
|
#' Get liking users
#'
#' This function fetches a list of users who liked a tweet or tweets.
#' For each tweet id it pages through the Twitter v2 `/liking_users`
#' endpoint via `pagination_token` until no `next_token` is returned,
#' retrying transient 503 errors and backing off on rate limits (429).
#'
#' @param x string containing one tweet id or a vector of tweet ids
#' @param bearer_token string, bearer token
#' @param verbose If `FALSE`, query progress messages are suppressed
#'
#' @return a data frame
#' @export
#'
#' @examples
#' \dontrun{
#' bearer_token <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
#' tweet <- "1387744422729748486"
#' get_liking_users(tweet, bearer_token)
#' }
get_liking_users <- function(x, bearer_token = get_bearer(), verbose = TRUE){
  bearer <- check_bearer(bearer_token)
  url <- "https://api.twitter.com/2/tweets/"
  endpoint <- "/liking_users"
  base_params <- list(
    "user.fields" = "created_at,description,entities,id,location,name,pinned_tweet_id,profile_image_url,protected,public_metrics,url,username,verified,withheld"
  )
  new_df <- data.frame()
  # FIX: seq_along() instead of 1:length(x); safe for length-0 input.
  for(i in seq_along(x)){
    # FIX: progress output is now gated on `verbose`, matching its docs.
    if(verbose) cat(paste0("Processing ",x[i],"\n"))
    requrl <- paste0(url,x[i],endpoint)
    params <- base_params
    repeat {
      # Sending GET Request
      r <- httr::GET(requrl,httr::add_headers(Authorization = bearer),query=params)
      # FIX: back off and retry on rate limiting; previously this check sat
      # after an unconditional stop() on any non-200 status, so it was
      # unreachable dead code.
      if(httr::status_code(r)==429){
        if(verbose) cat("Rate limit reached \n Sleeping...\n")
        Sys.sleep(900)
        next
      }
      # Fix random 503 errors
      count <- 0
      while(httr::status_code(r)==503 & count<4){
        r <- httr::GET(requrl,httr::add_headers(Authorization = bearer),query=params)
        count <- count+1
        Sys.sleep(count*5)
      }
      # Catch other errors
      if(httr::status_code(r)!=200){
        stop(paste("something went wrong. Status code:", httr::status_code(r)))
      }
      if(httr::headers(r)$`x-rate-limit-remaining`=="1"){
        warning(paste("x-rate-limit-remaining=1. Resets at",as.POSIXct(as.numeric(httr::headers(r)$`x-rate-limit-reset`), origin="1970-01-01")))
      }
      dat <- jsonlite::fromJSON(httr::content(r, "text"))
      next_token <- dat$meta$next_token #this is NULL if there are no pages left
      new_rows <- dat$data
      new_rows$from_id <- x[i]
      new_df <- dplyr::bind_rows(new_df, new_rows) # add new rows
      if(verbose) cat("Total data points: ",nrow(new_df), "\n")
      Sys.sleep(1)
      if (is.null(next_token)) {
        if(verbose) {
          cat("This is the last page for ",
              x[i],
              ": finishing collection. \n")
        }
        # FIX: previously `break` here exited the loop over ALL tweet ids, so
        # only the first tweet's likers were ever collected; now it only ends
        # pagination for the current tweet id.
        break
      }
      # FIX: actually request the next page; the original read next_token but
      # never sent it back to the API, so pagination silently never happened.
      params[["pagination_token"]] <- next_token
    }
  }
  return(new_df)
}
|
da20d678cde514108cf21b6f029b43824592de13
|
2551bf37a87bacc63f3c1c42a714cca3bb5d0ff7
|
/man/compute_average_scores.Rd
|
096a6a85b41fb0c9eb8f57344b4e34cef4b0c5aa
|
[] |
no_license
|
heike/bulletxtrctr
|
0515a50efd944364a7e3ce783c745225590ef131
|
50842644b32612b0caedcf1c16e60853cc401893
|
refs/heads/master
| 2023-05-25T23:39:24.463532
| 2023-05-17T22:36:57
| 2023-05-17T22:36:57
| 140,328,927
| 0
| 8
| null | 2020-09-24T19:09:28
| 2018-07-09T18:50:29
|
R
|
UTF-8
|
R
| false
| true
| 2,624
|
rd
|
compute_average_scores.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bullet-scores.R
\name{compute_average_scores}
\alias{compute_average_scores}
\title{Get average scores for bullet to bullet comparisons}
\usage{
compute_average_scores(land1, land2, score, addNA = FALSE)
}
\arguments{
\item{land1}{(numeric) vector with land ids of bullet 1}
\item{land2}{(numeric) vector with land ids of bullet 2}
\item{score}{numeric vector of scores to be summarized into a single number}
\item{addNA}{logical value. In case of missing lands, are scores set to 0 (addNA = FALSE) or set to NA (addNA = TRUE)}
}
\value{
numeric vector of average scores. Length is the same as the number of
land engraved areas on the bullets.
}
\description{
Note that the combination of \code{land1} and \code{land2} are a key to the scores,
i.e. if a bullet has six lands, each of the input vectors should have
length 36.
}
\examples{
\dontrun{
# Set the data up to be read in, cleaned, etc.
library(bulletxtrctr)
library(x3ptools)
bullets <- bullet_pipeline(
location = list(
Bullet1 = c(hamby252demo$bullet1),
Bullet2 = c(hamby252demo$bullet2)
),
x3p_clean = function(x) x \%>\%
x3p_scale_unit(scale_by=10^6) \%>\%
rotate_x3p(angle = -90) \%>\%
y_flip_x3p()
) \%>\%
mutate(land = paste0(rep(1:2, each = 6), "-", rep(1:6, times = 2)))
comparisons <- data.frame(
expand.grid(land1 = bullets$land, land2 = bullets$land),
stringsAsFactors = FALSE)
comparisons <- comparisons \%>\%
mutate(
aligned = purrr::map2(.x = land1, .y = land2, .f = function(xx, yy) {
land1 <- bullets$sigs[bullets$land == xx][[1]]
land2 <- bullets$sigs[bullets$land == yy][[1]]
land1$bullet <- "first-land"
land2$bullet <- "second-land"
sig_align(land1$sig, land2$sig)
}),
striae = purrr::map(aligned, sig_cms_max),
features = purrr::map2(.x = aligned, .y = striae, extract_features_all),
rfscore = purrr::map_dbl(features, rowMeans) # This is a hack until the new RF is fit...
)
# Clean up a bit
comparisons <- comparisons \%>\%
mutate(
bulletA = gsub("(\\\\d)-\\\\d", "\\\\1", land1),
landA = gsub("\\\\d-(\\\\d)", "\\\\1", land1),
bulletB = gsub("(\\\\d)-\\\\d", "\\\\1", land2),
landB = gsub("\\\\d-(\\\\d)", "\\\\1", land2)
) \%>\%
group_by(bulletA, bulletB) \%>\% tidyr::nest() \%>\%
mutate(
bullet_score = data \%>\% purrr::map_dbl(
.f = function(d) max(compute_average_scores(land1 = d$landA,
land2 = d$landB,
d$rfscore)))
)
}
}
|
312f68bf69cbea4f7c157d4e6ae08c86dfe74dd6
|
88f2455bd16b29da868cfed650330de6a219f215
|
/ase_BACKUP_274.R
|
b9ab607900674c443e4c156cbe804d29b55e1719
|
[] |
no_license
|
ggruenhagen3/brain_scripts
|
8b90902afe62d3a8c111ffb5f3fd203863330ef6
|
cc9f78e431cd05fdc0d7c43ed4d5ca44e715d5cc
|
refs/heads/master
| 2023-07-08T03:27:24.566197
| 2023-06-19T18:49:09
| 2023-06-19T18:49:09
| 225,917,508
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 95,590
|
r
|
ase_BACKUP_274.R
|
<<<<<<< HEAD
library("RUnit")
library("MBASED")
library("metap")
library("DESeq2")
library("apeglm")
library("EnhancedVolcano")
#=========================================================================================
# New UMD2a Data
#=========================================================================================
# DEG
# Differential expression: pit vs castle, using per-gene read counts from six
# SRA samples (2 pit, 2 castle, 2 isolated controls).
rna_path = "C:/Users/miles/Downloads/brain/"
SRR904 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440904_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR905 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440905_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR906 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440906_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR907 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440907_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR908 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440908_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR909 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440909_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
# Keep the first occurrence of each gene id (drop duplicated rows).
SRR904 = SRR904[which(! duplicated(SRR904[,1])),]
SRR905 = SRR905[which(! duplicated(SRR905[,1])),]
SRR906 = SRR906[which(! duplicated(SRR906[,1])),]
SRR907 = SRR907[which(! duplicated(SRR907[,1])),]
SRR908 = SRR908[which(! duplicated(SRR908[,1])),]
SRR909 = SRR909[which(! duplicated(SRR909[,1])),]
# Drop the first five rows (presumably htseq-style summary lines) and keep gene ids.
genes = SRR904[-c(1:5),1]
# NOTE(review): `dimnames` here is an argument to as.matrix(), which silently
# ignores it, so `mat` has no row/column names; genes are re-attached to the
# results table separately below, so downstream code still works - confirm.
mat = as.matrix(cbind(SRR904[-c(1:5),2], SRR905[-c(1:5),2], SRR906[-c(1:5),2], SRR907[-c(1:5),2], SRR908[-c(1:5),2], SRR909[-c(1:5),2]),
                dimnames=list(genes, c("4", "5", "6", "7", "8", "9")))
mycolData = data.frame(samples=c("4", "5", "6", "7", "8", "9"),
                       cond=c("pit", "pit", "castle", "castle", "iso", "iso"),
                       isBhve=c("bhve", "bhve", "bhve", "bhve", "ctrl", "ctrl"))
dds = DESeqDataSetFromMatrix(countData = mat,
                             colData = mycolData,
                             design = ~ cond)
dds <- DESeq(dds)
resultsNames(dds)
res <- results(dds, name="cond_pit_vs_castle")
# Shrink log2 fold changes with apeglm for plotting/ranking.
res <- lfcShrink(dds, coef="cond_pit_vs_castle", type="apeglm")
# NOTE(review): only up-regulated genes (LFC > 1) are retained - confirm
# down-regulated genes are intentionally excluded.
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Pit v Castle Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
# Map significant M. zebra gene ids to HGNC symbols (project helper).
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/pit_v_castle_deg.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/pit_v_castle_deg_hgnc.txt", quote=F, col.names = F, row.names = F)
# BHVE v CTRL
# Same pipeline as the pit-v-castle analysis, but contrasting behaving
# (pit + castle) samples against isolated controls.
dds = DESeqDataSetFromMatrix(countData = mat,
                             colData = mycolData,
                             design = ~ isBhve)
dds <- DESeq(dds)
resultsNames(dds)
# NOTE(review): the contrast is ctrl_vs_bhve (ctrl relative to bhve) while the
# output files are named bhve_v_ctrl - signs of logFC are flipped relative to
# the file names; confirm this is understood downstream.
res <- results(dds, name="isBhve_ctrl_vs_bhve")
res <- lfcShrink(dds, coef="isBhve_ctrl_vs_bhve", type="apeglm")
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Bhve v Ctrl Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/bhve_v_ctrl_deg.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/bhve_v_ctrl_deg_hgnc.txt", quote=F, col.names = F, row.names = F)
# --- Simulated Pit v Castle 1 (label-permutation control) ---
# Samples 4-7 are given two alternative, scrambled pit/castle labelings
# (sim1 and sim2) to estimate how many "DEGs" arise from arbitrary labels.
mat_pvc = as.matrix(cbind(SRR904[-c(1:5),2], SRR905[-c(1:5),2], SRR906[-c(1:5),2], SRR907[-c(1:5),2]),
                    dimnames=list(genes, c("4", "5", "6", "7")))
colData_pvc = data.frame(samples=c("4", "5", "6", "7"),
                         sim1=c("pit", "castle", "pit", "castle"),
                         sim2=c("castle", "pit", "castle", "pit"))
dds = DESeqDataSetFromMatrix(countData = mat_pvc,
                             colData = colData_pvc,
                             design = ~sim1)
dds <- DESeq(dds)
resultsNames(dds)
res <- results(dds, name="sim1_pit_vs_castle")
res <- lfcShrink(dds, coef="sim1_pit_vs_castle", type="apeglm")
# Same one-sided significance cut as the real contrasts above.
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Simulated Pit v Castle 1 Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/sim1_pit_v_castle.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/sim1_pit_v_castle_hgnc.txt", quote=F, col.names = F, row.names = F)
# --- Simulated Pit v Castle 2 (second scrambled labeling) ---
# BUG FIX: the original re-ran DESeq() on the dds built with design ~sim1
# and then requested the "sim2_pit_vs_castle" coefficient, which does not
# exist under that design (results() would fail). Rebuild the dataset with
# design ~sim2 before fitting.
dds = DESeqDataSetFromMatrix(countData = mat_pvc,
                             colData = colData_pvc,
                             design = ~sim2)
dds <- DESeq(dds)
res <- results(dds, name="sim2_pit_vs_castle")
res <- lfcShrink(dds, coef="sim2_pit_vs_castle", type="apeglm")
# Same one-sided significance cut as the real contrasts above.
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Simulated Pit v Castle 2 Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/sim2_pit_v_castle.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/sim2_pit_v_castle_hgnc.txt", quote=F, col.names = F, row.names = F)
# --- Pit v Isolated differential expression (DESeq2) ---
# Samples 4,5 = pit; samples 8,9 = isolated.
mat_pvi = as.matrix(cbind(SRR904[-c(1:5),2], SRR905[-c(1:5),2], SRR908[-c(1:5),2], SRR909[-c(1:5),2]),
                    dimnames=list(genes, c("4", "5", "8", "9")))
colData_pvi = data.frame(samples=c("4", "5", "8", "9"),
                         cond=c("pit", "pit", "iso", "iso"))
dds = DESeqDataSetFromMatrix(countData = mat_pvi,
                             colData = colData_pvi,
                             design = ~cond)
dds <- DESeq(dds)
# Coefficient is "pit_vs_iso": positive log2FC means higher in pit.
res <- results(dds, name="cond_pit_vs_iso")
res <- lfcShrink(dds, coef="cond_pit_vs_iso", type="apeglm")
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Pit v Isolated Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/pit_v_iso.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/pit_v_iso_hgnc.txt", quote=F, col.names = F, row.names = F)
# --- Castle v Isolated differential expression (DESeq2) ---
# Samples 6,7 = castle; samples 8,9 = isolated.
mat_cvi = as.matrix(cbind(SRR906[-c(1:5),2], SRR907[-c(1:5),2], SRR908[-c(1:5),2], SRR909[-c(1:5),2]),
                    dimnames=list(genes, c("6", "7", "8", "9")))
colData_cvi = data.frame(samples=c("6", "7", "8", "9"),
                         cond=c("castle", "castle", "iso", "iso"))
dds = DESeqDataSetFromMatrix(countData = mat_cvi,
                             colData = colData_cvi,
                             design = ~cond)
dds <- DESeq(dds)
# NOTE: the coefficient here is "iso_vs_castle", so with log2FC > 1 the
# significant genes are those HIGHER in iso relative to castle.
res <- results(dds, name="cond_iso_vs_castle")
res <- lfcShrink(dds, coef="cond_iso_vs_castle", type="apeglm")
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Castle v Isolated Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/castle_v_iso.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/castle_v_iso_hgnc.txt", quote=F, col.names = F, row.names = F)
# --- Dendrograms of DEG expression across all six samples ---
# Column order: pit, pit, castle, castle, iso, iso (SRR904..SRR909).
mat = matrix(cbind(SRR904[-c(1:5),2], SRR905[-c(1:5),2], SRR906[-c(1:5),2], SRR907[-c(1:5),2], SRR908[-c(1:5),2], SRR909[-c(1:5),2]), ncol = 6, dimnames = list(genes, c("pit", "pit", "castle", "castle", "iso", "iso")))
# NOTE(review): this reads "pit_v_castle.txt", not the "pit_v_castle_deg.txt"
# written by the DESeq section above — confirm which gene list is intended.
pit_v_castle_genes = read.table("C:/Users/miles/Downloads/brain/results/pit_v_castle.txt", header=F)
pit_v_castle_genes = as.vector(pit_v_castle_genes$V1)
# Cluster samples on the DEG subset: once restricted to the two compared
# conditions, once using all six samples.
p = degDend(mat, pit_v_castle_genes, "C:/Users/miles/Downloads/brain/results/pit_v_castle_dend.png", include_samples = c("pit", "castle"))
p = degDend(mat, pit_v_castle_genes, "C:/Users/miles/Downloads/brain/results/pit_v_castle_all_dend.png")
pit_v_iso_genes = read.table("C:/Users/miles/Downloads/brain/results/pit_v_iso.txt", header=F)
pit_v_iso_genes = as.vector(pit_v_iso_genes$V1)
p = degDend(mat, pit_v_iso_genes, "C:/Users/miles/Downloads/brain/results/pit_v_iso_dend.png", include_samples = c("pit", "iso"))
castle_v_iso_genes = read.table("C:/Users/miles/Downloads/brain/results/castle_v_iso.txt", header=F)
castle_v_iso_genes = as.vector(castle_v_iso_genes$V1)
p = degDend(mat, castle_v_iso_genes, "C:/Users/miles/Downloads/brain/results/castle_v_iso_dend.png", include_samples = c("castle", "iso"))
# --- SNP-level data ---
# Read per-sample "informative SNP" tables (headerless, VCF-derived).
SRR904 = read.table(paste(rna_path, "/data/ase/SRR5440904_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR905 = read.table(paste(rna_path, "/data/ase/SRR5440905_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR906 = read.table(paste(rna_path, "/data/ase/SRR5440906_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR907 = read.table(paste(rna_path, "/data/ase/SRR5440907_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR908 = read.table(paste(rna_path, "/data/ase/SRR5440908_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR909 = read.table(paste(rna_path, "/data/ase/SRR5440909_informative.vcf", sep=""), header = F, stringsAsFactors = F)
# V6 and V7 hold the two per-allele read counts; force them numeric.
SRR904$V6 = as.numeric(as.vector(SRR904$V6))
SRR905$V6 = as.numeric(as.vector(SRR905$V6))
SRR906$V6 = as.numeric(as.vector(SRR906$V6))
SRR907$V6 = as.numeric(as.vector(SRR907$V6))
SRR908$V6 = as.numeric(as.vector(SRR908$V6))
SRR909$V6 = as.numeric(as.vector(SRR909$V6))
SRR904$V7 = as.numeric(as.vector(SRR904$V7))
SRR905$V7 = as.numeric(as.vector(SRR905$V7))
SRR906$V7 = as.numeric(as.vector(SRR906$V7))
SRR907$V7 = as.numeric(as.vector(SRR907$V7))
SRR908$V7 = as.numeric(as.vector(SRR908$V7))
SRR909$V7 = as.numeric(as.vector(SRR909$V7))
# MC allele counts: default to V6, but where V14 == "False" the alleles are
# swapped, so take V7 instead.
# NOTE(review): the semantics of V6/V7/V14 come from the upstream VCF
# annotation step and are assumed here — confirm against that pipeline.
SRR904$MC_COUNTS = SRR904$V6
SRR905$MC_COUNTS = SRR905$V6
SRR906$MC_COUNTS = SRR906$V6
SRR907$MC_COUNTS = SRR907$V6
SRR908$MC_COUNTS = SRR908$V6
SRR909$MC_COUNTS = SRR909$V6
SRR904$MC_COUNTS[which(SRR904$V14 == "False")] = SRR904$V7[which(SRR904$V14 == "False")]
SRR905$MC_COUNTS[which(SRR905$V14 == "False")] = SRR905$V7[which(SRR905$V14 == "False")]
SRR906$MC_COUNTS[which(SRR906$V14 == "False")] = SRR906$V7[which(SRR906$V14 == "False")]
SRR907$MC_COUNTS[which(SRR907$V14 == "False")] = SRR907$V7[which(SRR907$V14 == "False")]
SRR908$MC_COUNTS[which(SRR908$V14 == "False")] = SRR908$V7[which(SRR908$V14 == "False")]
SRR909$MC_COUNTS[which(SRR909$V14 == "False")] = SRR909$V7[which(SRR909$V14 == "False")]
# CV allele counts are the mirror image: default V7, swapped loci take V6.
SRR904$CV_COUNTS = SRR904$V7
SRR905$CV_COUNTS = SRR905$V7
SRR906$CV_COUNTS = SRR906$V7
SRR907$CV_COUNTS = SRR907$V7
SRR908$CV_COUNTS = SRR908$V7
SRR909$CV_COUNTS = SRR909$V7
SRR904$CV_COUNTS[which(SRR904$V14 == "False")] = SRR904$V6[which(SRR904$V14 == "False")]
SRR905$CV_COUNTS[which(SRR905$V14 == "False")] = SRR905$V6[which(SRR905$V14 == "False")]
SRR906$CV_COUNTS[which(SRR906$V14 == "False")] = SRR906$V6[which(SRR906$V14 == "False")]
SRR907$CV_COUNTS[which(SRR907$V14 == "False")] = SRR907$V6[which(SRR907$V14 == "False")]
SRR908$CV_COUNTS[which(SRR908$V14 == "False")] = SRR908$V6[which(SRR908$V14 == "False")]
SRR909$CV_COUNTS[which(SRR909$V14 == "False")] = SRR909$V6[which(SRR909$V14 == "False")]
# Build a "LG:pos-pos" key per SNP (V1 = contig, V2 = position) for joining
# replicates below.
SRR904$pos = paste0(SRR904$V1, ":", SRR904$V2, "-", SRR904$V2)
SRR905$pos = paste0(SRR905$V1, ":", SRR905$V2, "-", SRR905$V2)
SRR906$pos = paste0(SRR906$V1, ":", SRR906$V2, "-", SRR906$V2)
SRR907$pos = paste0(SRR907$V1, ":", SRR907$V2, "-", SRR907$V2)
SRR908$pos = paste0(SRR908$V1, ":", SRR908$V2, "-", SRR908$V2)
SRR909$pos = paste0(SRR909$V1, ":", SRR909$V2, "-", SRR909$V2)
# Pool the two replicates of each condition: inner_join on the SNP position
# key keeps only SNPs observed in BOTH replicates, then allele counts are
# summed across the pair. Named vectors (names = pos) feed my_MBASED below.
pit = inner_join(SRR904, SRR905, by = "pos")
pit_mc = pit$MC_COUNTS.x + pit$MC_COUNTS.y
pit_cv = pit$CV_COUNTS.x + pit$CV_COUNTS.y
names(pit_mc) = pit$pos
names(pit_cv) = pit$pos
castle = inner_join(SRR906, SRR907, by = "pos")
castle_mc = castle$MC_COUNTS.x + castle$MC_COUNTS.y
castle_cv = castle$CV_COUNTS.x + castle$CV_COUNTS.y
names(castle_mc) = castle$pos
names(castle_cv) = castle$pos
iso = inner_join(SRR908, SRR909, by = "pos")
iso_mc = iso$MC_COUNTS.x + iso$MC_COUNTS.y
iso_cv = iso$CV_COUNTS.x + iso$CV_COUNTS.y
names(iso_mc) = iso$pos
names(iso_cv) = iso$pos
# --- SNP-level two-sample MBASED runs ---
# Each comparison is run in both directions; positions significant in BOTH
# directions form the final overlap set.
pit_v_castle_res = my_MBASED(pit_mc, pit_cv, castle_mc, castle_cv, "pit", "castle", pit$pos, n_boot, isSNP=T)
castle_v_pit_res = my_MBASED(castle_mc, castle_cv, pit_mc, pit_cv, "castle", "pit", pit$pos, n_boot, isSNP=T)
pit_v_castle_pos = pit_v_castle_res[[2]]
castle_v_pit_pos = castle_v_pit_res[[2]]
ovlp_pc_v_cp_pos = pit_v_castle_pos[which(pit_v_castle_pos %in% castle_v_pit_pos)]
pit_v_iso_res = my_MBASED(pit_mc, pit_cv, iso_mc, iso_cv, "pit", "iso", pit$pos, n_boot, isSNP=T)
iso_v_pit_res = my_MBASED(iso_mc, iso_cv, pit_mc, pit_cv, "iso", "pit", pit$pos, n_boot, isSNP=T)
pit_v_iso_pos = pit_v_iso_res[[2]]
iso_v_pit_pos = iso_v_pit_res[[2]]
ovlp_pi_v_ip_pos = pit_v_iso_pos[which(pit_v_iso_pos %in% iso_v_pit_pos)]
castle_v_iso_res = my_MBASED(castle_mc, castle_cv, iso_mc, iso_cv, "castle", "iso", castle$pos, n_boot, isSNP=T)
# BUG FIX: the second sample label was misspelled "csatle"; it is used as the
# assay column name and in plot titles inside my_MBASED.
iso_v_castle_res = my_MBASED(iso_mc, iso_cv, castle_mc, castle_cv, "iso", "castle", castle$pos, n_boot, isSNP=T)
castle_v_iso_pos = castle_v_iso_res[[2]]
iso_v_castle_pos = iso_v_castle_res[[2]]
ovlp_ci_v_ic_pos = castle_v_iso_pos[which(castle_v_iso_pos %in% iso_v_castle_pos)]
# --- Gene annotation: map significant SNP positions to nearby genes ---
gtf = read.table("C:/Users/miles/Downloads/brain/brain_scripts/full_ens_w_ncbi_gene.gtf", sep="\t", header=F, stringsAsFactors = F)
# Keep only "gene" records; contig NC_027944.1 is excluded.
# NOTE(review): confirm why NC_027944.1 is dropped (unplaced/organelle?).
gtf = gtf[which(gtf[,3] == "gene" & gtf[,1] != "NC_027944.1"),]
# Extract the gene_name attribute from the GTF attributes column (V9) by
# locating "gene_name" and reading up to the following ';'.
gtf_gene_name <- c()
for (i in 1:nrow(gtf)) {
  start <- gregexpr(pattern ='gene_name', gtf$V9[i])[[1]]
  stop <- gregexpr(pattern =';', substr(gtf$V9[i], start, nchar(gtf$V9[i])))[[1]][1]
  gene_name <- substr(gtf$V9[i], start+10, start+stop-2)
  # NOTE(review): when "gene_name" is absent gregexpr returns -1 and both
  # substr calls use offsets relative to -1, producing a fragment of the
  # attribute string rather than NA — verify this fallback is intended.
  if (start == -1) {
    gene_name <- substr(gtf$V9[i], start+10, start+stop)
  }
  gtf_gene_name <- c(gtf_gene_name, gene_name)
}
gtf$gene_name <- gtf_gene_name
colnames(gtf) <- c("LG", "source", "type", "start", "stop", "idk", "idk1", "idk2", "info", "gene_name")
# Drop unnamed ("LOC...") genes, then translate significant SNP positions to
# the genes whose windows contain them.
gtf = gtf[which(! startsWith(gtf$gene_name, "LOC")),]
pit_v_castle_genes = posToGene(pit_v_castle_pos, gtf)
###################
# Gene Level Data #
###################
# Read per-sample, per-gene MC/CV allele count tables and pool replicates
# by condition (pit = 904/905, castle = 906/907, iso = 908/909).
rna_path = "C:/Users/miles/Downloads/brain/"
SRR904 = read.table(paste(rna_path, "/data/ase/SRR5440904_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR905 = read.table(paste(rna_path, "/data/ase/SRR5440905_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR906 = read.table(paste(rna_path, "/data/ase/SRR5440906_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR907 = read.table(paste(rna_path, "/data/ase/SRR5440907_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR908 = read.table(paste(rna_path, "/data/ase/SRR5440908_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR909 = read.table(paste(rna_path, "/data/ase/SRR5440909_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# Restore duplicate-gene suffix: '%' was used as a placeholder for
# " (1 of many)" in the gene names.
SRR904$GENE = str_replace(SRR904$GENE,"%", " (1 of many)")
SRR905$GENE = str_replace(SRR905$GENE,"%", " (1 of many)")
SRR906$GENE = str_replace(SRR906$GENE,"%", " (1 of many)")
SRR907$GENE = str_replace(SRR907$GENE,"%", " (1 of many)")
SRR908$GENE = str_replace(SRR908$GENE,"%", " (1 of many)")
SRR909$GENE = str_replace(SRR909$GENE,"%", " (1 of many)")
#NCBI
# SRR904 = read.table(paste(rna_path, "/data/ase/SRR5440904_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR905 = read.table(paste(rna_path, "/data/ase/SRR5440905_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR906 = read.table(paste(rna_path, "/data/ase/SRR5440906_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR907 = read.table(paste(rna_path, "/data/ase/SRR5440907_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR908 = read.table(paste(rna_path, "/data/ase/SRR5440908_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR909 = read.table(paste(rna_path, "/data/ase/SRR5440909_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# All six tables are assumed to share the same gene order as SRR904.
gene_names = SRR904$GENE
# gene_names = str_replace(gene_names,"%", " (1 of many)")
n_boot = 100
# Prepare the Data: pooled MC/CV counts per condition, named by gene.
pit_mc = SRR904$MC_COUNTS + SRR905$MC_COUNTS
pit_cv = SRR904$CV_COUNTS + SRR905$CV_COUNTS
names(pit_mc) = gene_names
names(pit_cv) = gene_names
castle_mc = SRR906$MC_COUNTS + SRR907$MC_COUNTS
castle_cv = SRR906$CV_COUNTS + SRR907$CV_COUNTS
names(castle_mc) = gene_names
names(castle_cv) = gene_names
iso_mc = SRR908$MC_COUNTS + SRR909$MC_COUNTS
iso_cv = SRR908$CV_COUNTS + SRR909$CV_COUNTS
names(iso_mc) = gene_names
names(iso_cv) = gene_names
# Check for Skews in ASE ratios: per-sample distribution of log2(MC/CV).
# NOTE: the histograms are unfiltered, so genes with a zero count on either
# allele contribute -Inf/Inf/NaN bins; the density plots below filter those.
hist(log2(SRR904$MC_COUNTS/SRR904$CV_COUNTS), breaks=50)
hist(log2(SRR905$MC_COUNTS/SRR905$CV_COUNTS), breaks=50)
hist(log2(SRR906$MC_COUNTS/SRR906$CV_COUNTS), breaks=50)
hist(log2(SRR907$MC_COUNTS/SRR907$CV_COUNTS), breaks=50)
hist(log2(SRR908$MC_COUNTS/SRR908$CV_COUNTS), breaks=50)
hist(log2(SRR909$MC_COUNTS/SRR909$CV_COUNTS), breaks=50)
# Density of log2(MC/CV) restricted to genes with coverage on BOTH alleles.
pos = which(SRR904$MC_COUNTS > 0 & SRR904$CV_COUNTS > 0)
d = density(log2(SRR904$MC_COUNTS[pos]/SRR904$CV_COUNTS[pos]))
plot(d, main="Pit 1")
pos = which(SRR905$MC_COUNTS > 0 & SRR905$CV_COUNTS > 0)
d = density(log2(SRR905$MC_COUNTS[pos]/SRR905$CV_COUNTS[pos]))
plot(d, main="Pit 2")
pos = which(SRR906$MC_COUNTS > 0 & SRR906$CV_COUNTS > 0)
d = density(log2(SRR906$MC_COUNTS[pos]/SRR906$CV_COUNTS[pos]))
plot(d, main="Castle 1")
pos = which(SRR907$MC_COUNTS > 0 & SRR907$CV_COUNTS > 0)
d = density(log2(SRR907$MC_COUNTS[pos]/SRR907$CV_COUNTS[pos]))
plot(d, main="Castle 2")
pos = which(SRR908$MC_COUNTS > 0 & SRR908$CV_COUNTS > 0)
d = density(log2(SRR908$MC_COUNTS[pos]/SRR908$CV_COUNTS[pos]))
plot(d, main="Isolated 1")
pos = which(SRR909$MC_COUNTS > 0 & SRR909$CV_COUNTS > 0)
d = density(log2(SRR909$MC_COUNTS[pos]/SRR909$CV_COUNTS[pos]))
plot(d, main="Isolated 2")
# Find Discordant ASE: genes where both pit replicates agree on allele
# direction, both castle replicates agree, and the two conditions disagree.
# NOTE(review): SRR90x$dif is only computed further down in this file; this
# section appears to rely on interactive, out-of-order execution — confirm
# before sourcing the script top-to-bottom.
disc_ase = gene_names[which(sign(SRR904$dif) == sign(SRR905$dif) & sign(SRR904$dif) != 0 &
                            sign(SRR906$dif) == sign(SRR907$dif) & sign(SRR906$dif) != 0 &
                            sign(SRR906$dif) != sign(SRR904$dif))]
disc_ase_pc = disc_ase
# NOTE(review): `tj` is defined elsewhere in this file — confirm it is the
# intended gene-name lookup here.
disc_ase_pc_hgnc = sort(hgncMzebraInPlace(data.frame(disc_ase_pc), 1, rownames(tj)))
write.table(disc_ase_pc, "C:/Users/miles/Downloads/brain/results/ase/disc_ASE_pc.txt", quote = F, col.names = F, row.names = F)
write.table(disc_ase_pc_hgnc, "C:/Users/miles/Downloads/brain/results/ase/disc_ASE_pc_hgnc.txt", quote = F, col.names = F, row.names = F)
# Do 1-sampled ASE experiments: run MBASED separately on every sample.
pos_all_ind = which(SRR904$MC_COUNTS + SRR904$CV_COUNTS > 0 &
                    SRR905$MC_COUNTS + SRR905$CV_COUNTS > 0 &
                    SRR906$MC_COUNTS + SRR906$CV_COUNTS > 0 &
                    SRR907$MC_COUNTS + SRR907$CV_COUNTS > 0 &
                    SRR908$MC_COUNTS + SRR908$CV_COUNTS > 0 &
                    SRR909$MC_COUNTS + SRR909$CV_COUNTS > 0)
SRR904_1 = my_MBASED_1(SRR904$MC_COUNTS, SRR904$CV_COUNTS, "SRR904 (Pit 1)", "", gene_names, n_boot)
SRR905_1 = my_MBASED_1(SRR905$MC_COUNTS, SRR905$CV_COUNTS, "SRR905 (Pit 2)", "", gene_names, n_boot)
SRR906_1 = my_MBASED_1(SRR906$MC_COUNTS, SRR906$CV_COUNTS, "SRR906 (Castle 1)", "", gene_names, n_boot)
SRR907_1 = my_MBASED_1(SRR907$MC_COUNTS, SRR907$CV_COUNTS, "SRR907 (Castle 2)", "", gene_names, n_boot)
SRR908_1 = my_MBASED_1(SRR908$MC_COUNTS, SRR908$CV_COUNTS, "SRR908 (Isolated 1)", "", gene_names, n_boot)
SRR909_1 = my_MBASED_1(SRR909$MC_COUNTS, SRR909$CV_COUNTS, "SRR909 (Isolated 2)", "", gene_names, n_boot)
# Default p/q of 1 for genes MBASED did not test.
SRR904$p = 1; SRR905$p = 1; SRR906$p = 1; SRR907$p = 1; SRR908$p = 1; SRR909$p = 1
SRR904$q = 1; SRR905$q = 1; SRR906$q = 1; SRR907$q = 1; SRR908$q = 1; SRR909$q = 1
# Copy MBASED p-values (and Bonferroni-adjusted q-values) back onto the
# per-sample tables.
# NOTE(review): the left side selects rows via %in% while the right side is
# the full MBASED p-value vector; this assumes the selected genes match the
# MBASED output rows one-to-one and in the same order — verify.
SRR904$p[which(SRR904$GENE %in% rownames(assays(SRR904_1[[1]])$pValueASE))] = assays(SRR904_1[[1]])$pValueASE
SRR905$p[which(SRR905$GENE %in% rownames(assays(SRR905_1[[1]])$pValueASE))] = assays(SRR905_1[[1]])$pValueASE
SRR906$p[which(SRR906$GENE %in% rownames(assays(SRR906_1[[1]])$pValueASE))] = assays(SRR906_1[[1]])$pValueASE
SRR907$p[which(SRR907$GENE %in% rownames(assays(SRR907_1[[1]])$pValueASE))] = assays(SRR907_1[[1]])$pValueASE
SRR908$p[which(SRR908$GENE %in% rownames(assays(SRR908_1[[1]])$pValueASE))] = assays(SRR908_1[[1]])$pValueASE
SRR909$p[which(SRR909$GENE %in% rownames(assays(SRR909_1[[1]])$pValueASE))] = assays(SRR909_1[[1]])$pValueASE
SRR904$q[which(SRR904$GENE %in% rownames(assays(SRR904_1[[1]])$pValueASE))] = p.adjust(assays(SRR904_1[[1]])$pValueASE, method="bonferroni")
SRR905$q[which(SRR905$GENE %in% rownames(assays(SRR905_1[[1]])$pValueASE))] = p.adjust(assays(SRR905_1[[1]])$pValueASE, method="bonferroni")
SRR906$q[which(SRR906$GENE %in% rownames(assays(SRR906_1[[1]])$pValueASE))] = p.adjust(assays(SRR906_1[[1]])$pValueASE, method="bonferroni")
SRR907$q[which(SRR907$GENE %in% rownames(assays(SRR907_1[[1]])$pValueASE))] = p.adjust(assays(SRR907_1[[1]])$pValueASE, method="bonferroni")
SRR908$q[which(SRR908$GENE %in% rownames(assays(SRR908_1[[1]])$pValueASE))] = p.adjust(assays(SRR908_1[[1]])$pValueASE, method="bonferroni")
SRR909$q[which(SRR909$GENE %in% rownames(assays(SRR909_1[[1]])$pValueASE))] = p.adjust(assays(SRR909_1[[1]])$pValueASE, method="bonferroni")
# Per-sample significance flag at Bonferroni q < 0.05.
SRR904$sig = SRR904$q < 0.05
SRR905$sig = SRR905$q < 0.05
SRR906$sig = SRR906$q < 0.05
SRR907$sig = SRR907$q < 0.05
SRR908$sig = SRR908$q < 0.05
SRR909$sig = SRR909$q < 0.05
# Per-sample allele-count difference; its sign encodes ASE direction.
SRR904$dif = SRR904$MC_COUNTS - SRR904$CV_COUNTS
SRR905$dif = SRR905$MC_COUNTS - SRR905$CV_COUNTS
SRR906$dif = SRR906$MC_COUNTS - SRR906$CV_COUNTS
SRR907$dif = SRR907$MC_COUNTS - SRR907$CV_COUNTS
SRR908$dif = SRR908$MC_COUNTS - SRR908$CV_COUNTS
SRR909$dif = SRR909$MC_COUNTS - SRR909$CV_COUNTS
# ASE ratio as log2(CV/MC) per sample.
SRR904$ase = log2(SRR904$CV_COUNTS / SRR904$MC_COUNTS)
SRR905$ase = log2(SRR905$CV_COUNTS / SRR905$MC_COUNTS)
SRR906$ase = log2(SRR906$CV_COUNTS / SRR906$MC_COUNTS)
SRR907$ase = log2(SRR907$CV_COUNTS / SRR907$MC_COUNTS)
SRR908$ase = log2(SRR908$CV_COUNTS / SRR908$MC_COUNTS)
SRR909$ase = log2(SRR909$CV_COUNTS / SRR909$MC_COUNTS)
# NOTE(review): the second assignment immediately overwrites the first, and
# cbind of whole data frames produces auto-generated column names — the
# Digging_*/Building_*/Control_* columns referenced below do not exist on
# this df as built here. Confirm the intended construction (likely the
# first, ase-only df with renamed columns).
df = data.frame(cbind(SRR904$GENE, SRR904$ase, SRR905$ase, SRR906$ase, SRR907$ase, SRR908$ase, SRR909$ase))
df = data.frame(cbind(SRR904, SRR905, SRR906, SRR907, SRR908, SRR909))
df$Digging_1 = as.numeric(as.vector(df$Digging_1))
df$Digging_2 = as.numeric(as.vector(df$Digging_2))
df$Building_1 = as.numeric(as.vector(df$Building_1))
df$Building_2 = as.numeric(as.vector(df$Building_2))
df$Control_1 = as.numeric(as.vector(df$Control_1))
df$Control_2 = as.numeric(as.vector(df$Control_2))
# Compare our per-gene ASE against the external `ryan` table (defined
# elsewhere): subset to shared genes, align row order, then compare the
# sign of each condition's mean ASE.
my_ryan = df[which(df[,1] %in% ryan$X),]
my_ryan = my_ryan[match(ryan$X, my_ryan[,1]),]
colnames(my_ryan) = c("GENE", "Digging_1", "Digging_2", "Building_1", "Building_2", "Control_1", "Control_2")
my_ryan$Digging_Mean_ASE = (as.numeric(as.vector(my_ryan$Digging_1)) + as.numeric(as.vector(my_ryan$Digging_2)))/2
my_ryan$Building_Mean_ASE = (as.numeric(as.vector(my_ryan$Building_1)) + as.numeric(as.vector(my_ryan$Building_2)))/2
my_ryan$Control_Mean_ASE = (as.numeric(as.vector(my_ryan$Control_1)) + as.numeric(as.vector(my_ryan$Control_2)))/2
# How many genes are missing from all six samples / agree in direction with
# the published values?
length(which( is.na(my_ryan[,2]) & is.na(my_ryan[,3]) & is.na(my_ryan[,4]) & is.na(my_ryan[,5]) & is.na(my_ryan[,6]) & is.na(my_ryan[,7]) ))
length(which(sign(my_ryan$Digging_Mean_ASE) == sign(ryan$Digging_Mean_ASE) & sign(my_ryan$Building_Mean_ASE) == sign(ryan$Building_Mean_ASE) & sign(my_ryan$Control_Mean_ASE) == sign(ryan$Iso_Mean_ASE) ))
# Genes individually significant in ALL six samples with a consistent ASE
# direction across every sample.
all_sig_same_dir = SRR904$GENE[which(SRR904$sig & SRR905$sig & SRR906$sig & SRR907$sig & SRR908$sig & SRR909$sig &
                                     sign(SRR905$dif) == sign(SRR904$dif) &
                                     sign(SRR906$dif) == sign(SRR904$dif) &
                                     sign(SRR907$dif) == sign(SRR904$dif) &
                                     sign(SRR908$dif) == sign(SRR904$dif) &
                                     sign(SRR909$dif) == sign(SRR904$dif) )]
# Genes with a consistent, non-zero ASE direction in all six samples
# (no significance requirement).
all_same_dir = SRR904$GENE[which(sign(SRR905$dif) == sign(SRR904$dif) & SRR905$dif != 0 & SRR904$dif != 0 &
                                 sign(SRR906$dif) == sign(SRR904$dif) & SRR906$dif != 0 & SRR904$dif != 0 &
                                 sign(SRR907$dif) == sign(SRR904$dif) & SRR907$dif != 0 & SRR904$dif != 0 &
                                 sign(SRR908$dif) == sign(SRR904$dif) & SRR908$dif != 0 & SRR904$dif != 0 &
                                 sign(SRR909$dif) == sign(SRR904$dif) & SRR909$dif != 0 & SRR904$dif != 0 )]
# Zack's Method: Combine p-values across the six samples per gene
# (Stouffer's method via metap::sumz; Fisher's sumlog left for reference).
# NOTE(review): sumz is undefined for p = 0 or p = 1; only the all-zero case
# is patched below — confirm how p = 1 placeholders behave here.
# all_p = sapply(1:length(gene_names), function(x) sumlog(c(SRR904$p[x], SRR905$p[x], SRR906$p[x], SRR907$p[x], SRR908$p[x], SRR909$p[x]))$p)
all_p = sapply(1:length(gene_names), function(x) sumz(c(SRR904$p[x], SRR905$p[x], SRR906$p[x], SRR907$p[x], SRR908$p[x], SRR909$p[x]))$p)
all_p[which(SRR904$p == 0 &
            SRR905$p == 0 &
            SRR906$p == 0 &
            SRR907$p == 0 &
            SRR908$p == 0 &
            SRR909$p == 0 )] = 0
# BH-adjust the combined p-values; keep genes that are significant AND
# directionally consistent across all samples.
all_q = p.adjust(all_p, method = "BH")
agg = gene_names[which(all_q < 0.05 &
                       sign(SRR905$dif) == sign(SRR904$dif) &
                       sign(SRR906$dif) == sign(SRR904$dif) &
                       sign(SRR907$dif) == sign(SRR904$dif) &
                       sign(SRR908$dif) == sign(SRR904$dif) &
                       sign(SRR909$dif) == sign(SRR904$dif) )]
# Write mzebra-ID and HGNC-translated versions of all three gene lists.
write.table(all_sig_same_dir, "C:/Users/miles/Downloads/brain/results/ase_all_sig_same_dir_RG.txt", quote = F, col.names = F, row.names = F)
write.table(all_same_dir, "C:/Users/miles/Downloads/brain/results/ase_all_same_dir_RG.txt", quote = F, col.names = F, row.names = F)
write.table(agg, "C:/Users/miles/Downloads/brain/results/ase_agg_sig_same_dir_RG.txt", quote = F, col.names = F, row.names = F)
all_sig_same_dir_hgnc = hgncMzebraInPlace(data.frame(all_sig_same_dir), 1, gene_names)
all_same_dir_hgnc = hgncMzebraInPlace(data.frame(all_same_dir), 1, gene_names)
agg_hgnc = hgncMzebraInPlace(data.frame(agg), 1, gene_names)
write.table(all_sig_same_dir_hgnc, "C:/Users/miles/Downloads/brain/results/ase_all_sig_same_dir_hgnc_RG.txt", quote = F, col.names = F, row.names = F)
write.table(all_same_dir_hgnc, "C:/Users/miles/Downloads/brain/results/ase_all_same_dir_hgnc_RG.txt", quote = F, col.names = F, row.names = F)
write.table(agg_hgnc, "C:/Users/miles/Downloads/brain/results/ase_agg_sig_same_dir_hgnc_RG.txt", quote = F, col.names = F, row.names = F)
# Do 2-sampled ASE experiments: each condition pair is tested in both
# directions with MBASED; genes significant in BOTH directions form the
# overlap set for that pair.
# Pit v Castle
pit_v_castle_res = my_MBASED(pit_mc, pit_cv, castle_mc, castle_cv, "pit", "castle", gene_names, n_boot)
pit_v_castle_genes = pit_v_castle_res[[2]]
castle_v_pit_res = my_MBASED(castle_mc, castle_cv, pit_mc, pit_cv, "castle", "pit", gene_names, n_boot)
castle_v_pit_genes = castle_v_pit_res[[2]]
ovlp_pc_v_cp = pit_v_castle_genes[which(pit_v_castle_genes %in% castle_v_pit_genes)]
# Pit v Isolated
pit_v_iso_res = my_MBASED(pit_mc, pit_cv, iso_mc, iso_cv, "pit", "iso", gene_names, n_boot)
pit_v_iso_genes = pit_v_iso_res[[2]]
iso_v_pit_res = my_MBASED(iso_mc, iso_cv, pit_mc, pit_cv, "iso", "pit", gene_names, n_boot)
iso_v_pit_genes = iso_v_pit_res[[2]]
ovlp_pi_v_ip = pit_v_iso_genes[which(pit_v_iso_genes %in% iso_v_pit_genes)]
# Castle v Isolated
castle_v_iso_res = my_MBASED(castle_mc, castle_cv, iso_mc, iso_cv, "castle", "iso", gene_names, n_boot)
castle_v_iso_genes = castle_v_iso_res[[2]]
iso_v_castle_res = my_MBASED(iso_mc, iso_cv, castle_mc, castle_cv, "iso", "castle", gene_names, n_boot)
iso_v_castle_genes = iso_v_castle_res[[2]]
ovlp_ci_v_ic = castle_v_iso_genes[which(castle_v_iso_genes %in% iso_v_castle_genes)]
# Summary table of hit counts per test, then write every gene list to disk.
res = data.frame(test=c("pit_v_castle", "castle_v_pit", "pvc_cvp_ovlp", "pit_v_iso", "iso_v_pit", "pvi_ivp_ovlp", "castle_v_iso", "iso_v_castle", "cvi_ivc"),
                 num_genes=c(length(pit_v_castle_genes), length(castle_v_pit_genes), length(ovlp_pc_v_cp),
                             length(pit_v_iso_genes), length(iso_v_pit_genes), length(ovlp_pi_v_ip),
                             length(castle_v_iso_genes), length(iso_v_castle_genes), length(ovlp_ci_v_ic)))
write.table(pit_v_castle_genes, "C:/Users/miles/Downloads/brain/results/ase_pit_v_castle_RG.txt", quote = F, col.names = F, row.names = F)
write.table(castle_v_pit_genes, "C:/Users/miles/Downloads/brain/results/ase_castle_v_pit_RG.txt", quote = F, col.names = F, row.names = F)
write.table(pit_v_iso_genes, "C:/Users/miles/Downloads/brain/results/ase_pit_v_iso_RG.txt", quote = F, col.names = F, row.names = F)
write.table(iso_v_pit_genes, "C:/Users/miles/Downloads/brain/results/ase_iso_v_pit_RG.txt", quote = F, col.names = F, row.names = F)
write.table(castle_v_iso_genes, "C:/Users/miles/Downloads/brain/results/ase_castle_v_iso_RG.txt", quote = F, col.names = F, row.names = F)
write.table(iso_v_castle_genes, "C:/Users/miles/Downloads/brain/results/ase_iso_v_castle_RG.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_pc_v_cp, "C:/Users/miles/Downloads/brain/results/ase_ovlp_pc_v_cp_RG.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_pi_v_ip, "C:/Users/miles/Downloads/brain/results/ase_ovlp_pi_v_ip_RG.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_ci_v_ic, "C:/Users/miles/Downloads/brain/results/ase_ovlp_ci_v_ic_RG.txt", quote = F, col.names = F, row.names = F)
# --- HGNC translations of the two-sample ASE gene lists ---
pit_v_castle_genes_hgnc = hgncMzebraInPlace(data.frame(pit_v_castle_genes), 1, gene_names)
castle_v_pit_genes_hgnc = hgncMzebraInPlace(data.frame(castle_v_pit_genes), 1, gene_names)
pit_v_iso_genes_hgnc = hgncMzebraInPlace(data.frame(pit_v_iso_genes), 1, gene_names)
iso_v_pit_genes_hgnc = hgncMzebraInPlace(data.frame(iso_v_pit_genes), 1, gene_names)
# BUG FIX: this line previously translated pit_v_iso_genes (copy-paste
# error), so the "castle_v_iso" HGNC file written below actually contained
# the pit-vs-iso list.
castle_v_iso_genes_hgnc = hgncMzebraInPlace(data.frame(castle_v_iso_genes), 1, gene_names)
iso_v_castle_genes_hgnc = hgncMzebraInPlace(data.frame(iso_v_castle_genes), 1, gene_names)
ovlp_pc_v_cp_hgnc = hgncMzebraInPlace(data.frame(ovlp_pc_v_cp), 1, gene_names)
ovlp_pi_v_ip_hgnc = hgncMzebraInPlace(data.frame(ovlp_pi_v_ip), 1, gene_names)
ovlp_ci_v_ic_hgnc = hgncMzebraInPlace(data.frame(ovlp_ci_v_ic), 1, gene_names)
# Write every translated gene list to disk.
write.table(pit_v_castle_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_pit_v_castle_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(castle_v_pit_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_castle_v_pit_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(pit_v_iso_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_pit_v_iso_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(iso_v_pit_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_iso_v_pit_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(castle_v_iso_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_castle_v_iso_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(iso_v_castle_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_iso_v_castle_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_pc_v_cp_hgnc, "C:/Users/miles/Downloads/brain/results/ase_ovlp_pc_v_cp_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_pi_v_ip_hgnc, "C:/Users/miles/Downloads/brain/results/ase_ovlp_pi_v_ip_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_ci_v_ic_hgnc, "C:/Users/miles/Downloads/brain/results/ase_ovlp_ci_v_ic_hgnc.txt", quote = F, col.names = F, row.names = F)
my_MBASED_1 = function(s1_mc, s1_cv, s1_name, gene_ind, gene_names, n_boot, myIsPhased=T, verbose=T) {
  # Purpose: Run a one-sampled MBASED allele-specific-expression experiment.
  # s1_mc: sample 1 mc counts
  # s1_cv: sample 1 cv counts
  # s1_name: label used for the single assay column
  # gene_ind: index of genes to run on (a subset of gene indexes);
  #           pass "" to use every gene with MC + CV coverage > 0
  # gene_names: genes the counts are for (aka all genes)
  # n_boot: number of bootstraps (numSim) in runMBASED
  # Returns: list(MBASED result object, genes with Bonferroni q < 0.05)
  pos_ind = gene_ind
  # BUG FIX: `if (pos_ind == "")` is an error in R >= 4.2 (condition of
  # length > 1) and silently tested only the first element before that,
  # whenever gene_ind is a vector of indexes. identical() compares the
  # whole object safely.
  if (identical(pos_ind, "")) {
    pos_ind = which( s1_mc + s1_cv > 0)
  }
  pos_gene = gene_names[pos_ind]
  this_s1_mc = s1_mc[pos_ind]
  this_s1_cv = s1_cv[pos_ind]
  if (verbose) {
    print(paste("Genes Used", length(pos_gene)))
  }
  # Create the SummarizedExperiment and run MBASED. One locus per gene; the
  # dummy seqnames are never consulted downstream.
  my_granges = GRanges(seqnames = rep("chr1:1-2", length(pos_gene)), aseID=pos_gene)
  s1_exp = SummarizedExperiment(assays=list(
    lociAllele1Counts = matrix( c(this_s1_mc), ncol=1, dimnames = list(pos_gene, s1_name)),
    lociAllele2Counts = matrix( c(this_s1_cv), ncol=1, dimnames = list(pos_gene, s1_name))
  ), rowRanges = my_granges)
  s1 = runMBASED(ASESummarizedExperiment=s1_exp, isPhased = myIsPhased, numSim = n_boot)
  # Bonferroni-correct the ASE p-values across all tested genes.
  qvalue = p.adjust(assays(s1)$pValueASE, method="bonferroni")
  s1_genes = pos_gene[which(qvalue < 0.05)]
  return(list(s1, s1_genes))
}
my_MBASED = function(s1_mc, s1_cv, s2_mc, s2_cv, s1_name, s2_name, gene_names, n_boot, myIsPhased=T, verbose=T, isSNP=F) {
  # Purpose: Run a two sampled MBASED Experiment
  # s1_mc: sample 1 mc counts
  # s1_cv: sample 1 cv counts
  # s2_mc: sample 2 mc counts
  # s2_cv: sample 2 cv counts
  # s1_name: name of sample 1 (for example "pit")
  # s2_name: name of sample 2 (for example "castle")
  # gene_names: genes the counts are for (used only when isSNP=F)
  # n_boot: number of bootstraps (numSim) in runMBASED
  # myIsPhased: forwarded to runMBASED's isPhased
  # isSNP: when TRUE, inputs are named SNP vectors and the loci used are the
  #        name-intersection of the two samples; when FALSE, loci are genes
  #        with nonzero total coverage in BOTH samples.
  # Returns: list(MBASED result object, loci with Bonferroni q < 0.05)
  # First find non-zero loci bc according to the documentation:
  # "All supplied loci must have total read count (across both alleles) greater than 0
  # (in each of the two samples, in the case of two-sample analysis)."
  if (isSNP) {
    # NOTE(review): the %in% filters preserve each sample's ORIGINAL row
    # order; rows of s1 and s2 only align if both samples list the shared
    # SNPs in the same order (true when both derive from the same join
    # upstream) — verify before reusing with differently ordered inputs.
    this_s1_mc = s1_mc[which(names(s1_mc) %in% names(s2_mc))]
    this_s1_cv = s1_cv[which(names(s1_cv) %in% names(s2_cv))]
    this_s2_mc = s2_mc[which(names(s2_mc) %in% names(s1_mc))]
    this_s2_cv = s2_cv[which(names(s2_cv) %in% names(s1_cv))]
    print(paste("SNPs lost from s1:", length(s1_mc) - length(this_s1_mc)))
    print(paste("SNPs lost from s2:", length(s2_mc) - length(this_s2_mc)))
    pos_gene = names(s1_mc)[which(names(s1_mc) %in% names(s2_mc))]
  } else {
    pos_ind = which( s1_mc + s1_cv > 0 & s2_mc + s2_cv > 0 )
    pos_gene = gene_names[pos_ind]
    this_s1_mc = s1_mc[pos_ind]
    this_s1_cv = s1_cv[pos_ind]
    this_s2_mc = s2_mc[pos_ind]
    this_s2_cv = s2_cv[pos_ind]
    if (verbose) {
      print(paste("Genes Used", length(pos_gene)))
    }
  }
  # Create the SummarizedExperiment and run MBASED (one locus per gene/SNP;
  # the dummy seqnames are never consulted downstream).
  my_granges = GRanges(seqnames = rep("chr1:1-2", length(pos_gene)), aseID=pos_gene)
  s1_v_s2_exp = SummarizedExperiment(assays=list(
    lociAllele1Counts = matrix( c(this_s1_mc, this_s2_mc), ncol=2, dimnames = list(pos_gene, c(s1_name, s2_name))),
    lociAllele2Counts = matrix( c(this_s1_cv, this_s2_cv), ncol=2, dimnames = list(pos_gene, c(s1_name, s2_name)))
  ), rowRanges = my_granges)
  s1_v_s2 = runMBASED(ASESummarizedExperiment=s1_v_s2_exp, isPhased = myIsPhased, numSim = n_boot)
  # Analyze MBASED Data: diagnostic histograms, then Bonferroni correction.
  hist(assays(s1_v_s2)$majorAlleleFrequencyDifference, main=paste(s1_name, "v", s2_name, "MAF"), xlab = "Major Allele Frequency")
  hist(assays(s1_v_s2)$pValueASE, main=paste(s1_name, "v", s2_name, "p-value"), xlab = "p-value")
  qvalue = p.adjust(assays(s1_v_s2)$pValueASE, method="bonferroni")
  s1_v_s2_genes = pos_gene[which(qvalue < 0.05)]
  return(list(s1_v_s2, s1_v_s2_genes))
}
posToGene = function(all_pos, gtf) {
  # Map SNP position strings ("LG:base-base") to the names of genes whose
  # body extended by 25kb on each side contains the base.
  # all_pos: character vector of positions formatted "LG:base-base"
  # gtf: data.frame with columns LG, start, stop, gene_name
  # Returns: vector of matching gene names (NULL when nothing matches),
  #          possibly with repeats when several positions hit one gene.
  found_gene = c()
  for (pos in all_pos) {
    stop_1 = gregexpr(pattern = ':', pos)[[1]]
    stop_2 = gregexpr(pattern = '-', pos)[[1]]
    lg = substr(pos, 1, stop_1-1)
    # BUG FIX: base was previously left as a character string, so the range
    # checks below compared numbers LEXICOGRAPHICALLY (e.g. "9" > "100000").
    base = as.numeric(substr(pos, stop_1+1, stop_2-1))
    # BUG FIX: the window was [start+25000, stop+25000] — shifted 25kb to the
    # right instead of extended. A +/-25kb window around the gene body is
    # [start-25000, stop+25000].
    this_found = gtf$gene_name[which(gtf$LG == lg & gtf$start-25000 <= base & gtf$stop+25000 >= base)]
    found_gene = c(found_gene, this_found)
  }
  return(found_gene)
}
shuffleAlleles = function(s1_mc, s1_cv, s2_mc, s2_cv) {
  # Permutation helper for the ASE bootstrap: at every locus, randomly swap
  # which sample's count is assigned to "sample 1". MC counts and CV counts
  # are shuffled with independent draws.
  # s1_mc/s1_cv: sample 1 MC / CV counts; s2_mc/s2_cv: sample 2 counts.
  # Returns: data.frame(new_s1_mc, new_s1_cv, new_s2_mc, new_s2_cv).
  # IMPROVEMENT: the original converted the sampled column indexes to a
  # factor and back through plyr::revalue just to swap 1 <-> 2; `3 - ind`
  # does the same with no third-party dependency. The RNG stream (two
  # sample() calls with identical arguments) is unchanged.
  all_mc = data.frame(s1_mc, s2_mc)
  ind1 = sample(c(1,2), length(s1_mc), replace = T)
  ind2 = 3 - ind1
  # Matrix (row, column) indexing pulls one count per locus.
  new_s1_mc = all_mc[cbind(seq_len(nrow(all_mc)), ind1)]
  new_s2_mc = all_mc[cbind(seq_len(nrow(all_mc)), ind2)]
  all_cv = data.frame(s1_cv, s2_cv)
  ind1 = sample(c(1,2), length(s1_cv), replace = T)
  ind2 = 3 - ind1
  new_s1_cv = all_cv[cbind(seq_len(nrow(all_cv)), ind1)]
  new_s2_cv = all_cv[cbind(seq_len(nrow(all_cv)), ind2)]
  res = data.frame(new_s1_mc, new_s1_cv, new_s2_mc, new_s2_cv)
  return(res)
}
#===============#
# Bootstrapping #
#===============#
# Permutation test: how often does random reassignment of alleles between
# pit and castle (shuffleAlleles) produce at least as many genes in the
# reciprocal pit-v-castle / castle-v-pit MBASED overlap as the real data?
# Requires the real analysis to have run first: pit_v_castle_genes,
# castle_v_pit_genes, ovlp_pc_v_cp, the *_mc / *_cv count vectors,
# gene_names, and n_boot must all exist.
real_pc = length(pit_v_castle_genes)
real_cp = length(castle_v_pit_genes)
real_ovlp_pc_v_cp = length(ovlp_pc_v_cp)
boot_res = data.frame()
for (n in 1:n_boot) {
  # Progress indicator: print the run number at the start, every 10%, and at
  # the end; a "." otherwise.
  # NOTE(review): n %% (n_boot/10) misbehaves when n_boot < 10 (non-integer
  # divisor) -- assumes n_boot is a multiple of 10; confirm.
  if(n == n_boot) {
    cat(paste(n, "\n"))
  } else if (n %% (n_boot/10) == 0 || n == 1) {
    cat(n)
  } else {
    cat(".")
  }
  tryCatch({
    # Pit v Castle: shuffle allele assignment, then rerun both reciprocal
    # MBASED comparisons on the shuffled counts.
    # NOTE(review): this clobbers pit_v_castle_genes / castle_v_pit_genes /
    # ovlp_pc_v_cp from the real analysis. The real sizes were captured in
    # real_* above, but the original gene lists are lost after this loop.
    shuf_res = shuffleAlleles(pit_mc, pit_cv, castle_mc, castle_cv)
    pit_v_castle_res = my_MBASED(shuf_res$new_s1_mc, shuf_res$new_s1_cv, shuf_res$new_s2_mc, shuf_res$new_s2_cv, "pit", "castle", gene_names, n_boot, verbose=F)
    pit_v_castle_genes = pit_v_castle_res[[2]]
    castle_v_pit_res = my_MBASED(shuf_res$new_s2_mc, shuf_res$new_s2_cv, shuf_res$new_s1_mc, shuf_res$new_s1_cv, "castle", "pit", gene_names, n_boot, verbose=F)
    castle_v_pit_genes = castle_v_pit_res[[2]]
    ovlp_pc_v_cp = pit_v_castle_genes[which(pit_v_castle_genes %in% castle_v_pit_genes)]
    # # Pit v Isolated
    # pit_v_iso_res = my_MBASED(pit_mc, pit_cv, iso_mc, iso_cv, "pit", "iso", gene_names, n_boot, verbose=F)
    # pit_v_iso_genes = pit_v_iso_res[[2]]
    # iso_v_pit_res = my_MBASED(iso_mc, iso_cv, pit_mc, pit_cv, "iso", "pit", gene_names, n_boot, verbose=F)
    # iso_v_pit_genes = iso_v_pit_res[[2]]
    # ovlp_pi_v_ip = pit_v_iso_genes[which(pit_v_iso_genes %in% iso_v_pit_genes)]
    #
    # # Castle v Isolated
    # castle_v_iso_res = my_MBASED(castle_mc, castle_cv, iso_mc, iso_cv, "castle", "iso", gene_names, n_boot, verbose=F)
    # castle_v_iso_genes = castle_v_iso_res[[2]]
    # iso_v_castle_res = my_MBASED(iso_mc, iso_cv, castle_mc, castle_cv, "iso", "castle", gene_names, n_boot, verbose=F)
    # iso_v_castle_genes = iso_v_castle_res[[2]]
    # ovlp_ci_v_ic = castle_v_iso_genes[which(castle_v_iso_genes %in% iso_v_castle_genes)]
    # boot_res = rbind(boot_res, t(c(n, ovlp_pc_v_cp, ovlp_pi_v_ip, ovlp_ci_v_ic)))
    boot_res = rbind(boot_res, t(c(n, length(ovlp_pc_v_cp))))
  }, error = function(e) {
    # Failed permutations are skipped (no row appended for this n).
    print(paste("Error on boostrap", n))
  })
}
# colnames(boot_res) = c("run", "overlap_in_pvc_and_cvp", "overlap_in_pvi_and_ivp", "overlap_in_cvi_and_ivc")
colnames(boot_res) = c("run", "overlap_in_pvc_and_cvp")
# Empirical p-value: fraction of permutations whose overlap strictly exceeds
# the observed overlap.
boot_res$above = boot_res$overlap_in_pvc_and_cvp > real_ovlp_pc_v_cp
ggplot(boot_res, aes(overlap_in_pvc_and_cvp, alpha=.7, fill=above)) + geom_histogram(alpha=0.5, color = "purple") + geom_vline(aes(xintercept = real_ovlp_pc_v_cp)) + geom_text(aes(x=real_ovlp_pc_v_cp, label="Real Value"), y = Inf, hjust=0, vjust=1, color = "black") + xlab("# of Gene in Overlap Between Pit v Castle and Castle v Pit") + ggtitle("Comparison Between Bootstrap Values and Real Value") + guides(color=F, alpha=F, fill=F)
print(paste("p-value =", length(boot_res$above[which(boot_res$above)]) / length(boot_res$above)))
#=========================================================================================
# Old UMD1 Data
#=========================================================================================
# Find "discordant" ASE genes: genes where both replicates agree on the sign
# of the ASE ratio in the first row, but both replicates flip to the
# opposite sign in some later row for the same gene.
rna_path <- "C:/Users/miles/Downloads/brain/"
data <- read.table(paste(rna_path, "/data/disc_ase.txt", sep=""), header = TRUE)
disc_genes <- c()
for (gene in unique(data$gene)) {
  this_rows <- data[which(data$gene == gene),]
  # Debug print for one gene of interest.
  if (gene == "atp1b4") {
    print(this_rows)
  }
  # NOTE(review): a gene is appended once per sign-flipping row, so
  # disc_genes can contain duplicates; dedupe downstream if needed.
  if (this_rows$rep_1_ase_ratio[1] > 0 && this_rows$rep_2_ase_ratio[1] > 0 && nrow(this_rows) >= 2) { # both pos
    for (i in 2:nrow(this_rows)) {
      if (this_rows$rep_1_ase_ratio[i] < 0 && this_rows$rep_2_ase_ratio[i] < 0) {
        disc_genes <- c(disc_genes, gene)
      }
    }
  } else if (this_rows$rep_1_ase_ratio[1] < 0 && this_rows$rep_2_ase_ratio[1] < 0 && nrow(this_rows) >= 2) { # both neg
    for (i in 2:nrow(this_rows)) {
      if (this_rows$rep_1_ase_ratio[i] > 0 && this_rows$rep_2_ase_ratio[i] > 0) {
        disc_genes <- c(disc_genes, gene)
      }
    }
  }
}
# Find genes whose ASE ratio is lower in the isolated/digging conditions
# than the minimum ASE ratio seen in either building replicate ("mc_up"):
# if both non-building conditions are present, both must be below; if only
# one is present, that one alone suffices.
#
# BUG FIX: the original indexed iso_rows / dig_rows with `i`, a stale loop
# index left over from the previous (disc_genes) loop. Depending on that
# leftover value it read the wrong row or an out-of-range NA, and an NA in
# the && chain crashes the if(). All rows of each condition are tested now
# (equivalent to the original intent when each condition has one row).
mc_up <- c()
for (gene in unique(data$gene)) {
  this_rows <- data[which(data$gene == gene),]
  build_rows <- this_rows[which(this_rows$condition == "building"),]
  iso_rows <- this_rows[which(this_rows$condition == "isolated"),]
  dig_rows <- this_rows[which(this_rows$condition == "digging"),]
  # NOTE(review): if a gene has no building rows, min() over empty input is
  # Inf (with a warning) and every other condition trivially passes --
  # confirm whether such genes should be skipped instead.
  min_build <- min(build_rows$rep_1_ase_ratio, build_rows$rep_2_ase_ratio)
  iso_down <- nrow(iso_rows) > 0 && all(iso_rows$rep_1_ase_ratio < min_build) && all(iso_rows$rep_2_ase_ratio < min_build)
  dig_down <- nrow(dig_rows) > 0 && all(dig_rows$rep_1_ase_ratio < min_build) && all(dig_rows$rep_2_ase_ratio < min_build)
  if (nrow(iso_rows) > 0 && nrow(dig_rows) > 0) { # only both up is considered mc_up
    if (iso_down && dig_down) {
      mc_up <- c(mc_up, gene)
    }
  } else { # either one up, is considered mc_up
    if (iso_down) {
      mc_up <- c(mc_up, gene)
    }
    if (dig_down) {
      mc_up <- c(mc_up, gene)
    }
  }
}
# Write the mc_up gene list with a constant "MC_UP" label column.
# BUG FIX: the original used `<-` inside data.frame() (data.frame(gene <- mc_up,
# bio <- rep(...))), which leaked globals `gene` and `bio` (clobbering the
# loop variable above) and produced auto-deparsed column names. Using `=`
# names the columns properly; the written file is unchanged because column
# names are suppressed (col.names = FALSE).
df <- data.frame(gene = mc_up, bio = rep("MC_UP", length(mc_up)))
write.table(df, paste(rna_path, "/data/mc_up.txt", sep=""), sep="\t", quote = FALSE, col.names = FALSE, row.names = FALSE)
# Build marker gene lists from the filtered all-conditions ASE table and
# write them (after UMD1 -> UMD2a name conversion via umd1To2a, defined
# elsewhere in this file).
data = read.csv("C:/Users/miles/Downloads/cichlid_ase_common_genes_all_conditions_filtered_030920.csv", header = T)
# NOTE(review): `test` is assigned three times and only the last selection
# (consistent negative mean ASE in digging, building, AND isolated) feeds
# the outputs below; confirm the first two selections are intentionally
# discarded (they look like interactive leftovers).
test = as.vector(data[which( sign(data$Digging_Mean_ASE) != sign(data$Building_Mean_ASE) & sign(data$Building_1) == -1 ),1])
test = as.vector(data[which( sign(data$Digging_Mean_ASE) == sign(data$Building_Mean_ASE) & sign(data$Building_Mean_ASE) != sign(data$Iso_Mean_ASE) ),1])
test = as.vector(data[which( sign(data$Digging_Mean_ASE) == sign(data$Building_Mean_ASE) & sign(data$Building_Mean_ASE) == sign(data$Iso_Mean_ASE) & sign(data$Building_Mean_ASE) == -1 ),1])
ase_down_all = data.frame(umd1To2a(test), "ASE_DOWN_ALL")
write.table(ase_down_all, "C:/Users/miles/Downloads/brain/data/markers/ase_down_all_111820.txt", sep="\t", col.names = F, row.names = F, quote = F)
disc_ase_pc_castle_up_in_build = data.frame(umd1To2a(test), "DISC_ASE")
# BUG FIX: the original wrote `disc_ase_pc_pit_up_in_dig`, a variable that
# is never defined in this script (the object built above is
# disc_ase_pc_castle_up_in_build), so this write.table() errored.
write.table(disc_ase_pc_castle_up_in_build, "C:/Users/miles/Downloads/brain/data/markers/disc_ase_dig_v_build_pit_up_in_dig_111820.txt", sep="\t", col.names = F, row.names = F, quote = F)
# NOTE(review): a stray "=======" git merge-conflict marker was left here;
# it was a syntax error when sourcing this file and has been commented out.
library("RUnit")
library("MBASED")
library("metap")
library("DESeq2")
library("apeglm")
library("EnhancedVolcano")
#=========================================================================================
# New UMD2a Data
#=========================================================================================
# DEG
# Differential expression (DESeq2) between pit (samples 4,5), castle (6,7),
# and isolated (8,9) brains, from per-gene count tables.
rna_path = "C:/Users/miles/Downloads/brain/"
SRR904 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440904_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR905 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440905_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR906 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440906_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR907 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440907_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR908 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440908_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
SRR909 = read.table(paste(rna_path, "/data/pit_castle_deg/SRR5440909_counter_per_gene.tsv", sep=""), sep="\t", header = F, stringsAsFactors = F)
# Keep only the first occurrence of each gene id.
SRR904 = SRR904[which(! duplicated(SRR904[,1])),]
SRR905 = SRR905[which(! duplicated(SRR905[,1])),]
SRR906 = SRR906[which(! duplicated(SRR906[,1])),]
SRR907 = SRR907[which(! duplicated(SRR907[,1])),]
SRR908 = SRR908[which(! duplicated(SRR908[,1])),]
SRR909 = SRR909[which(! duplicated(SRR909[,1])),]
# Rows 1-5 are dropped -- presumably counter summary rows rather than genes;
# TODO confirm against the .tsv format.
genes = SRR904[-c(1:5),1]
# NOTE(review): as.matrix() silently ignores the dimnames argument, so `mat`
# has no row/column names. Downstream code recovers gene names by index
# (genes[sig_ind]), so results are still correct.
mat = as.matrix(cbind(SRR904[-c(1:5),2], SRR905[-c(1:5),2], SRR906[-c(1:5),2], SRR907[-c(1:5),2], SRR908[-c(1:5),2], SRR909[-c(1:5),2]),
                dimnames=list(genes, c("4", "5", "6", "7", "8", "9")))
mycolData = data.frame(samples=c("4", "5", "6", "7", "8", "9"),
                       cond=c("pit", "pit", "castle", "castle", "iso", "iso"),
                       isBhve=c("bhve", "bhve", "bhve", "bhve", "ctrl", "ctrl"))
dds = DESeqDataSetFromMatrix(countData = mat,
                             colData = mycolData,
                             design = ~ cond)
dds <- DESeq(dds)
resultsNames(dds)
res <- results(dds, name="cond_pit_vs_castle")
# Intentional overwrite: downstream uses the apeglm-shrunken LFCs.
res <- lfcShrink(dds, coef="cond_pit_vs_castle", type="apeglm")
# NOTE(review): log2FoldChange > 1 keeps only genes up in pit (not |LFC|) --
# confirm down-in-pit genes are meant to be excluded.
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Pit v Castle Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
# Convert significant genes to HGNC symbols (hgncMzebraInPlace defined elsewhere).
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/pit_v_castle_deg.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/pit_v_castle_deg_hgnc.txt", quote=F, col.names = F, row.names = F)
# BHVE v CTRL
# Same DESeq2 pipeline as above, but contrasting the pooled behaving samples
# (pit + castle) against the isolated controls via the isBhve column.
dds = DESeqDataSetFromMatrix(countData = mat,
                             colData = mycolData,
                             design = ~ isBhve)
dds <- DESeq(dds)
resultsNames(dds)
# Reference level is alphabetical ("bhve"), hence the ctrl_vs_bhve name.
res <- results(dds, name="isBhve_ctrl_vs_bhve")
# Intentional overwrite: use apeglm-shrunken LFCs downstream.
res <- lfcShrink(dds, coef="isBhve_ctrl_vs_bhve", type="apeglm")
# NOTE(review): with this coefficient, log2FoldChange > 1 selects genes UP
# in ctrl relative to bhve -- confirm the intended direction.
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Bhve v Ctrl Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/bhve_v_ctrl_deg.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/bhve_v_ctrl_deg_hgnc.txt", quote=F, col.names = F, row.names = F)
# Sim Pit v Castle
# Negative control: relabel the four behaving samples into two artificial
# pit/castle splits (sim1 and its mirror image sim2) and rerun DESeq2 on
# each to see how many "DEGs" arise from arbitrary labels.
mat_pvc = as.matrix(cbind(SRR904[-c(1:5),2], SRR905[-c(1:5),2], SRR906[-c(1:5),2], SRR907[-c(1:5),2]),
                    dimnames=list(genes, c("4", "5", "6", "7")))
# NOTE(review): as.matrix() ignores the dimnames argument; mat_pvc is
# unnamed, which is harmless here since genes are recovered by index.
colData_pvc = data.frame(samples=c("4", "5", "6", "7"),
                         sim1=c("pit", "castle", "pit", "castle"),
                         sim2=c("castle", "pit", "castle", "pit"))
dds = DESeqDataSetFromMatrix(countData = mat_pvc,
                             colData = colData_pvc,
                             design = ~sim1)
dds <- DESeq(dds)
resultsNames(dds)
res <- results(dds, name="sim1_pit_vs_castle")
# Intentional overwrite: use apeglm-shrunken LFCs downstream.
res <- lfcShrink(dds, coef="sim1_pit_vs_castle", type="apeglm")
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Simulated Pit v Castle 1 Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/sim1_pit_v_castle.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/sim1_pit_v_castle_hgnc.txt", quote=F, col.names = F, row.names = F)
# BUG FIX: the original re-ran DESeq() on the ~sim1 fit and then requested
# "sim2_pit_vs_castle", a coefficient that does not exist in a model with
# design ~sim1, so results()/lfcShrink() errored. The dataset must be
# rebuilt with design ~sim2 before extracting the sim2 contrast.
dds = DESeqDataSetFromMatrix(countData = mat_pvc,
                             colData = colData_pvc,
                             design = ~sim2)
dds <- DESeq(dds)
res <- results(dds, name="sim2_pit_vs_castle")
res <- lfcShrink(dds, coef="sim2_pit_vs_castle", type="apeglm")
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Simulated Pit v Castle 2 Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/sim2_pit_v_castle.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/sim2_pit_v_castle_hgnc.txt", quote=F, col.names = F, row.names = F)
# Pit v Iso
# DESeq2 contrast of pit samples (4,5) against isolated controls (8,9).
mat_pvi = as.matrix(cbind(SRR904[-c(1:5),2], SRR905[-c(1:5),2], SRR908[-c(1:5),2], SRR909[-c(1:5),2]),
                    dimnames=list(genes, c("4", "5", "8", "9")))
colData_pvi = data.frame(samples=c("4", "5", "8", "9"),
                         cond=c("pit", "pit", "iso", "iso"))
dds = DESeqDataSetFromMatrix(countData = mat_pvi,
                             colData = colData_pvi,
                             design = ~cond)
dds <- DESeq(dds)
# Reference level is alphabetical ("iso"), giving the pit_vs_iso coefficient.
res <- results(dds, name="cond_pit_vs_iso")
# Intentional overwrite: use apeglm-shrunken LFCs downstream.
res <- lfcShrink(dds, coef="cond_pit_vs_iso", type="apeglm")
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Pit v Isolated Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/pit_v_iso.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/pit_v_iso_hgnc.txt", quote=F, col.names = F, row.names = F)
# Castle v Iso
# DESeq2 contrast of castle samples (6,7) against isolated controls (8,9).
mat_cvi = as.matrix(cbind(SRR906[-c(1:5),2], SRR907[-c(1:5),2], SRR908[-c(1:5),2], SRR909[-c(1:5),2]),
                    dimnames=list(genes, c("6", "7", "8", "9")))
colData_cvi = data.frame(samples=c("6", "7", "8", "9"),
                         cond=c("castle", "castle", "iso", "iso"))
dds = DESeqDataSetFromMatrix(countData = mat_cvi,
                             colData = colData_cvi,
                             design = ~cond)
dds <- DESeq(dds)
# Reference level is alphabetical ("castle"), so the coefficient here is
# iso_vs_castle -- the opposite orientation from the pit_v_iso block above.
res <- results(dds, name="cond_iso_vs_castle")
res <- lfcShrink(dds, coef="cond_iso_vs_castle", type="apeglm")
# NOTE(review): log2FoldChange > 1 therefore selects genes UP in iso
# relative to castle, despite the "castle_v_iso" output names -- confirm.
sig_ind = which(res$padj < 0.05 & res$log2FoldChange > 1)
sig_genes = genes[sig_ind]
res_df = data.frame(gene=genes, logFC=res$log2FoldChange, padj=res$padj)
rownames(res_df) = res_df$gene
EnhancedVolcano(res_df, lab=rownames(res_df), x="logFC", y="padj") + labs(subtitle="Castle v Isolated Volcano Plot") + theme(plot.title = element_blank(), plot.caption = element_blank())
sig_genes_hgnc = hgncMzebraInPlace(data.frame(sig_genes), 1, gene_names)
write.table(sig_genes, "C:/Users/miles/Downloads/brain/results/castle_v_iso.txt", quote=F, col.names = F, row.names = F)
write.table(sig_genes_hgnc, "C:/Users/miles/Downloads/brain/results/castle_v_iso_hgnc.txt", quote=F, col.names = F, row.names = F)
# Dendrogram
# Cluster samples on each DEG list (degDend defined elsewhere in this file)
# and write the dendrogram images.
mat = matrix(cbind(SRR904[-c(1:5),2], SRR905[-c(1:5),2], SRR906[-c(1:5),2], SRR907[-c(1:5),2], SRR908[-c(1:5),2], SRR909[-c(1:5),2]), ncol = 6, dimnames = list(genes, c("pit", "pit", "castle", "castle", "iso", "iso")))
pit_v_castle_genes = read.table("C:/Users/miles/Downloads/brain/results/pit_v_castle.txt", header=F)
pit_v_castle_genes = as.vector(pit_v_castle_genes$V1)
# One dendrogram restricted to the contrasted conditions, one with all samples.
p = degDend(mat, pit_v_castle_genes, "C:/Users/miles/Downloads/brain/results/pit_v_castle_dend.png", include_samples = c("pit", "castle"))
p = degDend(mat, pit_v_castle_genes, "C:/Users/miles/Downloads/brain/results/pit_v_castle_all_dend.png")
pit_v_iso_genes = read.table("C:/Users/miles/Downloads/brain/results/pit_v_iso.txt", header=F)
pit_v_iso_genes = as.vector(pit_v_iso_genes$V1)
p = degDend(mat, pit_v_iso_genes, "C:/Users/miles/Downloads/brain/results/pit_v_iso_dend.png", include_samples = c("pit", "iso"))
castle_v_iso_genes = read.table("C:/Users/miles/Downloads/brain/results/castle_v_iso.txt", header=F)
castle_v_iso_genes = as.vector(castle_v_iso_genes$V1)
p = degDend(mat, castle_v_iso_genes, "C:/Users/miles/Downloads/brain/results/castle_v_iso_dend.png", include_samples = c("castle", "iso"))
# SNP-level data
# Load per-SNP informative read counts for each sample. Columns V6/V7 appear
# to be the two allele counts and V14 a "True"/"False" flag for whether V6
# is the MC (Metriaclima) allele -- TODO confirm against the VCF-producing
# script. MC_COUNTS/CV_COUNTS below orient the counts so MC is always the
# Metriaclima allele and CV the other.
SRR904 = read.table(paste(rna_path, "/data/ase/SRR5440904_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR905 = read.table(paste(rna_path, "/data/ase/SRR5440905_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR906 = read.table(paste(rna_path, "/data/ase/SRR5440906_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR907 = read.table(paste(rna_path, "/data/ase/SRR5440907_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR908 = read.table(paste(rna_path, "/data/ase/SRR5440908_informative.vcf", sep=""), header = F, stringsAsFactors = F)
SRR909 = read.table(paste(rna_path, "/data/ase/SRR5440909_informative.vcf", sep=""), header = F, stringsAsFactors = F)
# Force the two count columns to numeric.
SRR904$V6 = as.numeric(as.vector(SRR904$V6))
SRR905$V6 = as.numeric(as.vector(SRR905$V6))
SRR906$V6 = as.numeric(as.vector(SRR906$V6))
SRR907$V6 = as.numeric(as.vector(SRR907$V6))
SRR908$V6 = as.numeric(as.vector(SRR908$V6))
SRR909$V6 = as.numeric(as.vector(SRR909$V6))
SRR904$V7 = as.numeric(as.vector(SRR904$V7))
SRR905$V7 = as.numeric(as.vector(SRR905$V7))
SRR906$V7 = as.numeric(as.vector(SRR906$V7))
SRR907$V7 = as.numeric(as.vector(SRR907$V7))
SRR908$V7 = as.numeric(as.vector(SRR908$V7))
SRR909$V7 = as.numeric(as.vector(SRR909$V7))
# MC_COUNTS: V6 by default, swapped to V7 where V14 == "False".
SRR904$MC_COUNTS = SRR904$V6
SRR905$MC_COUNTS = SRR905$V6
SRR906$MC_COUNTS = SRR906$V6
SRR907$MC_COUNTS = SRR907$V6
SRR908$MC_COUNTS = SRR908$V6
SRR909$MC_COUNTS = SRR909$V6
SRR904$MC_COUNTS[which(SRR904$V14 == "False")] = SRR904$V7[which(SRR904$V14 == "False")]
SRR905$MC_COUNTS[which(SRR905$V14 == "False")] = SRR905$V7[which(SRR905$V14 == "False")]
SRR906$MC_COUNTS[which(SRR906$V14 == "False")] = SRR906$V7[which(SRR906$V14 == "False")]
SRR907$MC_COUNTS[which(SRR907$V14 == "False")] = SRR907$V7[which(SRR907$V14 == "False")]
SRR908$MC_COUNTS[which(SRR908$V14 == "False")] = SRR908$V7[which(SRR908$V14 == "False")]
SRR909$MC_COUNTS[which(SRR909$V14 == "False")] = SRR909$V7[which(SRR909$V14 == "False")]
# CV_COUNTS: the complementary allele count (V7 by default, V6 when swapped).
SRR904$CV_COUNTS = SRR904$V7
SRR905$CV_COUNTS = SRR905$V7
SRR906$CV_COUNTS = SRR906$V7
SRR907$CV_COUNTS = SRR907$V7
SRR908$CV_COUNTS = SRR908$V7
SRR909$CV_COUNTS = SRR909$V7
SRR904$CV_COUNTS[which(SRR904$V14 == "False")] = SRR904$V6[which(SRR904$V14 == "False")]
SRR905$CV_COUNTS[which(SRR905$V14 == "False")] = SRR905$V6[which(SRR905$V14 == "False")]
SRR906$CV_COUNTS[which(SRR906$V14 == "False")] = SRR906$V6[which(SRR906$V14 == "False")]
SRR907$CV_COUNTS[which(SRR907$V14 == "False")] = SRR907$V6[which(SRR907$V14 == "False")]
SRR908$CV_COUNTS[which(SRR908$V14 == "False")] = SRR908$V6[which(SRR908$V14 == "False")]
SRR909$CV_COUNTS[which(SRR909$V14 == "False")] = SRR909$V6[which(SRR909$V14 == "False")]
# Position key "chrom:pos-pos" used to join replicates below.
SRR904$pos = paste0(SRR904$V1, ":", SRR904$V2, "-", SRR904$V2)
SRR905$pos = paste0(SRR905$V1, ":", SRR905$V2, "-", SRR905$V2)
SRR906$pos = paste0(SRR906$V1, ":", SRR906$V2, "-", SRR906$V2)
SRR907$pos = paste0(SRR907$V1, ":", SRR907$V2, "-", SRR907$V2)
SRR908$pos = paste0(SRR908$V1, ":", SRR908$V2, "-", SRR908$V2)
SRR909$pos = paste0(SRR909$V1, ":", SRR909$V2, "-", SRR909$V2)
# Sum allele counts across the two replicates of each condition, keeping
# only SNPs present in both replicates (inner join on the pos key). The
# resulting named vectors (names = "chrom:pos-pos") feed my_MBASED below.
pit = inner_join(SRR904, SRR905, by = "pos")
pit_mc = pit$MC_COUNTS.x + pit$MC_COUNTS.y
pit_cv = pit$CV_COUNTS.x + pit$CV_COUNTS.y
names(pit_mc) = pit$pos
names(pit_cv) = pit$pos
castle = inner_join(SRR906, SRR907, by = "pos")
castle_mc = castle$MC_COUNTS.x + castle$MC_COUNTS.y
castle_cv = castle$CV_COUNTS.x + castle$CV_COUNTS.y
names(castle_mc) = castle$pos
names(castle_cv) = castle$pos
iso = inner_join(SRR908, SRR909, by = "pos")
iso_mc = iso$MC_COUNTS.x + iso$MC_COUNTS.y
iso_cv = iso$CV_COUNTS.x + iso$CV_COUNTS.y
names(iso_mc) = iso$pos
names(iso_cv) = iso$pos
# Run the reciprocal SNP-level MBASED comparisons for every condition pair
# and intersect each pair's significant positions.
# NOTE(review): pit$pos is passed as the position vector even when pit is
# not the first sample (e.g. castle_v_pit); in isSNP mode my_MBASED derives
# positions from names(s1_mc) instead, so this argument appears unused
# there -- confirm before relying on it.
pit_v_castle_res = my_MBASED(pit_mc, pit_cv, castle_mc, castle_cv, "pit", "castle", pit$pos, n_boot, isSNP=T)
castle_v_pit_res = my_MBASED(castle_mc, castle_cv, pit_mc, pit_cv, "castle", "pit", pit$pos, n_boot, isSNP=T)
pit_v_castle_pos = pit_v_castle_res[[2]]
castle_v_pit_pos = castle_v_pit_res[[2]]
ovlp_pc_v_cp_pos = pit_v_castle_pos[which(pit_v_castle_pos %in% castle_v_pit_pos)]
pit_v_iso_res = my_MBASED(pit_mc, pit_cv, iso_mc, iso_cv, "pit", "iso", pit$pos, n_boot, isSNP=T)
iso_v_pit_res = my_MBASED(iso_mc, iso_cv, pit_mc, pit_cv, "iso", "pit", pit$pos, n_boot, isSNP=T)
pit_v_iso_pos = pit_v_iso_res[[2]]
iso_v_pit_pos = iso_v_pit_res[[2]]
ovlp_pi_v_ip_pos = pit_v_iso_pos[which(pit_v_iso_pos %in% iso_v_pit_pos)]
castle_v_iso_res = my_MBASED(castle_mc, castle_cv, iso_mc, iso_cv, "castle", "iso", castle$pos, n_boot, isSNP=T)
# BUG FIX: the sample label was misspelled "csatle", which propagated into
# the histogram titles produced by my_MBASED.
iso_v_castle_res = my_MBASED(iso_mc, iso_cv, castle_mc, castle_cv, "iso", "castle", castle$pos, n_boot, isSNP=T)
castle_v_iso_pos = castle_v_iso_res[[2]]
iso_v_castle_pos = iso_v_castle_res[[2]]
ovlp_ci_v_ic_pos = castle_v_iso_pos[which(castle_v_iso_pos %in% iso_v_castle_pos)]
# Parse gene records from the combined Ensembl/NCBI GTF, extract gene_name
# from the attributes column, drop unnamed LOC genes, and map the
# significant pit-v-castle SNP positions onto genes.
gtf = read.table("C:/Users/miles/Downloads/brain/brain_scripts/full_ens_w_ncbi_gene.gtf", sep="\t", header=F, stringsAsFactors = F)
# Keep gene features only; NC_027944.1 (excluded contig) is dropped.
gtf = gtf[which(gtf[,3] == "gene" & gtf[,1] != "NC_027944.1"),]
gtf_gene_name <- c()
for (i in 1:nrow(gtf)) {
  # Locate 'gene_name' in the attributes string and take the text up to the
  # next ';' (the +10 skips past 'gene_name "' -- assumes the quoted GTF
  # attribute format; TODO confirm).
  start <- gregexpr(pattern ='gene_name', gtf$V9[i])[[1]]
  stop <- gregexpr(pattern =';', substr(gtf$V9[i], start, nchar(gtf$V9[i])))[[1]][1]
  gene_name <- substr(gtf$V9[i], start+10, start+stop-2)
  # NOTE(review): when 'gene_name' is absent, start == -1 and both substr
  # calls use negative offsets, yielding garbage rather than a sentinel;
  # this fallback branch looks broken -- confirm such rows exist and decide
  # on NA instead.
  if (start == -1) {
    gene_name <- substr(gtf$V9[i], start+10, start+stop)
  }
  gtf_gene_name <- c(gtf_gene_name, gene_name)
}
gtf$gene_name <- gtf_gene_name
colnames(gtf) <- c("LG", "source", "type", "start", "stop", "idk", "idk1", "idk2", "info", "gene_name")
# Drop genes with only a LOC placeholder name.
gtf = gtf[which(! startsWith(gtf$gene_name, "LOC")),]
pit_v_castle_genes = posToGene(pit_v_castle_pos, gtf)
###################
# Gene Level Data #
###################
# Load per-gene MC/CV allele count tables (one per sample), normalize the
# "%"-encoded gene-name suffix back to " (1 of many)", and sum counts across
# the two replicates of each condition for the gene-level MBASED run.
rna_path = "C:/Users/miles/Downloads/brain/"
SRR904 = read.table(paste(rna_path, "/data/ase/SRR5440904_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR905 = read.table(paste(rna_path, "/data/ase/SRR5440905_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR906 = read.table(paste(rna_path, "/data/ase/SRR5440906_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR907 = read.table(paste(rna_path, "/data/ase/SRR5440907_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR908 = read.table(paste(rna_path, "/data/ase/SRR5440908_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
SRR909 = read.table(paste(rna_path, "/data/ase/SRR5440909_RG_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# str_replace replaces only the first "%" per gene name -- presumably at
# most one occurs; confirm if names can contain several.
SRR904$GENE = str_replace(SRR904$GENE,"%", " (1 of many)")
SRR905$GENE = str_replace(SRR905$GENE,"%", " (1 of many)")
SRR906$GENE = str_replace(SRR906$GENE,"%", " (1 of many)")
SRR907$GENE = str_replace(SRR907$GENE,"%", " (1 of many)")
SRR908$GENE = str_replace(SRR908$GENE,"%", " (1 of many)")
SRR909$GENE = str_replace(SRR909$GENE,"%", " (1 of many)")
#NCBI
# SRR904 = read.table(paste(rna_path, "/data/ase/SRR5440904_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR905 = read.table(paste(rna_path, "/data/ase/SRR5440905_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR906 = read.table(paste(rna_path, "/data/ase/SRR5440906_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR907 = read.table(paste(rna_path, "/data/ase/SRR5440907_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR908 = read.table(paste(rna_path, "/data/ase/SRR5440908_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# SRR909 = read.table(paste(rna_path, "/data/ase/SRR5440909_RG_nc_counts.tsv", sep=""), header = TRUE, stringsAsFactors = F)
# Assumes all six tables share the same gene order as SRR904 -- the
# elementwise sums below silently misalign otherwise; confirm upstream.
gene_names = SRR904$GENE
# gene_names = str_replace(gene_names,"%", " (1 of many)")
n_boot = 100
# Prepare the Data
pit_mc = SRR904$MC_COUNTS + SRR905$MC_COUNTS
pit_cv = SRR904$CV_COUNTS + SRR905$CV_COUNTS
names(pit_mc) = gene_names
names(pit_cv) = gene_names
castle_mc = SRR906$MC_COUNTS + SRR907$MC_COUNTS
castle_cv = SRR906$CV_COUNTS + SRR907$CV_COUNTS
names(castle_mc) = gene_names
names(castle_cv) = gene_names
iso_mc = SRR908$MC_COUNTS + SRR909$MC_COUNTS
iso_cv = SRR908$CV_COUNTS + SRR909$CV_COUNTS
names(iso_mc) = gene_names
names(iso_cv) = gene_names
# Check for Skews in ASE ratios
hist(log2(SRR904$MC_COUNTS/SRR904$CV_COUNTS), breaks=50)
hist(log2(SRR905$MC_COUNTS/SRR905$CV_COUNTS), breaks=50)
hist(log2(SRR906$MC_COUNTS/SRR906$CV_COUNTS), breaks=50)
hist(log2(SRR907$MC_COUNTS/SRR907$CV_COUNTS), breaks=50)
hist(log2(SRR908$MC_COUNTS/SRR908$CV_COUNTS), breaks=50)
hist(log2(SRR909$MC_COUNTS/SRR909$CV_COUNTS), breaks=50)
pos = which(SRR904$MC_COUNTS > 0 & SRR904$CV_COUNTS > 0)
d = density(log2(SRR904$MC_COUNTS[pos]/SRR904$CV_COUNTS[pos]))
plot(d, main="Pit 1")
pos = which(SRR905$MC_COUNTS > 0 & SRR905$CV_COUNTS > 0)
d = density(log2(SRR905$MC_COUNTS[pos]/SRR905$CV_COUNTS[pos]))
plot(d, main="Pit 2")
pos = which(SRR906$MC_COUNTS > 0 & SRR906$CV_COUNTS > 0)
d = density(log2(SRR906$MC_COUNTS[pos]/SRR906$CV_COUNTS[pos]))
plot(d, main="Castle 1")
pos = which(SRR907$MC_COUNTS > 0 & SRR907$CV_COUNTS > 0)
d = density(log2(SRR907$MC_COUNTS[pos]/SRR907$CV_COUNTS[pos]))
plot(d, main="Castle 2")
pos = which(SRR908$MC_COUNTS > 0 & SRR908$CV_COUNTS > 0)
d = density(log2(SRR908$MC_COUNTS[pos]/SRR908$CV_COUNTS[pos]))
plot(d, main="Isolated 1")
pos = which(SRR909$MC_COUNTS > 0 & SRR909$CV_COUNTS > 0)
d = density(log2(SRR909$MC_COUNTS[pos]/SRR909$CV_COUNTS[pos]))
plot(d, main="Isolated 2")
# Find Discordant ASE
disc_ase = gene_names[which(sign(SRR904$dif) == sign(SRR905$dif) & sign(SRR904$dif) != 0 &
sign(SRR906$dif) == sign(SRR907$dif) & sign(SRR906$dif) != 0 &
sign(SRR906$dif) != sign(SRR904$dif))]
disc_ase_pc = disc_ase
disc_ase_pc_hgnc = sort(hgncMzebraInPlace(data.frame(disc_ase_pc), 1, rownames(tj)))
write.table(disc_ase_pc, "C:/Users/miles/Downloads/brain/results/ase/disc_ASE_pc.txt", quote = F, col.names = F, row.names = F)
write.table(disc_ase_pc_hgnc, "C:/Users/miles/Downloads/brain/results/ase/disc_ASE_pc_hgnc.txt", quote = F, col.names = F, row.names = F)
# Do 1-sampled ASE experiments
# NOTE(review): pos_all_ind (genes covered in all six samples) is computed but
# never used below; each one-sample run selects its own covered genes.
pos_all_ind = which(SRR904$MC_COUNTS + SRR904$CV_COUNTS > 0 &
                      SRR905$MC_COUNTS + SRR905$CV_COUNTS > 0 &
                      SRR906$MC_COUNTS + SRR906$CV_COUNTS > 0 &
                      SRR907$MC_COUNTS + SRR907$CV_COUNTS > 0 &
                      SRR908$MC_COUNTS + SRR908$CV_COUNTS > 0 &
                      SRR909$MC_COUNTS + SRR909$CV_COUNTS > 0)
# One-sample MBASED run per replicate ("" -> use all genes with coverage).
SRR904_1 = my_MBASED_1(SRR904$MC_COUNTS, SRR904$CV_COUNTS, "SRR904 (Pit 1)", "", gene_names, n_boot)
SRR905_1 = my_MBASED_1(SRR905$MC_COUNTS, SRR905$CV_COUNTS, "SRR905 (Pit 2)", "", gene_names, n_boot)
SRR906_1 = my_MBASED_1(SRR906$MC_COUNTS, SRR906$CV_COUNTS, "SRR906 (Castle 1)", "", gene_names, n_boot)
SRR907_1 = my_MBASED_1(SRR907$MC_COUNTS, SRR907$CV_COUNTS, "SRR907 (Castle 2)", "", gene_names, n_boot)
SRR908_1 = my_MBASED_1(SRR908$MC_COUNTS, SRR908$CV_COUNTS, "SRR908 (Isolated 1)", "", gene_names, n_boot)
SRR909_1 = my_MBASED_1(SRR909$MC_COUNTS, SRR909$CV_COUNTS, "SRR909 (Isolated 2)", "", gene_names, n_boot)
# Copy each run's ASE p-values (and Bonferroni q-values) back onto the count
# tables; genes that were not tested keep p = q = 1.
SRR904$p = 1; SRR905$p = 1; SRR906$p = 1; SRR907$p = 1; SRR908$p = 1; SRR909$p = 1
SRR904$q = 1; SRR905$q = 1; SRR906$q = 1; SRR907$q = 1; SRR908$q = 1; SRR909$q = 1
# NOTE(review): the %in%-mask assignment assumes the assay rows appear in the
# same relative order as the matching rows of each SRR table — confirm,
# otherwise the values are misaligned.
SRR904$p[which(SRR904$GENE %in% rownames(assays(SRR904_1[[1]])$pValueASE))] = assays(SRR904_1[[1]])$pValueASE
SRR905$p[which(SRR905$GENE %in% rownames(assays(SRR905_1[[1]])$pValueASE))] = assays(SRR905_1[[1]])$pValueASE
SRR906$p[which(SRR906$GENE %in% rownames(assays(SRR906_1[[1]])$pValueASE))] = assays(SRR906_1[[1]])$pValueASE
SRR907$p[which(SRR907$GENE %in% rownames(assays(SRR907_1[[1]])$pValueASE))] = assays(SRR907_1[[1]])$pValueASE
SRR908$p[which(SRR908$GENE %in% rownames(assays(SRR908_1[[1]])$pValueASE))] = assays(SRR908_1[[1]])$pValueASE
SRR909$p[which(SRR909$GENE %in% rownames(assays(SRR909_1[[1]])$pValueASE))] = assays(SRR909_1[[1]])$pValueASE
SRR904$q[which(SRR904$GENE %in% rownames(assays(SRR904_1[[1]])$pValueASE))] = p.adjust(assays(SRR904_1[[1]])$pValueASE, method="bonferroni")
SRR905$q[which(SRR905$GENE %in% rownames(assays(SRR905_1[[1]])$pValueASE))] = p.adjust(assays(SRR905_1[[1]])$pValueASE, method="bonferroni")
SRR906$q[which(SRR906$GENE %in% rownames(assays(SRR906_1[[1]])$pValueASE))] = p.adjust(assays(SRR906_1[[1]])$pValueASE, method="bonferroni")
SRR907$q[which(SRR907$GENE %in% rownames(assays(SRR907_1[[1]])$pValueASE))] = p.adjust(assays(SRR907_1[[1]])$pValueASE, method="bonferroni")
SRR908$q[which(SRR908$GENE %in% rownames(assays(SRR908_1[[1]])$pValueASE))] = p.adjust(assays(SRR908_1[[1]])$pValueASE, method="bonferroni")
SRR909$q[which(SRR909$GENE %in% rownames(assays(SRR909_1[[1]])$pValueASE))] = p.adjust(assays(SRR909_1[[1]])$pValueASE, method="bonferroni")
SRR904$sig = SRR904$q < 0.05
SRR905$sig = SRR905$q < 0.05
SRR906$sig = SRR906$q < 0.05
SRR907$sig = SRR907$q < 0.05
SRR908$sig = SRR908$q < 0.05
SRR909$sig = SRR909$q < 0.05
# Per-replicate allele-count difference (sign gives the ASE direction)...
SRR904$dif = SRR904$MC_COUNTS - SRR904$CV_COUNTS
SRR905$dif = SRR905$MC_COUNTS - SRR905$CV_COUNTS
SRR906$dif = SRR906$MC_COUNTS - SRR906$CV_COUNTS
SRR907$dif = SRR907$MC_COUNTS - SRR907$CV_COUNTS
SRR908$dif = SRR908$MC_COUNTS - SRR908$CV_COUNTS
SRR909$dif = SRR909$MC_COUNTS - SRR909$CV_COUNTS
# ...and log2 ASE ratio. Note the CV/MC orientation here versus the MC/CV
# densities plotted above.
SRR904$ase = log2(SRR904$CV_COUNTS / SRR904$MC_COUNTS)
SRR905$ase = log2(SRR905$CV_COUNTS / SRR905$MC_COUNTS)
SRR906$ase = log2(SRR906$CV_COUNTS / SRR906$MC_COUNTS)
SRR907$ase = log2(SRR907$CV_COUNTS / SRR907$MC_COUNTS)
SRR908$ase = log2(SRR908$CV_COUNTS / SRR908$MC_COUNTS)
SRR909$ase = log2(SRR909$CV_COUNTS / SRR909$MC_COUNTS)
# NOTE(review): the second df assignment overwrites the first, and the
# Digging_*/Building_*/Control_* columns referenced right after do not come
# from cbind-ing the SRR tables — this looks like stale interactive code that
# expected renamed columns (compare the colnames() call on my_ryan below).
df = data.frame(cbind(SRR904$GENE, SRR904$ase, SRR905$ase, SRR906$ase, SRR907$ase, SRR908$ase, SRR909$ase))
df = data.frame(cbind(SRR904, SRR905, SRR906, SRR907, SRR908, SRR909))
df$Digging_1 = as.numeric(as.vector(df$Digging_1))
df$Digging_2 = as.numeric(as.vector(df$Digging_2))
df$Building_1 = as.numeric(as.vector(df$Building_1))
df$Building_2 = as.numeric(as.vector(df$Building_2))
df$Control_1 = as.numeric(as.vector(df$Control_1))
df$Control_2 = as.numeric(as.vector(df$Control_2))
# Compare per-gene ASE against the external reference table `ryan`
# (defined elsewhere), row-matched by gene.
my_ryan = df[which(df[,1] %in% ryan$X),]
my_ryan = my_ryan[match(ryan$X, my_ryan[,1]),]
colnames(my_ryan) = c("GENE", "Digging_1", "Digging_2", "Building_1", "Building_2", "Control_1", "Control_2")
my_ryan$Digging_Mean_ASE = (as.numeric(as.vector(my_ryan$Digging_1)) + as.numeric(as.vector(my_ryan$Digging_2)))/2
my_ryan$Building_Mean_ASE = (as.numeric(as.vector(my_ryan$Building_1)) + as.numeric(as.vector(my_ryan$Building_2)))/2
my_ryan$Control_Mean_ASE = (as.numeric(as.vector(my_ryan$Control_1)) + as.numeric(as.vector(my_ryan$Control_2)))/2
length(which( is.na(my_ryan[,2]) & is.na(my_ryan[,3]) & is.na(my_ryan[,4]) & is.na(my_ryan[,5]) & is.na(my_ryan[,6]) & is.na(my_ryan[,7]) ))
length(which(sign(my_ryan$Digging_Mean_ASE) == sign(ryan$Digging_Mean_ASE) & sign(my_ryan$Building_Mean_ASE) == sign(ryan$Building_Mean_ASE) & sign(my_ryan$Control_Mean_ASE) == sign(ryan$Iso_Mean_ASE) ))
# Genes individually significant in all six replicates with a consistent
# allele direction.
all_sig_same_dir = SRR904$GENE[which(SRR904$sig & SRR905$sig & SRR906$sig & SRR907$sig & SRR908$sig & SRR909$sig &
                                       sign(SRR905$dif) == sign(SRR904$dif) &
                                       sign(SRR906$dif) == sign(SRR904$dif) &
                                       sign(SRR907$dif) == sign(SRR904$dif) &
                                       sign(SRR908$dif) == sign(SRR904$dif) &
                                       sign(SRR909$dif) == sign(SRR904$dif) )]
# Genes with a consistent nonzero direction in all six replicates, no
# per-replicate significance required.
all_same_dir = SRR904$GENE[which(sign(SRR905$dif) == sign(SRR904$dif) & SRR905$dif != 0 & SRR904$dif != 0 &
                                   sign(SRR906$dif) == sign(SRR904$dif) & SRR906$dif != 0 & SRR904$dif != 0 &
                                   sign(SRR907$dif) == sign(SRR904$dif) & SRR907$dif != 0 & SRR904$dif != 0 &
                                   sign(SRR908$dif) == sign(SRR904$dif) & SRR908$dif != 0 & SRR904$dif != 0 &
                                   sign(SRR909$dif) == sign(SRR904$dif) & SRR909$dif != 0 & SRR904$dif != 0 )]
# Zack's Method: Combine p-values
# Stouffer's method (metap::sumz) across the six per-replicate p-values.
# all_p = sapply(1:length(gene_names), function(x) sumlog(c(SRR904$p[x], SRR905$p[x], SRR906$p[x], SRR907$p[x], SRR908$p[x], SRR909$p[x]))$p)
all_p = sapply(1:length(gene_names), function(x) sumz(c(SRR904$p[x], SRR905$p[x], SRR906$p[x], SRR907$p[x], SRR908$p[x], SRR909$p[x]))$p)
# sumz cannot handle p = 0; force the combined p to 0 when every replicate is 0.
all_p[which(SRR904$p == 0 &
              SRR905$p == 0 &
              SRR906$p == 0 &
              SRR907$p == 0 &
              SRR908$p == 0 &
              SRR909$p == 0 )] = 0
all_q = p.adjust(all_p, method = "BH")
# Genes with a significant combined q-value and a consistent direction.
agg = gene_names[which(all_q < 0.05 &
                         sign(SRR905$dif) == sign(SRR904$dif) &
                         sign(SRR906$dif) == sign(SRR904$dif) &
                         sign(SRR907$dif) == sign(SRR904$dif) &
                         sign(SRR908$dif) == sign(SRR904$dif) &
                         sign(SRR909$dif) == sign(SRR904$dif) )]
write.table(all_sig_same_dir, "C:/Users/miles/Downloads/brain/results/ase_all_sig_same_dir_RG.txt", quote = F, col.names = F, row.names = F)
write.table(all_same_dir, "C:/Users/miles/Downloads/brain/results/ase_all_same_dir_RG.txt", quote = F, col.names = F, row.names = F)
write.table(agg, "C:/Users/miles/Downloads/brain/results/ase_agg_sig_same_dir_RG.txt", quote = F, col.names = F, row.names = F)
# Human-readable HGNC versions of the same three lists.
all_sig_same_dir_hgnc = hgncMzebraInPlace(data.frame(all_sig_same_dir), 1, gene_names)
all_same_dir_hgnc = hgncMzebraInPlace(data.frame(all_same_dir), 1, gene_names)
agg_hgnc = hgncMzebraInPlace(data.frame(agg), 1, gene_names)
write.table(all_sig_same_dir_hgnc, "C:/Users/miles/Downloads/brain/results/ase_all_sig_same_dir_hgnc_RG.txt", quote = F, col.names = F, row.names = F)
write.table(all_same_dir_hgnc, "C:/Users/miles/Downloads/brain/results/ase_all_same_dir_hgnc_RG.txt", quote = F, col.names = F, row.names = F)
write.table(agg_hgnc, "C:/Users/miles/Downloads/brain/results/ase_agg_sig_same_dir_hgnc_RG.txt", quote = F, col.names = F, row.names = F)
# Do 2-sampled ASE experiments
# Each condition pair is tested in both directions (the first sample defines
# the major allele) and the overlap of significant genes is kept.
# Pit v Castle
pit_v_castle_res = my_MBASED(pit_mc, pit_cv, castle_mc, castle_cv, "pit", "castle", gene_names, n_boot)
pit_v_castle_genes = pit_v_castle_res[[2]]
castle_v_pit_res = my_MBASED(castle_mc, castle_cv, pit_mc, pit_cv, "castle", "pit", gene_names, n_boot)
castle_v_pit_genes = castle_v_pit_res[[2]]
ovlp_pc_v_cp = pit_v_castle_genes[which(pit_v_castle_genes %in% castle_v_pit_genes)]
# Pit v Isolated
pit_v_iso_res = my_MBASED(pit_mc, pit_cv, iso_mc, iso_cv, "pit", "iso", gene_names, n_boot)
pit_v_iso_genes = pit_v_iso_res[[2]]
iso_v_pit_res = my_MBASED(iso_mc, iso_cv, pit_mc, pit_cv, "iso", "pit", gene_names, n_boot)
iso_v_pit_genes = iso_v_pit_res[[2]]
ovlp_pi_v_ip = pit_v_iso_genes[which(pit_v_iso_genes %in% iso_v_pit_genes)]
# Castle v Isolated
castle_v_iso_res = my_MBASED(castle_mc, castle_cv, iso_mc, iso_cv, "castle", "iso", gene_names, n_boot)
castle_v_iso_genes = castle_v_iso_res[[2]]
iso_v_castle_res = my_MBASED(iso_mc, iso_cv, castle_mc, castle_cv, "iso", "castle", gene_names, n_boot)
iso_v_castle_genes = iso_v_castle_res[[2]]
ovlp_ci_v_ic = castle_v_iso_genes[which(castle_v_iso_genes %in% iso_v_castle_genes)]
# Summary table of significant-gene counts per test, then write all lists.
res = data.frame(test=c("pit_v_castle", "castle_v_pit", "pvc_cvp_ovlp", "pit_v_iso", "iso_v_pit", "pvi_ivp_ovlp", "castle_v_iso", "iso_v_castle", "cvi_ivc"),
                 num_genes=c(length(pit_v_castle_genes), length(castle_v_pit_genes), length(ovlp_pc_v_cp),
                             length(pit_v_iso_genes), length(iso_v_pit_genes), length(ovlp_pi_v_ip),
                             length(castle_v_iso_genes), length(iso_v_castle_genes), length(ovlp_ci_v_ic)))
write.table(pit_v_castle_genes, "C:/Users/miles/Downloads/brain/results/ase_pit_v_castle_RG.txt", quote = F, col.names = F, row.names = F)
write.table(castle_v_pit_genes, "C:/Users/miles/Downloads/brain/results/ase_castle_v_pit_RG.txt", quote = F, col.names = F, row.names = F)
write.table(pit_v_iso_genes, "C:/Users/miles/Downloads/brain/results/ase_pit_v_iso_RG.txt", quote = F, col.names = F, row.names = F)
write.table(iso_v_pit_genes, "C:/Users/miles/Downloads/brain/results/ase_iso_v_pit_RG.txt", quote = F, col.names = F, row.names = F)
write.table(castle_v_iso_genes, "C:/Users/miles/Downloads/brain/results/ase_castle_v_iso_RG.txt", quote = F, col.names = F, row.names = F)
write.table(iso_v_castle_genes, "C:/Users/miles/Downloads/brain/results/ase_iso_v_castle_RG.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_pc_v_cp, "C:/Users/miles/Downloads/brain/results/ase_ovlp_pc_v_cp_RG.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_pi_v_ip, "C:/Users/miles/Downloads/brain/results/ase_ovlp_pi_v_ip_RG.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_ci_v_ic, "C:/Users/miles/Downloads/brain/results/ase_ovlp_ci_v_ic_RG.txt", quote = F, col.names = F, row.names = F)
# Convert each two-sample gene list to human HGNC symbols and write to disk.
pit_v_castle_genes_hgnc = hgncMzebraInPlace(data.frame(pit_v_castle_genes), 1, gene_names)
castle_v_pit_genes_hgnc = hgncMzebraInPlace(data.frame(castle_v_pit_genes), 1, gene_names)
pit_v_iso_genes_hgnc = hgncMzebraInPlace(data.frame(pit_v_iso_genes), 1, gene_names)
iso_v_pit_genes_hgnc = hgncMzebraInPlace(data.frame(iso_v_pit_genes), 1, gene_names)
# BUG FIX: this line previously converted pit_v_iso_genes (copy-paste error),
# so the castle_v_iso HGNC file silently contained the pit_v_iso list.
castle_v_iso_genes_hgnc = hgncMzebraInPlace(data.frame(castle_v_iso_genes), 1, gene_names)
iso_v_castle_genes_hgnc = hgncMzebraInPlace(data.frame(iso_v_castle_genes), 1, gene_names)
ovlp_pc_v_cp_hgnc = hgncMzebraInPlace(data.frame(ovlp_pc_v_cp), 1, gene_names)
ovlp_pi_v_ip_hgnc = hgncMzebraInPlace(data.frame(ovlp_pi_v_ip), 1, gene_names)
ovlp_ci_v_ic_hgnc = hgncMzebraInPlace(data.frame(ovlp_ci_v_ic), 1, gene_names)
write.table(pit_v_castle_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_pit_v_castle_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(castle_v_pit_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_castle_v_pit_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(pit_v_iso_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_pit_v_iso_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(iso_v_pit_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_iso_v_pit_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(castle_v_iso_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_castle_v_iso_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(iso_v_castle_genes_hgnc, "C:/Users/miles/Downloads/brain/results/ase_iso_v_castle_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_pc_v_cp_hgnc, "C:/Users/miles/Downloads/brain/results/ase_ovlp_pc_v_cp_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_pi_v_ip_hgnc, "C:/Users/miles/Downloads/brain/results/ase_ovlp_pi_v_ip_hgnc.txt", quote = F, col.names = F, row.names = F)
write.table(ovlp_ci_v_ic_hgnc, "C:/Users/miles/Downloads/brain/results/ase_ovlp_ci_v_ic_hgnc.txt", quote = F, col.names = F, row.names = F)
my_MBASED_1 = function(s1_mc, s1_cv, s1_name, gene_ind, gene_names, n_boot, myIsPhased=T, verbose=T) {
  # Purpose: Run a one sampled MBASED Experiment
  # s1_mc: sample 1 mc counts (numeric vector parallel to gene_names)
  # s1_cv: sample 1 cv counts (numeric vector parallel to gene_names)
  # s1_name: label for the sample (column name of the SummarizedExperiment)
  # gene_ind: index of genes to run on (aka subset of gene indexes), set to "" to find pos genes in this sample
  # gene_names: genes the counts are for (aka all genes)
  # n_boot: number of bootstraps in runMBASED
  # Returns list(MBASED result object, genes with Bonferroni q < 0.05).
  pos_ind = gene_ind
  # BUG FIX: `if (pos_ind == "")` is a vectorized comparison; when gene_ind is
  # a vector of indexes it warned (and errors in R >= 4.2: "the condition has
  # length > 1"). identical() keeps the sentinel check scalar-safe.
  if (identical(pos_ind, "")) {
    # Default to every gene with at least one read across the two alleles
    # (MBASED requires total coverage > 0 at each locus).
    pos_ind = which( s1_mc + s1_cv > 0)
  }
  pos_gene = gene_names[pos_ind]
  this_s1_mc = s1_mc[pos_ind]
  this_s1_cv = s1_cv[pos_ind]
  if (verbose) {
    print(paste("Genes Used", length(pos_gene)))
  }
  # Create the SummarizedExperiment and run MBASED
  # Every gene gets the same dummy range; only the aseID grouping matters here.
  my_granges = GRanges(seqnames = rep("chr1:1-2", length(pos_gene)), aseID=pos_gene)
  s1_exp = SummarizedExperiment(assays=list(
    lociAllele1Counts = matrix( c(this_s1_mc), ncol=1, dimnames = list(pos_gene, s1_name)),
    lociAllele2Counts = matrix( c(this_s1_cv), ncol=1, dimnames = list(pos_gene, s1_name))
  ), rowRanges = my_granges)
  s1 = runMBASED(ASESummarizedExperiment=s1_exp, isPhased = myIsPhased, numSim = n_boot)
  # Analyze MBASED Data
  # hist(assays(s1)$majorAlleleFrequencyDifference, main=paste(s1_name, "MAF"), xlab = "Major Allele Frequency")
  # hist(assays(s1)$pValueASE, main=paste(s1_name, "p-value"), xlab = "p-value")
  qvalue = p.adjust(assays(s1)$pValueASE, method="bonferroni")
  s1_genes = pos_gene[which(qvalue < 0.05)]
  return(list(s1, s1_genes))
}
my_MBASED = function(s1_mc, s1_cv, s2_mc, s2_cv, s1_name, s2_name, gene_names, n_boot, myIsPhased=T, verbose=T, isSNP=F) {
  # Purpose: Run a two sampled MBASED Experiment
  # s1_mc: sample 1 mc counts
  # s1_cv: sample 1 cv counts
  # s2_mc: sample 2 mc counts
  # s2_cv: sample 2 cv counts
  # s1_name: name of sample 1 (for example "pit")
  # s2_name: name of sample 2 (for example "castle")
  # gene_names: genes the counts are for
  # n_boot: number of bootstraps in runMBASED
  # isSNP: when TRUE the count vectors are named per-SNP and the two samples
  #   are intersected by name instead of filtered by coverage
  # Returns list(MBASED result object, loci with Bonferroni q < 0.05).
  # First find non-zero loci bc according to the documentation:
  # "All supplied loci must have total read count (across both alleles) greater than 0
  # (in each of the two samples, in the case of two-sample analysis)."
  if (isSNP) {
    # NOTE(review): these %in% subsets assume the shared names appear in the
    # same relative order in both samples; if not, the rows of sample 1 and
    # sample 2 are misaligned. The coverage > 0 requirement quoted above is
    # also not enforced in this branch — confirm upstream filtering.
    this_s1_mc = s1_mc[which(names(s1_mc) %in% names(s2_mc))]
    this_s1_cv = s1_cv[which(names(s1_cv) %in% names(s2_cv))]
    this_s2_mc = s2_mc[which(names(s2_mc) %in% names(s1_mc))]
    this_s2_cv = s2_cv[which(names(s2_cv) %in% names(s1_cv))]
    print(paste("SNPs lost from s1:", length(s1_mc) - length(this_s1_mc)))
    print(paste("SNPs lost from s2:", length(s2_mc) - length(this_s2_mc)))
    pos_gene = names(s1_mc)[which(names(s1_mc) %in% names(s2_mc))]
  } else {
    # Keep loci with coverage in BOTH samples (MBASED requirement above).
    pos_ind = which( s1_mc + s1_cv > 0 & s2_mc + s2_cv > 0 )
    pos_gene = gene_names[pos_ind]
    this_s1_mc = s1_mc[pos_ind]
    this_s1_cv = s1_cv[pos_ind]
    this_s2_mc = s2_mc[pos_ind]
    this_s2_cv = s2_cv[pos_ind]
    if (verbose) {
      print(paste("Genes Used", length(pos_gene)))
    }
  }
  # Create the SummarizedExperiment and run MBASED
  # All loci share a dummy range; only the aseID grouping matters here.
  my_granges = GRanges(seqnames = rep("chr1:1-2", length(pos_gene)), aseID=pos_gene)
  s1_v_s2_exp = SummarizedExperiment(assays=list(
    lociAllele1Counts = matrix( c(this_s1_mc, this_s2_mc), ncol=2, dimnames = list(pos_gene, c(s1_name, s2_name))),
    lociAllele2Counts = matrix( c(this_s1_cv, this_s2_cv), ncol=2, dimnames = list(pos_gene, c(s1_name, s2_name)))
  ), rowRanges = my_granges)
  s1_v_s2 = runMBASED(ASESummarizedExperiment=s1_v_s2_exp, isPhased = myIsPhased, numSim = n_boot)
  # Analyze MBASED Data
  hist(assays(s1_v_s2)$majorAlleleFrequencyDifference, main=paste(s1_name, "v", s2_name, "MAF"), xlab = "Major Allele Frequency")
  hist(assays(s1_v_s2)$pValueASE, main=paste(s1_name, "v", s2_name, "p-value"), xlab = "p-value")
  qvalue = p.adjust(assays(s1_v_s2)$pValueASE, method="bonferroni")
  s1_v_s2_genes = pos_gene[which(qvalue < 0.05)]
  return(list(s1_v_s2, s1_v_s2_genes))
}
posToGene = function(all_pos, gtf) {
  # Purpose: map genomic positions to the names of genes lying within 25kb.
  # all_pos: character vector of positions formatted "<LG>:<base>-<end>"
  # gtf: data.frame with (at least) columns LG, start, stop, gene_name
  # Returns a character vector of matching gene names (one entry per hit;
  # a gene can appear multiple times if several positions land near it).
  found_gene = character(0)
  for (pos in all_pos) {
    # First ':' separates the linkage group; first '-' ends the base position.
    stop_1 = gregexpr(pattern = ':', pos)[[1]][1]
    stop_2 = gregexpr(pattern = '-', pos)[[1]][1]
    lg = substr(pos, 1, stop_1 - 1)
    # BUG FIX: base was left as a character string, so the range checks below
    # coerced the numeric gtf columns to character and compared
    # lexicographically (e.g. "9" > "10"); coerce to numeric.
    base = as.numeric(substr(pos, stop_1 + 1, stop_2 - 1))
    # BUG FIX: the lower bound previously used gtf$start + 25000, which shifted
    # the window 25kb downstream instead of extending it 25kb upstream; the
    # intended window is [start - 25kb, stop + 25kb].
    this_found = gtf$gene_name[which(gtf$LG == lg & gtf$start - 25000 <= base & gtf$stop + 25000 >= base)]
    found_gene = c(found_gene, this_found)
  }
  return(found_gene)
}
shuffleAlleles = function(s1_mc, s1_cv, s2_mc, s2_cv) {
  # Purpose: build a null dataset for bootstrapping by randomly swapping, per
  # locus, which condition each count belongs to. As in the original
  # implementation, the MC and CV counts are shuffled independently
  # (NOTE(review): if MC/CV pairs should travel together, use one swap vector).
  # s1_mc/s1_cv: condition 1 MC and CV counts
  # s2_mc/s2_cv: condition 2 MC and CV counts (same length as condition 1)
  # Returns a data.frame with columns new_s1_mc, new_s1_cv, new_s2_mc, new_s2_cv.
  n = length(s1_mc)
  # TRUE at a locus means the two conditions trade their MC counts. This is a
  # direct vectorized select, replacing the original's factor/plyr::revalue
  # index gymnastics (and dropping the plyr dependency).
  swap_mc = sample(c(TRUE, FALSE), n, replace = TRUE)
  new_s1_mc = ifelse(swap_mc, s2_mc, s1_mc)
  new_s2_mc = ifelse(swap_mc, s1_mc, s2_mc)
  # Independent swap for the CV counts.
  swap_cv = sample(c(TRUE, FALSE), n, replace = TRUE)
  new_s1_cv = ifelse(swap_cv, s2_cv, s1_cv)
  new_s2_cv = ifelse(swap_cv, s1_cv, s2_cv)
  res = data.frame(new_s1_mc, new_s1_cv, new_s2_mc, new_s2_cv)
  return(res)
}
#===============#
# Bootstrapping #
#===============#
# Empirical null for the pit/castle two-direction overlap: shuffle which
# condition each locus's counts belong to, rerun both MBASED directions, and
# record the size of the overlap on each iteration.
real_pc = length(pit_v_castle_genes)
real_cp = length(castle_v_pit_genes)
real_ovlp_pc_v_cp = length(ovlp_pc_v_cp)
boot_res = data.frame()
for (n in 1:n_boot) {
  # Progress indicator: print the iteration number every 10%, dots otherwise.
  if(n == n_boot) {
    cat(paste(n, "\n"))
  } else if (n %% (n_boot/10) == 0 || n == 1) {
    cat(n)
  } else {
    cat(".")
  }
  tryCatch({
    # Pit v Castle
    # NOTE(review): these assignments overwrite the real pit_v_castle_genes /
    # castle_v_pit_genes / ovlp_pc_v_cp computed earlier in the script; only
    # their lengths survive via the real_* variables captured above.
    shuf_res = shuffleAlleles(pit_mc, pit_cv, castle_mc, castle_cv)
    pit_v_castle_res = my_MBASED(shuf_res$new_s1_mc, shuf_res$new_s1_cv, shuf_res$new_s2_mc, shuf_res$new_s2_cv, "pit", "castle", gene_names, n_boot, verbose=F)
    pit_v_castle_genes = pit_v_castle_res[[2]]
    castle_v_pit_res = my_MBASED(shuf_res$new_s2_mc, shuf_res$new_s2_cv, shuf_res$new_s1_mc, shuf_res$new_s1_cv, "castle", "pit", gene_names, n_boot, verbose=F)
    castle_v_pit_genes = castle_v_pit_res[[2]]
    ovlp_pc_v_cp = pit_v_castle_genes[which(pit_v_castle_genes %in% castle_v_pit_genes)]
    # # Pit v Isolated
    # pit_v_iso_res = my_MBASED(pit_mc, pit_cv, iso_mc, iso_cv, "pit", "iso", gene_names, n_boot, verbose=F)
    # pit_v_iso_genes = pit_v_iso_res[[2]]
    # iso_v_pit_res = my_MBASED(iso_mc, iso_cv, pit_mc, pit_cv, "iso", "pit", gene_names, n_boot, verbose=F)
    # iso_v_pit_genes = iso_v_pit_res[[2]]
    # ovlp_pi_v_ip = pit_v_iso_genes[which(pit_v_iso_genes %in% iso_v_pit_genes)]
    #
    # # Castle v Isolated
    # castle_v_iso_res = my_MBASED(castle_mc, castle_cv, iso_mc, iso_cv, "castle", "iso", gene_names, n_boot, verbose=F)
    # castle_v_iso_genes = castle_v_iso_res[[2]]
    # iso_v_castle_res = my_MBASED(iso_mc, iso_cv, castle_mc, castle_cv, "iso", "castle", gene_names, n_boot, verbose=F)
    # iso_v_castle_genes = iso_v_castle_res[[2]]
    # ovlp_ci_v_ic = castle_v_iso_genes[which(castle_v_iso_genes %in% iso_v_castle_genes)]
    # boot_res = rbind(boot_res, t(c(n, ovlp_pc_v_cp, ovlp_pi_v_ip, ovlp_ci_v_ic)))
    boot_res = rbind(boot_res, t(c(n, length(ovlp_pc_v_cp))))
  }, error = function(e) {
    print(paste("Error on boostrap", n))
  })
}
# colnames(boot_res) = c("run", "overlap_in_pvc_and_cvp", "overlap_in_pvi_and_ivp", "overlap_in_cvi_and_ivc")
colnames(boot_res) = c("run", "overlap_in_pvc_and_cvp")
boot_res$above = boot_res$overlap_in_pvc_and_cvp > real_ovlp_pc_v_cp
ggplot(boot_res, aes(overlap_in_pvc_and_cvp, alpha=.7, fill=above)) + geom_histogram(alpha=0.5, color = "purple") + geom_vline(aes(xintercept = real_ovlp_pc_v_cp)) + geom_text(aes(x=real_ovlp_pc_v_cp, label="Real Value"), y = Inf, hjust=0, vjust=1, color = "black") + xlab("# of Gene in Overlap Between Pit v Castle and Castle v Pit") + ggtitle("Comparison Between Bootstrap Values and Real Value") + guides(color=F, alpha=F, fill=F)
# Empirical p-value: fraction of bootstrap overlaps exceeding the real overlap.
print(paste("p-value =", length(boot_res$above[which(boot_res$above)]) / length(boot_res$above)))
#=========================================================================================
# Old UMD1 Data
#=========================================================================================
# Discordant ASE in the older UMD1-aligned data: a gene is "discordant" when
# both replicates agree on the ASE sign in the gene's first row and both agree
# on the opposite sign in a later row (rows correspond to conditions).
rna_path <- "C:/Users/miles/Downloads/brain/"
data <- read.table(paste(rna_path, "/data/disc_ase.txt", sep=""), header = TRUE)
disc_genes <- c()
for (gene in unique(data$gene)) {
  this_rows <- data[which(data$gene == gene),]
  # Debug print left in for one gene of interest.
  if (gene == "atp1b4") {
    print(this_rows)
  }
  if (this_rows$rep_1_ase_ratio[1] > 0 && this_rows$rep_2_ase_ratio[1] > 0 && nrow(this_rows) >= 2) { # both pos
    for (i in 2:nrow(this_rows)) {
      if (this_rows$rep_1_ase_ratio[i] < 0 && this_rows$rep_2_ase_ratio[i] < 0) {
        # NOTE(review): a gene can be appended once per flipped row, so
        # disc_genes may contain duplicates.
        disc_genes <- c(disc_genes, gene)
      }
    }
  } else if (this_rows$rep_1_ase_ratio[1] < 0 && this_rows$rep_2_ase_ratio[1] < 0 && nrow(this_rows) >= 2) { # both neg
    for (i in 2:nrow(this_rows)) {
      if (this_rows$rep_1_ase_ratio[i] > 0 && this_rows$rep_2_ase_ratio[i] > 0) {
        disc_genes <- c(disc_genes, gene)
      }
    }
  }
}
# Flag genes where the MC allele is relatively up (ASE ratio below every
# building replicate) in the isolated and/or digging conditions.
mc_up <- c()
for (gene in unique(data$gene)) {
  this_rows <- data[which(data$gene == gene),]
  build_rows <- this_rows[which(this_rows$condition == "building"),]
  iso_rows <- this_rows[which(this_rows$condition == "isolated"),]
  dig_rows <- this_rows[which(this_rows$condition == "digging"),]
  min_build <- min(build_rows$rep_1_ase_ratio, build_rows$rep_2_ase_ratio)
  # BUG FIX: the original indexed iso_rows/dig_rows with `[i]`, a stale loop
  # variable left over from an earlier loop. all() over the condition's rows is
  # used instead (identical when each condition has one row per gene).
  iso_up <- nrow(iso_rows) > 0 && all(iso_rows$rep_1_ase_ratio < min_build) && all(iso_rows$rep_2_ase_ratio < min_build)
  dig_up <- nrow(dig_rows) > 0 && all(dig_rows$rep_1_ase_ratio < min_build) && all(dig_rows$rep_2_ase_ratio < min_build)
  if (nrow(iso_rows) > 0 && nrow(dig_rows) > 0) { # only both up is considered mc_up
    if (iso_up && dig_up) {
      mc_up <- c(mc_up, gene)
    }
  } else { # either one up, is considered mc_up
    if (iso_up) {
      mc_up <- c(mc_up, gene)
    }
    if (dig_up) {
      mc_up <- c(mc_up, gene)
    }
  }
}
# BUG FIX: data.frame(gene <- mc_up, ...) used <-, which assigned stray global
# variables and produced mangled column names; use = for named columns (the
# written file is unchanged since col.names = FALSE).
df <- data.frame(gene = mc_up, bio = rep("MC_UP", length(mc_up)))
write.table(df, paste(rna_path, "/data/mc_up.txt", sep=""), sep="\t", quote = FALSE, col.names = FALSE, row.names = FALSE)
# Published common-genes table (mean ASE ratios centered at 1, not log scale);
# `test` = genes whose mean ASE sits on opposite sides of 1 in digging vs
# building.
data = read.csv("C:/Users/miles/Downloads/cichlid_ase_common_genes_all_conditions_filtered_030920.csv", header = T)
test = data[which( sign(data$Digging_Mean_ASE-1) != sign(data$Building_Mean_ASE-1) ),1]
#==============================================================================================
# Single Nuc ASE ==============================================================================
#==============================================================================================
# Load the clustered single-nucleus Seurat object and the per-(SNP, cell)
# allele counts, and allocate gene x cell REF/ALT count matrices matching the
# object's dimensions and dimnames.
bb = readRDS("~/scratch/brain/data/bb_clustered_102820.rds")
counts = read.table("~/scratch/brain/ase/counts.txt", sep = "\t", header = T, stringsAsFactors=F)
bb_backup = bb
mat_ref = matrix(0L, nrow=nrow(bb_backup), ncol=ncol(bb_backup), dimnames = list(rownames(bb_backup), colnames(bb_backup)))
mat_alt = matrix(0L, nrow=nrow(bb_backup), ncol=ncol(bb_backup), dimnames = list(rownames(bb_backup), colnames(bb_backup)))
# Accumulate per-(gene, cell) REF/ALT allele counts into the two matrices.
n_rows = nrow(counts)
progress_step = ceiling(n_rows / 10)
for (i in seq_len(n_rows)) {
  # BUG FIX: the original progress check was `i%%nrow(counts)/10 == 0`, which
  # by operator precedence is (i %% n) / 10 and only fired at i == n; print
  # roughly every 10% instead.
  if (i %% progress_step == 0) { print(i) }
  gene = counts$GENE[i]
  cell = counts$CELL[i]
  mat_ref[gene, cell] = mat_ref[gene, cell] + counts$REF_COUNT[i]
  mat_alt[gene, cell] = mat_alt[gene, cell] + counts$ALT_COUNT[i]
}
saveRDS(mat_ref, "~/scratch/brain/ase/R/ref_mat.rds")
saveRDS(mat_alt, "~/scratch/brain/ase/R/alt_mat.rds")
# Find the average ASE for the cluster and set the numbers to be the same for every cell in the cluster
# NOTE(review): mat_clust_ref_alt_15 is allocated but never filled (the non-log
# assignments below remain commented out); only the log matrix is populated.
mat_clust_ref_alt_15 = matrix(0L, nrow=nrow(bb_backup), ncol=ncol(bb_backup), dimnames = list(rownames(bb_backup), colnames(bb_backup)))
mat_clust_log_ref_alt_15 = matrix(0L, nrow=nrow(bb_backup), ncol=ncol(bb_backup), dimnames = list(rownames(bb_backup), colnames(bb_backup)))
Idents(bb) = bb$seuratclusters15
for (cluster in 0:14) {
  cat(paste0(cluster, "."))
  clust_cells = WhichCells(bb, idents = cluster)
  # Condition splits (these are cluster-independent; recomputed each pass).
  bhve_cells = colnames(bb)[which(bb$cond == "BHVE")]
  ctrl_cells = colnames(bb)[which(bb$cond == "CTRL")]
  clust_b = clust_cells[which(clust_cells %in% bhve_cells)]
  clust_c = clust_cells[which(clust_cells %in% ctrl_cells)]
  # ase_ref_means = rowMeans(mat_ref[,clust_cells])
  # ase_alt_means = rowMeans(mat_alt[,clust_cells])
  ase_ref_sums_b = rowSums(mat_ref[,clust_b])
  ase_ref_sums_c = rowSums(mat_ref[,clust_c])
  ase_alt_sums_b = rowSums(mat_alt[,clust_b])
  ase_alt_sums_c = rowSums(mat_alt[,clust_c])
  # mat_clust_ref_alt_15[,clust_cells] = matrix( rep(ase_ref_means/ase_alt_means, length(clust_cells)), ncol = length(clust_cells) )
  # mat_clust_ref_alt_15[,clust_cells] = matrix( rep(ase_ref_means/ase_alt_means, length(clust_cells)), ncol = length(clust_cells) )
  # Every cell receives its cluster x condition pooled log2(REF/ALT) per gene.
  mat_clust_log_ref_alt_15[,clust_b] = matrix( rep(log2(ase_ref_sums_b/ase_alt_sums_b), length(clust_b)), ncol = length(clust_b) )
  mat_clust_log_ref_alt_15[,clust_c] = matrix( rep(log2(ase_ref_sums_c/ase_alt_sums_c), length(clust_c)), ncol = length(clust_c) )
}
# bb@assays$RNA@data = mat_clust_log_ref_alt
# Plot drd2 twice — intended once on the ASE log-ratio values and once on the
# original expression — then copy each PNG to Dropbox via rclone.
png_name = "~/scratch/brain/ase/R/log_drd2.png"
png(file = png_name, width = 1000, height = 1000, res = 150)
# NOTE(review): the data-slot swap above is commented out, so this first plot
# likely shows raw expression rather than ASE ratios — confirm.
print(FeaturePlot(bb, "drd2", order = T, pt.size = 1, label =T))
dev.off()
system(paste0("rclone copy ", png_name, " dropbox:BioSci-Streelman/George/Brain/bb/results/sn_ase/"))
bb@assays$RNA@data = bb_backup@assays$RNA@data
png_name = "~/scratch/brain/ase/R/drd2_exp.png"
png(file = png_name, width = 1000, height = 1000, res = 150)
print(FeaturePlot(bb, "drd2", order = T, pt.size = 1, label =T))
dev.off()
system(paste0("rclone copy ", png_name, " dropbox:BioSci-Streelman/George/Brain/bb/results/sn_ase/"))
# Global density of log2(REF/ALT) over genes with both alleles covered.
pos = which(rowSums(mat_ref) > 0 & rowSums(mat_alt) > 0)
d = density(log2( rowSums(mat_ref[pos,]) / rowSums(mat_alt[pos,]) ))
png_name = "~/scratch/brain/ase/R/all_pos.png"
png(file = png_name, width = 1000, height = 1000, res = 150)
print(plot(d, main="All"))
dev.off()
system(paste0("rclone copy ", png_name, " dropbox:BioSci-Streelman/George/Brain/bb/results/sn_ase/"))
# Per-cluster density of the pooled log2(REF/ALT) ratio across covered genes.
for (i in 0:14) {
  # BUG FIX: the original used `idents = cluster`, a stale variable left at 14
  # by the previous loop, so every plot showed the same cluster; use the loop
  # index (Idents is still bb$seuratclusters15 at this point).
  clust_cells = WhichCells(bb, idents = i)
  pos = which(rowSums(mat_ref[,clust_cells]) > 0 & rowSums(mat_alt[,clust_cells]) > 0)
  d = density(log2( rowSums(mat_ref[pos,clust_cells]) / rowSums(mat_alt[pos,clust_cells]) ))
  png_name = paste0("~/scratch/brain/ase/R/", i, "_pos.png")
  png(file = png_name, width = 1000, height = 1000, res = 150)
  print(plot(d, main=i))
  dev.off()
  system(paste0("rclone copy ", png_name, " dropbox:BioSci-Streelman/George/Brain/bb/results/sn_ase/"))
}
# good_genes = rownames(bb)
# for (i in 0:14) {
#   clust_cells = WhichCells(bb, idents = cluster)
#   ase_ref_sums = rowSums(mat_ref[good_genes, clust_cells])
#   ase_alt_sums = rowSums(mat_alt[good_genes, clust_cells])
#   good_genes = good_genes[which(ase_ref_sums > 5 & ase_alt_sums > 5)]
# }
# length(good_genes)
# BHVE vs CTRL pseudobulk ASE: keep genes with deep coverage of both alleles
# (> 200 reads each), run MBASED in both directions, keep the overlap.
Idents(bb) = bb$cond
good_genes = rownames(bb)[which(rowSums(mat_ref) > 200 & rowSums(mat_alt) > 200)]
n_boot = 100
# sig_df = data.frame()
# sig_genes = list()
# for (i in 0:14) {
#   for (j in i:14) {
i_cells = WhichCells(bb, idents = "BHVE")
i_ref = rowSums(mat_ref[good_genes, i_cells])
i_alt = rowSums(mat_alt[good_genes, i_cells])
j_cells = WhichCells(bb, idents = "CTRL")
j_ref = rowSums(mat_ref[good_genes, j_cells])
j_alt = rowSums(mat_alt[good_genes, j_cells])
# NOTE(review): `i` and `j` passed as sample names below are leftovers from
# the commented-out per-cluster loop and hold stale values at this point.
i_j_res = my_MBASED(i_ref, i_alt, j_ref, j_alt, i, j, good_genes, n_boot)
i_j_genes = i_j_res[[2]]
j_i_res = my_MBASED(j_ref, j_alt, i_ref, i_alt, j, i, good_genes, n_boot)
j_i_genes = j_i_res[[2]]
sig_genes = i_j_genes[which(i_j_genes %in% j_i_genes)]
# sig_genes[[paste0(i, "_", j)]] = i_j_genes[which(i_j_genes %in% j_i_genes)]
# sig_df = rbind(sig_df, t(c(i, j, length(i_j_genes), length(j_i_genes), length(sig_genes[[paste0(i, "_", j)]]))))
# colnames(sig_df) = c("cluster_A", "cluster_B", "A_B_genes", "B_A_gnes", "ovlp")
# sig_df = sig_df[which(sig_df$cluster_A != sig_df$cluster_B),]
all_sig_genes = unique(sig_genes)
Idents(bb) = bb$seuratclusters15
# For each significant gene, plot the cluster-level ASE log-ratio (split by
# condition) and the raw expression UMAP, copying both PNGs to Dropbox.
for (gene in all_sig_genes) {
  # NOTE(review): sig_genes is a plain character vector here, so this lookup
  # of list names reflects the commented-out per-cluster version and is
  # unlikely to return anything useful (sig_in is also never used).
  sig_in = unlist(sapply(1:length(sig_genes), function(x) if(gene %in% sig_genes[[x]]) {names(sig_genes)[x]}))
  bb@assays$RNA@data = mat_clust_log_ref_alt_15
  png_name = paste0("~/scratch/brain/ase/R/log_", gene, ".png")
  png(file = png_name, width = 2000, height = 1000, res = 150)
  # NOTE(review): bhve_cells/ctrl_cells still hold the values from the last
  # iteration of the earlier per-cluster loop (recomputed there each pass, so
  # they do cover all cells — but verify that loop ran in this session).
  print(FeaturePlot(bb, gene, order = T, pt.size = 1, label =T, split.by = "cond") + labs(caption = paste0("ASE Ratio in BHVE: ", log2(sum(mat_ref[gene, bhve_cells])/sum(mat_alt[gene, bhve_cells])), ". ASE Ratio in CTRL: ", log2(sum(mat_ref[gene, ctrl_cells])/sum(mat_alt[gene, ctrl_cells])) )))
  dev.off()
  system(paste0("rclone copy ", png_name, " dropbox:BioSci-Streelman/George/Brain/bb/results/sn_ase/genes_bvc/"))
  bb@assays$RNA@data = bb_backup@assays$RNA@data
  png_name = paste0("~/scratch/brain/ase/R/exp_", gene, ".png")
  png(file = png_name, width = 1000, height = 1000, res = 150)
  print(FeaturePlot(bb, gene, order = T, pt.size = 1, label =T))
  dev.off()
  system(paste0("rclone copy ", png_name, " dropbox:BioSci-Streelman/George/Brain/bb/results/sn_ase/genes_bvc/"))
}
>>>>>>> f2582c0621b35f33da53657066327ce1be56299d
|
c256962525b3d0f1dd56caf7706b4a964cd484fe
|
b40ea60204f4f3e6c53079d012ef7a2cd8b2bc41
|
/day2/day2.R
|
ab4be031bdd9d22787ba1b13b4fc49adec53bce9
|
[] |
no_license
|
johnlocker/adventofcode2018
|
f9648fc316e6ce6e61dab38d936a2eb4acde3587
|
3d8cae621c663df3a423b30e0b15e9bf713871c6
|
refs/heads/master
| 2020-04-09T09:16:36.802088
| 2018-12-16T16:41:03
| 2018-12-16T16:41:03
| 160,227,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,096
|
r
|
day2.R
|
# Advent of Code 2018, day 2.
# Part A: checksum = (# ids with a letter appearing exactly twice) *
#                    (# ids with a letter appearing exactly three times).
# Part B: the two ids differing in exactly one position; print their common
# letters.
input <- read.table(file.path("day2", "input.txt"), stringsAsFactors = FALSE)
df <- data.frame(str = input$V1)
df$two <- FALSE
df$three <- FALSE
df$values <- NA
matchNotFound <- TRUE
for (i in seq_along(input$V1)) {
  spLetters <- unlist(strsplit(as.character(input$V1[i]), split = ""))
  counts <- table(spLetters)
  df$two[i] <- sum(counts == 2) > 0
  df$three[i] <- sum(counts == 3) > 0
  # Encode the id as dash-separated alphabet indexes for cheap comparison
  # against earlier ids.
  df$values[i] <- paste(sapply(spLetters, function(x) match(x, letters)),
                        collapse = "-")
  if (i > 1 & matchNotFound) {
    cur <- as.numeric(sapply(spLetters, function(x) match(x, letters)))
    for (j in seq_len(i - 1)) {
      otherVals <- as.numeric(unlist(strsplit(df$values[j], "-")))
      # GENERALIZED/FIXED: the original hard-coded 25 matching positions
      # (ids of exactly 26 characters) and additionally required the single
      # mismatched pair of letters to be alphabet neighbours
      # (sum(abs(cur - otherVals) == 1) == 1); the puzzle only asks for
      # exactly one differing position, of any letters and any id length.
      if (sum(cur == otherVals) == length(cur) - 1) {
        cat(paste0("Part B: ", paste(spLetters[-which(cur != otherVals)],
                                     collapse = "")), "\n")
        matchNotFound <- FALSE
      }
    }
  }
}
cat(paste0("\nPart A: ", sum(df$two) * sum(df$three), "\n"))
|
8c0fcc97319f33ac8c59eec2007f97e188e63dc2
|
fb6563e8e589d1cfd124eb32e67c109a451290d9
|
/project_app/app.R
|
fa981bcc2351549a8bbde571551300e8bcc45ee8
|
[] |
no_license
|
rben18/dataViz_final_project
|
5164ee8c53ed8e3fbccfe4ce55ed6ba541b4fa45
|
abfe3788200bfbf5ecdc69b7bd2c3ec978ecea6f
|
refs/heads/master
| 2020-09-29T03:01:56.776406
| 2019-12-16T17:24:21
| 2019-12-16T17:24:21
| 226,934,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,075
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tidyverse)
library(leaflet)
library(dplyr)
library(sf)
library(DT)
library(KernSmooth)
library(raster)
# --- Data preparation (runs once at app startup) ------------------------------
# Read the 311 phone call log and derive separate Date and Time columns from
# the combined "Call_Date" date-time field.
phone_call_log <- read_csv("311_Phone_Call_Log_Mod.csv")%>%
rename(Call_Date_Time = "Call_Date", "Duration_Minutes" = "Duration__Minutes_") %>%
dplyr::select(-"Work_Order_Type")%>%
# Parse the timestamp, then split into an "%H:%M" time string and a Date.
# NOTE(review): the parse format uses ":" between date parts ("%Y:%m:%d") —
# confirm this matches the CSV; a mismatch would produce all-NA dates.
mutate(Call_Date_Time = as.POSIXct(Call_Date_Time, format="%Y:%m:%d %H:%M:%S"),
Time = format(Call_Date_Time,"%H:%M"),
Date = as.Date(Call_Date_Time)
)%>%
dplyr::select(FID, Date, Time, Department, Called_About, Duration_Minutes, duration_Seconds)
# Park locations (saved points object) plus a hand-entered yearly maintenance
# cost per park, matched to parks.points purely by row position.
# NOTE(review): verify length(cost) == nrow(parks.points) whenever the RDS
# is regenerated.
parks.points <- readRDS("parks.points.rds")
cost <- c(13750,3000,3500,97750,24600,10833.33,26500,350601.13,180600,41250,11600,22750,109750,21000,25100,2000,11600,1233.33,11583.34,15100,350000,715600,285083.34,700000,26250,1233.33,2000,166383.34,216808.38,110916.68,61250,62750,17950,737200,128000,151666.68,87583.34,95333.34,158608.33,31583.34,26250,44916.68,3500,6250,26100,58208.33,32250,106583.33,39850,11600,39250,26250,18242.8,36576.14,6250,135650,72375,41583.34,92916.68,18600,3000,3000
)
parks.points$cost <- cost
# Code-enforcement cases, filtered to public-vehicle (parking) violations.
code.points <- read.csv("Code_Enforcement_Cases.csv")
code.points <- dplyr::filter(code.points, Case_Type_Code_Description == "VEHICLE-PUBLIC")
# Compute a 2D binned kernel density estimate over the violation coordinates.
# Columns 13:14 are presumably Lon/Lat — TODO confirm against the CSV schema.
kde <- bkde2D(code.points[, 13:14],
bandwidth=c(.0045, .0068), gridsize = c(1000,1000))
# Wrap the KDE grid in a raster so leaflet can overlay it on the map.
KernelDensityRaster <- raster(list(x=kde$x1, y=kde$x2, z = kde$fhat))
# Blank out near-zero density cells so they render as transparent.
KernelDensityRaster@data@values[which(KernelDensityRaster@data@values < 1)] <- NA
# Binned color palette for the density raster (NA cells stay transparent).
palRaster <- colorBin("Spectral", bins = 5, domain = KernelDensityRaster@data@values, na.color = "transparent")
# Define the UI: one tab per team member, each with its own sidebar/map layout.
ui <- fluidPage(
# Application title
titlePanel("Team 2 Final Project"),
h4("Park Management Dashboard"),
tabsetPanel(
# --- Tab 1 (Rodrigo): 311 phone-call log explorer ---
tabPanel("Rodrigo",
sidebarLayout(
sidebarPanel(width = 3,
# Department filter, defaulting to the alphabetically-first department.
selectInput(inputId = "department",
label = "Choose department to focus on",
choices = sort(unique(phone_call_log$Department)),
selected = min(sort(phone_call_log$Department))
),
# Call-duration range filter (seconds), spanning the observed data.
sliderInput(inputId = "duration",
label = "Duration of call (in seconds):",
min = 0,
max = max(phone_call_log$duration_Seconds,na.rm = TRUE),
value = c(0,max(phone_call_log$duration_Seconds,na.rm = TRUE))
),
# Date-range filter, bounded by the observed call dates.
dateRangeInput(inputId = "dates_selected",
label = "Choose a desired date range",
start = min(phone_call_log$Date),
end = max(phone_call_log$Date),
min = min(phone_call_log$Date),
max = max(phone_call_log$Date)
),
# Red warning text shown when the filters match no rows (see server).
textOutput("error_message"),
tags$head(tags$style("#error_message{color: red;font-size: 20px;}")
)
),
# Plots and data table for the filtered call log.
mainPanel(
tabsetPanel(
tabPanel(title = "Graphical Form",
fluidRow(
column(6,
h4("Graph below shows the number of times people call for a particular problem in selected department"),
plotOutput("called_about_plot")
),
column(6,
h4("Graph below shows the proportion of calls at a given duration"),
plotOutput(outputId = "duration_plot"))
)
),
tabPanel(title = "Tabular Form",
DT::dataTableOutput("phone_data")
)
)
)
)
),
# --- Tab 2 (Raj): park locations by park type ---
tabPanel("Raj",
sidebarLayout(
sidebarPanel(
selectInput(inputId = "park_type", "Type of Park:",
choices=unique(parks.points$Park_Type)),
helpText("Data: Parks_Locations_and_Features")
),
# Leaflet map of parks of the selected type.
mainPanel(
h3("Parks in South Bend, IN"),
leafletOutput("ParkMap1")
)
)
),
# --- Tab 3 (Evan): park maintenance cost map ---
tabPanel("Evan",
sidebarLayout(
sidebarPanel(
sliderInput("slider2", "Cost per Year (Bubble Size = Cost)",
min = min(parks.points$cost), max = max(parks.points$cost), value = c(1, 12))
),
mainPanel(
h3("Park Maintenance Cost in the South Bend, IN"),
leafletOutput("ParkMap2")
)
)
),
# --- Tab 4 (Charle): parking-violation density heatmap ---
tabPanel("Charle",
mainPanel(
h3("Parking Violations Density in South Bend"),
leafletOutput("map")
)
)
)
)
# Server: reactive filtering plus one render per output declared in the UI.
server <- function(input, output, session) {
######
#Rodrigo Section:
# Reactive subset of the phone log driven by the sidebar inputs
# (duration range, date range, department).
filtered_phone_log <- reactive({
phone_call_log %>%
filter(duration_Seconds >= input$duration[1] & duration_Seconds <= input$duration[2],
Date >= input$dates_selected[1] & Date <= input$dates_selected[2],
Department == input$department)
})
# Data table of the filtered phone log, shown on the "Tabular Form" tab.
output$phone_data <- DT::renderDataTable(
DT::datatable(filtered_phone_log(),
colnames = c("Called About" = 'Called_About', "Duration Minutes" = 'Duration_Minutes',
"Duration Seconds" = 'duration_Seconds')
)
)
# Horizontal bar chart counting the distinct topics people called about.
output$called_about_plot <- renderPlot(
ggplot(data = filtered_phone_log(), aes (x = Called_About, fill = Called_About)) +
geom_bar() +
labs(x = "What People Called About", y = "Count") +
coord_flip() +
guides(fill = "none") +
theme_classic()
)
# Same as the filtered log, but with the top 1% longest calls removed so the
# density plot is not dominated by outliers.
data_for_plot <- reactive({
filtered_phone_log() %>%
filter(quantile(duration_Seconds,.99,na.rm = T) > duration_Seconds)
})
# Density plot of call duration with the median marked by a dashed line.
output$duration_plot <- renderPlot(
ggplot(data = data_for_plot(), aes(x = duration_Seconds)) +
geom_density(fill="blue") +
labs(x = "Duration (seconds)", y = "Density",
title = paste("Median duration: ", round(median(data_for_plot()$duration_Seconds,na.rm = TRUE)), " seconds"),
caption = "Only the shortest 99% of calls were plotted") +
theme_classic() +
geom_vline(aes(xintercept=median(duration_Seconds,na.rm = TRUE)), #plots the median line
color="orange", linetype="dashed", size=2)
)
# Red error message when the current filters match no data (empty otherwise).
output$error_message <- renderText(
if(nrow(data_for_plot()) == 0){
"There is no data that meets these parameters.\n Please change the input(s)"
}
else{
""
}
)
#######
#Raj Section:
# Parks of the currently selected park type.
selectedType <- reactive({
parks.points[parks.points$Park_Type == input$park_type,]
})
# Map of the selected parks with name popups.
output$ParkMap1 <- renderLeaflet({
leaflet(options = leafletOptions()) %>%
addTiles() %>%
addMarkers(selectedType()$Lon, selectedType()$Lat, popup = selectedType()$Park_Name)
})
######
#Evan section:
# Parks whose yearly maintenance cost falls inside the slider range.
filtered_data <- reactive({
parks.points[parks.points$cost<=input$slider2[2]
& parks.points$cost>=input$slider2[1],]
})
# Bubble map: marker radius scales with maintenance cost.
output$ParkMap2 <- renderLeaflet({
leaflet(data=filtered_data()) %>%
addTiles() %>%
addCircleMarkers(stroke = FALSE,color = "red", fillOpacity = 0.5, radius=(filtered_data()$cost+10000)/10000, popup = paste(filtered_data()$Park_Name,"<br>","$",filtered_data()$cost,"<br>","Type:",filtered_data()$Park_Type
))
})
#Charle Section
#####
# Static density raster of parking violations (computed once at startup).
output$map <- renderLeaflet({
leaflet(code.points) %>%
addTiles() %>%
fitBounds(~min(Lon), ~min(Lat), ~max(Lon), ~max(Lat))%>%
addRasterImage(KernelDensityRaster,
colors = palRaster,
opacity = .5) %>%
addLegend(pal = palRaster,
values = KernelDensityRaster@data@values,
title = "Density of Violations",
position = "bottomright")
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
9a2c4a8e6ab0225228b2248f85492b7a172d032e
|
f61678dc8d0291069ef81772918c4dad91a6e7e5
|
/Data/Final dataset with variable types.R
|
d635d27790d162a7548671be86f6eef3777c837c
|
[] |
no_license
|
bowes-chris/footballplaycaller
|
10ee31852787ec5b2b434933c6da3cfe671356f3
|
8a6d0b4ff389e9a8b9a156a5719f07110570c158
|
refs/heads/master
| 2021-05-08T08:36:10.408908
| 2017-12-08T22:31:10
| 2017-12-08T22:31:10
| 107,051,050
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,073
|
r
|
Final dataset with variable types.R
|
############################################################################################################
############ This file is to create a final dataset to create all models from.
############ The first 20 variables are known-before-play variables.
############ Variables 21-25 variables are options variables.
############ Variables 26-36 variables are results variables.
############################################################################################################
# Bring in data (dropping the leading row-index column written by write.csv)
nfl <- read.csv("nfl0917.csv")
nfl <- nfl[,-1]
# Reference table of all feature names in the nfl play-by-play dataset
features <- colnames(nfl)
features <- as.data.frame(features )
# 0/1 indicator vector (one entry per column of nfl, in order) of features
# known before the snap of the play
pre.snap <- rbind(1,1,1,1,1,1,
1,1,1,1,1,1,
0,1,0,1,1,0,
0,0,0,0,0,0,
0,0,0,1,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0,0,0,1,1,1,
1)
# 0/1 indicator vector of play-call "options" features
options.features <- rbind(0,0,0,0,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0,0,1,0,0,0,
0,1,0,0,0,0,
1,1,0,0,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0)
# 0/1 indicator vector of play "results" features
resp.features <- rbind(0,0,0,0,0,0,
0,0,0,0,0,0,
1,0,1,0,0,0,
0,1,1,1,0,0,
0,0,0,0,1,1,
1,0,1,0,0,1,
0,0,0,1,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0,0,0,0,0,0,
0)
# Convert the 0/1 indicators to logical column selectors
pre.snap <- ifelse(pre.snap == 1, TRUE, FALSE)
options.features <- ifelse(options.features == 1, TRUE, FALSE)
resp.features <- ifelse(resp.features == 1, TRUE, FALSE)
# Create df of known-before-play features (first data row dropped,
# consistent with the other subsets below)
ps.data <- nfl[,pre.snap]
ps.data <- ps.data[-1,]
# Create df of results features.
# BUG FIX: `responses` is read when building opt.data$PlayType2 below, so it
# must be created first; the original script referenced it before definition.
responses <- nfl[,resp.features]
responses <- responses[-1,]
# Create df of options features
opt.data <- nfl[,options.features]
opt.data <- opt.data[-1,]
# Combined play-type label: pass plays get their pass location; runs get the
# run location (plus gap for non-middle runs); "No Play" rows are left "".
opt.data$PlayType2 <- ifelse(responses$PassAttempt == 1 & opt.data$PlayType != "No Play" , paste(opt.data$PlayType, opt.data$PassLocation),
ifelse(responses$RushAttempt == 1 & opt.data$PlayType != "No Play" & opt.data$RunLocation == "middle", paste(opt.data$PlayType, opt.data$RunLocation),
ifelse(responses$RushAttempt == 1 & opt.data$PlayType != "No Play" & opt.data$RunLocation != "middle", paste(opt.data$PlayType, opt.data$RunLocation, opt.data$RunGap),"")))
# Write out the combined modeling dataset
nfl.final <- cbind(ps.data,opt.data,responses)
write.csv(nfl.final, file = "nfl.final.csv")
|
90fc680b2e65aac93bbb6c284889f5170c99cedb
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/7832_0/rinput.R
|
d885fe71b9929675aea2d6c318d0d64571db93a2
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
# Read the Newick tree, strip its root, and write the unrooted tree back out.
tree_rooted <- read.tree("7832_0.txt")
tree_unrooted <- unroot(tree_rooted)
write.tree(tree_unrooted, file="7832_0_unrooted.txt")
|
aeb30bd8ff2d4519ec942dc3a03863baf7ea236a
|
3eea0347be8d20bf6cdfeb102668e0af0f20ede2
|
/RunModelFinal.R
|
e2fd348d8e16c9b9a4217cd7f9502dc44e2a874f
|
[] |
no_license
|
jninanya/YieldPrediction
|
68842382a17f6ab9eee68980abb3fe3646631952
|
a6381c67b6de61fac070d2b8bb82601e192e77dd
|
refs/heads/main
| 2023-02-26T01:30:07.358413
| 2021-02-03T14:44:24
| 2021-02-03T14:44:24
| 335,436,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,358
|
r
|
RunModelFinal.R
|
library(lubridate)
# Run the potato growth model over the RCP8.5 weather series (2091-2100) for
# the first four varieties in the crop-parameter file, collecting final yield,
# total radiation, and the simulation start date per year x variety.
dat <- read.csv("CropParametersFinal.csv")
path <- "C:/Users/jninanya/OneDrive - CGIAR/Desktop/Taller - CIP_LAC/SolanumR/MeteorologicalData/"
FileName <- paste0(path, "RCP85Data_2091-2100.csv")
# Simulation years covered by the weather file.
yyy <- c(2091:2100)
# Preallocated results: rows = years, cols = varieties.
mtx1 <- matrix(nrow = length(yyy), ncol = 4)                  # final yield
mtx2 <- matrix(nrow = length(yyy), ncol = 4)                  # total radiation
mtx3 <- as.data.frame(matrix(nrow = length(yyy), ncol = 4))   # start dates (character)
for(ik in 1:4){  # first four parameter sets / varieties only
dfr <- dat[ik, ]
for(ij in seq_along(yyy)){
# Shift the sowing date into simulation year ij (keep month/day suffix).
sowing <- as.character(paste0(yyy[ij], substr(dfr$sowing, 5, 10)))
harvest <- as.character(as.Date(sowing) + dfr$harvest)
# Globals consumed by the sourced growth module — names must not change.
EDay <- dfr$Eday
plantDensity <- dfr$p_density
wmax <- dfr$wmax
tm <- dfr$tm
te <- dfr$te
A_0 <- dfr$A
Tu_0 <- dfr$tu
b <- dfr$b
DMcont <- dfr$dmc
RUE <- dfr$rue
Tb_0 <- 4
To_0 <- 15
Tu_1 <- 28
Pc <- 12
w <- 0.5
filename <- FileName
source("Module_PotentialGrowth_V2.0.R")
# Module outputs used here: `fty` (daily yield series) and `climate`
# (the weather table actually used) — defined by the sourced script.
mtx1[ij, ik] <- fty[dfr$harvest + 1]
mtx2[ij, ik] <- sum(climate$Rad)
mtx3[ij, ik] <- as.character(climate$Date[1])
}
}
mtx <- data.frame(mtx1)
# NOTE(review): assumes dat$Variety has exactly 4 entries matching the loop —
# confirm if the parameter file changes.
colnames(mtx) <- dat$Variety
rownames(mtx) <- yyy
write.csv(mtx, "output_RCP85Data_2091-2100.csv")
|
d6577815878577c018ae1e49d482b598e52a38e1
|
be01433ef1c49d13046933eaf1a7ea1705cd7e5a
|
/LogisticRegression.R
|
7c497755f9d88314b23a557d76846b88f3fb645a
|
[
"MIT"
] |
permissive
|
sjkim0716/Logistic-Regression
|
9360617383f1879687537b9de6d9bff0fd922b67
|
c41d99e9046df50d0575fe82cb1cdd77c3ed64d7
|
refs/heads/master
| 2021-06-14T19:59:15.055731
| 2017-03-14T19:31:48
| 2017-03-14T19:31:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,977
|
r
|
LogisticRegression.R
|
library(car) ## needed to recode variables
set.seed(1)
## read and print the flight-delay data
delays_flight <- read.csv("~/FlightDelays.csv")
delays_flight[1:3,]
## hour of departure (schedtime is e.g. 1455 -> hour 14)
delays_flight$sched <- factor(floor(delays_flight$schedtime/100))
table(delays_flight$sched)
table(delays_flight$carrier)
table(delays_flight$dest)
table(delays_flight$origin)
table(delays_flight$weather)
table(delays_flight$dayweek)
table(delays_flight$daymonth)
table(delays_flight$delay)
## recode response: 1 = delayed, 0 = on time
delays_flight$delay <- recode(delays_flight$delay,"'delayed'=1;else=0")
delays_flight$delay <- as.numeric(levels(delays_flight$delay)[delays_flight$delay])
table(delays_flight$delay)
## Day of week: 1=Monday; 2=Tuesday; 3=Wednesday; 4=Thursday;
## 5=Friday; 6=Saturday; 7=Sunday
## 7=Sunday and 1=Monday coded as 1, all other days 0
delays_flight$dayweek <- recode(delays_flight$dayweek,"c(1,7)=1;else=0")
table(delays_flight$dayweek)
## omit unused variables
delays_flight <- delays_flight[,c(-1,-3,-5,-6,-7,-11,-12)]
delays_flight[1:3,]
## 60/40 train/test split
n <- length(delays_flight$delay)
n
n1 <- floor(n*(0.6))
n1
n2 <- n-n1
n2
train <- sample(1:n,n1)
## estimation of the logistic regression model
## explanatory variables: carrier, destination, origin, weather, day of week
## (weekday/weekend), scheduled hour of departure
## design matrix with indicator columns for factors (intercept column dropped)
Xdelays_flight <- model.matrix(delay~.,data=delays_flight)[,-1]
Xdelays_flight[1:3,]
xtrain <- Xdelays_flight[train,]
xnew <- Xdelays_flight[-train,]
ytrain <- delays_flight$delay[train]
ynew <- delays_flight$delay[-train]
m1 <- glm(delay~.,family=binomial,data=data.frame(delay=ytrain,xtrain))
summary(m1)
## prediction: predicted delay probabilities for cases in the test set
ptest <- predict(m1,newdata=data.frame(xnew),type="response")
data.frame(ynew,ptest)[1:10,]
## first column in list represents the case number of the test element
plot(ynew~ptest)
## classify as delayed if probability 0.5 or larger
gg1 <- floor(ptest+0.5) ## floor function; see help command
ttt <- table(ynew,gg1)
ttt
error <- (ttt[1,2]+ttt[2,1])/n2
error
## classify as delayed if probability 0.3 or larger (floor(p + 0.7) is 1 iff p >= 0.3)
gg2 <- floor(ptest+0.7)
ttt <- table(ynew,gg2)
ttt
error <- (ttt[1,2]+ttt[2,1])/n2
error
## order cases in the test set by predicted delay probability (descending),
## with the actual outcome shown next to each prediction
bb <- cbind(ptest,ynew)
bb
bb1 <- bb[order(ptest,decreasing=TRUE),]
bb1
## overall success (delay) prob in the evaluation data set
xbar <- mean(ynew)
xbar
## calculating the lift
## cumulative 1's sorted by predicted values
## cumulative 1's using the average success prob from evaluation set
## BUG FIX: the original used `dim(n2)` (NULL for a scalar) and grew the
## vectors element by element, and shadowed base::axis; preallocate numeric
## vectors instead. A stray literal `26` (a page-number paste artifact) was
## also removed.
caseno <- numeric(n2)
ax <- numeric(n2)
ay <- numeric(n2)
caseno[1] <- 1
ax[1] <- xbar
ay[1] <- bb1[1,2]
for (i in 2:n2) {
caseno[i] <- i
ax[i] <- xbar*i
ay[i] <- ay[i-1]+bb1[i,2]
}
aaa <- cbind(bb1[,1],bb1[,2],ay,ax)
aaa[1:100,]
plot(caseno,ay,xlab="number of cases",ylab="number of successes",main="Lift:
Cum successes sorted by pred val/success prob")
points(caseno,ax,type="l")
|
60ab53e400ddec09cb862d0f40bfab946cf19971
|
8dd0d70f4a9afeabdac926ffe6ab101c7196c8a6
|
/R/seaMass.R
|
34bd7ee923fff97b9b26b4ac58cf054ea5d58a2e
|
[
"Apache-2.0"
] |
permissive
|
alecksphillips/seaMass
|
664ca1172be2e869e43710359617193ab66dfaa5
|
59d4735142681ad775eb59b4efb94a51ebad3739
|
refs/heads/master
| 2020-11-24T00:52:38.020717
| 2020-09-30T15:24:29
| 2020-09-30T15:24:29
| 227,891,326
| 0
| 0
|
Apache-2.0
| 2019-12-13T17:33:46
| 2019-12-13T17:33:45
| null |
UTF-8
|
R
| false
| false
| 12,072
|
r
|
seaMass.R
|
# Package attach hook: prints the version banner and the (GPL-style)
# no-warranty / redistribution notices.
.onAttach <- function(libname, pkgname) {
  banner <- paste0(
    "seaMass v", packageVersion("seaMass"),
    " | © 2019-2020 BIOSP", utf8::utf8_encode("\U0001f441"), " Laboratory"
  )
  packageStartupMessage(banner)
  packageStartupMessage("This program comes with ABSOLUTELY NO WARRANTY.")
  packageStartupMessage("This is free software, and you are welcome to redistribute it under certain conditions.")
}
#' seaMass object
#'
#' Virtual base class; holds methods shared between \link{seaMass_sigma}, \link{sigma_block} and \link{seaMass_delta}.
#' It cannot be instantiated directly ("VIRTUAL").
setClass("seaMass", contains = "VIRTUAL")
#' Read MCMC samples (or a cached summary of them) for a seaMass object.
#'
#' Looks first for a cached summary fst file; otherwise reads the per-chain
#' sample files listed in the block's index, optionally summarising and
#' re-caching the result.
#' @import data.table
#' @export
#' @include generics.R
setMethod("read_samples", "seaMass", function(object, input, type, items = NULL, chains = 1:control(object)@model.nchain, summary = NULL, summary.func = "robust_normal", as.data.table = FALSE) {
# Normalise `summary`: FALSE/NULL means "raw samples, no summary".
if (is.null(summary) || summary == F) summary <- NULL
if (!is.null(summary)) {
# TRUE selects summary.func; a string selects a named summariser directly.
summary <- ifelse(summary == T, paste0("dist_samples_", summary.func), paste0("dist_samples_", summary))
filename <- file.path(filepath(object), input, paste(summary, type, "fst", sep = "."))
}
if (!is.null(summary) && file.exists(filename)) {
# load and filter from cache
DT <- fst::read.fst(filename, as.data.table = T)
if (!is.null(blocks(object))) {
DT[, Block := factor(name(object), levels = names(blocks(object)))]
setcolorder(DT, "Block")
}
# `items` may be a data.frame of key columns (joined) or a plain vector
# (matched against the first non-Block column).
if (is.data.frame(items)) {
DT <- merge(DT, items, by = colnames(items), sort = F)
}
else if (!is.null(items)) {
DT <- DT[get(colnames(DT)[2]) %in% items]
}
} else {
# load and filter index
filename.index <- file.path(filepath(object), input, paste(type, "index.fst", sep = "."))
if (!file.exists(filename.index)) return(NULL)
DT.index <- fst::read.fst(filename.index, as.data.table = T)
if (!is.null(blocks(object))) {
DT.index[, Block := factor(name(object), levels = names(blocks(object)))]
setcolorder(DT.index, "Block")
}
if (is.data.frame(items)) {
DT.index <- merge(DT.index, items, by = colnames(items), sort = F)
} else if (!is.null(items)) {
DT.index <- DT.index[get(setdiff(colnames(DT.index), "Block")[1]) %in% items]
}
DT.index <- DT.index[complete.cases(DT.index)]
if (nrow(DT.index) == 0) return(NULL)
setkey(DT.index, file, from)
# batch: key columns precede the file/from/to locator columns in the index
ctrl <- control(object)
summary.cols <- colnames(DT.index)[1:(which(colnames(DT.index) == "file") - 1)]
DTs.index <- copy(DT.index)
# Factors are coded as integers while batching; levels are restored below.
for (col in colnames(DTs.index)[1:(which(colnames(DTs.index) == "file") - 1)]) DTs.index[, (col) := as.integer(get(col))]
if (is.null(summary)) {
DTs.index <- list(DTs.index)
} else {
DTs.index <- batch_split(DTs.index, summary.cols, 16 * ctrl@nthread, keep.by = F)
}
fp <- filepath(object)
DT <- rbindlist(parallel_lapply(DTs.index, function(item, fp, input, chains, summary, summary.cols) {
# minimise file access: merge adjacent row ranges in the same file into
# one contiguous read
DT0.index <- copy(item)
item[, file.prev := shift(file, fill = "")]
item[, to.prev := shift(to + 1, fill = 0)]
item[, file.next := shift(file, fill = "", -1)]
item[, from.next := shift(from - 1, fill = 0, -1)]
item <- cbind(
item[!(file == file.prev & from == to.prev), .(file, from)],
item[!(file == file.next & to == from.next), .(to)]
)
# read each merged range, per requested chain (chain id replaces the
# leading digits of the file name)
return(rbindlist(lapply(1:nrow(item), function(i) {
DT0 <- rbindlist(lapply(chains, function(chain) {
DT0 <- NULL
filename <- as.character(item[i, file])
# best-effort: a missing chain file simply contributes no rows
try({
DT0 <- fst::read.fst(
file.path(fp, input, dirname(filename), sub("^([0-9]+)", chain, basename(filename))),
from = item[i, from],
to = item[i, to],
as.data.table = T
)}, silent = T)
return(DT0)
}))
if (!is.null(blocks(object))) {
DT0[, Block := as.integer(factor(name(object), levels = names(blocks(object))))]
setcolorder(DT0, "Block")
}
# optional summarise across chains/samples per key group
if (!is.null(summary) && nrow(DT0) > 0) DT0 <- DT0[, do.call(summary, list(chain = chain, sample = sample, value = value)), by = summary.cols]
DT0 <- merge(DT0, DT0.index[, !c("file", "from", "to")], by = summary.cols, sort = F)
return(DT0)
})))
}, nthread = ifelse(length(items) == 1, 1, ctrl@nthread)))
# restore factor levels that were integer-coded for batching
for (col in summary.cols) DT[, (col) := factor(get(col), levels = 1:nlevels(DT.index[, get(col)]), labels = levels(DT.index[, get(col)]))]
# cache the full summary only when nothing was filtered out
if (!is.null(summary) && is.null(items) && identical(chains, 1:ctrl@model.nchain)) {
fst::write.fst(DT, filename)
}
}
if (!as.data.table) setDF(DT)
else DT[]
return(DT)
})
#' Plot posterior sample distributions as slab/interval plots.
#'
#' Reads samples via read_samples, summarises per element (2.5/17/50/83/97.5%
#' quantiles), and draws one slab+interval row (or column, when
#' horizontal = FALSE) per element. Returns the ggplot object (NULL if there
#' is no data); optionally also saves it to `file`.
#' @import data.table
#' @export
#' @include generics.R
setMethod("plot_samples", "seaMass", function(object, input, type, items = NULL, sort.cols = NULL, label.cols = NULL, value.label = "value", horizontal = TRUE, colour = NULL, colour.guide = NULL, fill = NULL, fill.guide = NULL, file = NULL, value.length = 120, level.length = 5, transform.func = NULL) {
# read samples
DT <- read_samples(object, input, type, items, as.data.table = T)
if (is.null(DT) || nrow(DT) == 0) {
# still emit an (empty) file so downstream pipelines find something
if (!is.null(file)) ggplot2::ggsave(file, NULL, width = 10, height = 10)
g <- NULL
} else {
if (!is.null(transform.func)) DT$value <- transform.func(DT$value)
# key columns are everything before the `chain` column
summary.cols <- colnames(DT)[1:(which(colnames(DT) == "chain") - 1)]
if (is.null(label.cols)) label.cols <- summary.cols
# per-element quantiles plus any available metadata for colouring/filling
DT1 <- DT[, (as.list(quantile(value, probs = c(0.025, 0.17, 0.5, 0.83, 0.975), na.rm = T))), by = summary.cols]
colnames(DT1)[(ncol(DT1) - 4):ncol(DT1)] <- c("q025", "q17", "value", "q83", "q975")
if ("Group" %in% summary.cols) DT1 <- merge(DT1, groups(object, as.data.table = T), sort = F, by = "Group", suffixes = c("", ".G"))
if ("Group" %in% summary.cols && "Component" %in% summary.cols) DT1 <- merge(DT1, components(object, as.data.table = T), sort = F, by = c("Group", "Component"), suffixes = c("", ".C"))
if ("Group" %in% summary.cols && "Component" %in% summary.cols && "Measurement" %in% summary.cols) DT1 <- merge(DT1, measurements(object, as.data.table = T), sort = F, by = c("Group", "Component", "Measurement"), suffixes = c("", ".M"))
if ("Block" %in% summary.cols && "Assay" %in% summary.cols) DT1 <- merge(DT1, assay_design(object, as.data.table = T), sort = F, by = c("Block", "Assay"), suffixes = c("", ".AD"))
if ("Group" %in% summary.cols && "Assay" %in% summary.cols) DT1 <- merge(DT1, assay_groups(object, as.data.table = T), sort = F, by = c("Group", "Assay"), suffixes = c("", ".AG"))
if ("Group" %in% summary.cols && "Component" %in% summary.cols && "Assay" %in% summary.cols) DT1 <- merge(DT1, assay_components(object, as.data.table = T), sort = F, by = c("Group", "Component", "Assay"), suffixes = c("", ".AC"))
# drop colour/fill aesthetics whose column is absent or all-NA
if (!is.null(colour) && (!(colour %in% colnames(DT1)) || all(is.na(DT1[, get(colour)])))) colour <- NULL
if (!is.null(fill) && (!(fill %in% colnames(DT1)) || all(is.na(DT1[, get(fill)])))) fill <- NULL
# sort order; element labels are the label columns joined with " : "
if (!is.null(sort.cols)) setorderv(DT1, sort.cols, na.last = T)
DT1[, Element := Reduce(function(...) paste(..., sep = " : "), .SD[, mget(label.cols)])]
if (horizontal) {
DT1[, Element := factor(Element, levels = rev(unique(Element)))]
} else {
DT1[, Element := factor(Element, levels = unique(Element))]
}
# min/max: half-unit band around each element for the background fill rects
DT1[, min := as.numeric(Element) - 0.5]
DT1[, max := as.numeric(Element) + 0.5]
# truncate densities to the central 95% so slabs do not trail off
DT <- merge(DT, DT1[, unique(c("Element", summary.cols, colour, fill, "q025", "q975")), with = F], by = summary.cols)
DT <- DT[value > q025 & value < q975]
DT[, q025 := NULL]
DT[, q975 := NULL]
# plot! two near-identical branches: horizontal vs vertical layout
if (horizontal) {
g <- ggplot2::ggplot(DT, ggplot2::aes(x = value, y = Element))
g <- g + ggplot2::geom_vline(xintercept = 0, colour = "grey")
if (is.null(colour)) {
g <- g + ggdist::stat_slab(side = "both", size = 0.25, colour = "black")
} else {
g <- g + ggdist::stat_slab(ggplot2::aes_string(colour = colour), side = "both", size = 0.25)
}
g <- g + ggdist::geom_pointinterval(ggplot2::aes_string(x = "value", xmin = "q17", xmax = "q83", colour = colour), DT1, interval_size = 2, point_size = 1)
g <- g + ggplot2::guides(colour = colour.guide, fill = fill.guide)
if (!is.null(fill)) g <- g + ggplot2::geom_rect(ggplot2::aes_string(ymin = "min", ymax = "max", fill = fill), DT1, xmin = -Inf, xmax = Inf, alpha = 0.2, colour = NA)
g <- g + ggplot2::xlab(paste("log2", value.label))
g <- g + ggplot2::coord_cartesian(xlim = c(min(DT1$q025), max(DT1$q975)))
g <- g + ggplot2::theme(legend.position = "top", axis.title.y = ggplot2::element_blank())
if (!is.null(file)) {
# size the saved figure by the panel dimensions plus a 10mm margin
gt <- egg::set_panel_size(g, width = grid::unit(value.length, "mm"), height = grid::unit(level.length * nlevels(DT1$Element), "mm"))
ggplot2::ggsave(file, gt, width = 10 + sum(as.numeric(grid::convertUnit(gt$widths, "mm"))), height = 10 + sum(as.numeric(grid::convertUnit(gt$heights, "mm"))), units = "mm", limitsize = F)
}
} else {
g <- ggplot2::ggplot(DT, ggplot2::aes(x = Element, y = value))
g <- g + ggplot2::geom_hline(yintercept = 0, colour = "grey")
if (is.null(colour)) {
g <- g + ggdist::stat_slab(side = "both", size = 0.25, colour = "black")
} else {
g <- g + ggdist::stat_slab(ggplot2::aes_string(colour = colour), side = "both", size = 0.25)
}
g <- g + ggdist::geom_pointinterval(ggplot2::aes_string(y = "value", ymin = "q17", ymax = "q83", colour = colour), DT1, interval_size = 2, point_size = 1)
g <- g + ggplot2::guides(colour = colour.guide, fill = fill.guide)
if (!is.null(fill)) g <- g + ggplot2::geom_rect(ggplot2::aes_string(xmin = "min", xmax = "max", fill = fill), DT1, ymin = -Inf, ymax = Inf, alpha = 0.2, colour = NA)
g <- g + ggplot2::ylab(paste("log2", value.label))
g <- g + ggplot2::coord_cartesian(ylim = c(min(DT1$q025), max(DT1$q975)))
g <- g + ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, vjust = 0.5, hjust = 1), legend.position = "left", axis.title.x = ggplot2::element_blank())
if (!is.null(file)) {
gt <- egg::set_panel_size(g, height = grid::unit(value.length, "mm"), width = grid::unit(level.length * nlevels(DT1$Element), "mm"))
ggplot2::ggsave(file, gt, width = 10 + sum(as.numeric(grid::convertUnit(gt$widths, "mm"))), height = 10 + sum(as.numeric(grid::convertUnit(gt$heights, "mm"))), units = "mm", limitsize = F)
}
}
}
return(g)
})
# ensure all items in plot for all blocks
# if (is.null(items)) items <- unique(DT$Element)
# if (block.drop || uniqueN(DT$Block) == 1) {
# DT <- merge(data.table(Element = factor(items, levels = items)), DT, all.x = T, sort = F, by = "Element")
# } else {
# if (block.sort) {
# DT <- merge(CJ(Block = levels(DT$Block), Element = factor(items, levels = items)), DT, all.x = T, sort = F, by = c("Block", "Element"))
# } else {
# DT <- merge(CJ(Element = factor(items, levels = items), Block = levels(DT$Block)), DT, all.x = T, sort = F, by = c("Block", "Element"))
# }
# }
# DT[, Element := paste0(Element, " [", Block, "]")]
# if (horizontal) {
# DT[, Element := factor(Element, levels = rev(unique(Element)))]
# } else {
# DT[, Element := factor(Element, levels = unique(Element))]
# }
# metadata for each column level
# DT1 <- DT[, (as.list(quantile(value, probs = c(0.025, 0.5, 0.975), na.rm = T))), by = Element]
# DT1[, min := as.numeric(Element) - 0.5]
# DT1[, max := as.numeric(Element) + 0.5]
# DT1 <- merge(DT1, DT[, .SD[1], by = Element], by = "Element")
#' @import data.table
#' @export
#' @include generics.R
setMethod("finish", "seaMass", function(object) {
  # Reserved for future post-processing; currently only logs completion.
  timestamp <- Sys.time()
  cat(paste0("[", timestamp, "] seaMass finished!\n"))
  invisible(NULL)
})
|
69650b32c9516a463616621d106e337849645399
|
7a116ffcce3c2772c5eaf5ba7e110c8f42fc05e8
|
/AV-BLFixedETF.R
|
e52d343fc17019bd70ac204a009a35adc71bd9d5
|
[] |
no_license
|
Archieus/AVBatchQuotes
|
92705cbd76c8449515ead73d4e565945bd4d4272
|
24761faf4e3616dc1ea3366adbe0f34d4b4c1e9f
|
refs/heads/master
| 2021-05-08T19:35:30.579165
| 2018-06-28T17:32:09
| 2018-06-28T17:32:09
| 119,574,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,533
|
r
|
AV-BLFixedETF.R
|
library(PerformanceAnalytics)
library(PortfolioAnalytics)
library(fPortfolio)
library(quantmod)
library(BLCOP)
Sys.setenv(TZ = "EST5EDT")
#### Read ticker symbols (transposed so symbols run down one column) ####
symblist <- t(read.csv("blfixed.csv", header = FALSE))
quotes <- new.env() # create new environment for data to be loaded to
#### CREATE COMPONENTS FOR API CALL ####
## NOTE(review): the Alpha Vantage API key is hardcoded in this query string —
## consider reading it from an environment variable instead of committing it.
apikey <- "&outputsize=full&apikey=Y474&datatype=csv"
URLbase <- "http://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol="
## Preallocate URL vectors (the original grew NULLs element by element).
cu <- character(length(symblist))
ru <- character(length(symblist))
### Download daily adjusted data for each symbol into its own object ###
dataEnv <- new.env()
for(i in seq_along(symblist)){
cu[i] <- paste0(URLbase, symblist[i])
ru[i] <- paste0(cu[i], apikey)
## FIX: spell out `envir` — the original `env =` relied on partial
## argument matching of assign()'s `envir` parameter.
assign(paste(symblist[i]), read.csv(ru[i]), envir = dataEnv)
}
### IDENTIFY THE SYMBOL WITH THE LEAST NUMBER OF ROWS IN ENVIRONMENT ###
RowCount <- matrix(0, ncol = ncol(symblist), nrow = 1)
for(i in seq_along(symblist)) {
RowCount[,i] <- cbind(nrow(get(symblist[,i], envir = dataEnv)))
}
RowMin <- min(RowCount)
#### Adjusted-close vectors, truncated to the shortest history (RowMin) ####
qmodEnv <- new.env()
for(i in seq_along(symblist)) {
assign(symblist[i], head(cbind(get(symblist[,i], envir = dataEnv)$adjusted_close), RowMin), envir = qmodEnv)
}
#### Matrix of adjusted close values (one column per symbol) ####
AC <- matrix(0, ncol = ncol(symblist), nrow = RowMin)
for(i in seq_along(symblist)){
AC[,i] <- cbind(get(symblist[,i], envir = qmodEnv))
}
# Row labels come from the first symbol's timestamps (all are truncated to
# RowMin rows, so the dates are assumed aligned — TODO confirm).
Dates <- get(symblist[1], envir = dataEnv)$timestamp
df <- matrix(unlist(Dates))
AC.df <- as.data.frame(AC)
colnames(AC.df) <- symblist
row.names(AC.df) <- head(df, RowMin)
#### xts object of daily closes and simple returns ####
AC.zoo <- as.zoo(AC.df)
DailyClose <- as.xts(AC.zoo)
FIRet <- na.omit(Return.calculate(DailyClose))
### Convert to monthly data
monthend <- endpoints(FIRet, on = "months", k = 1)
Ret.mo <- FIRet[monthend]
BLroll <- rollingWindows(as.timeSeries(Ret.mo), "12m", "1m")
## CAPM betas of assets (columns 1, 3:13) vs the BNDX benchmark over the most
## recent 12-month rolling window; 3% annual risk-free rate, monthly.
FIETFCAPMbeta <- CAPM.beta(Ret.mo[,c(1,3:13)]['last(BLroll$from):/last(BLroll$end)'],
Ret.mo$BNDX['last(BLroll$from):/last(BLroll$end)'], .03/12)
# Apply the beta weights to build an equilibrium portfolio return series
FIETFEqLib <- Return.portfolio(Ret.mo[,c(1,3:13)]['last(BLroll$from):/last(BLroll$end)'],
weights = as.numeric(FIETFCAPMbeta))
# Excess returns of each asset over the equilibrium portfolio
FIETFExcRet <- Return.excess(Ret.mo[,c(1,3:13)]['last(BLroll$from):/last(BLroll$end)'], FIETFEqLib)
# Robust (MVE) covariance matrix of the excess returns
CovMat <- cov.mve(FIETFExcRet)$cov
nameList <- names(Ret.mo[,c(1,3:13)])
colnames(CovMat) <- nameList
## myPosterior data: Black-Litterman prior with zero means for the 12 assets
priorMeans <- rep(0,12)
### "Pick" matrix: connects assets with a specific "view".
### Relative views in a row must net to zero (0); absolute views must sum to 1.
### All picks are currently zeroed out (no active views).
Pick <- matrix(0, ncol = ncol(CovMat), nrow = 6, dimnames = list(NULL,as.list(colnames(nameList))))
### Vector Q: expected excess return for each corresponding view
#QVect <- c(.02,.02,0,0,0,0)
QVect <- c(0,0,0,0,0,0)
## Example picks (kept for reference, disabled):
#Pick[1,12] <- -1
#Pick[1,1] <- 1
#Pick[2,2] <- -1
#Pick[2,7] <- 1
## Confidence per view: between .01 (no confidence) and 100 (high confidence)
#ViewConf <- c(70,70,.01,.01,.01,.01)
ViewConf <- c(.01,.01,.01,.01,.01,.01)
Views <- BLViews(Pick, QVect, confidences = ViewConf, assetNames = colnames(CovMat))
### Posterior estimates from priors + investor views (tau = scalar close to 0)
CAPMPosterior <- posteriorEst(Views, mu = priorMeans, tau = 0.025, sigma = CovMat)
### CVaR optimisation spec.
### NOTE(review): cvarSpec is defined but not passed to optimalPortfolios()
### below — confirm whether it should be supplied as the `spec` argument.
cvarSpec <- portfolioSpec(
model = list(type = "CVaR", optimize = "minRisk",
estimator = "covEstimator", tailRisk = list(),
params = list(alpha = 0.05)),
portfolio = list(weights = NULL,
targetReturn = NULL, targetRisk = NULL,
riskFreeRate = 0, nFrontierPoints = 50,
status = 0),
optim = list(solver = "solveRglpk.CVAR", objective = NULL,
params = list(), control = list(), trace = FALSE))
optimalPortfolios(CAPMPosterior)
|
0200ce71ad13755061ea74374aa948536b4d11ec
|
25eea0b5f652efe51efab98ccde893f27342e4b3
|
/raw_script.R
|
a898808e92128332a713a20248477eaa49b4d3b2
|
[] |
no_license
|
AdemRamadani/RepData_PeerAssessment2
|
af44e0299f68877d37253d938a94036a177cf25c
|
051c6001d3479f203065407ef754c496d4ac20c2
|
refs/heads/master
| 2021-01-23T03:12:34.307178
| 2015-04-26T18:40:30
| 2015-04-26T18:40:30
| 34,625,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,791
|
r
|
raw_script.R
|
library(reshape2)
library(plyr)
library(ggplot2)
library(maps)
library(RColorBrewer)
# Min-max normalisation: linearly rescale a numeric vector so its minimum
# maps to 0 and its maximum maps to 1. A constant vector yields NaN.
normalize <- function(vect) {
  lo <- min(vect)
  hi <- max(vect)
  (vect - lo) / (hi - lo)
}
# ---- Load and clean the NOAA storm database ----
# Read the compressed storm data (large file; slow to parse).
data <- read.csv(bzfile("./data/repdata-data-StormData.csv.bz2"))
# Keep only the columns used by the analysis.
# BUG FIX: the original indexed a non-existent `bigdata` object — the file was
# read into `data` above. LATITUDE/LONGITUDE are also retained here because the
# downstream per-state summaries select latitude/longitude columns.
data <- data[, c("STATE","EVTYPE","FATALITIES","INJURIES","PROPDMG","PROPDMGEXP",
                 "CROPDMG","CROPDMGEXP","LATITUDE","LONGITUDE")]
# Column names to lower case for convenience.
colnames(data) <- tolower(colnames(data))
# Normalise damage-exponent codes and event types to upper case.
data$propdmgexp <- factor(toupper(data$propdmgexp))
data$cropdmgexp <- factor(toupper(data$cropdmgexp))
data$evtype <- factor(toupper(data$evtype))
# Inspect the exponent codes present in the data.
table(data$cropdmgexp)
table(data$propdmgexp)
# Quantify and inspect rows with invalid cropdmgexp codes.
mean(!(data$cropdmgexp %in% c("","0","H","K","M","B")))
data[!(data$cropdmgexp %in% c("","0","H","K","M","B")),]
# Quantify and inspect rows with invalid propdmgexp codes.
mean(!(data$propdmgexp %in% c("","0","H","K","M","B")))
data[!(data$propdmgexp %in% c("","0","H","K","M","B")),]
# Valid exponent codes and their numeric multipliers
# ("" and "0" -> 1, H -> hundreds, K -> thousands, M -> millions, B -> billions).
exponents <- data.frame(c("","0","H","K","M","B"), c(1, 1, 10^2, 10^3, 10^6, 10^9))
colnames(exponents) <- c("validexp","multiplier")
# Drop rows whose exponent codes are not in the valid set.
data <- subset(data, (cropdmgexp %in% exponents$validexp) & (propdmgexp %in% exponents$validexp))
# Convert damage figures to absolute dollar amounts by joining the multipliers.
colnames(exponents) <- c("validexp","propdmgmultiplier")
data <- merge(data, exponents, by.x = "propdmgexp", by.y = "validexp")
data$propdmg <- data$propdmg * data$propdmgmultiplier
colnames(exponents) <- c("validexp","cropdmgmultiplier")
data <- merge(data, exponents, by.x = "cropdmgexp", by.y = "validexp")
data$cropdmg <- data$cropdmg * data$cropdmgmultiplier
#---------- Economic section ----------
data$totalCost <- data$propdmg+data$cropdmg #define total cost
economicData <- subset(data, totalCost > 0) #select only events with cost > 0
# Keep only the columns needed for the per-state summary.
# NOTE(review): latitude/longitude are selected here but the earlier column
# selection (L9741) does not retain them — confirm they exist at this point.
economicData <- economicData[,c("state","evtype","totalCost","propdmg","cropdmg","latitude","longitude")]
economicData$evtype <- factor(economicData$evtype)
economicData$state <- factor(economicData$state)
# Sum cost per state and event type, then sort each state by decreasing cost.
economicData <- dcast(economicData, state~evtype,fun.aggregate=sum,value.var="totalCost")
economicData <- melt(economicData, id="state")
economicData <- arrange(economicData, state, desc(value))
# Keep only the costliest event type per state (first row after the sort).
economicData <- split(economicData,economicData$state)
economicData <- lapply(economicData, function(x) x[1,])
economicData <- melt(economicData, id="state", id.vars="variable", measure.vars="value")
colnames(economicData) <- c("evtype","totalCost","state")
economicData$evtype <- factor(economicData$evtype)
economicData$state <- factor(economicData$state)
#---------- Health section ----------
# Combine fatalities and injuries into a single "health cost" score via PCA;
# the first principal component is used as the composite measure.
pca <- data[,c("fatalities","injuries")]
pca <- princomp(pca)
summary(pca)
data$totalHealthCost <- pca$scores[,1]
healthData <- subset(data, totalHealthCost > 0) #select only events with cost > 0
# Keep only the columns needed for the per-state summary (see NOTE above on
# latitude/longitude availability).
healthData <- healthData[,c("state","evtype","totalHealthCost","fatalities","injuries","latitude","longitude")]
healthData$evtype <- factor(healthData$evtype)
healthData$state <- factor(healthData$state)
# Sum health cost per state and event type, then sort by decreasing cost.
healthData <- dcast(healthData, state~evtype,fun.aggregate=sum,value.var="totalHealthCost")
healthData <- melt(healthData, id="state")
healthData <- arrange(healthData, state, desc(value))
# Keep only the worst event type per state.
healthData <- split(healthData,healthData$state)
healthData <- lapply(healthData, function(x) x[1,])
healthData <- melt(healthData, id="state", id.vars="variable", measure.vars="value")
colnames(healthData) <- c("evtype","totalHealthCost","state")
healthData$evtype <- factor(healthData$evtype)
healthData$state <- factor(healthData$state)
# ---- Choropleth of health cost: map state abbreviations to map polygon names ----
tmp <- numeric(0)
data(state.fips)
tmp <- data.frame(state.fips$abb,state.fips$polyname)
colnames(tmp) <- c("state","stateName")
healthData <- merge(healthData,tmp)
# Normalise log-costs to [0,1] for the white->red colour ramp.
healthData$normCost <- normalize(log(healthData$totalHealthCost))
pal <- colorRamp(c("white","red"))
map("state", regions = healthData$stateName, lty = 1, lwd =1, boundary=TRUE, fill=TRUE, col=rgb(pal(healthData$normCost)/255))
title(main="Health Costs suffered by states")
# ---- Choropleth of economic cost (white->green ramp) ----
tmp <- numeric(0)
tmp <- data.frame(state.fips$abb,state.fips$polyname)
colnames(tmp) <- c("state","stateName")
economicData <- merge(economicData,tmp)
economicData$normCost <- normalize(log(economicData$totalCost))
pal <- colorRamp(c("white","green"))
map("state", regions = economicData$stateName, lty = 1, lwd =1, boundary=TRUE, fill=TRUE, col=rgb(pal(economicData$normCost)/255))
title(main="Economic Costs suffered by states")
# NOTE(review): the bare `legend` below just prints the legend() function body —
# it looks like an unfinished legend(...) call; confirm and complete or remove.
legend
|
a6f22a72578b62170fcca646cf9233471bae0601
|
5f84c091215fd0dcffd015492f72ae2e373c89ce
|
/scripts/analyze reports - produce tables.R
|
abd4e7c6faa42dbf9d414e3e20ef96ef49289836
|
[] |
no_license
|
harell/Rare-Category-Exploitation
|
f0ffd28884923aa576eca0d32e997663d19891d1
|
3fcf8ac1d4bbb5aeb74723a17bd8a0719e6ce56c
|
refs/heads/master
| 2021-06-12T09:38:44.924128
| 2017-03-07T13:47:59
| 2017-03-07T13:47:59
| 54,336,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,352
|
r
|
analyze reports - produce tables.R
|
################################################################################
# Analyze Reports - Produce Tables
#
# Loads experiment report files, renames policies, computes AUC-TM and AUC-ROC
# per (database, policy, repetition), aggregates to quantile summaries, and
# plots box plots plus a parallel-coordinate plot.
################################################################################
## Initialization
# NOTE(review): clearing the console + rm(list = ls()) wipes the caller's
# workspace — acceptable only as a standalone script entry point.
cat("\014"); rm(list = ls())
source("scripts/load_libraries.R")
# Source every helper in ./functions/ (defines import.reports, AUCTM,
# policies_metadata, ...).
invisible(sapply(list.files(pattern="[.]R$", path="./functions/", full.names=TRUE), source))
options(digits=3)
# 1. Choose x limits
Xlimits = c(0,48) # 48 = 4 years
# 2. Choose inducers
Chosen_inducers = c("SVM","GLM","Ensemble")
# 3. Choose policies
Chosen_policies = NA #c("(SVM) Random Instances","(SVM) Informativeness") # policies_metadata$names_new
# 4. Choose right boundary (budget at which AUC-ROC is read off)
x_max = 1200 #NA
################
# Get the data #
################
reports_folder = file.path(getwd(),"reports")
reports = import.reports(reports_folder)
reports = dplyr::arrange(reports, Policy, Repetition)
##################
# Pre-processing #
##################
## Map original policy names to their display names (from policies_metadata).
for(p in 1:nrow(policies_metadata))
{
original_name = policies_metadata[p,"names_original"]
new_name = policies_metadata[p,"names_new"]
reports[reports$Policy %in% original_name,"Policy"] = new_name
}# end changing policies names
################
# Create table #
################
# One row per (database, policy, repetition); AUCTM filled in the loop below.
params = unique(subset(reports,select=c("DATABASE_NAME","Policy","Repetition")))
params$AUCTM = NA
## Calculate AUC-TM for each run.
# NOTE(review): "out ot" in the progress message is a typo in a runtime string
# (should be "out of") — flagged, not changed here.
for(p in 1:nrow(params))
{
cat("\n AUCTM",p,"out ot",nrow(params))
report = subset(reports,
(DATABASE_NAME %in% params[p,"DATABASE_NAME"]) &
(Policy %in% params[p,"Policy"]) &
(Repetition %in% params[p,"Repetition"]),
select=c("Nl","Nl_minority","Nu_minority"))
params[p,"AUCTM"] = AUCTM(x=report[,"Nl"], y=report[,"Nl_minority"],
x_max=x_max, y_max=max(report$Nu_minority+report$Nl_minority))
}# end calculating AUCTM
## Calculate AUC-ROC (mean AUC at the budget Nl == x_max).
AUCROC_table = aggregate(AUC ~ DATABASE_NAME + Policy + Repetition,
subset(reports, Nl %in% x_max),
mean)
colnames(AUCROC_table) = c(colnames(AUCROC_table)[-ncol(AUCROC_table)],"AUCROC")
params = merge(params,AUCROC_table)
params = dplyr::arrange(params, DATABASE_NAME, Policy, Repetition)
# > head(params,3)
# DATABASE_NAME Policy Repetition AUCTM AUCROC
# 1 SATIMAGE (SVM) Informativeness 1 0.900 0.918
# 2 SATIMAGE (SVM) Informativeness 2 0.898 0.936
# 3 SATIMAGE (SVM) Informativeness 3 0.890 0.927
################
# Shrink table #
################
# Collapse repetitions to 2.5%/50%/97.5% quantiles per (database, policy).
AUCTM = aggregate(AUCTM ~ DATABASE_NAME + Policy, params,
# function(x) c(quantile(x,0.025),mean=mean(x),quantile(x,0.975)))
function(x) c(quantile(x,probs=c(0.025,0.5,0.975))))
# DATABASE_NAME Policy AUCTM.2.5% AUCTM.50% AUCTM.97.5%
# 1 SATIMAGE (SVM) Informativeness 0.887 0.901 0.912
# 2 SATIMAGE (SVM) Random Instances 0.506 0.530 0.553
AUCROC = aggregate(AUCROC ~ DATABASE_NAME + Policy, params,
# function(x) c(quantile(x,0.025),mean=mean(x),quantile(x,0.975)))
function(x) c(quantile(x,probs=c(0.025,0.5,0.975))))
#####################
# Differences table #
#####################
# AUCTM_diff = reshape2::dcast(params[,c("DATABASE_NAME","Policy","Repetition","AUCTM")],
# Repetition ~ Policy)
# AUCTM_diff$diff = AUCTM_diff[,2]-AUCTM_diff[,3]
# plot(density(AUCTM_diff$diff), main="AUCTM diff")
#################
# Visualisation #
#################
# Box plots of both metrics side by side.
par(mar=c(4,4,1,1), mfrow=c(1,2))
boxplot(AUCTM ~ Policy, data=params, main="AUC-TM Box plot")
boxplot(AUCROC ~ Policy, data=params, main="AUC-ROC Box plot")
par(mar=c(4,4,1,1), mfrow=c(1,1))
# Parallel coordinate plot: one line per repetition across policies.
fig_title = paste(unique(params$DATABASE_NAME),"dataset")
fig <- ggplot(aes(x=Policy, y=AUCTM, group=Repetition, color=Policy), data=params) +
geom_path(show.legend=F, size=1) +
geom_point(show.legend=F, size=2) +
# Y axis
# scale_y_continuous(breaks = seq(0,1,0.1), limits = c(0,1)) +
# Plot attributes
ggtitle(fig_title) +
theme_bw()
# theme(axis.text.x=element_text(angle = -90, hjust = 0))
plot(fig)
|
178c2e7dd3a8e8aa71dc74db5be87e7b68a0f213
|
05c616dfe991d0009ab25e882a952ecd3e691b18
|
/plot1.R
|
1da13194f1ed10c3ee6ed6a8f1e1306103b3ce85
|
[] |
no_license
|
ncub8/ExData_Plotting
|
3c90a93b9176b12ac9c59f54ccdd12f5d06d2ea8
|
4d83b67bb98ba8b3e9ae96ace81a4e92fdeed3a7
|
refs/heads/master
| 2021-01-02T09:08:19.758921
| 2014-05-11T02:26:36
| 2014-05-11T02:26:36
| 19,621,203
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 936
|
r
|
plot1.R
|
# Read the household power-consumption data for 1-2 Feb 2007.
# Filters rows at read time with an SQL query (read.csv.sql, sqldf package)
# and adds a combined POSIXct DateTime column parsed from Date + Time.
getData <- function(){
  src <- "household_power_consumption.txt"
  query <- "SELECT * from file WHERE Date = '1/2/2007' OR Date = '2/2/2007'"
  pd <- read.csv.sql(src, sql = query, sep = ";")
  pd$DateTime <- as.POSIXct(strptime(paste(pd$Date, pd$Time), "%d/%m/%Y %H:%M:%S"))
  pd
}
# Load the filtered two-day power data and render the histogram to plot1.png.
pData <- getData()
# Fixed y-axis limit so the histogram matches the reference figure.
lim <- c(0,1200)
#open png graphics device
png(filename = "plot1.png",
width = 480, height = 480, units = "px",
bg = "white")
# NOTE(review): "Killowats" in the axis label is a typo in a runtime string
# (should be "Kilowatts") — flagged, not changed here.
hist(pData$Global_active_power,main="Global Active Power",
xlab="Global Active Power (Killowats)",col="red",ylim=lim)
dev.off()
|
8268def35c0fe0184d7659b7fb13de4d5b56b7dc
|
f034b5b16fc69d50c0036d8e99b58dd48dd698b9
|
/code/check_control.R
|
d21217f20d86fc53103ec629d6108e227b4c594c
|
[] |
no_license
|
maplefly/lx2020
|
69663814234e22c6e76abb4052499603de050598
|
f8ce27a86e0bc23240375eabc93a796f97a5ec29
|
refs/heads/master
| 2022-06-17T16:28:36.850671
| 2020-05-12T08:37:02
| 2020-05-12T08:37:02
| 262,951,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
check_control.R
|
# Used to verify the self-check stage.
# NOTE(review): rm(list = ls()) wipes the workspace — acceptable only when this
# file is run as a standalone script.
rm(list = ls())
source("code\\library.r")
source("code\\fun_general.r")
#############
FILE_TEST <- "E:\\V\\DMZZ\\1data\\test\\ins\\2019-鼎城-直流数据.xlsx"
############# Define the output folder
FILE_SAVE = c("calc\\ins_check\\") # folder where check results are saved
# Read the "DC charging connection control" worksheet from the test workbook.
raw <- xls_read(FILE_TEST,"直流充电连接控制时")
names(raw)
# Keep the first 10 records; columns 2, 13:31 are the data fields, 6 and 7 hold
# the image name/data used below.
data <- raw[1:10,c(2,13:31,6,7)]
# Write the data out (transposed: one record per column).
write.xlsx(t(data),paste(FILE_SAVE,"check_control.xls",sep=""))
# Export the images referenced by each record.
img_dic = FILE_SAVE
data$no <- 1:nrow(data)
## Export image set 1
# File name: "control_" + first 7 chars of the source image name + row number.
data$img_name <- paste("control","_",substr(data$image1_name,1,7),"_",data$no,".png",sep="")
img_url = as.character(data$image1_data)
img_name = data$img_name
img_num = length(img_url)
# Download each image URL into the output folder under its generated name.
pic_down(img_url,img_dic,img_name,img_num)
|
4685382483890f383303f1a9b9f0da634e303199
|
c6cb61ab1c245610011fe976e8cd1ad462d875e6
|
/ML/LinearRegression/Regression & multiple regression in R script.R
|
ae0374b3d2bc95457120bf9c165a1de807bc6ee2
|
[] |
no_license
|
Pradyumn10/R-Files
|
468c7f9e7b36522d72accc8d11130b50355300f0
|
8b0f3576978b077a9ed0611eeb5f21612a97072b
|
refs/heads/main
| 2023-04-06T07:58:50.031556
| 2021-03-27T16:50:41
| 2021-03-27T16:50:41
| 352,122,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,191
|
r
|
Regression & multiple regression in R script.R
|
# Import the lung-capacity dataset (interactive file picker).
LungCapdata=read.csv(file.choose(), header = T)
# Attach the data so columns are addressable by bare name.
# NOTE(review): attach() masks names and is fragile — prefer data= arguments.
attach(LungCapdata)
#Check names
names(LungCapdata)
class(Age)
class(LungCap)
plot(Age, LungCap, main= "scatterpot")
pairs(LungCapdata[1:3])
# Simple linear model: lung capacity as a function of age.
model= lm(LungCap~Age)
summary(model)
# ---- Multiple regression with a 70/30 train/test split ----
# NOTE(review): sample() is used without set.seed(), so the split (and all
# downstream results) is not reproducible.
dt = sort(sample(nrow(LungCapdata), nrow(LungCapdata)*.7))
train<-LungCapdata[dt,]
test<-LungCapdata[-dt,]
head(train)
head(test)
attach(train)
# Multiple regression on all available predictors.
Reg=lm(LungCap ~ Age + Height + Smoke + Gender + Caesarean)
#Reg=lm(LungCap ~., data = LungCapdata)#
summary(Reg)
# Attach the test set (masks the train columns attached above).
attach(test)
test
# Drop the response column before predicting.
newTest <- subset( test, select = -LungCap )
head(newTest)
# Prediction intervals for the held-out rows.
prediction=predict(Reg, newTest, interval="predict")
head(prediction)
prediction = as.data.frame(prediction)
# Residuals on the test set (actual minus fitted).
se=(test$LungCap)-(prediction$fit)
head(se)
se
# Write predictions to a CSV file.
write.csv(prediction, file = "LungCap_predict.csv")
# Pearson correlation between Age and Height (columns attached from the data).
cor(Age, Height, method = "pearson")
# 95% confidence intervals for the model coefficients.
# BUG FIX: confint()'s argument is `level`, not `conf.level`; the original
# `conf.level = 0.95` did not match any formal argument and was silently
# absorbed by `...`, so only the default level was ever used.
confint(model, level = 0.95)
|
fdb55496611e912365267f2cc89bcadddd202db7
|
21d259a197d60ee17eb7544c94c867bc56901d14
|
/PCA/pca.r
|
6e52cc3971fbf9fba66ba14676372ae2f7919363
|
[
"MIT"
] |
permissive
|
ZahraFarajollahi/Gene-expression-data-in-leukemia
|
b4f0a4e5a501f80e0ea2a4757bb380fdf53d67a3
|
10fa51d379477353376e83c829cc04346330b1d5
|
refs/heads/main
| 2023-07-09T12:41:08.618326
| 2021-08-17T08:34:17
| 2021-08-17T08:34:17
| 387,749,508
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
pca.r
|
library(ggplot2)
# Read the expression matrix; first column holds the gene identifiers.
x1= read.delim("E:/denboer2009-expr.txt")
rownames(x1)= x1[,1]
x3 = x1[,-1]
# Transpose so rows are samples and columns are genes, as prcomp expects.
x= as.data.frame(t(x3))
# Read the sample labels (phenotype table); first column holds sample ids.
y= read.delim("E:/denboer2009-pheno (2).txt")
rownames(y)= y[,1]
y= y[,-1]
# Compute principal components of the gene expression matrix.
pc = prcomp(x)
# Plot & save scree plot of the most important PCs.
jpeg("PCA.jpg")
plot(pc)
title(sub= "Most important principal components of ALL gene expression data")
dev.off()
# Draw & save distribution of samples in the PC1/PC2 plane, coloured by the
# label in the 6th phenotype column (5th after id removal).
pcx = data.frame(pc$x)
sample = y[,5]
jpeg("2d-pc-scaterplot.jpg")
# NOTE(review): inside a sourced script this ggplot expression is not
# auto-printed; wrap it in print() to guarantee it reaches the jpeg device.
ggplot(pcx, aes(PC1,PC2, color = sample)) + geom_point(size=3) + labs(caption =
"This plot shows the distribution of different samples among PC1 and PC2.
It illustrates that PC1 & PC2 separate T-cell ALL from B-Cell ALL.
It is obvious that linear classifiers work well with this mapping. ") +
theme(plot.caption=element_text(hjust = -0.2))
dev.off()
# 3D scatterplot of PC1, PC2 and PC3.
# NOTE(review): plot3d presumably comes from the rgl package, which is not
# loaded here — confirm rgl is attached elsewhere.
plot3d(pcx[,1:3], type= 's', col = y[,6], size= 2)
|
ecfa5e8d40587789866ac32b3dcbc705a69f60d9
|
5bac3ce8fa5ce7921b2c318d46500020b5b4d3d1
|
/man/rndfp.Rd
|
bc54ec331d5da1193d880a87af293c96c9c23bd0
|
[
"Apache-2.0"
] |
permissive
|
CDK-R/fingerprint
|
ce621309e28d00e18e1a284795418e228c507895
|
8da6b320856538a05d5502b8be5191193d714e34
|
refs/heads/master
| 2022-10-26T09:01:21.536490
| 2022-10-16T23:08:52
| 2022-10-16T23:08:52
| 156,985,877
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 779
|
rd
|
rndfp.Rd
|
\name{random.fingerprint}
\alias{random.fingerprint}
\title{
Generate Randomized Fingerprints
}
\description{
A utility function that can be used to generate binary fingerprints
of a specified length with a specified number of bit positions
(selected randomly) set to 1. Currently, bit positions are selected uniformly at random.
}
\usage{
random.fingerprint(nbit,on)
}
\arguments{
\item{nbit}{
The length of the fingerprint, that is, the total number of bits.
Must be a positive integer.
}
\item{on}{
How many positions should be set to 1
}
}
\value{
An object of class \code{fingerprint}
}
\examples{
# make a fingerprint vector
fp <- random.fingerprint(32, 16)
as.character(fp)
}
\keyword{logic}
\author{Rajarshi Guha \email{rguha@indiana.edu}}
|
348ea5dc34e5e7be6d4bcefedde943bbd37874aa
|
4a2f672e9075cd74dc287db0c0d024f157aa3bc7
|
/man/each_mRNA_pSup.Rd
|
81e5fc7991a04d67cd9f42b73868318bc968d13c
|
[] |
no_license
|
jabard89/SedSeqQuant
|
5dc830ed42ab460ef6c812c5c8f228f8fa2e8ce3
|
fe13dc1c1495b06547905e7bbee4aa5c271daea0
|
refs/heads/master
| 2023-07-27T00:27:35.139179
| 2020-08-20T05:34:39
| 2020-08-20T05:34:39
| 670,822,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,000
|
rd
|
each_mRNA_pSup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_pSup.R
\name{each_mRNA_pSup}
\alias{each_mRNA_pSup}
\title{Represent pSup for each transcript}
\usage{
each_mRNA_pSup(
wide_data,
chains = 4,
iter = 1000,
control = list(adapt_delta = 0.85)
)
}
\arguments{
\item{wide_data}{A wider data frame}
\item{chains}{A number, default to 4}
\item{iter}{A number, default to 1000}
\item{control}{A list of parameters, default to list(adapt_delta = 0.85)}
}
\description{
Clear format to represent the proportion of each transcript in the Supernatant.
}
\details{
The input data should be the wide format in terms of fractions.
The function calculate_pSup from this package is called internally. The output is
a data frame that shows the pSup value of each transcript in each fraction.
}
\examples{
\dontrun{
each_mRNA_pSup(wide_data)
each_mRNA_pSup(wide_data = mydata)
}
}
\seealso{
[rstan::sampling()], `browseVignettes("rstan")`
}
\keyword{pSup}
\keyword{supernatant}
|
ad03f864ce66d78483b909e913e572ec9a9f14fe
|
016ce892d3742148c23bfd1eb5d0e4553e549d37
|
/R/onloader.R
|
a586634fbc48abc7a4e4899bddb5a0f339453e2e
|
[] |
no_license
|
RobinHankin/cmvnorm
|
567dd1d4aeacc769d7cfa677161beb2da65af242
|
d2ca1279baa837f45090f9786eea6219e679a6d3
|
refs/heads/master
| 2023-08-08T17:06:25.029213
| 2023-07-31T20:56:37
| 2023-07-31T20:56:37
| 133,745,286
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
onloader.R
|
# Package load hook: install `sd.default` and `var.default` into this
# package's namespace, pointing at the standard stats implementations —
# presumably so the package's own sd()/var() generics can fall back to them.
`.onLoad` <- function(libname, pkgname) {
  ns <- asNamespace(pkgname)
  for (nm in c("sd", "var")) {
    assign(paste0(nm, ".default"), get(nm, envir = asNamespace("stats")), envir = ns)
  }
}
|
0ed465b5b17aa9c97133e810a3ed61888d19827a
|
35902ea94d808a4cf80050b3eba004d3fc2eef9d
|
/scripts/fit_model_horizontal.R
|
23c235c63c3d3771d8d0f783dfc661deb821246a
|
[
"CC0-1.0"
] |
permissive
|
Kucharssim/WALD-EM
|
79f2099b3490bba479f4a6700d524cbd71e607d4
|
9a34d02ba2f565b0291099b495f2683a0b0562b7
|
refs/heads/master
| 2023-01-06T05:08:52.399454
| 2020-11-10T10:55:43
| 2020-11-10T10:55:43
| 248,290,398
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
fit_model_horizontal.R
|
# Application of the Dynamic model of eye movements on the Renswoude's object familiarity data
library(rstan)
# Disable compiled-model caching so the model is always rebuilt from source.
rstan_options(auto_write = FALSE)
library(here)
# Load the pre-built `stan_data` list saved by an earlier preparation script.
load(here::here("saves", "stan_data.Rdata"))
# Compile the Stan model (objects + central bias + distance + saliency,
# horizontal variant).
model <- rstan::stan_model(here::here("stan", "objects_central_distance_saliency_horizontal.stan"))
# Sample: 10 chains in parallel, 1000 warmup + 1000 post-warmup iterations each.
fit <- rstan::sampling(model, stan_data, chains = 10, cores = 10, warmup = 1000, iter = 2000)
# Persist the fitted object for downstream analysis.
save(fit, file = here::here("saves", "fit_model_horizontal.Rdata"))
|
de450e9c97f9f3be948bd98839c7aec8cdab9a68
|
478bed5a171f199e699dc6a8061ceddad199daea
|
/cachematrix.R
|
bb98f40e9725443b0396111af4a03002f5823b77
|
[] |
no_license
|
nicaless/ProgrammingAssignment2
|
a51d60dd18fae3333ee63ec985ff6b1145062160
|
f74ffbb63473598878a4e9efa87d95a99032ba4f
|
refs/heads/master
| 2021-01-21T19:44:25.826021
| 2015-01-18T01:43:45
| 2015-01-18T01:43:45
| 29,324,807
| 0
| 0
| null | 2015-01-16T00:17:00
| 2015-01-16T00:17:00
| null |
UTF-8
|
R
| false
| false
| 1,024
|
r
|
cachematrix.R
|
## makeCacheMatrix wraps a matrix in a closure-backed "cache object":
## a list of accessor functions sharing private state (the matrix and its
## cached inverse). Replacing the matrix via set() invalidates the cache.
##  - set(y):          store a new matrix and clear the cached inverse
##  - get():           return the stored matrix
##  - setinverse(v):   cache an inverse
##  - getinverse():    return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(value) cached_inverse <<- value,
    getinverse = function() cached_inverse
  )
}
## cacheSolve returns the inverse of a matrix wrapped by makeCacheMatrix.
## On a cache hit it emits a message and returns the stored inverse; on a
## miss it computes the inverse with solve(), caches it, and returns it.
## Assumes the stored matrix is invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get())
  x$setinverse(inverse)
  inverse
}
|
2c9377a9c35c4f4d869c6965c827e51ceabdb35f
|
593d98cb8f5ebd5ef7c758f778c347c450665995
|
/R/Hello World.R
|
718281eef79b718afa411a8fce2db532a92d14b4
|
[] |
no_license
|
AltamashRafiq/PSLMdata
|
995ba1a47efd42a6049363ce24cd3bfe3bf4b9de
|
4e3fce4ec688e8885e160c0f11fe913912a04360
|
refs/heads/master
| 2020-06-09T17:43:45.046123
| 2019-06-24T10:31:42
| 2019-06-24T10:31:42
| 193,476,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
Hello World.R
|
# Print a simple greeting for the supplied name and return the greeting
# string (invisibly, via print()'s return value).
hello_function <- function(x) {
  greeting <- paste("Hello ", x)
  print(greeting)
}
|
235c526059d5a2811f4217dea868190ceb05794e
|
712d7bcb5911eceea974198f8f047e0b36f619fe
|
/01 Supervised Learning : Classification/404 large tree.r
|
9f749ab4c64323b0bed126e957da828aafb705f0
|
[] |
no_license
|
chunhuayu/Machine-Learning-in-R
|
6e56c51cb383a5268b4b51b794e22a8893e61b5f
|
e26efba073cc93673f4b81aab6bae04721b06c17
|
refs/heads/master
| 2020-06-02T20:27:00.522742
| 2019-06-28T22:50:19
| 2019-06-28T22:50:19
| 191,299,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,822
|
r
|
404 large tree.r
|
### Building and evaluating a larger tree
# Previously, you created a simple decision tree that used the applicant's credit score and requested loan amount to predict the loan outcome.
# Lending Club has additional information about the applicants, such as home ownership status, length of employment, loan purpose, and past bankruptcies, that may be useful for making more accurate predictions.
# Using all of the available applicant data, build a more sophisticated lending model using the random training dataset created previously. Then, use this model to make predictions on the testing dataset to estimate the performance of the model on future loan applications.
# The rpart package is loaded into the workspace and the loans_train and loans_test datasets have been created.
### Instructions
# Use rpart() to build a loan model using the training dataset and all of the available predictors.
# ..............Again, leave the control argument alone.
# Applying the predict() function to the testing dataset, create a vector of predicted outcomes.
# ..............Don't forget the type argument.
# Create a table() to compare the predicted values to the actual outcome values.
# Compute the accuracy of the predictions using the mean() function.
### R
> # Grow a tree using all of the available applicant data
> loan_model <- rpart(outcome ~ ., data = loans_train, method = "class", control = rpart.control(cp = 0))
>
> # Make predictions on the test dataset
> loans_test$pred <- predict(loan_model, loans_test, type = "class")
>
> # Examine the confusion matrix
> table(loans_test$pred, loans_test$outcome)
default repaid
default 821 546
repaid 632 829
>
> # Compute the accuracy on the test dataset
> mean(loans_test$pred == loans_test$outcome)
[1] 0.5834512
>
|
5d55f2aa80561772cc7f29e80abc27836e1ce321
|
bf4f66294eb2e5bd7fe9e5f2decc5def7a748654
|
/R/simulation.R
|
824273fa531a1a5a2ae438650d38399e3d943742
|
[] |
no_license
|
jaSunny/DynamicPricingModeling
|
2325a560c495df0810043b70fb7e2cf0d933cb3a
|
85215df5ec317b223268f04643a13297c26efe08
|
refs/heads/master
| 2020-12-25T14:33:53.076753
| 2016-09-08T14:53:40
| 2016-09-08T15:10:33
| 67,712,501
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 607
|
r
|
simulation.R
|
# Simulate an inventory/pricing process over TT periods.
#   TT      - number of time steps to simulate
#   N       - initial inventory (also indexes the opening price a_opt[N])
#   a_opt   - optimal price for each remaining-inventory level
#   steps1  - price-grid step size for prices <= 40
#   steps2  - price-grid step size for prices > 40
#   prices1 - the first (fine-grained) price grid; its length offsets the
#             index into the second grid
#   pi      - matrix whose row 2 holds the sale probability per price index
#             (shadows base::pi inside this function)
# Returns a data.frame(nt, at): remaining inventory and posted price per step.
# NOTE(review): the RNG draw happens only when nt[t-1] > 0 (short-circuit &&),
# so the sequence of runif() calls depends on the inventory path — preserve
# this exactly if refactoring.
simulateSales <- function(TT,N,a_opt,steps1,steps2,prices1,pi){
nt <- as.vector(rep(N, TT))
at <- as.vector(rep(0, TT))
at[1] <- a_opt[N]
for (t in 2:TT) {
# Map the previous price onto its index in the (two-segment) price grid;
# 40 is the boundary between the fine (steps1) and coarse (steps2) grids.
if(at[t-1]<=40){
index <- (at[t-1] -1 )/steps1 +1
}else{
index <- length(prices1)+(at[t-1] -41 )/steps2 +1
}
# A unit sells with probability pi[2, index] while stock remains.
if(nt[t-1]>0 && runif(1,0,1)<pi[2,index]) { #we can adjust the sales probability here
nt[t] <- max(0,nt[t-1]-1)
}
else{
nt[t] <- max(0,nt[t-1])
}
# Re-price according to the optimal price for the new inventory level.
if(nt[t]>0){
at[t] <- a_opt[nt[t]]
}
}
res <- data.frame(nt,at)
return(res)
}
|
26143978223ead07428af4ed35316d45db690311
|
14861c06e2d49b59120fb101101ff91a067ef8af
|
/R/shrink.R
|
99c174e387d3567f37ab4d7002938de45ed92d47
|
[] |
no_license
|
cran/mda
|
8697d7d339979138f75a2648d0601e5c1eb4a04a
|
33dcbc6062f93725dd9448dd92b348f8d8d43003
|
refs/heads/master
| 2023-07-06T05:12:07.668796
| 2023-06-23T03:00:02
| 2023-06-23T03:00:02
| 17,697,377
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55
|
r
|
shrink.R
|
# S3 generic: dispatches to class-specific shrink.* methods.
shrink <- function(object, ...) {
  UseMethod("shrink")
}
|
52a60e73dba9015cfa0d31ee4aceb5532e35d878
|
96e54a2f183ac913cd533b22560dbb6f9de98e64
|
/man/createCrossRMPlot.Rd
|
f2142ccc120c22b67aa901ff672c418d5ff866de
|
[] |
no_license
|
cran/KarsTS
|
fe9e7cb51abd77edc1cf461b92fe86e9c760b9a8
|
a61bf7a479a7eeba1d2af68ff0fab8041b3d3fe2
|
refs/heads/master
| 2021-08-16T07:19:03.010559
| 2021-01-14T19:50:05
| 2021-01-14T19:50:05
| 92,603,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 612
|
rd
|
createCrossRMPlot.Rd
|
\name{createCrossRMPlot}
\alias{createCrossRMPlot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
createCrossRMPlot: creates a cross recurrence plot
}
\description{
This function creates a cross recurrence plot from a previously created cross recurrence matrix. It is invoked through the Plot Cross Recurrence Plot option in the Plots menu.
}
%- maybe also 'usage' for other objects documented here.
\references{
Marwan,R., Romano, M.C., Thiel,M., Kurths,J.(2007): Recurrence plots for the analysis of complex systems. Physics Reports 438, 237-329.
}
\author{
Marina Saez Andreu
}
|
71219834b9ae1513bfb8b038305ff4f0e0781f67
|
a3f9b39352ae4409dab117b1a1c129a8778585fb
|
/ONSPriceQuotes.R
|
09c71cb17d69150980c7a3addb940ac0f0fbad0a
|
[] |
no_license
|
VictimOfMaths/Routine-Data
|
01a7a416b4f0bde909a0e15518c6cf767739f362
|
466ed22342dcb8ec941806497385f2b7f7e1d8ca
|
refs/heads/master
| 2023-07-20T10:29:15.453387
| 2023-07-17T11:52:15
| 2023-07-17T11:52:15
| 245,402,797
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82,209
|
r
|
ONSPriceQuotes.R
|
rm(list=ls())
library(tidyverse)
library(curl)
library(extrafont)
library(forcats)
library(scales)
library(paletteer)
library(lubridate)
library(ragg)
library(ggridges)
library(RcppRoll)
library(ggrepel)
library(ggtext)
# Custom ggplot2 theme: theme_classic() with the listed elements replaced
# (%+replace% overrides rather than merges). Sets Lato as the base font,
# bold/enlarged titles, grey caption/axis text, and plot-aligned title and
# caption positioning. Assumes the Lato font is registered (extrafont).
theme_custom <- function() {
theme_classic() %+replace%
theme(plot.title.position="plot", plot.caption.position="plot",
strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
strip.clip="off",
plot.title=element_text(face="bold", size=rel(1.5), hjust=0,
margin=margin(0,0,5.5,0)),
text=element_text(family="Lato"),
plot.subtitle=element_text(colour="Grey40", hjust=0, vjust=1),
plot.caption=element_text(colour="Grey40", hjust=1, vjust=1, size=rel(0.8)),
axis.text=element_text(colour="Grey40"),
axis.title=element_text(colour="Grey20"),
legend.text=element_text(colour="Grey40"),
legend.title=element_text(colour="Grey20"))
}
#Create list of non-alcohol products to include
comparators <- c(220107, 220318)
#Glossary for datasets can be found here: https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fglossary/glossaryrevised.xls
#Download ONS price quotes - credit to Peter Donaghy (@peterdonaghy) for bringing this data to my attention
#May 2023
temp <- tempfile()
url <- "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesmay2023/upload-pricequotes202305.csv"
temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
data2305 <- read_csv(temp) %>%
filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
VALIDITY %in% c(3,4)) %>%
mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d"))
#April 2023
temp <- tempfile()
url <- "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesapril2023/upload-pricequotes202304.csv"
temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
data2304 <- read_csv(temp) %>%
filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
VALIDITY %in% c(3,4)) %>%
mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d"))
#March 2023
temp <- tempfile()
url <- "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesmarch2023/upload-pricequotes202303.csv"
temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
data2303 <- read_csv(temp) %>%
filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
VALIDITY %in% c(3,4)) %>%
mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d"))
#February 2023 - December 2018: monthly price-quote files published as
#plain CSVs. Each file is downloaded to a tempfile, filtered to the
#comparator items plus items whose ID starts 310/320, restricted to valid
#quotes (VALIDITY 3 or 4), given a proper Date column built from the
#YYYYMM QUOTE_DATE, and assigned to a variable dataYYMM (two-digit year +
#month) - the names the rest of the script expects.
#ONS URLs are irregular (percent-encoding, "pricesquotes" typos, v1/v7
#"previous" paths, and odd file names change between releases), so each
#month's URL is listed explicitly rather than constructed.
monthly_csv_urls <- c(
  data2302 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesfebruary2023/upload-pricequotes202302.csv",
  data2301 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesjanuary2023/upload-pricequotes202301.csv",
  data2212 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesdecember2022/upload-pricequotes202212.csv",
  data2211 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesnovember2022/upload-pricequotes202211.csv",
  data2210 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesoctober2022/upload-pricequotes202210.csv",
  data2209 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesseptember2022/upload-pricequotes202209.csv",
  data2208 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesaugust2022/upload-pricequotes202208.csv",
  data2207 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesjuly2022/upload-pricequotes202207.csv",
  data2206 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesjune2022/upload-pricequotes202206.csv",
  data2205 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotesmay2022/upload-pricequotes202205.csv",
  data2204 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesapril2022/upload-pricequotes202204.csv",
  data2203 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesmarch2022/upload-pricequotes202203.csv",
  data2202 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesfebruary2022/upload-pricequotes202202.csv",
  data2201 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesjanuary2022/upload-pricequotes202201.csv",
  data2112 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesdecember2021/upload-pricequotes202112.csv",
  data2111 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesnovember2021/upload-pricequotes202111.csv",
  data2110 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesoctober2021/upload-pricequotes202110.csv",
  data2109 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesseptember2021/upload-pricequotes202109.csv",
  data2108 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesaugust2021/upload-pricequotes202108.csv",
  data2107 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesjuly2021/upload-pricequotes202107.csv",
  data2106 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesjune2021/upload-pricequotes202106.csv",
  #Note the genuinely odd "2021051" file name for May 2021 - present in the
  #published URL, do not "fix" it.
  data2105 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesmay2021/upload-pricequotes2021051.csv",
  data2104 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesapril2021/upload-pricequotes202104.csv",
  data2103 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesmarch2021/upload-pricequotes202103.csv",
  data2102 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesfebruary2021/upload-pricequotes202102.csv",
  data2101 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesjanuary2021/upload-pricequotes202101.csv",
  data2012 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesdecember2020/upload-pricequotes202012.csv",
  data2011 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesnovember2020/upload-pricequotes202011.csv",
  data2010 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesoctober2020/upload-pricequotes202010.csv",
  data2009 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesseptember2020/upload-pricequotes202009.csv",
  data2008 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesaugust2020/upload-pricequotes202008.csv",
  data2007 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricesquotesjuly2020/upload-pricequotes202007.csv",
  data2006 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesjune2020/upload-pricequotes202006.csv",
  data2005 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricesquotesmay2020/upload-202005pricequotes.csv",
  data2004 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricesquotesapril2020/upload-202004pricequotes.csv",
  data2003 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricesquotesmarch2020/upload-pricequotes202003.csv",
  data2002 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricesquotesfebruary2020/upload-pricequotes202002.csv",
  data2001 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricesquotesjanuary2020/upload-pricequotes202001.csv",
  data1912 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesdecember2019/upload-pricequotes201912v1.csv",
  data1911 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesnovember2019/upload-pricequotes201911.csv",
  data1910 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesoctober2019/upload-pricequotes201910.csv",
  data1909 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesseptember2019/upload-pricequotes201909.csv",
  data1908 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotes2019/upload-pricequotes201908.csv",
  data1907 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2019/previous/v7/upload-pricequotes201907.csv",
  data1906 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2019/previous/v6/upload-pricequotes201906.csv",
  data1905 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2019/previous/v5/upload-pricequote201905.csv",
  data1904 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2019/previous/v4/upload-pricequote201904.csv",
  data1903 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2019/previous/v3/upload-pricequotes201903.csv",
  data1902 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2019/previous/v2/upload-pricequotes201902.csv",
  data1901 = "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2019/previous/v1/upload-pricequotes201901.csv",
  data1812 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesdecember2018/upload-pricequotes201812.csv")

for (varname in names(monthly_csv_urls)) {
  temp <- tempfile()
  url <- monthly_csv_urls[[varname]]
  temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
  #assign() at top level creates the dataYYMM object in the global
  #environment, exactly as the original one-assignment-per-month code did.
  assign(varname,
         read_csv(temp) %>%
           filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
                    VALIDITY %in% c(3,4)) %>%
           mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d")))
}
#Nov 2018 - Dec 2017: files provided zipped from here on back.
#BUG FIX: the original code called unzip(zipfile=temp, exdir=temp2) but
#then read read_csv(temp) - i.e. the raw downloaded .zip bytes at an
#extension-less tempfile path, which readr cannot parse (it detects
#compression from the file extension). The extracted file returned by
#unzip() is what must be read.
#Each month's data is filtered to comparator items plus items whose ID
#starts 310/320, valid quotes only (VALIDITY 3 or 4), with a Date column
#built from the YYYYMM QUOTE_DATE, and assigned to dataYYMM as before.
zipped_quote_urls <- c(
  data1811 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotenovember2018/pricequote201811.zip",
  data1810 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequoteoctober2018/pricequote201810.zip",
  data1809 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequoteseptember2018/pricequote201809.zip",
  data1808 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequoteaugust2018/pricequote201808.zip",
  data1807 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotejuly2018/pricequote201807.zip",
  data1806 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotejune2018/pricequote201806.zip",
  data1805 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotemay2018/pricequote201805.zip",
  data1804 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequoteapril2018/pricequote201804.zip",
  data1803 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotemarch2018/pricequote201803.zip",
  data1802 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotefebruary2018/pricequote201802.zip",
  data1801 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotejanuary2018/pricequote201801.zip",
  data1712 = "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotesdecember2017/pricequote201712.zip")

for (varname in names(zipped_quote_urls)) {
  temp <- tempfile()
  temp2 <- tempfile()
  url <- zipped_quote_urls[[varname]]
  temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
  #unzip() returns the path(s) of the extracted file(s); each archive is
  #expected to hold a single CSV, so read the first extracted path.
  extracted <- unzip(zipfile=temp, exdir=temp2)
  assign(varname,
         read_csv(extracted[1]) %>%
           filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
                    VALIDITY %in% c(3,4)) %>%
           mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d")))
}
##################################
#Mar-Nov 2017: all published as a single zip containing one inner zip per month
#(price_quote_201703.zip ... price_quote_201711.zip). The nine identical
#ingest blocks are collapsed into one loop; read_csv() reads each inner
#single-file zip directly.
temp <- tempfile()
temp2 <- tempfile()
url <- "https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2fpricequotes2017/pricequote201703to201711.zip"
temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
unzip(zipfile=temp, exdir=temp2)
for (mm in sprintf("%02d", 3:11)) {
  #e.g. data1711 <- processed price_quote_201711.zip
  assign(paste0("data17", mm),
         read_csv(file.path(temp2, paste0("price_quote_2017", mm, ".zip"))) %>%
           #Alcohol/tobacco (310/320) plus comparators, validated quotes only
           filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
                    VALIDITY %in% c(3,4)) %>%
           mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d")))
}
#Feb 2017
#NOTE(review): this vintage of the file has truncated column headers -
#STRATUM_WEI is renamed to match later files, and the filter below uses the
#truncated VALIDI name (presumably the truncated VALIDITY column - confirm
#against the raw file). VALIDI is left un-renamed because it is not used
#downstream after this filter.
temp <- tempfile()
temp2 <- tempfile()
url <- "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2017/previous/v2/pricequote201702.zip"
temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
unzip(zipfile=temp, exdir=temp2)
data1702 <- read_csv(temp) %>%
  #Standardise header case before filtering
  set_names(toupper(colnames(.))) %>%
  rename("STRATUM_WEIGHT"="STRATUM_WEI") %>%
  filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
           VALIDI %in% c(3,4)) %>%
  mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d"))
#Jan 2017 - header case is standardised to upper case to match later vintages
temp <- tempfile()
temp2 <- tempfile()
url <- "https://www.ons.gov.uk/file?uri=/economy/inflationandpriceindices/datasets/consumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes/pricequotes2017/previous/v1/pricequote201701.zip"
temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
unzip(zipfile=temp, exdir=temp2)
data1701 <- read_csv(temp) %>%
  set_names(toupper(colnames(.))) %>%
  #Keep alcohol/tobacco items (IDs starting 310/320) plus comparator items
  filter(ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) %>%
  #Keep validated price quotes only
  filter(VALIDITY %in% c(3,4)) %>%
  mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d"))
##########
#From here on back it's quarterly, published as individual csv files.
#The eight duplicated download/ingest blocks are collapsed into one loop.
#Path fragments are irregular on the ONS site ("pricequote"/"pricequotes"),
#so they are listed verbatim rather than built programmatically.
#set_names(toupper(...)) is applied uniformly: it is a no-op for files whose
#headers are already upper case (2016 Q3/Q4 in the original code), and
#standardises the rest.
quarterlycsvs <- c(
  data16q4="pricequotesquarter42016/upload-pricequote2016q4.csv",
  data16q3="pricequotequarter32016/upload-pricequote2016q3.csv",
  data16q2="pricequotequarter22016/upload-pricequote2016q2.csv",
  data16q1="pricequotequarter12016/upload-pricequote2016q1.csv",
  data15q4="pricequotequarter42015/upload-pricequote2015q4.csv",
  data15q3="pricequotesquarter32015/upload-pricequote2015q3.csv",
  data15q2="pricequotesquarter22015/upload-pricequote2015q2.csv",
  data15q1="pricequotesquarter12015/upload-pricequote2015q1.csv")

for (nm in names(quarterlycsvs)) {
  temp <- tempfile()
  url <- paste0("https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2f",
                quarterlycsvs[nm])
  temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
  #Alcohol/tobacco (310/320) plus comparators, validated quotes only
  assign(nm,
         read_csv(temp) %>%
           set_names(toupper(colnames(.))) %>%
           filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
                    VALIDITY %in% c(3,4)) %>%
           mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d")))
}
#2010-2014: one zip per year, each containing four quarterly csvs named
#price_quote_YYYY_qN.csv. The twenty duplicated ingest blocks are collapsed
#into a nested loop. Path fragments are irregular on the ONS site (note
#"pricesquote2013"), so they are listed verbatim. Headers are standardised to
#upper case (a no-op where they already are).
yearlyzips <- c(
  `2014`="pricequotes2014/pricequote2014.zip",
  `2013`="pricesquote2013/pricequote2013.zip",
  `2012`="pricequotes2012/pricequote2012.zip",
  `2011`="pricequotes2011/pricequote2011.zip",
  `2010`="pricequotes2010/pricequote2010.zip")

for (yr in names(yearlyzips)) {
  temp <- tempfile()
  temp2 <- tempfile()
  url <- paste0("https://www.ons.gov.uk/file?uri=%2feconomy%2finflationandpriceindices%2fdatasets%2fconsumerpriceindicescpiandretailpricesindexrpiitemindicesandpricequotes%2f",
                yearlyzips[yr])
  temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
  unzip(zipfile=temp, exdir=temp2)
  for (q in 1:4) {
    #e.g. data14q4 <- processed price_quote_2014_q4.csv
    assign(paste0("data", substr(yr, 3, 4), "q", q),
           read_csv(file.path(temp2, paste0("price_quote_", yr, "_q", q, ".csv"))) %>%
             set_names(toupper(colnames(.))) %>%
             #Alcohol/tobacco (310/320) plus comparators, validated quotes only
             filter((ITEM_ID %in% comparators | substr(ITEM_ID, 1, 3) %in% c("310", "320")) &
                      VALIDITY %in% c(3,4)) %>%
             mutate(date=as.Date(paste0(QUOTE_DATE, "01"), format="%Y%m%d")))
  }
}
#Stick it all together
#2012 and older data includes item *codes* only, not descriptions, and some
#item descriptions have changed over time while the codes have stayed the
#same, so build an ITEM_ID -> (most recent) ITEM_DESC lookup from the
#2013-onwards data
lookup <- bind_rows(data2305, data2304, data2303, data2302, data2301, data2212, data2211, data2210,
                    data2209, data2208, data2207, data2206, data2205, data2204, data2203, data2202,
                    data2201, data2112, data2111, data2110, data2109,
                    data2108, data2107, data2106, data2105, data2104, data2103, data2102, data2101,
                    data2012, data2011, data2010, data2009, data2008, data2007, data2006, data2005,
                    data2004, data2003, data2002, data2001, data1912, data1911, data1910, data1909,
                    data1908, data1907, data1906, data1905, data1904, data1903, data1902, data1901,
                    data1812, data1811, data1810, data1809, data1808, data1807, data1806, data1805,
                    data1804, data1803, data1802, data1801, data1712, data1711, data1710, data1709,
                    data1708, data1707, data1706, data1705, data1704, data1703, data1702, data1701,
                    data16q4, data16q3, data16q2, data16q1, data15q4, data15q3, data15q2, data15q1,
                    data14q4, data14q3, data14q2, data14q1, data13q4, data13q3, data13q2, data13q1) %>%
  #Collapse to one row per code/description/month combination
  group_by(ITEM_ID, ITEM_DESC, date) %>%
  summarise(n_obs=n(), .groups="drop") %>%
  #Where a code's description has changed, keep the most recently used one
  arrange(date) %>%
  group_by(ITEM_ID) %>%
  slice_tail(n=1) %>%
  ungroup() %>%
  select(-c(n_obs, date))
#Combine every monthly/quarterly extract (2010-2023), replace the raw item
#descriptions with the harmonised lookup, then classify each quote
fulldata <- bind_rows(data2305, data2304, data2303, data2302, data2301, data2212, data2211, data2210,
                      data2209, data2208, data2207, data2206, data2205, data2204, data2203, data2202,
                      data2201, data2112, data2111, data2110, data2109,
                      data2108, data2107, data2106, data2105, data2104, data2103, data2102, data2101,
                      data2012, data2011, data2010, data2009, data2008, data2007, data2006, data2005,
                      data2004, data2003, data2002, data2001, data1912, data1911, data1910, data1909,
                      data1908, data1907, data1906, data1905, data1904, data1903, data1902, data1901,
                      data1812, data1811, data1810, data1809, data1808, data1807, data1806, data1805,
                      data1804, data1803, data1802, data1801, data1712, data1711, data1710, data1709,
                      data1708, data1707, data1706, data1705, data1704, data1703, data1702, data1701,
                      data16q4, data16q3, data16q2, data16q1, data15q4, data15q3, data15q2, data15q1,
                      data14q4, data14q3, data14q2, data14q1, data13q4, data13q3, data13q2, data13q1,
                      data12q4, data12q3, data12q2, data12q1, data11q4, data11q3, data11q2, data11q1,
                      data10q4, data10q3, data10q2, data10q1) %>%
  #Drop the inconsistent/absent raw descriptions and attach the harmonised
  #ones. NOTE(review): base merge() joins on the only shared column (ITEM_ID)
  #as an inner join and does not preserve row order - rows whose ITEM_ID is
  #missing from the 2013+ lookup are silently dropped.
  select(-ITEM_DESC) %>%
  merge(lookup) %>%
  #SHOP_TYPE code 1 = multiple retailer, anything else treated as independent
  mutate(SHOP_TYPE=if_else(SHOP_TYPE==1, "Multiple", "Independent"),
         #Broad product category: tobacco items listed explicitly; comparator
         #items and mixers are "Other"; everything remaining (all 310/320
         #items) is alcohol
         product_cat=case_when(
           ITEM_DESC %in% c("20 FILTER - OTHER BRAND", "5 CIGARS: SPECIFY BRAND",
                            "CIGARETTES 12", "CIGARETTES 15", "CIGARETTES18", "CIGARETTES 20",
                            "CIGARETTES 21", "CIGARETTES 22",
                            "CIGARETTES 8", "HAND ROLLING TOBACCO PACK 30GM",
                            "E-CIG REFILL BOTTL/CART 2-10ML") ~ "Tobacco",
           ITEM_ID %in% comparators | ITEM_DESC=="BOTTLE OF MIXER 125-200ML" ~ "Other",
           TRUE ~ "Alcohol"),
         #Finer categorisation by drink/tobacco type, keyed off the harmonised
         #description strings
         product_cat_detail=case_when(
           ITEM_DESC %in% c("20 FILTER - OTHER BRAND", "CIGARETTES 12", "CIGARETTES 15", "CIGARETTES18",
                            "CIGARETTES 20", "CIGARETTES 21", "CIGARETTES 22", "CIGARETTES 8") ~ "Cigarettes",
           ITEM_DESC=="5 CIGARS: SPECIFY BRAND" ~ "Cigars",
           ITEM_DESC=="HAND ROLLING TOBACCO PACK 30GM" ~ "RYO Tobacco",
           ITEM_DESC=="E-CIG REFILL BOTTL/CART 2-10ML" ~ "E-cigarettes",
           ITEM_DESC %in% c("APPLE CIDER 4 CAN PK 440-500ML", "APPLE CIDER 500-750ML 4.5-5.5%",
                            "CIDER-PER PINT OR 500-568ML", "CIDER FLAVOURED BOTT 500-568ML",
                            "CIDER 4.5%-5.5% ABV PINT/BOTTL") ~ "Cider",
           ITEM_DESC %in% c("BITTER-4CANS-440-500ML", "BOTTLE OF LAGER IN NIGHTCLUB",
                            "LAGER 10-24 BOTTLES 250-330ML",
                            "LAGER 10 - 24 CANS (440-500ML)", "LAGER 4 BOTTLES- PREMIUM",
                            "SPEC'Y BEER BOTT 500ML 4-5.5", "STOUT - 4 CAN PACK",
                            "BOTTLED PREMIUM LAGER 4.3-7.5%","DRAUGHT BITTER (PER PINT)",
                            "DRAUGHT STOUT PER PINT") ~ "Beer",
           ITEM_DESC %in% c("FORTIFIED WINE (70-75CL)", "RED WINE- EUROPEAN 75CL",
                            "RED WINE- NEW WORLD 75CL", "ROSE WINE-75CL BOTTLE",
                            "SPARKLING WINE 75CL MIN 11%ABV", "WHITE WINE- EUROPEAN 75CL",
                            "WHITE WINE- NEW WORLD 75CL", "BOTTLE OF CHAMPAGNE 75 CL",
                            "BOTTLE OF WINE 70-75CL", "BOTTLE OF CHAMPAGNE") ~ "Wine",
           ITEM_DESC %in% c("BRANDY 70CL BOTTLE", "CREAM LIQUER 70CL-1LT 14-20%",
                            "GIN BOTTLE 70CL", "PRE MIXED SPIRIT 250-330ML",
                            "RUM WHITE EG BACARDI 70CL BOTT", "VODKA-70 CL BOTTLE", "WHISKY-70 CL BOTTLE",
                            "GIN PER NIP", "LIQUEUR PER NIP SPECIFY ML", "SPIRIT BASED DRINK 250-330MLS",
                            "SPIRIT BASED DRINK 275ML", "VODKA (PER NIP) SPECIFY ML",
                            "WHISKY (PER NIP) SPECIFY ML") ~ "Spirits",
           TRUE ~ "Other"),
         #Separate out on- and off-trade (currently done on the basis of inspecting prices)
         channel=case_when(
           ITEM_DESC %in% c("APPLE CIDER 4 CAN PK 440-500ML", "BITTER-4CANS-440-500ML",
                            "BRANDY 70CL BOTTLE", "CIDER FLAVOURED BOTT 500-568ML",
                            "CREAM LIQUER 70CL-1LT 14-20%", "FORTIFIED WINE (70-75CL)",
                            "GIN BOTTLE 70CL", "LAGER 10-24 BOTTLES 250-330ML",
                            "LAGER 10 - 24 CANS (440-500ML)", "LAGER 4 BOTTLES- PREMIUM",
                            "PRE MIXED SPIRIT 250-330ML", "RED WINE- EUROPEAN 75CL",
                            "RED WINE- NEW WORLD 75CL", "ROSE WINE-75CL BOTTLE",
                            "RUM WHITE EG BACARDI 70CL BOTT", "SPARKLING WINE 75CL MIN 11%ABV",
                            "SPEC'Y BEER BOTT 500ML 4-5.5", "STOUT - 4 CAN PACK",
                            "VODKA-70 CL BOTTLE", "WHISKY-70 CL BOTTLE",
                            "WHITE WINE- EUROPEAN 75CL", "WHITE WINE- NEW WORLD 75CL",
                            "APPLE CIDER 500-750ML 4.5-5.5%") ~ "Off-trade",
           ITEM_DESC %in% c("BOTTLE OF CHAMPAGNE 75 CL", "BOTTLE OF CHAMPAGNE", "BOTTLE OF MIXER 125-200ML",
                            "BOTTLE OF WINE 70-75CL", "BOTTLED PREMIUM LAGER 4.3-7.5%",
                            "CIDER 4.5%-5.5% ABV PINT/BOTTL", "DRAUGHT BITTER (PER PINT)",
                            "DRAUGHT STOUT PER PINT", "GIN PER NIP", "LAGER - PINT 3.4-4.2%",
                            "LIQUEUR PER NIP SPECIFY ML", "PREMIUM LAGER - PINT 4.3-7.5%",
                            "SPIRIT BASED DRINK 275ML", "VODKA (PER NIP) SPECIFY ML",
                            "WHISKY (PER NIP) SPECIFY ML", "WINE, PER 175 - 250 ML SERVING",
                            "CIDER-PER PINT OR 500-568ML", "SPIRIT BASED DRINK 250-330MLS") ~ "On-trade",
           TRUE ~ "N/A"),
         #ONS REGION codes -> names. NOTE(review): there is no case for
         #REGION==1, so any such rows get NA here - confirm whether code 1
         #exists in the data and is intentionally excluded.
         region=case_when(
           REGION==2 ~ "London", REGION==3 ~ "South East England", REGION==4 ~ "South West England",
           REGION==5 ~ "East Anglia", REGION==6 ~ "East Midlands", REGION==7 ~ "West Midlands",
           REGION==8 ~ "Yorkshire & Humberside", REGION==9 ~ "North West England",
           REGION==10 ~ "North East England", REGION==11 ~ "Wales", REGION==12 ~ "Scotland",
           REGION==13 ~ "Northern Ireland")) %>%
  select(ITEM_ID, ITEM_DESC, SHOP_CODE, PRICE, STRATUM_WEIGHT, STRATUM_TYPE, SHOP_TYPE,
         SHOP_WEIGHT, date, product_cat, product_cat_detail, channel, region)
#Write data out
#NOTE(review): absolute network drive path - will fail off that machine.
#write.csv() also writes a row-number column; consider row.names=FALSE if the
#file is re-read elsewhere.
write.csv(fulldata, "X:/ScHARR/SARG_SAPM_3_5/General/Data/ONS Price Quotes/Fulldata.csv")
#Heatmap of how many observations each item has in each month, to show which
#items are present at each time point
agg_png("Outputs/ONSPriceQuotesTable.png", units="in", width=12, height=7, res=500)
fulldata %>%
  count(ITEM_DESC, date, name="count") %>%
  filter(!ITEM_DESC %in% c("INDIAN TAKEAWAY")) %>%
  ggplot(aes(x=date, y=fct_rev(ITEM_DESC), fill=count))+
  geom_tile()+
  scale_fill_paletteer_c("viridis::mako", direction=-1, limits=c(0,NA), name="Number of\nobservations")+
  scale_x_date(name="Month")+
  scale_y_discrete(name="Product description")+
  theme_custom()
dev.off()
#GRAPHS
#Regional variation by individual item: stratum-weighted mean price per item,
#month and region, smoothed with a centred 6-month rolling average
regmeans <- fulldata %>%
  group_by(ITEM_DESC, date, region) %>%
  summarise(meanprice=weighted.mean(PRICE, STRATUM_WEIGHT), .groups="drop") %>%
  group_by(ITEM_DESC, region) %>%
  mutate(roll_meanprice=roll_mean(meanprice, n=6, align="center", fill=NA)) %>%
  ungroup()
#Regional price of a standard pint of lager.
#Fixed: subtitle previously said "5-month" but regmeans uses a 6-month rolling
#window; also closed the unterminated <span> in the title markdown.
agg_tiff("Outputs/ONSPriceQuotesLagerxReg.tiff", units="in", width=8, height=6, res=500)
ggplot(regmeans %>% filter(ITEM_DESC=="LAGER - PINT 3.4-4.2%"),
       aes(x=date, y=roll_meanprice, colour=region))+
  geom_line(show.legend=FALSE)+
  #Direct-label each region at the last non-missing smoothed value
  geom_text_repel(data=regmeans %>% filter(ITEM_DESC=="LAGER - PINT 3.4-4.2%" &
                                             date==max(date[!is.na(roll_meanprice)])),
                  aes(x=max(date[!is.na(roll_meanprice)]), y=roll_meanprice, label = region,
                      colour=region),
                  family = "Lato", direction = "y", xlim = c(as.Date("2023-02-01"), as.Date("2025-06-01")),
                  hjust = 0, segment.color = NA, box.padding = .1, show.legend = FALSE, size=rel(2.5))+
  #x-axis extended past the data to leave room for the labels
  scale_x_date(name="", limits=c(as.Date("2010-01-01"), as.Date("2025-06-01")),
               labels=c("", "2010", "2015", "2020", "", ""))+
  scale_y_continuous(name="Mean price observed", labels=dollar_format(prefix="£"), limits=c(0,NA))+
  scale_colour_manual(values=c("#0e3724", "#008c5c", "#33b983", "#0050ae", "#9b54f3", "#bf8cfc",
                               "#551153", "#ac0000", "#c85b00", "#f98517", "grey10", "grey70"))+
  theme_custom()+
  theme(plot.title=element_markdown())+
  labs(title="The price of a pint is highest in <span style='color:#33b983;'>London</span> and <span style='color:#bf8cfc;'>Northern Ireland</span>",
       subtitle="Average observed price for a pint of lager (3.4-4.2% ABV), rolling 6-month average\n",
       caption="Data from ONS price quotes | Plot by @VictimOfMaths")
dev.off()
#Regional price of a pint of bitter.
#Fixed: subtitle said "5-month" vs the 6-month rolling window actually used;
#closed the unterminated <span> in the title; "Grey10"/"Grey70" lower-cased
#for consistency with the lager plot (R colour names are case-insensitive).
agg_tiff("Outputs/ONSPriceQuotesBitterxReg.tiff", units="in", width=8, height=6, res=500)
ggplot(regmeans %>% filter(ITEM_DESC=="DRAUGHT BITTER (PER PINT)"),
       aes(x=date, y=roll_meanprice, colour=region))+
  geom_line(show.legend=FALSE)+
  #Direct-label each region at the last non-missing smoothed value
  geom_text_repel(data=regmeans %>% filter(ITEM_DESC=="DRAUGHT BITTER (PER PINT)" &
                                             date==max(date[!is.na(roll_meanprice)])),
                  aes(x=max(date[!is.na(roll_meanprice)]), y=roll_meanprice, label = region,
                      colour=region),
                  family = "Lato", direction = "y", xlim = c(as.Date("2023-02-01"), as.Date("2025-06-01")),
                  hjust = 0, segment.color = NA, box.padding = .1, show.legend = FALSE, size=rel(2.5))+
  scale_x_date(name="", limits=c(as.Date("2010-01-01"), as.Date("2025-06-01")),
               labels=c("", "2010", "2015", "2020", "", ""))+
  scale_y_continuous(name="Mean price observed", labels=dollar_format(prefix="£"), limits=c(0,NA))+
  scale_colour_manual(values=c("#0e3724", "#008c5c", "#33b983", "#0050ae", "#9b54f3", "#bf8cfc",
                               "#551153", "#ac0000", "#c85b00", "#f98517", "grey10", "grey70"))+
  theme_custom()+
  theme(plot.title=element_markdown())+
  labs(title="The price of a pint is highest in <span style='color:#33b983;'>London</span> and <span style='color:#bf8cfc;'>Northern Ireland</span>",
       subtitle="Average observed price for a pint of bitter, rolling 6-month average\n",
       caption="Data from ONS price quotes | Plot by @VictimOfMaths")
dev.off()
#Regional price of a pint of premium lager.
#Fixed: subtitle said "5-month" vs the 6-month rolling window actually used
#(and was missing a space in "lager(4.3"); closed the unterminated <span> in
#the title; colour name case made consistent with the lager plot.
agg_tiff("Outputs/ONSPriceQuotesPremLagerxReg.tiff", units="in", width=8, height=6, res=500)
ggplot(regmeans %>% filter(ITEM_DESC=="PREMIUM LAGER - PINT 4.3-7.5%"),
       aes(x=date, y=roll_meanprice, colour=region))+
  geom_line(show.legend=FALSE)+
  #Direct-label each region at the last non-missing smoothed value
  geom_text_repel(data=regmeans %>% filter(ITEM_DESC=="PREMIUM LAGER - PINT 4.3-7.5%" &
                                             date==max(date[!is.na(roll_meanprice)])),
                  aes(x=max(date[!is.na(roll_meanprice)]), y=roll_meanprice, label = region,
                      colour=region),
                  family = "Lato", direction = "y", xlim = c(as.Date("2023-02-01"), as.Date("2025-06-01")),
                  hjust = 0, segment.color = NA, box.padding = .1, show.legend = FALSE, size=rel(2.5))+
  scale_x_date(name="", limits=c(as.Date("2010-01-01"), as.Date("2025-06-01")),
               labels=c("", "2010", "2015", "2020", "", ""))+
  scale_y_continuous(name="Mean price observed", labels=dollar_format(prefix="£"), limits=c(0,NA))+
  scale_colour_manual(values=c("#0e3724", "#008c5c", "#33b983", "#0050ae", "#9b54f3", "#bf8cfc",
                               "#551153", "#ac0000", "#c85b00", "#f98517", "grey10", "grey70"))+
  theme_custom()+
  theme(plot.title=element_markdown())+
  labs(title="The price of a pint is highest in <span style='color:#33b983;'>London</span> and <span style='color:#bf8cfc;'>Northern Ireland</span>",
       subtitle="Average observed price for a pint of premium lager (4.3-7.5% ABV), rolling 6-month average\n",
       caption="Data from ONS price quotes | Plot by @VictimOfMaths")
dev.off()
#Calculate product means: stratum-weighted mean price per item and month,
#smoothed with a centred 6-month rolling average within each item series
prodmeans <- fulldata %>%
  group_by(ITEM_DESC, date) %>%
  summarise(meanprice=weighted.mean(PRICE, STRATUM_WEIGHT), .groups="drop") %>%
  group_by(ITEM_DESC) %>%
  mutate(roll_meanprice=roll_mean(meanprice, n=6, align="center", fill=NA)) %>%
  ungroup()
#Label positions for the off-trade wine plot: anchor each label at the
#series' value 3 months before the latest date
labels1 <- prodmeans %>%
  filter(ITEM_DESC %in% c("WHITE WINE- EUROPEAN 75CL",
                          "RED WINE- EUROPEAN 75CL",
                          "ROSE WINE-75CL BOTTLE",
                          "SPARKLING WINE 75CL MIN 11%ABV")) %>%
  filter(date==max(date)-months(3)) %>%
  #NOTE(review): labels are matched to rows positionally, relying on the
  #alphabetical ITEM_DESC ordering produced by the grouped summarise upstream -
  #re-verify if prodmeans' construction changes
  mutate(label=c("Red wine", "Rose wine", "Sparkling wine", "White wine"))
#Off-trade wine prices over time, direct-labelled per series
agg_tiff("Outputs/ONSPriceQuotesOffWine.tiff", units="in", width=8, height=6, res=500)
winedata <- prodmeans %>%
  filter(ITEM_DESC %in% c("WHITE WINE- EUROPEAN 75CL",
                          "RED WINE- EUROPEAN 75CL",
                          "ROSE WINE-75CL BOTTLE",
                          "SPARKLING WINE 75CL MIN 11%ABV"))
ggplot(winedata, aes(x=date, y=roll_meanprice, colour=ITEM_DESC))+
  geom_line(show.legend=FALSE)+
  geom_text_repel(data=labels1, aes(x=date, y=roll_meanprice, label=label, colour=ITEM_DESC),
                  family = "Lato", direction = "y", xlim = c(as.Date("2023-02-10"), NA),
                  hjust = 0, segment.color = NA, box.padding = .3, show.legend = FALSE)+
  #Extend the x-axis beyond the data to make room for the labels
  scale_x_date(name="", limits=c(NA_Date_, as.Date("2025-06-01")),
               breaks=as.Date(c("2010-01-01", "2015-01-01", "2020-01-01")),
               labels=c("2010", "2015", "2020"))+
  scale_y_continuous(name="Average price per bottle", labels=label_dollar(prefix="£"))+
  scale_colour_manual(values=c("#BE294C", "Pink", "#0FB2D3", "#EEEDC4"))+
  theme_custom()+
  labs(title="Wine prices have been rising recently",
       subtitle="Rolling 6-month average price for a 75cl bottle of wine based ONS' price quotes data",
       caption="Data from ONS | Plot from @VictimOfMaths")
dev.off()
#Label positions for the on-trade beer/cider plot, anchored 3 months before
#the latest date
labels2 <- prodmeans %>%
  filter(ITEM_DESC %in% c("CIDER 4.5%-5.5% ABV PINT/BOTTL", "DRAUGHT BITTER (PER PINT)",
                          "LAGER - PINT 3.4-4.2%", "PREMIUM LAGER - PINT 4.3-7.5%")) %>%
  filter(date==max(date)-months(3)) %>%
  #NOTE(review): labels matched positionally to the alphabetical ITEM_DESC
  #ordering from the grouped summarise upstream - verify if that changes
  mutate(label=c("Cider", "Bitter", "Lager", "Premium Lager"))
#On-trade beer/cider prices over time
agg_tiff("Outputs/ONSPriceQuotesOnBeer.tiff", units="in", width=8, height=6, res=500)
#Merge the old and new on-trade cider item codes into a single series
beerdata <- prodmeans %>%
  filter(ITEM_DESC %in% c("CIDER-PER PINT OR 500-568ML", "CIDER 4.5%-5.5% ABV PINT/BOTTL",
                          "DRAUGHT BITTER (PER PINT)",
                          "LAGER - PINT 3.4-4.2%", "PREMIUM LAGER - PINT 4.3-7.5%")) %>%
  mutate(ITEM_DESC=if_else(ITEM_DESC=="CIDER-PER PINT OR 500-568ML",
                           "CIDER 4.5%-5.5% ABV PINT/BOTTL", ITEM_DESC))
ggplot(beerdata, aes(x=date, y=roll_meanprice, colour=ITEM_DESC))+
  geom_line(show.legend=FALSE)+
  geom_text_repel(data=labels2, aes(x=date, y=roll_meanprice, label=label, colour=ITEM_DESC),
                  family = "Lato", direction = "y", xlim = c(as.Date("2022-07-10"), NA),
                  hjust = 0, segment.color = NA, box.padding = .3, show.legend = FALSE)+
  #Extend the x-axis beyond the data to make room for the labels
  scale_x_date(name="", limits=c(NA_Date_, as.Date("2024-06-01")),
               breaks=as.Date(c("2010-01-01", "2015-01-01", "2020-01-01")),
               labels=c("2010", "2015", "2020"))+
  scale_y_continuous(name="Average price per pint", labels=label_dollar(prefix="£"))+
  scale_colour_paletteer_d("awtools::mpalette")+
  theme_custom()+
  labs(title="The price of a pint has risen sharply",
       subtitle="Rolling 6-month average price for a pint of cider/beer bought for consumption on the premises",
       caption="Data from ONS Price Quotes | Plot from @VictimOfMaths")
dev.off()
#Label positions for the on-trade selected-items plot, anchored 3 months
#before the latest date
labels3 <- prodmeans %>%
  filter(ITEM_DESC %in% c("LAGER - PINT 3.4-4.2%",
                          "WINE, PER 175 - 250 ML SERVING",
                          "VODKA (PER NIP) SPECIFY ML")) %>%
  filter(date==max(date)-months(3)) %>%
  #NOTE(review): labels matched positionally to the alphabetical ITEM_DESC
  #ordering (lager, vodka, wine) from the grouped summarise upstream
  mutate(label=c("Pint of lager", "Shot of vodka", "Glass of wine"))
#Selected on-trade items (pint of lager, glass of wine, shot of vodka).
#Fixed: y-axis label said "Average price per bottle" (copy-pasted from the
#wine plot) but these items are per-serving measures.
agg_tiff("Outputs/ONSPriceQuotesOnSelect.tiff", units="in", width=8, height=6, res=500)
prodmeans %>%
  filter(ITEM_DESC %in% c("LAGER - PINT 3.4-4.2%",
                          "WINE, PER 175 - 250 ML SERVING",
                          "VODKA (PER NIP) SPECIFY ML")) %>%
  ggplot(aes(x=date, y=roll_meanprice, colour=ITEM_DESC))+
  geom_line(show.legend=FALSE)+
  geom_text_repel(data=labels3, aes(x=date, y=roll_meanprice, label=label, colour=ITEM_DESC),
                  family = "Lato", direction = "y", xlim = c(as.Date("2022-07-10"), NA),
                  hjust = 0, segment.color = NA, box.padding = .3, show.legend = FALSE)+
  scale_x_date(name="", limits=c(NA_Date_, as.Date("2024-06-01")),
               breaks=as.Date(c("2010-01-01", "2015-01-01", "2020-01-01")),
               labels=c("2010", "2015", "2020"))+
  scale_y_continuous(name="Average price per serving", labels=label_dollar(prefix="£"))+
  scale_colour_manual(values=c("Orange", "#0FB2D3", "#BE294C"))+
  theme_custom()+
  labs(title="The price of drinking in the pub has risen sharply",
       subtitle="Rolling 6-month average prices of selected items bought for consumption on the premises",
       caption="Data from ONS' Price Quotes | Plot from @VictimOfMaths")
dev.off()
#Label positions for the off-trade selected-items plot, anchored 3 months
#before the latest date
labels4 <- prodmeans %>%
  filter(ITEM_DESC %in% c("BITTER-4CANS-440-500ML", "LAGER 10 - 24 CANS (440-500ML)",
                          "RED WINE- EUROPEAN 75CL", "APPLE CIDER 4 CAN PK 440-500ML",
                          "VODKA-70 CL BOTTLE", "WHISKY-70 CL BOTTLE")) %>%
  filter(date==max(date)-months(3)) %>%
  #NOTE(review): labels matched positionally to the alphabetical ITEM_DESC
  #ordering (cider, bitter, lager, red wine, vodka, whisky) from the grouped
  #summarise upstream - verify if that changes
  mutate(label=c("4 cans of cider", "4 cans of bitter", "Slab of lager",
                 "Bottle of red wine", "Bottle of vodka", "Bottle of whisky"))
#Selected off-trade items.
#Fixed: the plot filtered WHITE WINE while labels4 positions and names
#"Bottle of red wine" (RED WINE- EUROPEAN 75CL) - the filter now matches
#labels4. Title/subtitle were copy-pasted from the on-trade plot ("in the
#pub" / "on the premises") and the y-axis said "per bottle" despite can
#packs; all corrected for the off-trade content.
agg_tiff("Outputs/ONSPriceQuotesOffSelect.tiff", units="in", width=8, height=6, res=500)
prodmeans %>%
  filter(ITEM_DESC %in% c("BITTER-4CANS-440-500ML", "LAGER 10 - 24 CANS (440-500ML)",
                          "RED WINE- EUROPEAN 75CL", "APPLE CIDER 4 CAN PK 440-500ML",
                          "VODKA-70 CL BOTTLE", "WHISKY-70 CL BOTTLE")) %>%
  ggplot(aes(x=date, y=roll_meanprice, colour=ITEM_DESC))+
  geom_line(show.legend=FALSE)+
  geom_text_repel(data=labels4, aes(x=date, y=roll_meanprice, label=label, colour=ITEM_DESC),
                  family = "Lato", direction = "y", xlim = c(as.Date("2022-07-10"), NA),
                  hjust = 0, segment.color = NA, box.padding = .3, show.legend = FALSE)+
  scale_x_date(name="", limits=c(NA_Date_, as.Date("2024-06-01")),
               breaks=as.Date(c("2010-01-01", "2015-01-01", "2020-01-01")),
               labels=c("2010", "2015", "2020"))+
  scale_y_continuous(name="Average price per product", labels=label_dollar(prefix="£"))+
  #scale_colour_manual(values=c("Orange", "#0FB2D3", "#BE294C"))+
  theme_custom()+
  labs(title="The price of drinking at home has also risen",
       subtitle="Rolling 6-month average prices of selected items bought for consumption off the premises",
       caption="Data from ONS' Price Quotes | Plot from @VictimOfMaths")
dev.off()
# Collapse the English regions into a single "England" category, keeping the
# devolved nations separate, for the country-level distribution plots below.
# NOTE(review): assumes `region` holds exactly these strings for the devolved
# nations — verify against the coding of fulldata$region.
pricedists <- fulldata %>%
  mutate(Country=case_when(region=="Scotland" ~ "Scotland",
                           region=="Wales" ~ "Wales",
                           region=="Northern Ireland" ~ "Northern Ireland",
                           TRUE ~ "England"))
# Ridgeline plot of the distribution of standard-strength lager pint prices in
# English pubs: one ridge per year, shaded by price along the x-axis.
agg_tiff("Outputs/ONSPriceQuotesPintRidgeplot.tiff", units="in", width=8, height=6, res=800)
english_pints <- pricedists %>%
  filter(ITEM_DESC=="LAGER - PINT 3.4-4.2%") %>%
  filter(Country=="England")
ggplot(english_pints, aes(x=PRICE, y=as.factor(year(date)), fill = after_stat(x)))+
  geom_density_ridges_gradient(rel_min_height = 0.01, show.legend=FALSE)+
  scale_y_discrete(name="")+
  scale_x_continuous(name="Price per pint", labels=label_dollar(prefix="£"))+
  scale_fill_paletteer_c("scico::lajolla")+
  theme_custom()+
  theme(panel.grid.major.x=element_line(colour="grey95"))+
  labs(title="There is much more variation in the price of a pint these days",
       subtitle="Distribution of prices of a pint of 3.4-4.2% lager in pubs in England\n",
       caption="Data from ONS Price Quotes | Plot by @VictimOfMaths")
dev.off()
# Overlaid ridgeline distributions of standard vs premium lager pint prices in
# English pubs, one pair of ridges per year.
agg_tiff("Outputs/ONSPriceQuotesPintStdPrmRidgeplot.tiff", units="in", width=8, height=7, res=800)
pricedists %>% filter(ITEM_DESC %in% c("LAGER - PINT 3.4-4.2%", "PREMIUM LAGER - PINT 4.3-7.5%") &
                        Country=="England") %>%
  ggplot(aes(x=PRICE, y=as.factor(year(date)), fill=ITEM_DESC))+
  geom_density_ridges(rel_min_height = 0.01, alpha=0.6)+
  scale_x_continuous(name="Price per pint", labels=label_dollar(prefix="£"))+
  scale_y_discrete(name="")+
  # NOTE(review): these legend labels are never displayed because the legend is
  # switched off below; the coloured markdown title identifies the series.
  scale_fill_paletteer_d("colorblindr::OkabeIto", name="",
                         labels=c("Standard lager", "Premium lager"))+
  theme_custom()+
  theme(legend.position="none")+
  # element_markdown() renders the HTML colour spans in the title
  theme(panel.grid.major.x=element_line(colour="grey95"),
        plot.title=element_markdown())+
  labs(title="Prices have changed for both <span style='color:#E69F00'>standard</span> and <span style='color:#56B4E9'>premium</span> lager",
       subtitle="Distribution of prices of a pint of standard (3.4-4.2%) or premium (4.3-7.5%) lager in pubs in England\n",
       caption="Data from ONS Price Quotes | Plot by @VictimOfMaths")
dev.off()
# Ridgeline distributions of shop wine prices (all six 75cl wine items pooled)
# in England, one ridge per year, shaded by price.
agg_tiff("Outputs/ONSPriceQuotesWineRidgeplot.tiff", units="in", width=8, height=6, res=800)
pricedists %>% filter(ITEM_DESC %in% c("RED WINE- EUROPEAN 75CL",
                                       "RED WINE- NEW WORLD 75CL",
                                       "RED WINE 75CL BOTTLE",
                                       "ROSE WINE-75CL BOTTLE",
                                       "WHITE WINE- EUROPEAN 75CL",
                                       "WHITE WINE- NEW WORLD 75CL") &
                        Country=="England") %>%
  ggplot(aes(x=PRICE, y=as.factor(year(date)), fill = after_stat(x)))+
  geom_density_ridges_gradient(rel_min_height = 0.01, show.legend=FALSE)+
  # Note: observations outside the £2.50-£15 axis limits are dropped by the
  # scale and so are excluded from the density estimates.
  scale_x_continuous(name="Price per bottle", labels=label_dollar(prefix="£"),
                     limits=c(2.5,15), breaks=c(3,4,5,6,7,8,9,10,11,12,13,14,15))+
  scale_y_discrete(name="")+
  scale_fill_paletteer_c("pals::ocean.dense")+
  theme_custom()+
  theme(panel.grid.major.x=element_line(colour="grey95"))+
  labs(title="The £7 bottle of wine is the new £5 bottle",
       subtitle="Distribution of prices of a bottle of wine in shops in England\n",
       caption="Data from ONS Price Quotes | Plot by @VictimOfMaths")
dev.off()
# Regional mean wine prices: rolling 6-month average of the mean price paid
# per bottle across the six wine items, one line per English region, with
# direct labels at the right-hand end of each line instead of a legend.
# FIX: legend.position was "off", which is not a valid ggplot2 value — the
# documented way to suppress the legend is "none".
agg_tiff("Outputs/ONSPriceQuotesWineRegMeans.tiff", units="in", width=7, height=5, res=800)
pricedists %>% filter(ITEM_DESC %in% c("RED WINE- EUROPEAN 75CL",
                                       "RED WINE- NEW WORLD 75CL",
                                       "RED WINE 75CL BOTTLE",
                                       "ROSE WINE-75CL BOTTLE",
                                       "WHITE WINE- EUROPEAN 75CL",
                                       "WHITE WINE- NEW WORLD 75CL") &
                        Country=="England") %>%
  group_by(region, date) %>%
  summarise(Meanprice=mean(PRICE), .groups="drop") %>%
  group_by(region) %>%
  mutate(rollmean=roll_mean(Meanprice, n=6, align="center", fill=NA)) %>%
  ggplot(aes(x=date, y=rollmean, colour=region))+
  geom_line()+
  # Label each line at the last date with a non-missing rolling mean
  geom_text_repel(data=. %>% filter(date==max(date[!is.na(rollmean)])),
                  aes(x=max(date[!is.na(rollmean)]), y=rollmean, label = region,
                      colour=region),
                  family = "Lato", direction = "y", xlim = c(as.Date("2023-05-01"), as.Date("2027-01-01")),
                  hjust = 0, segment.color = NA, box.padding = .1, show.legend = FALSE, size=rel(2.5))+
  # x-axis extends past the data so the region labels have room
  scale_x_date(limits=as.Date(c("2010-01-01", "2027-01-01")), name="",
               breaks=as.Date(c("2010-01-01", "2014-01-01", "2018-01-01",
                                "2022-01-01")),
               labels=c("2010", "2014", "2018", "2022"))+
  scale_y_continuous(name="Mean price per bottle", labels=label_dollar(prefix="£"))+
  scale_colour_paletteer_d("LaCroixColoR::paired")+
  theme_custom()+
  theme(panel.grid.major.y=element_line(colour="grey95"),
        legend.position="none")+
  labs(title="London has expensive taste in wine, East Anglia, not so much",
       subtitle="Rolling 6-month average price paid for a bottle of wine in shops in English regions",
       caption="Data from ONS Price Quotes | Plot by @VictimOfMaths")
dev.off()
|
9e5d030bb0532992fa71c3d12d8d4eec1733457b
|
ed0dd01c900f6927c9095a1aaaac33999554dcb5
|
/openrouteservice.R
|
3eeed88a669756e23e90b50e42b68c80674c9bee
|
[] |
no_license
|
andrewargeros/NiceRide-2020
|
338b6548af33c07724ba84d45aa18858e52197d3
|
757abd59008de8ee252bea17a9cdabfbf823dd45
|
refs/heads/master
| 2022-11-22T23:55:43.070524
| 2020-07-19T18:46:20
| 2020-07-19T18:46:20
| 280,929,060
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,365
|
r
|
openrouteservice.R
|
# Route plotting with the openrouteservice API for selected NiceRide stations.
library(openrouteservice)

# SECURITY(review): this API key is hard-coded and committed to the repo — it
# should be revoked and supplied via an environment variable instead,
# e.g. ors_api_key(Sys.getenv("ORS_API_KEY")).
ors_api_key("5b3ce3597851110001cf6248749acdcca12b47c4bcc96d81689a334a")

# Trip counts per start/end station pair (not used below; kept for reference)
trips = paste0(bike$start_station, "-", bike$end_station)
trips = as.data.frame(table(trips))

# Look up coordinates for the four stations of interest from member.bike
lat1 = member.bike[which(member.bike$Station=="Lake Calhoun Center"),"Latitude"]
lon1 = member.bike[which(member.bike$Station=="Lake Calhoun Center"),"Longitude"]
lat2 = member.bike[which(member.bike$Station=="Englewood Ave & N Asbury Street"), "Latitude"]
lon2 = member.bike[which(member.bike$Station=="Englewood Ave & N Asbury Street"), "Longitude"]
lat3 = member.bike[which(member.bike$Station=="IDS Center"), "Latitude"]
lon3 = member.bike[which(member.bike$Station=="IDS Center"), "Longitude"]
lat4 = member.bike[which(member.bike$Station=="Lake Nokomis"), "Latitude"]
lon4 = member.bike[which(member.bike$Station=="Lake Nokomis"), "Longitude"]

# openrouteservice expects coordinates as c(longitude, latitude)
coords = list(c(lon1, lat1), c(lon2, lat2))
coords2 = list(c(lon3, lat3), c(lon4, lat4))
directs = ors_directions(coords)
directs2 = ors_directions(coords2)

# Reverse-geocode the first station. ors_geocode()'s first argument is a
# free-text query, so coordinates must be passed via `location = c(lon, lat)`.
# FIX: the original called ors_geocode(lon1, lat1), which sent the longitude
# as the text query and the bare latitude as the location.
pt1 = ors_geocode(location = c(lon1, lat1))

ors_profile("roadbike")  # returns the API name for the profile; result unused here

library(leaflet)
leaflet() %>%
  addProviderTiles(providers$Stamen.Toner) %>%
  addGeoJSON(directs, fill = F, color = "#EE2737", opacity = 1) %>%
  addGeoJSON(directs2, fill = F, color = "#EE2737", opacity = 1) %>%
  addGeoJSON(pt1, color = "#EE2737") %>%
  fitBBox(directs$bbox)

names(providers)  # list available basemap providers (exploratory)
|
5826f4fa4ca430ce848d659a0f67d211ab2d4a93
|
fcb33243b791f03fb81ce65e3cd1f0a71cc41b4e
|
/TimeSeries_BikeSharing/TimeSeries_BikeSharing.R
|
5f322da2370c9296807630c0c4d34e4858bc2ecc
|
[] |
no_license
|
yesika1/Time_Series_Forecasting
|
d840a72640813937ab5f07c4651dca61b0e50cd5
|
66896f0dabb06714783773c1f48404b717793be6
|
refs/heads/master
| 2021-04-12T02:59:44.500120
| 2018-03-19T23:24:34
| 2018-03-19T23:24:34
| 125,931,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,847
|
r
|
TimeSeries_BikeSharing.R
|
# ---------------------------------------------------------------------------------------- #
#
# TimeSeries_BikeSharing.R
# Author: Yesika Contreras
#
# Exponential smoothing & ARIMA time series models to predict the demand of bike rentals.
#
# Bike Sharing Dataset
# Dataset on the daily number of bicycles checkouts from a bike sharing service,
# being part of the UCI Machine Learning Repository
#
# Process following Ruslana Dalinina, with subtle additions such as:
# - Test for additive or multiplicative seasonality
# - Splitting in train and test data before the generation of the models.
# - Adding an Exponential smoothing approach for forecasting
# Ruslana Dalinina approach available: https://www.datascience.com/blog/introduction-to-forecasting-with-arima-in-r-learn-data-science-tutorials
#
# R scripts generated
# Sys.Date() # "2018-03-16"
#
# ---------------------------------------------------------------------------------------- #
#============================================================
# Libraries
#============================================================
library(ggplot2)
library(forecast)
library(tseries)
library(TStools)
#if (!require("devtools")){install.packages("devtools")}
#devtools::install_github("trnnick/TStools")
#============================================================
# Importing data
#============================================================
# NOTE(review): assumes day.csv sits in the current working directory
#setwd('path')
daily_df <- read.csv('day.csv', header=TRUE, stringsAsFactors=FALSE)
head(daily_df)
str(daily_df) #731 obs. of 19 variables
summary(daily_df)
#============================================================
# Cleaning data
#============================================================
# Transforming dteday (date stored as text) to Date type
daily_df$dteday = as.Date(daily_df$dteday)
# Looking for any outliers, volatility, or irregularities.
sum(is.na(daily_df)) #0 -> no missing values
# Plotting rental bikes per day (demand)
#-------------------------------
count_plot <- ggplot(daily_df, aes(dteday, cnt)) +
  geom_line() +
  scale_x_date('month') +
  ggtitle('Daily Bike Checkouts') +
  ylab('Rental Bicycle count ')
# cnt: count of total rental bikes including both casual and registered
# Possible outliers: in some cases, the number of bicycles checked out dropped
# below 100 one day and rose to over 4,000 the next day.
#============================================================
# Transforming data to time series (ts)
#============================================================
# Creating a time series object with ts() and passing it to tsclean().
# tsclean() identifies and replaces outliers using series smoothing and decomposition.
# tsclean() also imputes missing values in the series if there are any.
count_ts <- ts(daily_df$cnt)
# ts(df$col, frequency=dataFreq, start=startEntry) #create a time series
# plot (tsclean(count_ts), main= 'Cleaned Rental Bicycle Count' )
daily_df$clean_cnt <- tsclean(count_ts)
clean_count_plot <- ggplot(daily_df, aes(dteday, clean_cnt)) +
  geom_line(color= 'cornflowerblue') +
  ggtitle('Daily Bike Checkouts II') +
  ylab('Cleaned Rental Bicycle Count')
# After cleaning:
# Even after removing outliers, the daily data is still pretty volatile. Visually,
# we could draw a line through the series tracing its bigger troughs and peaks while
# smoothing out noisy fluctuations (a moving average).
# The wider the window of the moving average, the smoother the original series becomes.
# Smoothing the series with a weekly or monthly moving average (ma)
# -------------------------------
# Taking a weekly or monthly moving average smooths the series into something
# more stable and therefore predictable.
# High-order (level) aggregation: a simple option is to convert a TS to a higher
# order, e.g. quarters instead of months, years instead of quarters.
# Centered moving averages:
# The main idea is to average values of the TS within k periods of t,
# where m is the moving average order, m = 2k + 1.
# With order m we lose k observations at each end of the series, using the
# observations in between for averaging.
# Odd orders are preferred because an even order is an average of two
# asymmetric moving averages.
daily_df$cnt_ma7 <- ma(daily_df$clean_cnt, order=7) # weekly MA on the cleaned count; the centred window generates 6 NAs (3 at each end)
daily_df$cnt_ma30 <- ma(daily_df$clean_cnt, order=30)
ma_plot <- ggplot() +
  geom_line(data = daily_df, aes(x = dteday, y = clean_cnt, color = "Daily Counts")) +
  geom_line(data = daily_df, aes(x = dteday, y = cnt_ma7, color = "Weekly Moving Average")) +
  geom_line(data = daily_df, aes(x = dteday, y = cnt_ma30, color = "Monthly Moving Average")) +
  ylab('Bicycle Count')
# Selecting a time framework for modelling:
# The larger the window parameter, the more we capture the global trend, but
# miss local trends.
# We will model the smoothed series of the weekly moving average. ts = cnt_ma7
#============================================================
# Decomposing Data
#============================================================
# Decomposition extracts the components of seasonality, trend, and cycle.
# Deconstructing the series can help to understand its behaviour
# before building a forecasting model.
# Seasonality: fluctuations in the data related to calendar cycles.
# For example, more people might be riding bikes in the summer, and fewer during colder months.
# Usually, seasonality is a fixed value; for instance, quarter (4) or month of the year.
# Trend component: the overall pattern of the series — is the number of bikes rented increasing or decreasing over time?
# Cycle component: decreasing or increasing patterns that are not seasonal.
# Usually, trend and cycle components are grouped together & estimated using moving averages.
# Residual or error: the part of the series that can't be explained by/attributed to the components.
# ARIMA models can be fitted to both seasonal and non-seasonal data.
# Seasonal ARIMA requires a more complicated specification of the model structure.
# We will explore how to de-seasonalise the series and use a non-seasonal ARIMA model.
# Decomposing the time series using stl().
# -------------------------------
# stl() calculates the seasonal component of the series using smoothing, and
# adjusts the original series by subtracting seasonality.
# stl() by default assumes an additive model structure.
# Use allow.multiplicative.trend=TRUE to incorporate a multiplicative model.
# Using the smoothed ts (cnt_ma7), we generate a new ts with frequency 30 before decomposing
count_ma7 <- ts(na.omit(daily_df$cnt_ma7), frequency=30) #Set frequency to 12 for monthly, 4 for quarterly data, & 30 for daily.
decomp = stl(count_ma7, s.window="periodic") # s.window is the seasonal window (speed of seasonal changes); "periodic" means constant seasonal effects.
plot(decomp, main='Decomposed time series')
# Calculate the de-seasonalised series using seasadj()
# ------------------------------
deseasonal_cnt <- seasadj(decomp) # de-seasonalised time series; this is the set we will model.
# Testing stationarity with the augmented Dickey-Fuller test
# -------------------------------
# Fitting an ARIMA model requires the series to be stationary.
# A series is stationary when its mean, variance, and autocovariance are time invariant.
# Modelling stable series with consistent properties involves less uncertainty.
# The augmented Dickey-Fuller (ADF) test for stationarity:
# the null hypothesis assumes that the series is non-stationary (presence of a trend component).
adf.test(count_ma7, alternative = "stationary") # on the ts with freq 30, before differencing
# Dickey-Fuller = -0.2557, Lag order = 8, p-value = 0.99
# alternative hypothesis: stationary
# Since p-value = 0.99 > 0.05 we cannot reject the null hypothesis (H0).
# A formal ADF test does not reject the null hypothesis of non-stationarity.
# The bicycle data is non-stationary: the average number of bike checkouts changes over time.
# We can observe the trend in the decomposed plot.
# Trend means a consistent slope direction.
# (1) Additive trend:
#     Y(t) = Trend(t) + Season(t) + Error(t)
# (2) Multiplicative trend:
#     Y(t) = Trend(t) * Season(t) * Error(t)
# (3) Stationarity: no observed trend, only errors.
# Testing multiplicative or additive seasonality
# ---------------------------------
test_multiplicative <- TStools::mseastest(count_ma7)
test_multiplicative$is.multiplicative # FALSE
# Then the ts seasonality is additive
#============================================================
# Autocorrelation Plots and Choosing Model Order
#============================================================
# Usually, non-stationary series can be corrected by a simple transformation such as differencing.
# Differencing the series can help in removing its trend or cycles.
# Order of differencing: the number of differences performed is the d component of ARIMA.
# Autocorrelation plots (ACF) are a visual tool for determining whether a series is stationary.
# If the series is correlated with its lags then, generally, there are some trend or seasonal components.
# ACF plots display correlation between a series and its lags.
# ACF plots can help in determining the order of the MA(q) model.
# Partial autocorrelation plots (PACF) display correlation between a variable and its lags
# that is not explained by previous lags. PACF plots are useful when determining the order of the AR(p) model.
par(mfrow=c(1,2))
Acf(count_ma7, main='Autocorrelation plot (ACF)')
Pacf(count_ma7, main='Partial autocorrelation plot (PACF)')
dev.off()
# The ACF plot shows that the ts has significant autocorrelations with many lags.
# The PACF plot only shows a spike at lags 1 and 7.
# So the ACF autocorrelations could be due to carry-over correlation from the first or early lags.
# Differencing until reaching stationarity
# -------------------------------
# Start differencing the de-seasonalised ts with order d = 1 and re-evaluate whether more differencing is needed.
count_differenced1 = diff(deseasonal_cnt, differences = 1)
# Plotting the differenced ts
plot(count_differenced1, main = 'Differencing the De-seasonalized Time Series ')
# Evaluating stationarity
adf.test(count_differenced1, alternative = "stationary")
# Augmented Dickey-Fuller Test, data: count_differenced1
# Dickey-Fuller = -9.9255, Lag order = 8, p-value = 0.01
# alternative hypothesis: stationary
# Since p-value = 0.01 < 0.05 we can reject the null hypothesis (H0).
# A formal ADF test rejects the null hypothesis of non-stationarity on the differenced data.
# The transformed data now looks stationary; there is no visible strong trend.
# Thus, differencing of order 1 is sufficient and should be included in the model.
# Choosing p & q for the model:
# -------------------------------
# Spikes at particular lags of the differenced series can guide the choice of p or q for our model.
par(mfrow=c(1,2))
Acf(count_differenced1, main='ACF for Differenced Time Series')
Pacf(count_differenced1, main='PACF for Differenced Time Series')
dev.off()
# From ACF: there are significant autocorrelations at lags 1, 2 and 7, 8.
# From PACF: partial correlation plots show a significant spike at lags 1 and 7.
# Conclusion: test models with AR or MA components of order 1, 2, or 7.
# A spike at lag 7 might suggest a seasonal pattern, perhaps day of the week.
#============================================================
# Fitting an ARIMA model
#============================================================
# arima(): manually specify the order parameters of the model.
# auto.arima(): automatically generates a set of optimal (p, d, q) that optimises model fit criteria.
# auto.arima() also allows the user to specify a maximum order for (p, d, q), which is 5 by default.
# Splitting data into training and testing sets using the window() function
# ----------------
# We hold out as the testing set the same window length that we want to predict (~1 month)
test_Arima <- window(ts(deseasonal_cnt), start=700)
train_Arima <- window(ts(deseasonal_cnt), start=1, end=699)
#plot(train_Arima)
model_Arima_train <- auto.arima(train_Arima, seasonal=TRUE) # a seasonal component is present in the original data
# Output when the script was written:
# Series: train_ts
# ARIMA(2,1,0)
# Coefficients:
#          ar1     ar2
#       0.2557  0.0870
#s.e.  0.0377  0.0378
#sigma^2 estimated as 25631: log likelihood=-4532.36
#AIC=9070.72 AICc=9070.76 BIC=9084.37
# ARIMA (p, d, q) components = p: AR order, d: degree of differencing, q: MA order.
# A good way to think about it is (AR, I, MA):
# Y = (Auto-Regressive Parameters) + (Moving Average Parameters), where the 'I'
# part of the model (the Integrative part) is the differencing.
# ARIMA(2,1,0): the model uses an autoregressive term of second lag,
# incorporates differencing of degree 1, and a moving average model of order 0.
# The AR coefficient tells us that the next value in the series is taken as a
# dampened previous value, scaled by that factor.
# (AIC): Akaike information criterion
# (BIC): Bayesian information criterion
#============================================================
# Model Evaluation
#============================================================
# Examining ACF and PACF plots of the model residuals.
# If the model order parameters and structure are correctly specified, we expect no significant autocorrelations.
tsdisplay(residuals(model_Arima_train), lag.max=31, main='ARIMA (2,1,0) Model Residuals')
# There is a clear pattern in the ACF/PACF and residual plots repeating at lag 7.
# This suggests that our model may be better off with a different specification, such as p = 7 or q = 7.
model_Arima2 <- arima(train_Arima, order=c(2,1,7))
# Output when the script was written:
# Call: arima(x = train_ts, order = c(2, 1, 7))
# Coefficients:
#          ar1     ar2     ma1     ma2     ma3     ma4     ma5     ma6      ma7
#       0.2564  0.0188  0.1298  0.1302  0.1017  0.1028  0.1083  0.1293  -0.8545
#s.e.  0.0459  0.0435  0.0282  0.0271  0.0271  0.0277  0.0256  0.0260   0.0307
#sigma^2 estimated as 13942: log likelihood = -4330.96, aic = 8681.93
tsdisplay(residuals(model_Arima2), lag.max=31, main='ARIMA (2,1,7) Model Residuals')
# The model uses an autoregressive term of second lag,
# incorporates differencing of degree 1, and a moving average model of order 7.
# Result: there are no significant autocorrelations in the residuals. The model is correctly specified.
accuracy(model_Arima2)
#                    ME     RMSE      MAE      MPE     MAPE      MASE          ACF1
# Training set 4.768835 117.9898 87.72782 0.125315 2.191441 0.7040459 -0.0004262534
#============================================================
# Forecasting
#============================================================
# We can specify a forecast horizon of h periods ahead for predictions.
# Predict the values for the next month, h = 25.
forecast_test <- forecast(model_Arima2,h=25)
plot(forecast_test, main= ' Forecasting using Arima Model')
lines(ts(deseasonal_cnt))
# Forecast estimates come with confidence bounds: 80% confidence limits shaded
# in darker blue, and 95% in lighter blue.
# Longer-term forecasts usually have more uncertainty, reflected in the wider
# confidence bounds as time progresses.
# The pattern in the confidence bounds may signal the need for a more stable
# model (lower expected error associated with the estimates).
# Future work:
# -------------
# Generating an exponential smoothing model would help make the model more
# accurate, using weighted combinations of seasonality, trend, and historical
# values to make predictions.
# On the other hand, daily bicycle demand probably depends on other factors
# (weather, holidays, time of day) that could be addressed with ARMAX or
# dynamic regression models.
#============================================================
# Exponential Smoothing Approach - Holt-Winters
#============================================================
# Triple exponential smoothing (Holt-Winters) addresses level, trend and season.
# (In the ets() naming convention the three components are labelled:
#   A = additive, M = multiplicative, N = none, Z = automatic selection.)

# Hold out the last 26 smoothed observations as a test set, mirroring the
# ARIMA split above.
test_Hw <- ts(count_ma7[c(700:725)],frequency = 30)
train_Hw <- ts(count_ma7[-c(700:725)],frequency = 30)

# Fit an additive Holt-Winters model on the training series
model_HoltWinters <- HoltWinters(train_Hw,seasonal="additive")
# Output when the script was written:
# Holt-Winters exponential smoothing with trend and additive seasonal component.
# Call: HoltWinters(x = train_Hw, seasonal = "additive")
# Smoothing parameters:
#   alpha: 0.9431843
#   beta : 0.002067271
#   gamma: 1

# Residual diagnostics: autocorrelation structure and normality
tsdisplay(residuals(model_HoltWinters), lag.max=31, main='Holt-winters Model Residuals')
qqnorm(model_HoltWinters$residuals, main="Q-Q plot. Residuals model Holt-Winters")
qqline(model_HoltWinters$residuals, col= "orange")
accuracy(model_HoltWinters)
# ME RMSE MAE MPE MAPE MASE ACF1
# Training set 3.103773 164.8497 118.5047 0.1005193 2.937568 0.9541444 0.04234134

# Forecast the next 25 periods.
# BUG FIX: the original called forecast(model_HoltWinters2, ...), but no object
# of that name exists anywhere in the script — the fitted model above is
# model_HoltWinters, so the script stopped with "object not found".
forecast_HoltWinters <- forecast(model_HoltWinters, h=25)
plot(forecast_HoltWinters, main= ' Forecasting using HoltWinters')
lines(ts(count_ma7))

#============================================================
# Comparing models
#============================================================
# Stack the ARIMA and Holt-Winters forecast plots for a visual comparison
par(mfrow=c(2,1))
plot(forecast_test, main= ' Forecasting using Arima Model')
lines(ts(deseasonal_cnt))
plot(forecast_HoltWinters, main= ' Forecasting using HoltWinters Model')
lines((count_ma7))
dev.off()
|
d98a2a57a63872068c0a705af9fbc2f883b7e5bb
|
1204e461bc3a2756b2205c01fcb3e847e9a262c8
|
/R/interpolate.R
|
a59e2af902adffe38d6353a5895c0fe7976d302c
|
[
"MIT"
] |
permissive
|
mrc-ide/cinterpolate
|
936fb3db19bad15c995d92ea04748dcfc7ed2343
|
c48d493512eb7460f3dfec062c8b74de5917a7ff
|
refs/heads/master
| 2021-10-26T08:56:09.544985
| 2019-04-11T17:07:08
| 2019-04-11T17:07:08
| 105,753,847
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,519
|
r
|
interpolate.R
|
##' Create an interpolation function, using the same implementation as
##' would be available from C code. This will give very similar
##' answers to R's \code{\link{splinefun}} function. This is not the
##' primary intended use of the package, which is mostly designed for
##' use from C/C++. This function primarily exists for testing this
##' package, and for exploring the interface without writing C code.
##'
##' @title Create an interpolation function
##'
##' @param x Independent variable
##'
##' @param y Dependent variable
##'
##' @param type Character string indicating the interpolation type
##' ("constant", "linear" or "spline").
##'
##' @param scalar Return a function that will compute only a single
##' \code{x} input at a time. This is more similar to the C
##' interface and is equivalent to dropping the first dimension of
##' the output.
##'
##' @param fail_on_extrapolate Logical, indicating if extrapolation
##' should cause an failure (rather than an NA value)
##'
##' @return A function that can be used to interpolate the function(s)
##' defined by \code{x} and \code{y} to new values of \code{x}.
##'
##' @export
##' @useDynLib cinterpolate, .registration = TRUE
##'
##' @examples
##'
##' # Some data to interpolate
##' x <- seq(0, 8, length.out = 20)
##' y <- sin(x)
##' xx <- seq(min(x), max(x), length.out = 500)
##'
##' # Spline interpolation
##' f <- cinterpolate::interpolation_function(x, y, "spline")
##' plot(f(xx) ~ xx, type = "l")
##' lines(sin(xx) ~ xx, col = "grey", lty = 2)
##' points(y ~ x, col = "red", pch = 19, cex = 0.5)
##'
##' # Linear interpolation
##' f <- cinterpolate::interpolation_function(x, y, "linear")
##' plot(f(xx) ~ xx, type = "l")
##' lines(sin(xx) ~ xx, col = "grey", lty = 2)
##' points(y ~ x, col = "red", pch = 19, cex = 0.5)
##'
##' # Piecewise constant interpolation
##' f <- cinterpolate::interpolation_function(x, y, "constant")
##' plot(f(xx) ~ xx, type = "s")
##' lines(sin(xx) ~ xx, col = "grey", lty = 2)
##' points(y ~ x, col = "red", pch = 19, cex = 0.5)
##'
##' # Multiple series can be interpolated at once by providing a
##' # matrix for 'y'. Each series is interpolated independently but
##' # simultaneously.
##' y <- cbind(sin(x), cos(x))
##' f <- cinterpolate::interpolation_function(x, y, "spline")
##' matplot(xx, f(xx), type = "l", lty = 1)
interpolation_function <- function(x, y, type, scalar = FALSE,
                                   fail_on_extrapolate = FALSE) {
  ## Validate 'type' early; the C code receives it as-is.
  if (!is.character(type) || length(type) != 1L || is.na(type)) {
    stop("Expected 'type' to be a scalar character")
  }
  ## If 'y' has dimensions we are interpolating several series at once.
  ## Arrays of rank > 2 are flattened to a matrix with length(x) rows before
  ## being handed to the C code; 'dim' keeps the output dimensions
  ## (everything but the first, which indexes 'x') so results can be
  ## reshaped on the way out.
  ## (A redundant duplicate assignment of 'is_array' was removed here.)
  dim <- dim(y)
  is_array <- !is.null(dim)
  if (is_array) {
    if (length(dim) > 2) {
      y <- matrix(y, length(x))
    }
    dim <- dim[-1L]
  }
  ## Build the C-side interpolation object; inputs are coerced to double.
  ptr <- .Call(Cinterpolate_prepare, as_numeric(x), as_numeric(y), type,
               fail_on_extrapolate)
  if (scalar) {
    ## Scalar interface: accept a single 'x' and drop the leading dimension,
    ## mirroring how the C interface is used.
    ret <- function(x) {
      if (length(x) != 1L) {
        stop("Expected a single 'x' value")
      }
      y <- .Call(Cinterpolate_eval, ptr, as_numeric(x))
      if (is_array) {
        dim(y) <- dim
      }
      y
    }
  } else {
    ## Vectorised interface: first dimension of the result indexes 'x'.
    ret <- function(x) {
      y <- .Call(Cinterpolate_eval, ptr, as_numeric(x))
      if (is_array) {
        dim(y) <- c(length(x), dim)
      }
      y
    }
  }
  ## Expose the C-side interpolation object's details for debugging/testing.
  attr(ret, "info") <- function() .Call(Cinterpolate_data_info, ptr)
  ret
}
## Coerce integer storage to double; everything else passes through unchanged.
as_numeric <- function(a) {
  if (is.integer(a)) {
    storage.mode(a) <- "double"
  }
  a
}
|
a975795d1c2f6bfa34a48d165bbd28e496529bea
|
0810a1fd288ac8616ccf5e8ad40b23899e2a87a4
|
/R/MAPE.R
|
9e2c41fdb4ce8c35c65da6ba5a863dca88165195
|
[] |
no_license
|
limingyuan2322/MathQueryFunction
|
78a716244e93f12f3e0e0362cd6372d8fe85f30a
|
8087e6b773fed9de82e77c804a0ebf4d4bece7f6
|
refs/heads/main
| 2023-01-29T19:51:20.252874
| 2020-12-13T09:43:36
| 2020-12-13T09:43:36
| 321,027,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 752
|
r
|
MAPE.R
|
#' Mean Absolute Percentage Error (MAPE)
#'
#' @description Measures the mean absolute error as a percentage of the actual
#' values: MAPE = 100 / n * sum(|(Real - Forecast) / Real|).
#' The function has no default values; supply both vectors yourself.
#'
#' @param Real A numeric vector of actual values. Must be the same length as
#' \code{Forecast} and contain no zeros (each error is divided by \code{Real}).
#'
#' @param Forecast A numeric vector of forecast values. Must be the same
#' length as \code{Real}.
#'
#' @return The MAPE as a single numeric percentage, printed to the console and
#' returned (invisibly, via \code{print}).
#' @export
#'
#' @examples Mean_Absolute_Percentage_Error(c(100, 200), c(110, 180)) # 10
Mean_Absolute_Percentage_Error <- function (Real, Forecast) {
  # Guard against silent recycling of unequal-length inputs
  if (length(Real) != length(Forecast)) {
    stop("'Real' and 'Forecast' must have the same length")
  }
  # Vectorised absolute percentage error for each observation
  absPercent <- abs((Real - Forecast) / Real)
  # BUG FIX: the original computed tot / (100 * length(tot)), i.e. it divided
  # the summed errors by 100 (length of a scalar is 1) instead of averaging
  # over n observations and scaling to a percentage.
  MAPE <- mean(absPercent) * 100
  print(MAPE)
}
|
af4381bfe5b1d83f5f07b9b8446caa4173707117
|
2fb2c0914b9bdd759620a66131b1875378f89b06
|
/man/get_all_crude.Rd
|
3ae86160630ff188d47b4307f8718cb97216c095
|
[
"MIT"
] |
permissive
|
owenjonesuob/BANEScarparking
|
5e4a82ee3152d480c295a9b56b5e761c495be4a1
|
f9b92520730423655bab24bca4cb8d8d3911d6ba
|
refs/heads/master
| 2021-01-12T01:18:21.813611
| 2020-04-13T14:41:45
| 2020-04-13T14:49:06
| 78,367,955
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 834
|
rd
|
get_all_crude.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_records.R
\name{get_all_crude}
\alias{get_all_crude}
\title{Download all raw records from Bath: Hacked datastore}
\usage{
get_all_crude()
}
\value{
The full dataset of car parking records
}
\description{
Reads the entire CSV file found on the Bath: Hacked datastore
(\url{http://bit.ly/2i3Y1uF}). The data frame created can subsequently be
updated using \code{\link{refuel_crude}}.\cr
\emph{\strong{Warning:} The file is very large! This may take a while to
run, depending on your internet connection.}
}
\examples{
\dontrun{
raw_data <- get_all_crude()
str(raw_data)
}
}
\seealso{
\itemize{
\item \code{\link{refuel_crude}} for updating raw records
\item \code{\link{refuel}} for updating data already processed with
\code{\link{refine}}
}
}
|
59f23a0266da557af3aea78d36f93f5d171137f7
|
df77a46155382712d5b0c5d391f658fd6341e311
|
/concepts/great_learning/demo1.R
|
172d8e421b5b7cec12c8a3911f3500b7bf8d9ae7
|
[] |
no_license
|
Candy128x/r-concept
|
fb9fa16d54f2aef229dc8c900f9e145ed5cc7e0e
|
5402d177f8079b6591716aa04933e1bc9ea68ec4
|
refs/heads/master
| 2022-12-31T14:31:09.144400
| 2020-10-16T10:32:35
| 2020-10-16T10:32:35
| 302,610,103
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,219
|
r
|
demo1.R
|
# Variables: temporarily store data; reassignment replaces the previous value
a = "mobile"
print(a)
a = "pen"
print(a)
a = "makeup"
print(a)
cat("\n--------------------------------------------------")
cat("\n Data Structure")
cat("\n\n Vector Demo:")
# Vector: homogeneous (all elements share one type), single-dimension structure
vec1 <- c(1,2,3)
cat("\n vec1 =", vec1)
vec2 <- c("a","b","c")
cat("\n vec2 =", vec2)
cat("\n vec2 -> class =", class(vec2) )
vec3 <- c(T,F,T)
cat("\n vec3 =", vec3)
mixbag1 <- c(1,T,2,F)
cat("\n mixbag1 =", mixbag1)
cat("\n mixbag1 -> class =", class(mixbag1) ) # Logical elements coerce to numeric, because numeric has higher precedence than logical
mixbag1b <- c(1,T,2,F,3)
cat("\n mixbag1b =", mixbag1b)
mixbag1c <- c(1,T,2,F,T,F)
cat("\n mixbag1c =", mixbag1c)
mixbag2 <- c(1,"a",2,"b")
cat("\n mixbag2 =", mixbag2) # Everything coerces to character: character has higher precedence than numeric
cat("\n mixbag2 -> class =", class(mixbag2) )
cat("\n access 1st element -> mixbag2[1] =", mixbag2[1])
cat("\n access 3rd element -> mixbag2[3] =", mixbag2[3])
cat("\n access from 1 to 3 element -> mixbag2[1:3] =", mixbag2[1:3])
mixbag3 <- c(1,"a",T)
cat("\n mixbag3 =", mixbag3)
cat("\n mixbag3 -> class =", class(mixbag3) ) # Precedence (low to high): logical < numeric < character
cat("\n\n List Demo:")
# List: single-dimensional heterogeneous structure (elements may differ in type)
l1 <- list(1,"a",TRUE)
cat("\n l1 =")
# paste("\n l1 =", l1)
print(l1) # Each element is kept in its own component
print( class(l1[[1]]) )
print( class(l1[[2]]) )
print( class(l1[[3]]) )
l2 <- list(c(1,2,3), c("a","b","c"), c(T,F,T), l1)
cat("\n List of Vector =")
print(l2)
print(l2[[4]][2])
cat("\n\n Matrix Demo:")
# Matrix: two-dimensional homogeneous structure; filled column-wise by default
m1 <- matrix(c(1,2,3,4,5,6))
cat("\n m1 =")
print(m1)
m1b <- matrix(c(1,2,3,4,5,6), nrow=2, ncol=3)
cat("\n m1b =")
print(m1b)
# byrow=T fills the matrix row-wise instead of column-wise
m1c <- matrix(c(1,2,3,4,5,6), nrow=2, ncol=3, byrow=T)
cat("\n m1c =")
print(m1c)
cat("\n Access element: m1c[2,1] =", m1c[2,1])
cat("\n Access element: m1c[1,3] =", m1c[1,3])
cat("\n\n Array Demo:")
# Array: multi-dimensional, homogeneous data structure
vec1 = c(1,2,3,4,5,6,7,8,9,10,11,12)
cat("\n vec1 =", vec1)
cat("\n arr1 =")
arr1 = array(vec1, dim=c(2,3,2))
print(arr1)
cat("\n Access element =", arr1[1,2,2])
cat("\n Access element =", arr1[2,3,1])
cat("\n\n Factor Demo:")
# Factor: stores each element with a level; this feature helps when building
# ML models. Levels are ordered alphabetically (ascending) by default.
fruits <- c("apple", "banana", "chikoo")
cat("\n fruits =", fruits)
# NOTE(review): cat() on a factor appears to show underlying codes rather than
# labels — compare with the print() call below, which shows labels and levels.
cat("\n fruits -> factor =", as.factor(fruits))
cat("\n fruits -> factor =\n")
print(as.factor(fruits))
names <- c('chintu', 'fazal', 'afzal')
cat("\n names =", names)
cat("\n names -> factor =",as.factor(names))
cat("\n names -> factor =\n")
print(as.factor(names))
cat("\n\n Data Frame Demo:")
# - Two dimentional, Hetrogeneous data structure
# - Presents data in Table, CSV form
fruits_name <- c("apple", "banana", "chikoo")
fruits_cost <- c(10,20,30)
df1 <- data.frame(fruit_name=fruits_name, fruit_cost=fruits_cost)
cat("\n df1 =\n")
print(df1)
cat("\n Inndividual column extract =\n")
print(df1$fruit_name)
cat("\n--------------------------------------------------")
cat("\n In Build Functions")
cat("\n\n iris inbuilt data frame: \n")
print(iris)
cat("\n View: \n")
View(iris) # To view data frame
cat("\n str: \n")
str(iris) # To see structure
cat("\n head: \n")
head(iris) # Show top 6 records
head(iris, 3) # Show top 3 records
head(iris, 12) # Show top 12 records
cat("\n tail: \n")
tail(iris) # Show bottom, last 6 records. Opposite of head
tail(iris, 10) # Show last 10 records
cat("\n table: \n")
# table(iris)
table(iris$Species)
cat("\n min: \n")
min(iris$Sepal.Length) # Get minimum value from column Sepal
cat("\n min: \n")
max(iris$Sepal.Length) # Get maximum value from column Sepal
cat("\n mean: \n")
mean(iris$Sepal.Length) # Get average value from column Sepal
cat("\n range: \n")
range(iris$Sepal.Length) # Get min & max value from column Sepal
cat("\n--------------------------------------------------")
cat("\n Disicision Making Statement")
writeLines("\n")
# Multi Line Copmments.
if(FALSE) {
'
Execute by: Rscript demo1.R
OutPut:
ashish@ashish-Vostro-3478:.../tutorialspoint$ Rscript demo1.R
[1] "mobile"
[1] "pen"
[1] "makeup"
--------------------------------------------------
Data Structure
Vector Demo:
vec1 = 1 2 3
vec2 = a b c
vec2 -> class = character
vec3 = TRUE FALSE TRUE
mixbag1 = 1 1 2 0
mixbag1 -> class = numeric
mixbag1b = 1 1 2 0 3
mixbag1c = 1 1 2 0 1 0
mixbag2 = 1 a 2 b
mixbag2 -> class = character
access 1st element -> mixbag2[1] = 1
access 3rd element -> mixbag2[3] = 2
access from 1 to 3 element -> mixbag2[1:3] = 1 a 2
mixbag3 = 1 a TRUE
mixbag3 -> class = character
List Demo:
l1 =[[1]]
[1] 1
[[2]]
[1] "a"
[[3]]
[1] TRUE
[1] "numeric"
[1] "character"
[1] "logical"
List of Vector =[[1]]
[1] 1 2 3
[[2]]
[1] "a" "b" "c"
[[3]]
[1] TRUE FALSE TRUE
[[4]]
[[4]][[1]]
[1] 1
[[4]][[2]]
[1] "a"
[[4]][[3]]
[1] TRUE
[[1]]
[1] "a"
Matrix Demo:
m1 = [,1]
[1,] 1
[2,] 2
[3,] 3
[4,] 4
[5,] 5
[6,] 6
m1b = [,1] [,2] [,3]
[1,] 1 3 5
[2,] 2 4 6
m1c = [,1] [,2] [,3]
[1,] 1 2 3
[2,] 4 5 6
Access element: m1c[2,1] = 4
Access element: m1c[1,3] = 3
Array Demo:
vec1 = 1 2 3 4 5 6 7 8 9 10 11 12
arr1 =, , 1
[,1] [,2] [,3]
[1,] 1 3 5
[2,] 2 4 6
, , 2
[,1] [,2] [,3]
[1,] 7 9 11
[2,] 8 10 12
Access element = 9
Access element = 6
Factor Demo:
fruits = apple banana chikoo
fruits -> factor = 1 2 3
fruits -> factor =
[1] apple banana chikoo
Levels: apple banana chikoo
names = chintu fazal afzal
names -> factor = 2 3 1
names -> factor =
[1] chintu fazal afzal
Levels: afzal chintu fazal
Data Frame Demo:
df1 =
fruit_name fruit_cost
1 apple 10
2 banana 20
3 chikoo 30
Inndividual column extract =
[1] "apple" "banana" "chikoo"
--------------------------------------------------
In Build Functions
iris inbuilt data frame:
Sepal.Length Sepal.Width Petal.Length Petal.Width Species
1 5.1 3.5 1.4 0.2 setosa
2 4.9 3.0 1.4 0.2 setosa
3 4.7 3.2 1.3 0.2 setosa
4 4.6 3.1 1.5 0.2 setosa
5 5.0 3.6 1.4 0.2 setosa
6 5.4 3.9 1.7 0.4 setosa
7 4.6 3.4 1.4 0.3 setosa
8 5.0 3.4 1.5 0.2 setosa
9 4.4 2.9 1.4 0.2 setosa
10 4.9 3.1 1.5 0.1 setosa
11 5.4 3.7 1.5 0.2 setosa
12 4.8 3.4 1.6 0.2 setosa
13 4.8 3.0 1.4 0.1 setosa
14 4.3 3.0 1.1 0.1 setosa
15 5.8 4.0 1.2 0.2 setosa
16 5.7 4.4 1.5 0.4 setosa
17 5.4 3.9 1.3 0.4 setosa
18 5.1 3.5 1.4 0.3 setosa
19 5.7 3.8 1.7 0.3 setosa
20 5.1 3.8 1.5 0.3 setosa
21 5.4 3.4 1.7 0.2 setosa
22 5.1 3.7 1.5 0.4 setosa
23 4.6 3.6 1.0 0.2 setosa
24 5.1 3.3 1.7 0.5 setosa
25 4.8 3.4 1.9 0.2 setosa
26 5.0 3.0 1.6 0.2 setosa
27 5.0 3.4 1.6 0.4 setosa
28 5.2 3.5 1.5 0.2 setosa
29 5.2 3.4 1.4 0.2 setosa
30 4.7 3.2 1.6 0.2 setosa
31 4.8 3.1 1.6 0.2 setosa
32 5.4 3.4 1.5 0.4 setosa
33 5.2 4.1 1.5 0.1 setosa
34 5.5 4.2 1.4 0.2 setosa
35 4.9 3.1 1.5 0.2 setosa
36 5.0 3.2 1.2 0.2 setosa
37 5.5 3.5 1.3 0.2 setosa
38 4.9 3.6 1.4 0.1 setosa
39 4.4 3.0 1.3 0.2 setosa
40 5.1 3.4 1.5 0.2 setosa
41 5.0 3.5 1.3 0.3 setosa
42 4.5 2.3 1.3 0.3 setosa
43 4.4 3.2 1.3 0.2 setosa
44 5.0 3.5 1.6 0.6 setosa
45 5.1 3.8 1.9 0.4 setosa
46 4.8 3.0 1.4 0.3 setosa
47 5.1 3.8 1.6 0.2 setosa
48 4.6 3.2 1.4 0.2 setosa
49 5.3 3.7 1.5 0.2 setosa
50 5.0 3.3 1.4 0.2 setosa
51 7.0 3.2 4.7 1.4 versicolor
52 6.4 3.2 4.5 1.5 versicolor
53 6.9 3.1 4.9 1.5 versicolor
54 5.5 2.3 4.0 1.3 versicolor
55 6.5 2.8 4.6 1.5 versicolor
56 5.7 2.8 4.5 1.3 versicolor
57 6.3 3.3 4.7 1.6 versicolor
58 4.9 2.4 3.3 1.0 versicolor
59 6.6 2.9 4.6 1.3 versicolor
60 5.2 2.7 3.9 1.4 versicolor
61 5.0 2.0 3.5 1.0 versicolor
62 5.9 3.0 4.2 1.5 versicolor
63 6.0 2.2 4.0 1.0 versicolor
64 6.1 2.9 4.7 1.4 versicolor
65 5.6 2.9 3.6 1.3 versicolor
66 6.7 3.1 4.4 1.4 versicolor
67 5.6 3.0 4.5 1.5 versicolor
68 5.8 2.7 4.1 1.0 versicolor
69 6.2 2.2 4.5 1.5 versicolor
70 5.6 2.5 3.9 1.1 versicolor
71 5.9 3.2 4.8 1.8 versicolor
72 6.1 2.8 4.0 1.3 versicolor
73 6.3 2.5 4.9 1.5 versicolor
74 6.1 2.8 4.7 1.2 versicolor
75 6.4 2.9 4.3 1.3 versicolor
76 6.6 3.0 4.4 1.4 versicolor
77 6.8 2.8 4.8 1.4 versicolor
78 6.7 3.0 5.0 1.7 versicolor
79 6.0 2.9 4.5 1.5 versicolor
80 5.7 2.6 3.5 1.0 versicolor
81 5.5 2.4 3.8 1.1 versicolor
82 5.5 2.4 3.7 1.0 versicolor
83 5.8 2.7 3.9 1.2 versicolor
84 6.0 2.7 5.1 1.6 versicolor
85 5.4 3.0 4.5 1.5 versicolor
86 6.0 3.4 4.5 1.6 versicolor
87 6.7 3.1 4.7 1.5 versicolor
88 6.3 2.3 4.4 1.3 versicolor
89 5.6 3.0 4.1 1.3 versicolor
90 5.5 2.5 4.0 1.3 versicolor
91 5.5 2.6 4.4 1.2 versicolor
92 6.1 3.0 4.6 1.4 versicolor
93 5.8 2.6 4.0 1.2 versicolor
94 5.0 2.3 3.3 1.0 versicolor
95 5.6 2.7 4.2 1.3 versicolor
96 5.7 3.0 4.2 1.2 versicolor
97 5.7 2.9 4.2 1.3 versicolor
98 6.2 2.9 4.3 1.3 versicolor
99 5.1 2.5 3.0 1.1 versicolor
100 5.7 2.8 4.1 1.3 versicolor
101 6.3 3.3 6.0 2.5 virginica
102 5.8 2.7 5.1 1.9 virginica
103 7.1 3.0 5.9 2.1 virginica
104 6.3 2.9 5.6 1.8 virginica
105 6.5 3.0 5.8 2.2 virginica
106 7.6 3.0 6.6 2.1 virginica
107 4.9 2.5 4.5 1.7 virginica
108 7.3 2.9 6.3 1.8 virginica
109 6.7 2.5 5.8 1.8 virginica
110 7.2 3.6 6.1 2.5 virginica
111 6.5 3.2 5.1 2.0 virginica
112 6.4 2.7 5.3 1.9 virginica
113 6.8 3.0 5.5 2.1 virginica
114 5.7 2.5 5.0 2.0 virginica
115 5.8 2.8 5.1 2.4 virginica
116 6.4 3.2 5.3 2.3 virginica
117 6.5 3.0 5.5 1.8 virginica
118 7.7 3.8 6.7 2.2 virginica
119 7.7 2.6 6.9 2.3 virginica
120 6.0 2.2 5.0 1.5 virginica
121 6.9 3.2 5.7 2.3 virginica
122 5.6 2.8 4.9 2.0 virginica
123 7.7 2.8 6.7 2.0 virginica
124 6.3 2.7 4.9 1.8 virginica
125 6.7 3.3 5.7 2.1 virginica
126 7.2 3.2 6.0 1.8 virginica
127 6.2 2.8 4.8 1.8 virginica
128 6.1 3.0 4.9 1.8 virginica
129 6.4 2.8 5.6 2.1 virginica
130 7.2 3.0 5.8 1.6 virginica
131 7.4 2.8 6.1 1.9 virginica
132 7.9 3.8 6.4 2.0 virginica
133 6.4 2.8 5.6 2.2 virginica
134 6.3 2.8 5.1 1.5 virginica
135 6.1 2.6 5.6 1.4 virginica
136 7.7 3.0 6.1 2.3 virginica
137 6.3 3.4 5.6 2.4 virginica
138 6.4 3.1 5.5 1.8 virginica
139 6.0 3.0 4.8 1.8 virginica
140 6.9 3.1 5.4 2.1 virginica
141 6.7 3.1 5.6 2.4 virginica
142 6.9 3.1 5.1 2.3 virginica
143 5.8 2.7 5.1 1.9 virginica
144 6.8 3.2 5.9 2.3 virginica
145 6.7 3.3 5.7 2.5 virginica
146 6.7 3.0 5.2 2.3 virginica
147 6.3 2.5 5.0 1.9 virginica
148 6.5 3.0 5.2 2.0 virginica
149 6.2 3.4 5.4 2.3 virginica
150 5.9 3.0 5.1 1.8 virginica
View:
str:
"data.frame": 150 obs. of 5 variables:
$ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
$ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
$ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
$ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
$ Species : Factor w/ 3 levels "setosa","versicolor",..: 1 1 1 1 1 1 1 1 1 1 ...
head:
Sepal.Length Sepal.Width Petal.Length Petal.Width Species
1 5.1 3.5 1.4 0.2 setosa
2 4.9 3.0 1.4 0.2 setosa
3 4.7 3.2 1.3 0.2 setosa
4 4.6 3.1 1.5 0.2 setosa
5 5.0 3.6 1.4 0.2 setosa
6 5.4 3.9 1.7 0.4 setosa
Sepal.Length Sepal.Width Petal.Length Petal.Width Species
1 5.1 3.5 1.4 0.2 setosa
2 4.9 3.0 1.4 0.2 setosa
3 4.7 3.2 1.3 0.2 setosa
Sepal.Length Sepal.Width Petal.Length Petal.Width Species
1 5.1 3.5 1.4 0.2 setosa
2 4.9 3.0 1.4 0.2 setosa
3 4.7 3.2 1.3 0.2 setosa
4 4.6 3.1 1.5 0.2 setosa
5 5.0 3.6 1.4 0.2 setosa
6 5.4 3.9 1.7 0.4 setosa
7 4.6 3.4 1.4 0.3 setosa
8 5.0 3.4 1.5 0.2 setosa
9 4.4 2.9 1.4 0.2 setosa
10 4.9 3.1 1.5 0.1 setosa
11 5.4 3.7 1.5 0.2 setosa
12 4.8 3.4 1.6 0.2 setosa
tail:
Sepal.Length Sepal.Width Petal.Length Petal.Width Species
145 6.7 3.3 5.7 2.5 virginica
146 6.7 3.0 5.2 2.3 virginica
147 6.3 2.5 5.0 1.9 virginica
148 6.5 3.0 5.2 2.0 virginica
149 6.2 3.4 5.4 2.3 virginica
150 5.9 3.0 5.1 1.8 virginica
Sepal.Length Sepal.Width Petal.Length Petal.Width Species
141 6.7 3.1 5.6 2.4 virginica
142 6.9 3.1 5.1 2.3 virginica
143 5.8 2.7 5.1 1.9 virginica
144 6.8 3.2 5.9 2.3 virginica
145 6.7 3.3 5.7 2.5 virginica
146 6.7 3.0 5.2 2.3 virginica
147 6.3 2.5 5.0 1.9 virginica
148 6.5 3.0 5.2 2.0 virginica
149 6.2 3.4 5.4 2.3 virginica
150 5.9 3.0 5.1 1.8 virginica
table:
setosa versicolor virginica
50 50 50
min:
[1] 4.3
min:
[1] 7.9
mean:
[1] 5.843333
range:
[1] 4.3 7.9
--------------------------------------------------
Disicision Making Statement
'
}
|
e9deb88e351954800d3277094aeb022f9821e785
|
284a248a59458fe52cedd46192626d228a6d0ff8
|
/Class 5 -Data Manipulation v2_short.R
|
2a348e011c07ba36fcb4d93098291d3a2a970c7d
|
[] |
no_license
|
shreesh12/MICA_AMMA
|
95ca29b8d84763a372896ee3c3c4ea7746086d18
|
499860854e53177e3e535889579fe4b8c17e2828
|
refs/heads/master
| 2021-01-23T09:26:27.344778
| 2017-09-07T22:42:01
| 2017-09-07T22:42:01
| 102,580,837
| 13
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,947
|
r
|
Class 5 -Data Manipulation v2_short.R
|
# ----------------------- Read data
# set up library
setwd("C:\\YYYYYY\\AMMA 2017\\Data\\data_2017\\data_2017")
# read data
prd_spend <-read.csv(file="prod_spend.csv",
head=T)
names(prd_spend)
str(prd_spend)
summary(prd_spend)
# ------------------------ Add column
# Scenario 1: Prod Group: Reward and Non-Reward
table(prd_spend$Prod_Code)
####------------------------------ Rename Column Name
# get existing column names
names(prd_spend)
colnames(prd_spend)
rownames(prd_spend)
# Change Name of a column using index
colnames(prd_spend)[5] <-"product.grouping"
colnames(prd_spend)
#### ------------------------------------ KEEP and DROP a column
# create a duplicate data frame
prd_spend1 <-prd_spend
names(prd_spend1)
# keep a list of columns
prd_spend1 <- prd_spend1[,c(1,2)]
prd_spend1 <- prd_spend1[,c("Balance","Spend_Value")]
names(prd_spend1)
# drop one column using index
prd_spend1 <-prd_spend
names(prd_spend1)
prd_spend1 <- prd_spend1[,-2] # we have to sure of the index
names(prd_spend1)
# drop column using variable name
prd_spend1 <- prd_spend1[,!names(prd_spend1) %in% c("Spend.Volume","Profit")]
names(prd_spend1)
# drop a list of columns
prd_spend1 <-prd_spend
names(prd_spend1)
prd_spend1 <- prd_spend1[,!names(prd_spend1) %in% c("sqr_spend","sqroot_spend", "exp_spend" )]
names(prd_spend1)
#### -------------------------- Change Type of a column
# Cut and Chanding Factor to Character
prd_spend$spend_level <-cut(prd_spend$Spend_Value,
breaks=c(min(prd_spend$Spend_Value),500,1000,2000,max(prd_spend$Spend_Value)),
labels =c("0-500","500-1000","1000-2000","2000-High"))
class(prd_spend$spend_level)
mode(prd_spend$spend_level)
table(prd_spend$spend_level)
prd_spend$prd_grp <- ifelse( prd_spend$Prod_Code =="FB" | prd_spend$Prod_Code == "QFF","Reward","Non Reward" )
table(prd_spend$prd_grp)
# using %in%
prd_spend$prd_grp <- ifelse( prd_spend$Prod_Code %in% c("FB","QFF"),"Reward","Non Reward" )
table(prd_spend$prd_grp)
## -------------------Row or Observation Changes ------------
# create index
hundreth <- seq(from=2,to=nrow(prd_spend),by=100)
head(hundreth)
# Scenario 1
prd_spend.even <-prd_spend[hundreth,]
nrow(prd_spend.even)
## ---
names(prd_spend)
summary(prd_spend$Balance)# scenario 2
names(prd_spend)
# Select customers which have over median spend
spend_over_median <- prd_spend[prd_spend$Balance>4940,]
# Two condition - & element wise comparison and && overall comparison
prd_select <-prd_spend[prd_spend$Balance <100 & prd_spend$Spend_Value>200,]
# Use which function
prd_select <-prd_spend[which(prd_spend$Balance <100 & prd_spend$Spend_Value>200),]
# Scenario 3: randomly select 1000 rows
sample_index <-sample(1:nrow(prd_spend ), 1000,
replace=FALSE)
prd_spend.sample <- prd_spend[sample_index,]
prd_random <- prd_spend [sample(1:nrow(prd_spend ), 1000,
replace=FALSE),]
summary(prd_spend)
summary(prd_random)
both.row.col <-prd_spend[1:10,c(2,5)]
## Select both observations and variables
prd_var_obs <- subset(prd_spend,
prd_spend$Balance <100 & prd_spend$Spend_Value>200,
select=c("Balance","Spend_Value"))
# Find - way to drop variables using subset function
### Sort or order
order <-order(prd_spend$Spend_Value)
prd_spend[875,]
prd_spend$Spend_Value[875]
max(prd_spend$Spend_Value)
min(prd_spend$Spend_Value)
prd_spend.ordered <- prd_spend[order(prd_spend$Spend_Value, decreasing =T),]
### Descening order
prd_spend.ordered <- prd_spend[order(prd_spend$Spend_Value,decreasing =T),]
prd_spend.ordered <- prd_spend[order(prd_spend$Spend_Value,decreasing =F),]
### Descening order with multiple columns
prd_spend.ordered <- prd_spend[order(prd_spend$Prod_Code,prd_spend$Spend_Value,decreasing =T),]
### Multiple Columns with different ordar
prd_spend.ordered1 <- prd_spend[order(-prd_spend$Spend_Value,prd_spend$Prod_Code),]
### arrange function from plyr
library(plyr)
names(prd_spend)
prd_spend.ordered2 <- arrange(prd_spend,
desc(Prod_Code),
Spend_Value)
### Aggregation
install.packages("sqldf")
library(sqldf)
names(prd_spend)
#Average spend by product
avg_prd <- sqldf('select Prod_Code,
avg(Spend_Value) as avg_spend_val,
sum(Spend_Value) as sm_spend_val
from prd_spend
group by Prod_Code')
#aggregate function
?aggregate
avg_prd1 <- aggregate(prd_spend$Spend_Value,
by=list(prd_spend$Prod_Code),
data = prd_spend,
mean)
# r average two columns
prd_spend$bal_grp <- cut(prd_spend$Balance, breaks=c(-15000,0,2000,5000,6000,8000,85000))
table(prd_spend$bal_grp)
avg_prd2 <- aggregate(prd_spend$Spend_Value,
by =list(prd_spend$Prod_Code,prd_spend$bal_grp),
data = prd_spend,
mean)
avg_prd2 <- aggregate(Spend_Value~Prod_Code+bal_grp,
data = prd_spend,
mean)
#Average spend by product
avg_prd3 <- sqldf('select Prod_Code,
bal_grp,
avg(Spend_Value) as avg_spend_val,
sum(Spend_Value) as sm_spend_val
from prd_spend
group by Prod_Code, bal_grp')
# Month and Year Sales
sales <-read.csv(file="weekly.sales.csv",
stringsAsFactors =F,
head=T)
names(sales)
class(sales$Date)
head(sales$Date)
sales$Date <- as.Date(sales$Date, format="%m/%d/%Y")
sales$month <-format(sales$Date,"%b")
sales$year <- format(sales$Date,"%Y")
#Sales by month and year
sales.month.year <- sqldf('select year,
month,
sum(Sales) as sales
from sales
group by year,
month')
table(sales.month.year$year)
# Reference
#http://dni-institute.in/blogs/r-quiz-data-frame-manipulations/
#http://dni-institute.in/blogs/data-frame-manipulations-in-r/
# ----------- Assignment 4---------------------------------
# Analysis of student performance in Math by downloading https://archive.ics.uci.edu/ml/datasets/student+performance datasets
# Q(1) Answer following questions -
# a) What is average Grades for Male and Female students
# b) Which combination of Guardian and Sudent Gender has highest Grades for G1, G2 and G3
# Q(2) Students Absences (variable:absences) can be broken into 4/5 groups each group has same % students
# Find average Grades for different level of absences
|
6ee629284b01f6380ea1fc383fdaa4251b77cc63
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RPEnsemble/R/RPModel.R
|
fe1f4d0dba5f2ef42981056b5203134c9afe214b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,494
|
r
|
RPModel.R
|
## Generate one simulated two-class data set for one of the four simulation
## models used in the RPEnsemble experiments.
##
## Arguments:
##   Model.No - which simulation model to draw from (1-4).
##   n        - total sample size.
##   p        - dimension (models 2 and 3 require p > 5; model 4 requires p = 50).
##   Pi       - prior probability of class 1 (class 2 gets 1 - Pi).
## Returns a list with x (the n x p data matrix, class-1 rows first) and
## y (the vector of class labels, 1 or 2).
##
## NOTE(review): mvrnorm() comes from MASS, which must be attached by the
## caller; model 4 additionally loads a matrix R from a file "R.RData"
## expected in the current working directory - confirm both at call sites.
## If Model.No is not in 1..4, X and Y are never assigned and the final
## return() errors.
RPModel <-
function(Model.No # which simulation model to draw from (1-4)
, n # total sample size
, p # dimension
, Pi = 1/2 # prior probability of class 1
)
{
if (Model.No == 1)
{
# Class sizes: a single multinomial draw splitting n by the priors.
Y1 <- rmultinom(1,n,c(Pi,1-Pi))
Y <- c(rep(1,Y1[1,1]),rep(2,Y1[2,1]))
mu <- rep(1/8,p)
# Class 1: i.i.d. standard Laplace coordinates, generated by the
# inverse-CDF transform of U(0,1): log(2U) for U < 1/2, -log(2-2U) otherwise.
U <- runif(Y1[1,1]*p)
X1 <- matrix(log(2*U)*(U < 1/2) - log(2-2*U) * (U >= 1/2),Y1[1,1],p)
# Class 2: N(mu, I_p).
X2 <- mvrnorm(Y1[2,1],mu,diag(p))
X <- rbind(X1,X2)
}
if (Model.No == 2)
{
if (p <= 5) stop("model 2 requires p > 5")
Y1 <- rmultinom(1,n,c(Pi,1-Pi))
Y <- c(rep(1,Y1[1,1]),rep(2,Y1[2,1]))
# Class 2 mean: 2 on the first five coordinates, 0 elsewhere.
mu <- c(rep(2,5),rep(0,p-5))
# Dividing a Gaussian draw by sqrt(chisq_df / df) yields a multivariate t:
# 1 df for class 1, 2 df for class 2.
U1 <- rchisq(Y1[1,1],1)
U2 <- rchisq(Y1[2,1],2)
Sigma1 <- diag(p)
# Class 2 scale matrix: equicorrelated (0.5 off-diagonal) on the first five
# coordinates, identity on the rest.
Sigma2 <- 0.5*diag(p)+0.5*c(rep(1,5),rep(0,p-5))%*%t(c(rep(1,5),rep(0,p-5))) + 0.5*diag(c(rep(0,5),rep(1,p-5)))
X1 <- mvrnorm(Y1[1,1],rep(0,p),Sigma1)/sqrt(U1/1)
# t() twice so the per-row division by sqrt(U2/2) recycles correctly
# before the mean shift is added.
X2 <- t(mu + t(mvrnorm(Y1[2,1],rep(0,p),Sigma2)/sqrt(U2/2)))
X <- rbind(X1,X2)
}
if (Model.No == 3)
{
if (p <= 5) stop("model 3 requires p > 5")
Y1 <- rmultinom(1,n,c(Pi,1-Pi))
Y <- c(rep(1,Y1[1,1]),rep(2,Y1[2,1]))
# Class 1 is itself split 50/50 into two Gaussian components.
Y11 <- rmultinom(1,Y1[1,1],c(1/2,1/2))
mu <- c(rep(1,5), rep(0,p-5))
Sigma <- diag(p)
# NOTE(review): both mixture components below are centred at +mu/2, which
# makes the Y11 split a no-op; confirm against the paper whether the second
# component was intended to be centred at -mu/2.
X1 <- rbind(t(matrix(mu/2,p,Y11[1,1])),t(matrix(mu/2,p,Y11[2,1]))) + mvrnorm(Y1[1,1],rep(0,p),Sigma)
# Class 2: heavy-tailed (standard Cauchy) on the first five coordinates,
# standard normal on the remaining p - 5.
X2 <- cbind(matrix(rcauchy(Y1[2,1]*5),Y1[2,1],5), matrix(rnorm(Y1[2,1]*(p-5),0,1),Y1[2,1],p-5))
X <- rbind(X1,X2)
}
if (Model.No == 4)
{
if (p != 50) stop("model 4 requires p = 50")
# Placeholder so the name exists before load() overwrites it; R.RData is
# expected to supply a 50 x 50 matrix R used to rotate means/covariances.
R <- NULL
load("R.RData")
Y1 <- rmultinom(1,n,c(Pi,1-Pi))
Y <- c(rep(1,Y1[1,1]),rep(2,Y1[2,1]))
mu <- c(rep(1,3), rep(0,p-3))
# Block covariance structures: equicorrelated over the first 3 coordinates
# and (separately) over the last p - 3; class 2 gets a larger leading block.
Sigma1 <- 0.5*diag(c(rep(1,3),rep(0,p-3)))+0.5*c(rep(1,3),rep(0,p-3))%*%t(c(rep(1,3),rep(0,p-3))) + 0.5*diag(c(rep(0,3),rep(1,p-3)))+0.5*c(rep(0,3),rep(1,p-3))%*%t(c(rep(0,3),rep(1,p-3)))
Sigma2 <- 1.5*diag(c(rep(1,3),rep(0,p-3)))+0.5*c(rep(1,3),rep(0,p-3))%*%t(c(rep(1,3),rep(0,p-3))) + 0.5*diag(c(rep(0,3),rep(1,p-3)))+0.5*c(rep(0,3),rep(1,p-3))%*%t(c(rep(0,3),rep(1,p-3)))
# Both classes are expressed in the rotated coordinate system R.
X1 <- mvrnorm(Y1[1,1],R%*%rep(0,p),R%*%Sigma1%*%t(R))
X2 <- mvrnorm(Y1[2,1],R%*%mu,R%*%Sigma2%*%t(R))
X <- rbind(X1,X2)
}
return(list(x=X,y=Y))
}
|
9a6b5977e11db296b066d2aa0bb7762108d7e934
|
0e0e94e315aa1d408aa37bb2396a927d15dfe669
|
/Practica2/Pregunta5.R
|
baac0b9f0781c08b2a103c7af6eace09150b6a1c
|
[] |
no_license
|
DanielHCh/CursoR-2018
|
ef9905c9bc9a6c8f9fc8fcc6e6f09dfded0b7a38
|
97acba0797f1f8598e745344cb303ad6fca6b5c0
|
refs/heads/master
| 2020-03-08T01:35:19.840135
| 2018-06-25T04:30:43
| 2018-06-25T04:30:43
| 127,834,765
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 804
|
r
|
Pregunta5.R
|
# Author: Daniel Alfredo Hidalgo Chavez
# Student code: 20132097F

# --- Item a: factorial of `minum`, computed iteratively ---
minum <- 12
fact <- 1
if (minum == 0 || minum == 1) {
  # 0! and 1! are both 1, so `fact` already holds the answer.
  fact
} else {
  # Multiply fact by minum, minum - 1, ..., 2 (minum counts down to 1).
  while (minum >= 2) {
    fact <- fact * minum
    minum <- minum - 1
  }
  fact
}

# --- Item b: truncate the string just before its second "e" or "E" ---
unacadena <- "R fever"
letras <- strsplit(unacadena, "")[[1]]
# Positions of every lower- or upper-case "e" in the string.
posiciones_e <- which(letras == "e" | letras == "E")
resultado <- if (length(posiciones_e) >= 2) {
  # Keep everything up to (but not including) the second match.
  substr(unacadena, 1, posiciones_e[2] - 1)
} else {
  # Fewer than two matches: the string is returned unchanged.
  unacadena
}
resultado
|
5b4b09fafba8a1291784c27814b7173ae6df9039
|
febc2989044387bfbdd2606b04682716df7e1340
|
/R/teste_nao_parametrico.R
|
70c5c0d0af67a974dea0e35463a690213465e628
|
[] |
no_license
|
Rayanne-matos/Trabalho-final-da-disciplina-de-R
|
30f61774f8d4f58a7a18f7d0a6b7711bdfbf90d9
|
ec511610dc62bfc2b7381ab69e2f30e1b60ff953
|
refs/heads/master
| 2021-04-18T17:10:23.156621
| 2020-03-23T22:39:05
| 2020-03-23T22:39:05
| 249,565,580
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,188
|
r
|
teste_nao_parametrico.R
|
# Shapiro-Wilk normality check followed by a Kruskal-Wallis test, run
# separately per site and per year. Sites: "FM" = Fazenda Murici,
# "CM" = Campus Machado. The non-parametric Kruskal-Wallis test is used
# because the responses were found to be non-normal (see Shapiro output).
# (Refactored: the subset/shapiro/kruskal sequence was previously
# copy-pasted four times.)

# Run both tests for one site.
#   dados: data frame with columns x (site code), y (response), Slopes.
#   site:  site code to subset on ("FM" or "CM").
# Prints both test results and invisibly returns the site subset so the
# caller can keep it for later use.
testa_sitio <- function(dados, site) {
  sub_sitio <- subset(dados, x == site)
  # Shapiro-Wilk: H0 = the response is normally distributed.
  print(shapiro.test(sub_sitio$y))
  # Kruskal-Wallis: compare y across Slopes levels without assuming normality.
  print(kruskal.test(data = sub_sitio, y ~ Slopes))
  invisible(sub_sitio)
}

# First-year data
dados <- read.table("data/dados_primeiro ano.txt", header = TRUE)
print(dados)
fz1 <- testa_sitio(dados, "FM")  # Fazenda Murici, year 1
cm1 <- testa_sitio(dados, "CM")  # Campus Machado, year 1

# Second-year data
dados <- read.table("data/dados_segundo ano.txt", header = TRUE)
print(dados)
fz2 <- testa_sitio(dados, "FM")  # Fazenda Murici, year 2
cm2 <- testa_sitio(dados, "CM")  # Campus Machado, year 2
|
41e0712af296ddca6c71162e6f0d4cac04109df0
|
40acfdd1e20e2bc5bb13f45b97af819ada4f41dd
|
/pipeline/pds_r_medication_sql_select.R
|
c267389c761fbce25359df25f5c555b8a5372a4c
|
[] |
no_license
|
PennTURBO/medication-knowledgegraph-pipeline
|
b96728be3c9671499b06cba3c49f075299c4940b
|
c5f562f1b02de4d866256f7aba978b01ddbdbeaa
|
refs/heads/master
| 2023-04-15T02:19:33.954966
| 2021-03-12T19:39:48
| 2021-03-12T19:39:48
| 181,075,722
| 3
| 1
| null | 2021-03-18T18:18:45
| 2019-04-12T20:02:09
|
R
|
UTF-8
|
R
| false
| false
| 2,893
|
r
|
pds_r_medication_sql_select.R
|
# Set the working directory to medication-knowledgegraph-pipeline/pipeline,
# for example:
# setwd("~/GitHub/medication-knowledgegraph-pipeline/pipeline")

# Pull global settings, helper functions and action versioning from the
# shared TURBO setup script. Requires a properly formatted
# "turbo_R_setup.yaml" in medication-knowledgegraph-pipeline/config (or a
# symlink to a central one); see
# https://github.com/PennTURBO/turbo-globals/blob/master/turbo_R_setup.template.yaml
source(
  "https://raw.githubusercontent.com/PennTURBO/turbo-globals/master/turbo_R_setup_action_versioning.R"
)

# Java memory is set in turbo_R_setup.R; echo it for the run log.
print(getOption("java.parameters"))

####
# A VPN and/or SSH tunnel may be required; set that up outside this script.

# Oracle JDBC driver and connection to the PDS database.
pdsDriver <-
  JDBC(driverClass = "oracle.jdbc.OracleDriver",
       classPath = config$oracle.jdbc.path)

pds.con.string <- paste0(
  "jdbc:oracle:thin:@//",
  config$pds.host,
  ":",
  config$pds.port,
  "/",
  config$pds.database
)

pdsConnection <-
  dbConnect(pdsDriver,
            pds.con.string,
            config$pds.user,
            config$pds.pw)

# One row per reference medication, plus a count of distinct patients
# (EMPIs) with an order for it. Cancelled orders are NOT filtered out.
my.query <- "
SELECT
rm.PK_MEDICATION_ID,
rm.FULL_NAME,
rm.GENERIC_NAME,
rm.RXNORM,
COUNT(DISTINCT pe.EMPI) AS empi_count
FROM
mdm.R_MEDICATION rm
LEFT JOIN MDM.ORDER_MED om ON
rm.PK_MEDICATION_ID = om.FK_MEDICATION_ID
LEFT JOIN MDM.PATIENT_ENCOUNTER pe ON
om.FK_PATIENT_ENCOUNTER_ID = pe.PK_PATIENT_ENCOUNTER_ID
GROUP BY
rm.PK_MEDICATION_ID,
rm.FULL_NAME,
rm.GENERIC_NAME,
rm.RXNORM"

# The query historically takes ~30 minutes. The finally clause guarantees
# the JDBC connection is closed even if dbGetQuery() fails (previously a
# failure would leak the connection).
print(Sys.time())
timed.system <- tryCatch(
  system.time(source.medications <-
                dbGetQuery(pdsConnection, my.query)),
  finally = dbDisconnect(pdsConnection)
)
print(Sys.time())
print(timed.system)

# Rename the query's columns for downstream consumers.
# NOTE(review): "MEDICATION_COUNT" is a misleading name - the column actually
# holds a count of unique patients (by EMPI) who received an order for that
# reference medication, not a medication count. Kept for backward
# compatibility with downstream code.
colnames(source.medications) <-
  c("MEDICATION_ID",
    "FULL_NAME",
    "GENERIC_NAME",
    "RXNORM",
    "MEDICATION_COUNT")

# Optional alternative output as delimited text:
# write.table(
#   source.medications,
#   config$source.medications.savepath,
#   append = FALSE,
#   quote = TRUE,
#   sep = "|",
#   row.names = FALSE,
#   col.names = TRUE
# )

# Persist the medications together with the version list captured by the
# setup script.
save(source.medications,
     version.list,
     file = config$source.medications.Rdata.path)
|
86f0fdb59627febbad2cfcacd2a429b120f86273
|
e5c7242d01831977a106b240c15f41330ecffe5f
|
/bin/setup_orthomcl_summary_gv35_id70.R
|
c42dd318b284952647312d4dd20b7da1dcc5994a
|
[] |
no_license
|
roxanahickey/gardnerella
|
541bc2e23379cafac5562b69216a7e1db98f3ec6
|
8a5230afc18d806cd41f32b256fda8d4a624457e
|
refs/heads/master
| 2021-01-11T01:02:37.769405
| 2017-07-25T06:14:05
| 2017-07-25T06:14:05
| 22,445,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,807
|
r
|
setup_orthomcl_summary_gv35_id70.R
|
## Inputs/outputs for orthomcl_summary_part1.R: Gardnerella-only OrthoMCL
## run at 70% identity clustering ("gv35").
## Roxana Hickey <roxana.hickey@gmail.com>
## Last updated: 2015-05-21

## Output directories for result tables and figures
dir.out <- "results/orthomcl70-gv35"
dir.fig <- "results/figures/orthomcl70-gv35"

## Key mapping short strain IDs to full strain names
genome.key <- read.table("data/reference/key_gv35_id.txt", sep = "\t", header = TRUE)

## OrthoMCL cluster membership: multi-member groups, singletons, and the
## binary (presence/absence) group matrix
ortho.clust.in <- readLines("data/reference/orthomcl70_gv35_groups.txt")
ortho.sing.in <- readLines("data/reference/orthomcl70_gv35_single.txt")
clust.mx.in <- read.table("data/reference/orthomcl70_gv35_groups_binary.txt", header = TRUE)

## Key for fasta headers
key.fasta <- read.table("data/reference/key_gv35_fid_cds_fasta.txt", header = TRUE, sep = "\t")

## PATRIC gene features, restricted to CDS entries and joined to the fasta
## key so each feature carries its cds_id, locus_tag and short strain ID
key.feat <- read.table("data/reference/anno_gv35.features.tab", header = TRUE, sep = "\t", quote = "")
key.feat.cds <- merge(subset(key.feat, feature_type == "CDS"),
                      key.fasta[, c("cds_id", "locus_tag", "id_short")])

## EC, FigFam, GO and pathway annotation tables. Each is read from
## data/reference/anno_gv35.<type>, merged with columns 3:8 of the fasta
## key, and tagged with its annotation type in an anno_type column.
key.anno <- lapply(c(ec = "ec", figfam = "figfam", go = "go", path = "path"),
                   function(tp) {
                     tab <- read.table(paste0("data/reference/anno_gv35.", tp),
                                       header = TRUE, sep = "\t", quote = "")
                     tab <- merge(key.fasta[, 3:8], tab)
                     tab$anno_type <- rep(tp, nrow(tab))
                     tab
                   })

## Custom colour choices. wesanderson is loaded for interactive palette
## exploration; several earlier palette experiments were tried and
## discarded before settling on the named R colours below.
library(wesanderson)
col.cust <- c("royalblue3","darkturquoise","gold3","tomato2","mediumorchid3")
col.cust.hc <- col.cust[-3]  # high-contrast variant: drop "gold3"
col.bw.seq <- "RdPu"         # sequential Brewer palette name
|
dc85671f36d6f3a459b9df966b9b37f338bfa63c
|
2d039ddb08cedd400cef3440b8a969e47a2c23fc
|
/P3/datos_temperatura_horarios.R
|
962ffe21ca363d5af4f60e36b8cabf9938abcdf9
|
[] |
no_license
|
LucianoAndrian/Estadistica2_2019
|
ff052aaf8559f5693ee265b5bdf05d8f416a445b
|
068283d56dacd0971cb5212d915b9acf11eed72c
|
refs/heads/master
| 2023-04-20T02:58:06.412694
| 2021-05-06T02:46:18
| 2021-05-06T02:46:18
| 195,433,870
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 678
|
r
|
datos_temperatura_horarios.R
|
# Hourly temperature data, one week of observations (Practica 7 / P3).
# (Original Spanish setup comments, translated): set the working directory
# to the practice folder before running; Ctrl+L clears the console when
# working interactively.
# setwd("C:/Users/Melu/Dropbox/Metodos estadisticos/Practica7")

# Read the hourly observations.
# NOTE(review): read.table() defaults to header = FALSE, so the columns
# would be named V1, V2, ... and the $tiempo / $temperatura accesses below
# would return NULL. Confirm the file has a header row and, if so, add
# header = TRUE here.
datostemperaturahorarios<-read.table('datoshorarios.txt')

# Hourly timestamp sequence covering 2018-01-01 00:00 to 2018-01-07 23:00 UTC.
# NOTE(review): tiempohora is built but never used - presumably intended as
# the x-axis for the plot below; verify against the original exercise.
tiempohora<-seq(from=as.POSIXct("2018-01-01 00:00:00", tz="UTC"),
to=as.POSIXct("2018-01-07 23:59:59", tz="UTC"), by="hour")

# Line plot of temperature against time.
plot(datostemperaturahorarios$tiempo,datostemperaturahorarios$temperatura,type='l')

# (Translated) Idea: we could filter out variability at periods shorter
# than 24 h; we would be interested in observing the higher-frequency waves.
|
d001088c44d885e997ace92d65cab485f1efc725
|
9182c0f56e863455af66cf094143ca1afcb1cf57
|
/code for plot 4.R
|
9e4d913dca668c36cfa0d282025b687e50cd46fb
|
[] |
no_license
|
pemberton7/ExData_Plotting1
|
f4a7af9e32f969ef417133c531be40936fb8c9ad
|
ad5ae7dae2f687322543bce0bd6254f754c7f1f4
|
refs/heads/master
| 2020-04-25T15:58:05.346716
| 2019-02-27T11:04:53
| 2019-02-27T11:04:53
| 172,894,375
| 0
| 0
| null | 2019-02-27T10:29:20
| 2019-02-27T10:29:19
| null |
UTF-8
|
R
| false
| false
| 1,598
|
r
|
code for plot 4.R
|
## Plot 4 of the Electric Power Consumption assignment: a 2x2 panel of
## time-series plots for 2007-02-01 .. 2007-02-02, copied to plot4.png.
## Set working directory to where the data is stored
setwd("~/coursera")  # NOTE(review): hard-coded path; adjust per machine
## Unzip folder (extracts household_power_consumption.txt into the working dir)
dataset <- unzip("exdata_data_household_power_consumption.zip")
## Read in the file; "?" marks missing values in this data set
dataset <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", nrows=2075259,
                    check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
## Subset the dates wanted (stored as d/m/Y strings)
dates <- subset(dataset, Date %in% c("1/2/2007", "2/2/2007"))
## Build a POSIXct datetime column from the Date and Time strings
dates2 <- as.Date(dates$Date, format="%d/%m/%Y")
date_time <- paste(as.Date(dates2), dates$Time)
dates$datetime <- as.POSIXct(date_time)
## 2x2 grid of plots with tightened margins
par(mfrow = c(2,2))
par("mar" = c(4, 4, 2, 2))
## Plot graph 1
with(dates, {plot(Global_active_power~datetime, type = "l", ylab = "Global Active Power")})
## Plot graph 2
with(dates, {plot(Voltage~datetime, type = "l", ylab = "Voltage", xlab = "datetime")})
## Plot graph 3: three sub-metering series overlaid
with(dates, {
  plot(Sub_metering_1~datetime, type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  lines(Sub_metering_2~datetime,col='Red')
  lines(Sub_metering_3~datetime,col='Blue')
})
## BUG FIX: the original passed `pt.cex = cex` where `cex` is undefined; it
## only avoided an error because pt.cex applies to point symbols (pch), which
## this lines-only legend never evaluates. Dropped the argument entirely.
legend("topright",legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3") , col = c("black", "red", "blue"), lty = 1,
       lwd = 1, bty = "n", cex = 0.9)
## Plot graph 4
with(dates, {plot(Global_reactive_power~datetime, type = "l", xlab = "datetime", ylab = "Global_reactive_power")})
## Copy the screen device into a png file
dev.copy(png, file = "plot4.png", width=480, height=480)
dev.off()
|
ab69211cb01ead210e92b549e00c7cffaf8edd90
|
192dd0acad8c23498f5463c0ecec5fec08ab644b
|
/Course 9/Theory/asdf/server.R
|
ca4e74d73c4431e4bf628f0233c0aac4a1048028
|
[] |
no_license
|
tafuenza/R-Course
|
adf8606a0fcfa4206813076cd9597d0c7d5af7ea
|
6bc7a0f41f206d13fa6a5aee49bcaf184900d128
|
refs/heads/master
| 2020-09-21T09:37:34.025642
| 2020-06-05T20:38:16
| 2020-06-05T20:38:16
| 224,723,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 890
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
  # Keep the select input "inSelect" mirrored to the checkbox-group selection.
  observe({
    checked <- input$inCheckboxGroup
    # character(0) removes every choice from the select input
    if (is.null(checked)) {
      checked <- character(0)
    }
    # Update label, choices, and pre-select the most recently listed item.
    updateSelectInput(
      session,
      "inSelect",
      label = paste("Select input label", length(checked)),
      choices = checked,
      selected = tail(checked, 1)
    )
  })
})
|
770c8978a001b69f46671392a4652e038c967e31
|
7ef9e8be9e82440db096f0a0a35b9834327b707f
|
/man/create_tidy_table_one.Rd
|
d0c881e5ab8578498a687a21334380cfd06dbadf
|
[
"MIT"
] |
permissive
|
emilelatour/tidytableone
|
8b455a0db3688d246edbb303d4d7447a8f86eaf0
|
adc4b925b708bf02adac47a2e3fc80c7a6c5511c
|
refs/heads/master
| 2022-06-02T12:50:42.601604
| 2022-05-16T21:57:10
| 2022-05-16T21:57:10
| 208,478,965
| 3
| 2
|
NOASSERTION
| 2019-09-28T23:17:22
| 2019-09-14T17:41:35
|
R
|
UTF-8
|
R
| false
| true
| 3,231
|
rd
|
create_tidy_table_one.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create-tidy-table-one.R
\name{create_tidy_table_one}
\alias{create_tidy_table_one}
\title{Tidy table one}
\usage{
create_tidy_table_one(data, strata = NULL, .vars, na_level = "(Missing)", ...)
}
\arguments{
\item{data}{A data frame or tibble containing the variables to be summarized.}
\item{strata}{Character vector of the stratifying (grouping) variable.
\strong{Currently required}}
\item{.vars}{Character vector of the variable names to be summarized. If
empty, then all variables in the given data frame are used.}
\item{na_level}{Character string of the text to replace \code{NA} in the strata
variable, if any exist.}
\item{...}{Additional arguments. Not used.}
}
\value{
A tibble with the following results
\describe{
\item{strata}{Level of the stratifying variable}
\item{var}{Variable/column name}
\item{n}{Number of records}
\item{n_distinct}{Number of distinct values}
\item{complete}{Number of non-missing observations}
\item{missing}{Number of missing observations}
\item{mean}{Mean}
\item{sd}{Standard deviation}
\item{p0}{Minimum}
\item{p25}{25th percentile}
\item{p50}{Median}
\item{p75}{75th percentile}
\item{p100}{Maximum}
\item{cv}{Coefficient of variation}
\item{shapiro_test}{Shapiro-Wilkes test: p-value}
\item{ks_test}{Kolmogorov-Smirnov test: p-value}
\item{ad_test}{Anderson-Darling test for normality: p-value}
\item{level}{Level of the variable}
\item{n_level}{Total number in the variable's group}
\item{n_strata}{Total number in the variable group and strata}
\item{chisq_test}{Chi square test: p-value}
\item{fisher_test}{Fisher's exact test: p-value}
\item{check_categorical_test}{Is Chi square OK? Consider Fisher}
\item{oneway_test}{Oneway anova test: p-value, equivalent to t-test when only 2 groups}
\item{kruskal_test}{Kruskal-Wallis Rank Sum Test: p-value, equivalent to Mann-Whitney U test when only 2 groups}
\item{bartlett_test}{Bartlett's test for homogeneity of variances: p-value}
\item{levene_test}{Levene's test for homogeneity of variances: p-value}
\item{smd}{Standardized mean difference}
}
}
\description{
Creates a tidy data frame of the results that can go into a "Table 1" of
summary descriptive statistics of a study sample. Inspiration for this is owed
to the \code{tableone} package by Kazuki Yoshida.
}
\examples{
library(dplyr)
tab1 <- create_tidy_table_one(data = pbc_mayo,
strata = "trt",
.vars = c("time", "status", "trt", "age", "sex", "ascites", "hepato",
"spiders", "edema", "bili", "chol", "albumin", "copper", "alk_phos",
"ast", "trig", "platelet", "protime", "stage"))
dplyr::glimpse(tab1)
library(ggplot2) # diamonds data set
tab2 <- create_tidy_table_one(data = diamonds,
strata = "cut",
.vars = c("carat",
# "cut", # Don't have to include the strata variable
"color",
"clarity",
"depth",
"table",
"price"))
dplyr::glimpse(tab2)
}
|
75e94a65218dfe60a11ab30090aed7c7c86f8c68
|
3fe1a27ecb52798609e99b5c7a2af1b95166c03d
|
/man/darkpix.Rd
|
3c1886a269d2b7f4045f197cf140af32998efc72
|
[] |
no_license
|
mbedward/ecbtools
|
6cbbfbe3c18734b75c42f0bd292aeab69f7bee69
|
16c5df2857f83fadfaac35447e21388daa69bfdb
|
refs/heads/master
| 2023-02-16T19:29:39.773238
| 2021-01-17T20:26:25
| 2021-01-17T20:26:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,594
|
rd
|
darkpix.Rd
|
\name{darkpix}
\alias{darkpix}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Calculates percentage of dark pixels in an image.
}
\description{
This function takes a colour image file, typically a JPEG, and counts the percentage of pixels below a certain darkness threshold. By default, only a central circle representing the hemispherical horizon is counted. The threshold at which darkness is detected can be altered.
}
\usage{
darkpix(file, cutoff = 100, clip.circle = T)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{file}{
Character string for the filename of the image file to be processed.
}
\item{cutoff}{
The cutoff value, from 0-255. Pixels with brightness values below this threshold are considered dark.
}
\item{clip.circle}{
True/false - if true, central circle of image is used. If false, whole image is used.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A value representing the percentage of dark pixels in the image.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Grant Williamson <grant.williamson@utas.edu.au>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{darkpix.dir}}
}
\examples{
rlogo=system.file("pictures/Rlogo.jpg", package="rgdal")[1]
darkpix(rlogo,cutoff=100,clip.circle=T)
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
}
\keyword{ hemispherical }
\keyword{ photo }
\keyword{ image }
\keyword{ cover }
|
1e2b68499ac387ecbd688c8910e659c823b75b03
|
1dbe1bc1097ee5c9ea3d196b50b5708383b3fb5a
|
/R/libGraphics.R
|
2c019efd41e50823f954920bbe4f02ba9ec166b1
|
[] |
no_license
|
cran/ClickClust
|
a3ed1adaafc1f296fc1d2ec123e58d963a7ab4e6
|
5e68e8aca28f96bca2d2c26141ce84f8dd4d8a97
|
refs/heads/master
| 2021-01-19T00:14:56.577695
| 2016-10-23T00:20:21
| 2016-10-23T00:20:21
| 19,249,803
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,759
|
r
|
libGraphics.R
|
# Convert a matrix of counts into row-wise proportions. Rows that sum to
# zero are divided by 1 instead of 0, so they stay all-zero rather than
# turning into NaN.
calc.props <- function(y){
  row.totals <- apply(y, 1, sum)
  row.totals[row.totals == 0] <- 1
  sweep(y, 1, row.totals, FUN = "/")
}
# Map an intensity value x in [0, 1] onto its discrete color bin.
# levels holds the bin boundaries (length(colors) + 1 values); a value of
# exactly 0 always maps to the first color.
calc.color <- function(colors, levels, x){
  first.bin <- which(x <= levels)[1] - 1
  colors[ifelse(x == 0, 1, first.bin)]
}
# click.plot(): draw a p x p grid of cells visualizing an array of
# transition-probability matrices grouped by cluster assignment.
#
# Arguments (as used below):
#   X        p x p x n array of transition counts, one slice per observation
#   y        optional per-observation state labels; adds an extra cell column
#   file     optional path; when given, the finished plot is copied to PDF
#   id       cluster assignment (1..K) for each of the n observations
#   states   optional state names for the axis labels (defaults to indices)
#   obs.lwd  NULL -> one median-colored polygon per cluster inside each cell;
#            otherwise one colored horizontal line per observation
#   colors   palette endpoints, expanded to col.levels discrete colors
# Remaining arguments control fonts, line widths, margins, and the legend.
# Relies on helpers calc.props() and calc.color() defined in this file.
click.plot <- function(X, y = NULL, file = NULL, id, states = NULL, marg = 1, font.cex = 2, font.col = "black", cell.cex = 1, cell.lwd = 1.3, cell.col = "black", sep.lwd = 1.3, sep.col = "black", obs.lwd = NULL, colors = c("lightcyan", "pink", "darkred"), col.levels = 8, legend = TRUE, leg.cex = 1.3, top.srt = 0, frame = TRUE){
# Normalize every observation's count matrix into row proportions.
K <- max(id)
p <- dim(X)[1]
n <- dim(X)[3]
P1 <- X
for (i in 1:n) P1[,,i] <- calc.props(X[,,i])
Nk <- NULL
last <- 0
if (!is.null(y)) y.new <- NULL
if (is.null(obs.lwd)){ # calculate median colors
P.med <- array(rep(NA, p^2*K), c(p, p, K))
for (k in 1:K){
ind <- which(id == k)
nk <- length(ind)
Nk <- c(Nk, nk)
P.med[,,k] <- apply(P1[,,ind], c(1,2), median)
}
} else { # sort data according to group assignments
P <- P1
for (k in 1:K){
ind <- which(id == k)
nk <- length(ind)
Nk <- c(Nk, nk)
P[,,(last+1):(last+nk)] <- P1[,,ind]
last <- last + nk
if (!is.null(y)){
y.new <- c(y.new, y[ind])
}
}
}
# Set up the discrete palette, cell grid geometry, and an empty canvas.
# An extra horizontal strip is reserved below when the legend is drawn,
# and an extra column to the right when y is supplied.
Nk.cum <- cumsum(Nk)
colors <- colorRampPalette(colors)(col.levels)
levels = seq(0.0, 1.0, length.out = col.levels+1)
grid <- seq(0, 1, length.out = p + 1)
grid.step <- grid[2] - grid[1]
par(mar = rep(0.1, 4))
if (legend){
if (is.null(y)){
plot( c(-grid.step/2 * marg, 1), c(-grid.step, 1 + grid.step/2 * marg), type = "n", xlab = "", ylab = "", axes = FALSE)
} else {
plot( c(-grid.step/2 * marg, 1 + grid.step), c(-grid.step, 1 + grid.step/2 * marg), type = "n", xlab = "", ylab = "", axes = FALSE)
}
} else {
if (is.null(y)){
plot( c(-grid.step/2 * marg, 1), c(0, 1 + grid.step/2 * marg), type = "n", xlab = "", ylab = "", axes = FALSE)
} else {
plot( c(-grid.step/2 * marg, 1 + grid.step), c(0, 1 + grid.step/2 * marg), type = "n", xlab = "", ylab = "", axes = FALSE)
}
}
if (frame) box()
# state numbers (top axis labels, then left axis labels)
y1 <- 1 + grid.step / 3 * marg
for (j in 1:p){
x1 <- (grid[j] + grid[j+1]) / 2
if (is.null(states)){
text(x1, y1, j, cex = font.cex, col = font.col, srt = top.srt)
} else {
text(x1, y1, states[j], cex = font.cex, col = font.col, srt = top.srt)
}
}
x1 <- -grid.step / 3 * marg
for (j in 1:p){
y1 <- (grid[j] + grid[j+1]) / 2
if (is.null(states)){
text(x1, y1, p+1-j, cex = font.cex, col = font.col)
} else {
text(x1, y1, states[p+1-j], cex = font.cex, col = font.col)
}
}
# margin between cells
eps <- grid.step / 20 / cell.cex
if (is.null(obs.lwd)){ # median color polygons
Nk.cum <- c(0, Nk.cum)
step <- (grid.step - 2 * eps) / n
for (j in 1:p){
x1 <- grid[j]
y1 <- grid[j]
for (i in 1:p){
for (k in 1:K){
polygon(c(grid[j]+eps, grid[j+1]-eps, grid[j+1]-eps, grid[j]+eps),
        c(grid[p-i+1]+eps+(Nk.cum[k]+0.5)*step,
        grid[p-i+1]+eps+(Nk.cum[k]+0.5)*step,
        grid[p-i+1]+eps+(Nk.cum[k+1]+0.5)*step,
        grid[p-i+1]+eps+(Nk.cum[k+1]+0.5)*step),
        col = calc.color(colors, levels, P.med[i,j,k]),
        border = sep.col, lwd = sep.lwd)
}
}
}
# cell frames
for (j in 1:p){
x1 <- grid[j]
y1 <- grid[j]
for (i in 1:p){
polygon(c(grid[j]+eps, grid[j+1]-eps, grid[j+1]-eps, grid[j]+eps),
        c(grid[p-i+1]+eps, grid[p-i+1]+eps, grid[p-i+2]-eps, grid[p-i+2]-eps),
        border = cell.col, lwd = cell.lwd)
}
}
if (!is.null(y)){ # additional column of cells to represent betas
for (j in 1:p){
for (k in 1:K){
polygon(c(max(grid) + 5*eps*1.2, max(grid) + grid.step / 2 + 5*eps*1.2,
        max(grid) + grid.step / 2 + 5*eps*1.2, max(grid) + 5*eps*1.2),
        c(grid[p-j+1]+eps+(Nk.cum[k]+0.5)*step,
        grid[p-j+1]+eps+(Nk.cum[k]+0.5)*step,
        grid[p-j+1]+eps+(Nk.cum[k+1]+0.5)*step,
        grid[p-j+1]+eps+(Nk.cum[k+1]+0.5)*step),
        col = calc.color(colors, levels, mean(y[id == k] == j)),
        border = sep.col, lwd = sep.lwd)
}
}
# cell frames
for (j in 1:p){
polygon(c(max(grid) + grid.step / 2 + 5*eps*1.2, max(grid) + 5*eps*1.2,
        max(grid) + 5*eps*1.2, max(grid) + grid.step / 2 + 5*eps*1.2),
        c(grid[p-j+1]+eps, grid[p-j+1]+eps, grid[p-j+2]-eps, grid[p-j+2]-eps),
        border = cell.col, lwd = cell.lwd)
}
}
} else { # observation lines
step <- (grid.step - 2 * eps) / n
for (j in 1:p){
for (i in 1:p){
curr <- P[i,j,]
for (h in 1:n){
lines(c(grid[j]+eps*1.2, grid[j+1]-eps*1.2),
      c(grid[p-i+1]+eps+h*step, grid[p-i+1]+eps+h*step),
      col = calc.color(colors, levels, curr[h]), lwd = obs.lwd)
}
# separator lines between consecutive clusters within each cell
if (K != 1){
for (k in 1:(K-1)){
lines(c(grid[j]+eps*1.2, grid[j+1]-eps*1.2),
      c(grid[p-i+1]+eps+(Nk.cum[k]+0.5)*step,
      grid[p-i+1]+eps+(Nk.cum[k]+0.5)*step),
      col = sep.col, lwd = sep.lwd)
}
}
}
}
# cell frames
for (j in 1:p){
x1 <- grid[j]
y1 <- grid[j]
for (i in 1:p){
polygon(c(grid[j]+eps, grid[j+1]-eps, grid[j+1]-eps, grid[j]+eps),
        c(grid[p-i+1]+eps, grid[p-i+1]+eps, grid[p-i+2]-eps, grid[p-i+2]-eps),
        border = cell.col, lwd = cell.lwd)
}
}
if (!is.null(y)){ # additional column of cells to represent betas
for (j in 1:p){
for (i in 1:n){
lines(c(max(grid) + 5*eps*1.2, max(grid) + grid.step / 2 + 5*eps*1.2),
      c(grid[p-j+1]+eps+i*step, grid[p-j+1]+eps+i*step),
      col = calc.color(colors, levels, y.new[i] == j), lwd = obs.lwd)
}
if (K != 1){
for (k in 1:(K-1)){
lines(c(max(grid) + 5*eps*1.2, max(grid) + grid.step / 2 + 5*eps*1.2),
      c(grid[p-j+1]+eps+(Nk.cum[k]+0.5)*step,
      grid[p-j+1]+eps+(Nk.cum[k]+0.5)*step),
      col = sep.col, lwd = sep.lwd)
}
}
}
# cell frames
for (j in 1:p){
polygon(c(max(grid) + grid.step / 2 + 5*eps*1.2, max(grid) + 5*eps*1.2,
        max(grid) + 5*eps*1.2, max(grid) + grid.step / 2 + 5*eps*1.2),
        c(grid[p-j+1]+eps, grid[p-j+1]+eps, grid[p-j+2]-eps, grid[p-j+2]-eps),
        border = cell.col, lwd = cell.lwd)
}
}
}
# construct legend: one swatch per color level with its lower boundary value
if (legend){
sep.X <- seq(0+eps, 1-eps, length.out = col.levels + 1)
for (j in 1:col.levels){
polygon(c(sep.X[j], sep.X[j+1], sep.X[j+1], sep.X[j]),
        c(-grid.step*0.5, -grid.step*0.5, -grid.step*0.25, -grid.step*0.25),
        border = cell.col, lwd = cell.lwd, col = colors[j])
text(sep.X[j], -grid.step*0.75, round(levels[j], digits = 2), cex = leg.cex)
}
text(1-eps, -grid.step*0.75, 1, cex = leg.cex, col = font.col)
}
if (!is.null(file)) dev.copy2pdf(file = file)
}
|
c4a307c9e102cd5e3ff933e2195236c8149df096
|
dc5ffded3fc6bfa452e7963ebfbe3c21ace8427c
|
/Skrypty/pkg_Application/api_Application.R
|
311c73951e9c39d22b1bb69b60477e206a990458
|
[] |
no_license
|
frdanconia/keras_and_shiny
|
85ba687e26fcedf1864fc71eec82467409a223e5
|
fe71d32cf7df3d28d84c1b5f371e49fbe7bc9508
|
refs/heads/master
| 2022-03-29T11:48:51.609724
| 2020-01-27T23:00:21
| 2020-01-27T23:00:21
| 236,614,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,651
|
r
|
api_Application.R
|
#' Launch the Shiny application on a fixed localhost port.
#'
#' @param ui Shiny UI object (page layout).
#' @param server Shiny server logic function.
#' @param port Port on localhost to serve the app from.
#' @return None; blocks while the app is running.
#' @export
runApplication <- function(ui, server, port) {
# INPUT: layout of the page, server logic function, port on localhost to run the app
# OPERATIONS: run the app on selected port
# OUTPUT: none
app <- shinyApp(ui = ui, server = server)
runApp(app, port = port)
}
#' @export
createTensor <- function(img_mtrx) {
  # Wrap a single 28 x 28 x 1 pixel-intensity array in a leading batch
  # dimension, yielding the 1 x 28 x 28 x 1 tensor shape the model expects.
  # Prepending a length-1 dimension leaves the column-major element order
  # unchanged, so filling the array directly is equivalent to allocating an
  # empty tensor and assigning into its first (and only) batch slot.
  array(img_mtrx, dim = c(1, 28, 28, 1))
}
#' Predict the digit class for one prepared image.
#'
#' @param model A trained Keras classification model.
#' @param data_tensor A single-image tensor of shape 1 x 28 x 28 x 1.
#' @return The predicted class label as a character string.
#' @export
predictClass <- function(model, data_tensor) {
# INPUT: Keras model, a single pixel intensity tensor 1 x 28 x 28 x 1
# OPERATIONS: use the model to predict class on a new example
# OUTPUT: a class label (digit 0-9 as character)
class <- predict_classes(model, data_tensor)
return(as.character(class))
}
#' Compute per-class probabilities for one prepared image.
#'
#' @param model A trained Keras classification model.
#' @param data_tensor A single-image tensor of shape 1 x 28 x 28 x 1.
#' @return A data frame with columns `class` ("0".."9") and `probability`
#'   (softmax output rounded to 5 decimal places).
#' @export
predictProbabilities <- function(model, data_tensor) {
# INPUT: Keras model, a single pixel intensity tensor 1 x 28 x 28 x 1
# OPERATIONS: calculates class probabilities (the output of the softmax layer) and convert them into a data frame
# OUTPUT: a data frame with class probabilities
prob <- predict_proba(model, data_tensor)
prob_df <- data.frame(class = as.character(0:9), probability = round(as.vector(prob), 5))
return(prob_df)
}
# Re-exports:
#' @export
Modeling::loadModel
#' @export
DataPreparation::loadAndPrepareImage
#' @export
DataPreparation::normalizePixelIntensities
|
bc4f0d614df485311a175fb172f8c78359caf54f
|
8a259d840c6622076f86df16c492d6d138b3e58d
|
/logistic_regression/GermanCredit.R
|
36a9d2e230839574500464e102d8e61b44010266
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
james-m-foster/high-order-langevin
|
98d4191ff024a191864c048c3d38f0f410fc988f
|
e97d6fe55d5d0b783b45de6bc46d5535c6aaf9f3
|
refs/heads/master
| 2022-11-04T08:12:34.357554
| 2022-11-01T10:24:48
| 2022-11-01T10:24:48
| 208,257,498
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,374
|
r
|
GermanCredit.R
|
# Bayesian logistic regression setup on the German credit data.
library(unbiasedmcmc)
set.seed(1)
# Load dataset
# NOTE(review): data(germancredit) is assumed to place X (covariates) and
# Y (binary response) into the workspace -- confirm against the package.
data(germancredit)
# Standardize covariates, then restore the intercept column to all ones
# (scale() would have turned a constant column into NaN/0).
X <- scale(X)
X[,1] <- rep(1, nrow(X))
n <- nrow(X)
p <- ncol(X)
design_matrix <- unname(X)
tdesign_matrix <- t(design_matrix)
response <- Y
new_response <- 2*response - 1 # map to {-1,1}
nsamples <- nrow(design_matrix)
dimension <- ncol(design_matrix)
# Prior covariance for regression coefficients
sigma2 <- 10
sigma_prior <- diag(sigma2, dimension, dimension)
identity_matrix <- diag(1, dimension, dimension)
zero_vec <- matrix(0, nrow = dimension, ncol = 1)
# Package helper that precomputes quantities reused by the samplers.
logistic_setting <- logisticregression_precomputation(Y, X, zero_vec, sigma_prior)
# Log sigmoid function
# Numerically stable log(sigmoid(x)), computed branch-wise so exp() never
# overflows: for x > 0 use -log1p(exp(-x)) (exp(-x) <= 1); for x <= 0 use
# x - log1p(exp(x)) (exp(x) <= 1).
#
# Fixes vs. original: the accumulator was allocated as a logical vector
# (silently coerced to numeric on first assignment), and the positive branch
# used log(1 + exp(-x)) instead of log1p(exp(-x)), which loses precision when
# exp(-x) is tiny.
stable_log_sigmoid <- function(x){
  output <- vector(mode = "numeric", length = length(x))
  mask <- x > 0
  nmask <- !mask
  output[mask] <- -log1p(exp(-x[mask]))
  output[nmask] <- x[nmask] - log1p(exp(x[nmask]))
  return(output)
}
# Sigmoid function
# Logistic function mapping any real x into (0, 1); vectorized over x.
sigmoid <- function(x) {
  denom <- 1 + exp(-x)
  1 / denom
}
# Log density of the posterior (up to a constant)
# Logistic log-likelihood plus a Gaussian N(0, sigma2 * I) log-prior.
# Relies on globals defined above: design_matrix, new_response, sigma2,
# and the helper stable_log_sigmoid().
logtarget <- function(beta){
  linpred <- design_matrix %*% beta
  log_lik <- sum(stable_log_sigmoid(new_response * linpred))
  log_prior <- -sum(beta^2) / (2 * sigma2)
  log_lik + log_prior
}
# Gradient of log density of the posterior
# d/d_beta of logtarget: X' (sigmoid(-y * X beta) * y) - beta / sigma2.
# Relies on globals defined above: design_matrix, tdesign_matrix,
# new_response, sigma2, and the helper sigmoid().
gradlogtarget <- function(beta){
  linpred <- design_matrix %*% beta
  grad_lik <- tdesign_matrix %*% (sigmoid(-new_response * linpred) * new_response)
  grad_lik - beta / sigma2
}
|
deaf9e4ff0dd616ad6f2e2bc628d6b018bc5100f
|
363d5945d553cec4c04a5cc0409cda15f4863fd9
|
/sicegarPerformanceSimulations/sicegarExtPerformanceTest_part1P.R
|
4a476fed6d687b42c26a4293de1d8c0ed6ecd03f
|
[] |
no_license
|
wilkelab/sicegar
|
ac4d8db6a04bd38c4f1e12c34008ee9fcd2c6c06
|
08bf9c45d2b32554168fe21e673015122e3c37e9
|
refs/heads/master
| 2023-02-09T21:06:44.677219
| 2021-05-08T00:57:47
| 2021-05-08T00:57:47
| 48,201,374
| 8
| 4
| null | 2023-01-31T01:42:12
| 2015-12-17T22:23:14
|
R
|
UTF-8
|
R
| false
| false
| 13,930
|
r
|
sicegarExtPerformanceTest_part1P.R
|
# The Sicegar Noise Analyze
# Scanned Parameter different cond.
# Initial distributions that needs to be predicted: sigmoidal / double sigmoidal 2
# noise types: additive / multiplicative 2
# temporal distributions: uniform + normal + 3 beta distibutions 5
# noise levels: "seq(from= 0, to = 1.5, length.out = 11)" 11
# distinct parameters sets that will be used to generate initial distributions 50
# distinct runs for given parameter set and with given temporal distributions 1
# Total 11000
###*****************************
# INITIAL COMMANDS TO RESET THE SYSTEM
# (deliberate interactive-session reset: clear workspace, close devices,
# clear console, fix the RNG seed for reproducibility)
rm(list = ls())
if (is.integer(dev.list())){dev.off()}
cat("\014")
seedNo = 14159
set.seed(seedNo)
###*****************************
###*****************************
# Required packages
# Sicegar and Data Related
require("sicegar")
require("tidyverse")
require("cowplot")
require("dplyr")
# Parallel
require("doMC")
require("foreach")
###*****************************
#********************************************
# ARRANGE BACKENDS
## use the multicore library
# a.
ProcCount <- 7 # registers specified number of workers or
registerDoMC(ProcCount) # Or, reserve all all available cores
# b.
#registerDoMC() # Automatically assign cores
getDoParWorkers() # check how many cores (workers) are registered
#********************************************
###*****************************
# Parameters
initial_model = c("SM", "DSM")
#initial_model = c("DSM")
noise_type_vector = c("additive", "multiplicative")
time_sampling_vector = c("equidistant", "uniform", "beta_0.5_1.5", "beta_2_2", "beta_1.5_0.5")
noise_parameter_vector <- seq(from= 0, to = 1.5, length.out = 11) # used value in supplementary figure is "seq(from = 0, to = 1.5, length.out = 11)"
distinct_model_parameters <- 50 # Used value in supplementary figure is "50"
distinct_runs <- 1 # used value in supplementary figure is "1"
n_samples = 55
t_min = 3
t_max = 30
# Thresholds for data generation
# (read as globals by the sigmoidalCheck / doublesigmoidalCheck functions below)
threshold_intensity_range = 0.1
threshold_minimum_for_intensity_maximum = 0.3
threshold_startPoint = 0
threshold_t0_max_int = 0.05
threshold_sm_tmax_IntensityRatio = 0.85
threshold_dsm_tmax_IntensityRatio = 0.75
###*****************************
###*****************************
# Generate the data frame that will include all parameters
# One row per combination of experimental conditions; run_no indexes rows
# after sorting, par_work round-robins rows across the parallel workers.
df <- expand.grid(true_model = initial_model,
                  noise_type = noise_type_vector,
                  time_sampling = time_sampling_vector,
                  noise_parameter = noise_parameter_vector,
                  distinct_model_parameters = seq(from = 1, to = distinct_model_parameters),
                  distinct_runs = distinct_runs)
df %>%
  dplyr::arrange(true_model,
                 noise_type,
                 time_sampling,
                 noise_parameter,
                 distinct_model_parameters,
                 distinct_runs) %>%
  dplyr::group_by() %>%
  dplyr::mutate(run_no = seq(1:n())) -> df
df$par_work <- rep(x = seq(1:ProcCount), length.out = nrow(df))
###*****************************
###*****************************
# Choose initial model parameters
# Sigmoidal check Functions
# sigmoidalCheck(): validate one candidate sigmoidal parameter set by
# generating a noise-free curve on [t_min, t_max] and applying five
# acceptance checks. Returns 1 (accept) or 0 (reject). Reads the globals
# t_min, t_max and the threshold_* constants defined above; also fits a
# sigmoidal model via sicegar to locate the curve's start point.
sigmoidalCheck <- function(maximum_SM, slope_param_SM, midpoint_SM)
{
# generate data based on the parameters
time_temp = seq(from = t_min, to = t_max, length.out = 100) # generate time points
# generate intensity
original_intensity_temp <- sicegar::sigmoidalFitFormula(x = time_temp,
                                                        maximum = maximum_SM,
                                                        slopeParam = slope_param_SM,
                                                        midPoint = midpoint_SM)
# generate data frame based on time and intensity and do the sigmoidal fit
# to find the start point
df_temp <- data.frame(time = time_temp, intensity = original_intensity_temp)
normalizedInput = sicegar::normalizeData(dataInput = df_temp)
sigmoidalModel <- sicegar::multipleFitFunction(dataInput = normalizedInput, model = "sigmoidal")
sigmoidalModel <- sicegar::parameterCalculation(parameterVector = sigmoidalModel)
AIC <- sigmoidalModel$AIC_value
cat(paste0("\n","AIC: ", AIC,"\n"))
#sm_fit_obj <- sicegar::fitAndCategorize(dataInput = df_temp)
startPoint_x <- sigmoidalModel$startPoint_x
# find t0 intensity
t0_intensity <- sicegar::sigmoidalFitFormula(x = 0,
                                             maximum = maximum_SM,
                                             slopeParam = slope_param_SM,
                                             midPoint = midpoint_SM)
# find last observed point intensity
tmax_intensity <- sicegar::sigmoidalFitFormula(x = t_max,
                                               maximum = maximum_SM,
                                               slopeParam = slope_param_SM,
                                               midPoint = midpoint_SM)
max_original_intensity <- max(original_intensity_temp)
min_original_intensity <- min(original_intensity_temp)
# check 1: intensity range
check_1 <- max_original_intensity - min_original_intensity > threshold_intensity_range
# check 2: intensity maximum
check_2 <- max_original_intensity > threshold_minimum_for_intensity_maximum
# check 3: start intensity check
check_3 <- t0_intensity < threshold_t0_max_int
# check 4: startPoint_x
check_4 <- startPoint_x > threshold_startPoint
# check 5. reach larger than %85 of maximum at t = t_max
check_5 <- tmax_intensity / maximum_SM > threshold_sm_tmax_IntensityRatio
# accept only when all five checks hold
if(all(c(check_1, check_2, check_3, check_4, check_5)==1)==1){flag_sm = 1}
if(!all(c(check_1, check_2, check_3, check_4, check_5)==1)==1){flag_sm = 0}
return(flag_sm)
}
# Double-sigmoidal check function
# doublesigmoidalCheck(): validate one candidate double-sigmoidal parameter
# set by generating a noise-free curve on [t_min, t_max] and applying six
# acceptance checks (curve range, peak height/location, low start intensity,
# and sufficient decay by t_max). Returns 1 (accept) or 0 (reject). Reads
# the globals t_min, t_max and the threshold_* constants defined above.
doublesigmoidalCheck <- function(final_asymptoteIntensity_ratio_DSM, maximum_DSM,
                                 slope1_param_DSM, midpoint1_param_DSM,
                                 slope2_param_DSM, midPoint_distance_param_DSM)
{
# generate data based on the parameters
time_temp = seq(from = t_min, to = t_max, length.out = 100) # generate time points
# generate intensity
original_intensity_temp <- sicegar::doublesigmoidalFitFormula(x=time_temp,
                                                              finalAsymptoteIntensityRatio = final_asymptoteIntensity_ratio_DSM,
                                                              maximum = maximum_DSM,
                                                              slope1Param = slope1_param_DSM,
                                                              midPoint1Param = midpoint1_param_DSM,
                                                              slope2Param = slope2_param_DSM,
                                                              midPointDistanceParam = midPoint_distance_param_DSM)
# generate data frame based on time and intensity and do the double sigmoidal fit
# to find the start point
df_temp <- data.frame(time = time_temp, intensity = original_intensity_temp)
normalizedInput = sicegar::normalizeData(dataInput = df_temp)
doubleSigmoidalModel <- sicegar::multipleFitFunction(dataInput = normalizedInput, model = "doublesigmoidal")
doubleSigmoidalModel <- sicegar::parameterCalculation(parameterVector = doubleSigmoidalModel)
AIC <- doubleSigmoidalModel$AIC_value
cat(paste0("\n","AIC: ", AIC,"\n"))
startPoint_x <- doubleSigmoidalModel$startPoint_x
# find t0 intensity
t0_intensity <- sicegar::doublesigmoidalFitFormula(x=0,
                                                   finalAsymptoteIntensityRatio = final_asymptoteIntensity_ratio_DSM,
                                                   maximum = maximum_DSM,
                                                   slope1Param = slope1_param_DSM,
                                                   midPoint1Param = midpoint1_param_DSM,
                                                   slope2Param = slope2_param_DSM,
                                                   midPointDistanceParam = midPoint_distance_param_DSM)
# find last observed point intensity
tmax_intensity <- sicegar::doublesigmoidalFitFormula(x=t_max,
                                                     finalAsymptoteIntensityRatio = final_asymptoteIntensity_ratio_DSM,
                                                     maximum = maximum_DSM,
                                                     slope1Param = slope1_param_DSM,
                                                     midPoint1Param = midpoint1_param_DSM,
                                                     slope2Param = slope2_param_DSM,
                                                     midPointDistanceParam = midPoint_distance_param_DSM)
max_original_intensity <- max(original_intensity_temp)
min_original_intensity <- min(original_intensity_temp)
# find x value for the maximum intensity
maximum_x <- doubleSigmoidalModel$maximum_x
# check 1: intensity range
check_1 <- max_original_intensity - min_original_intensity > threshold_intensity_range
# check 2: intensity maximum
check_2 <- max_original_intensity > threshold_minimum_for_intensity_maximum
# check 3: start intensity check
check_3 <- t0_intensity < threshold_t0_max_int
# check 4: startPoint_x
check_4 <- startPoint_x > threshold_startPoint
# check 5. if the max_x is before tmax
check_5 <- maximum_x < t_max
# check 6. drops %75 of maximum at t = t_max
check_6 <- tmax_intensity / max_original_intensity < threshold_dsm_tmax_IntensityRatio
# accept only when all six checks hold
if(all(c(check_1, check_2, check_3, check_4, check_5, check_6)==1)==1){flag_dsm = 1}
if(!all(c(check_1, check_2, check_3, check_4, check_5, check_6)==1)==1){flag_dsm = 0}
return(flag_dsm)
}
# Sigmoidal random parameter generation
# sigmoidal_parameters(): for rows whose true model is "SM", repeatedly draw
# random sigmoidal parameters until sigmoidalCheck() accepts them (rejection
# sampling); for other rows return NA placeholders so all rows share the same
# column structure. Returns a flat named list.
sigmoidal_parameters <- function(true_model, run_no)
{
# progress marker for long runs
cat(paste0("\n","run no: ", run_no,"\n"))
if(true_model == "SM")
{
flag_sm = 0
while(flag_sm == 0)
{
# tan(U[0, pi/2)) gives slopes spanning (0, Inf)
maximum_SM <- runif(n = 1, min = 0.3, max = 20) # used value in supplementary figure is "runif(1, 0.3, 20)"
slope_param_SM <- tan(runif(n = 1, min = 0.0, max = pi/2)) # used value in supplementary figure is "tan(runif(n = 1, min = 0.0, max = pi/2))"
midpoint_SM <- runif(n = 1, min = 3, max = 27) # used value in supplementary figure is "runif(1, 3, 27)"
flag_sm <- sigmoidalCheck(maximum_SM, slope_param_SM, midpoint_SM)
}
output <- list(maximum_SM = maximum_SM, slope_param_SM = slope_param_SM, midpoint_SM = midpoint_SM)
}
else
{
output <- list(maximum_SM = NA, slope_param_SM = NA, midpoint_SM = NA)
}
output2 <- purrr::flatten(as.vector(output))
return(output2)
}
# Double-sigmoidal random parameter generation
# double_sigmoidal_parameters(): rejection-sample double-sigmoidal parameters
# for rows whose true model is "DSM" until doublesigmoidalCheck() accepts
# them; other rows get NA placeholders. The second midpoint is constrained to
# fall before t = 27 via midPoint_distance. Returns a flat named list.
double_sigmoidal_parameters <- function(true_model, run_no)
{
# progress marker for long runs
cat(paste0("\n","run no: ", run_no,"\n"))
if(true_model == "DSM")
{
flag_dsm = 0
while(flag_dsm == 0)
{
final_asymptoteIntensity_ratio_DSM <- runif(n = 1, min = 0, max = 0.85)
maximum_DSM <- runif(n = 1, min = 0.3, max = 20)
# tan(U[0, pi/2)) gives slopes spanning (0, Inf)
slope1_param_DSM <- tan(runif(n = 1, min = 0.0, max = pi/2))
midpoint1_param_DSM <- runif(n = 1, min = 3, max = 26)
slope2_param_DSM <- tan(runif(n = 1, min = 0.0, max = pi/2))
midPoint_distance_param_DSM = runif(n = 1, min = 1, max = 27 - midpoint1_param_DSM)
flag_dsm <- doublesigmoidalCheck(final_asymptoteIntensity_ratio_DSM, maximum_DSM,
                                 slope1_param_DSM, midpoint1_param_DSM,
                                 slope2_param_DSM, midPoint_distance_param_DSM)
}
output <- list(final_asymptoteIntensity_ratio_DSM = final_asymptoteIntensity_ratio_DSM,
               maximum_DSM = maximum_DSM,
               slope1_param_DSM = slope1_param_DSM,
               midpoint1_param_DSM = midpoint1_param_DSM,
               slope2_param_DSM = slope2_param_DSM,
               midPoint_distance_param_DSM = midPoint_distance_param_DSM)
}
else
{
output <- list(final_asymptoteIntensity_ratio_DSM = NA,
               maximum_DSM = NA,
               slope1_param_DSM = NA,
               midpoint1_param_DSM = NA,
               slope2_param_DSM = NA,
               midPoint_distance_param_DSM = NA)
}
output2 <- purrr::flatten(as.vector(output))
return(output2)
}
# Populate df with randomly drawn, validity-checked model parameters.
# Each worker handles the rows whose par_work index matches its counter;
# partial results are re-assembled with bind_rows and ordered by run_no.
# Cleanup vs. original: two stray browser() calls -- debugging leftovers that
# halt interactive runs -- were removed from this section.
df2_sm_list <- foreach(counter01 = 1 : ProcCount) %dopar%
{
  df %>%
    dplyr::filter(par_work == counter01) %>%
    group_by(true_model, noise_type, time_sampling,
             noise_parameter, distinct_model_parameters, distinct_runs, run_no, par_work) %>%
    dplyr::do(sm_param = sigmoidal_parameters(.$true_model, .$run_no)) -> df2_sm_sub
}
df2_sm_list %>%
  purrr::map_df(bind_rows) %>%
  dplyr::arrange(run_no)-> df2_sm
df2_dsm_list <- foreach(counter01 = 1 : ProcCount) %dopar%
{
  df %>%
    dplyr::filter(par_work == counter01) %>%
    group_by(true_model, noise_type, time_sampling,
             noise_parameter, distinct_model_parameters, distinct_runs, run_no, par_work) %>%
    dplyr::do(dsm_param = double_sigmoidal_parameters(.$true_model, .$run_no)) -> df2_dsm_sub
}
df2_dsm_list %>%
  purrr::map_df(bind_rows) %>%
  dplyr::arrange(run_no)-> df2_dsm
# Combine sigmoidal and double-sigmoidal parameter columns on the shared keys.
dplyr::left_join(df2_sm, df2_dsm) -> df2
# add time parameters to df
df2 %>%
  dplyr::mutate(n_samples = n_samples, t_min = t_min, t_max = t_max) -> df2
# Save the df
df <- df2
# Fix: the original wrote save(... = df, ...), abusing argument-name matching;
# name the object to save directly.
save(df, file = "distinct_runs_supplementary_fig.Rda", compression_level = 9)
###*****************************
|
d91df53445a2055358c85a1487939a32226e2442
|
5dab4ac68f4bef4ce0ff8ba361397f740311fabd
|
/requirements.R
|
d32d0f4a2f9930f69fe013775bf257ff5081d25a
|
[] |
no_license
|
zmalosh/MaloshSoccer
|
2ca2c234e5758413aab8111908f698288b7448cd
|
29d28587397a22171ac0ab378312d4d289afe83c
|
refs/heads/master
| 2020-06-16T23:06:47.331988
| 2019-10-08T03:44:46
| 2019-10-08T03:44:46
| 195,728,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 69
|
r
|
requirements.R
|
# Project-wide third-party dependencies, loaded for their side effects.
# NOTE(review): require() only warns and returns FALSE when a package is
# missing, so failures surface far from the cause; consider library(), which
# stops immediately instead.
require(tidyverse)
require(jsonlite)
require(httr)
require(lubridate)
|
1c35ba29ee17f3c7d30d55b6c16ca293538aec59
|
a3b22019c25f279739d04d99f3e716e88aaa1a73
|
/plot2.R
|
0837860d6a53143e4316b124b1af7b82e6d0cb35
|
[] |
no_license
|
joeswaminathan/ExData_Plotting1
|
95f3daa860c05b3be1ec2f329bb7d1e2fba576f9
|
b5c7b036d5dc19baf0b16c50d5e06d8101af7133
|
refs/heads/master
| 2020-05-29T11:44:35.973156
| 2015-11-08T15:25:53
| 2015-11-08T15:25:53
| 45,773,875
| 0
| 0
| null | 2015-11-08T09:12:54
| 2015-11-08T09:12:53
| null |
UTF-8
|
R
| false
| false
| 830
|
r
|
plot2.R
|
#' Plot Global Active Power vs. time for 2007-02-01 .. 2007-02-02 and write
#' it to figure/plot2.png.
#'
#' Fixes vs. original: the ~20 MB archive is downloaded and extracted only
#' when missing (the original re-downloaded on every call), the Date strings
#' are parsed once instead of three times, and the unused library(datasets) /
#' library(data.table) loads were removed (everything below is base R).
plot2 <- function()
{
    zipfile <- "household_power_consumption.zip"
    txtfile <- "household_power_consumption.txt"
    fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
    # Download/extract only once; repeated calls reuse the local copy.
    if (!file.exists(txtfile)) {
        if (!file.exists(zipfile)) {
            download.file(fileurl, destfile = zipfile)
        }
        unzip(zipfile)
    }
    DT <- read.csv2(txtfile, sep=";")
    # Keep the two days of interest; dates are stored as d/m/Y strings.
    # (!is.na guard mirrors subset()'s NA-drops-row behavior.)
    day <- strptime(DT$Date, "%d/%m/%Y")
    keep <- !is.na(day) &
        day >= strptime("01/02/2007", "%d/%m/%Y") &
        day <= strptime("02/02/2007", "%d/%m/%Y")
    DT <- DT[keep, ]
    DT <- within(DT, datetime <- strptime(paste(Date, Time), "%d/%m/%Y %T"))
    # "?" placeholders make the column non-numeric; coerce via character.
    DT <- within(DT, Global_active_power <- as.numeric(as.character(Global_active_power)))
    # NOTE(review): assumes a "figure/" directory exists in the working dir.
    png("figure/plot2.png")
    plot(DT$datetime, DT$Global_active_power, type = "l", xlab ="", ylab="Global Active Power (kilowatts)")
    dev.off()
}
|
8ebd7b8a614f93c928dda9ea71f569acfe80f8d9
|
2001bb778b621ff646b2951c3b9e6995f8d40c2f
|
/plot_stats_density.R
|
cc8bb587441cd5b0a20d2480a62d4fe21b986a0a
|
[] |
no_license
|
emelaas/landsat_ARD
|
15d8238ec04026634eb8a81207cbf33a7a682202
|
b2c4beb007349cb80d42be52e3df87e4b38645e7
|
refs/heads/master
| 2021-04-29T23:46:20.607859
| 2018-08-28T21:56:29
| 2018-08-28T21:56:29
| 121,563,656
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,252
|
r
|
plot_stats_density.R
|
# Density plots of per-pixel correlation coefficients, stratified by how many
# months the pixel's mean temperature stays below 5 C (0-2, 3-5, 6-8 months).
# Uses the raster package (raster, stack, getValues) loaded elsewhere.
below5 <- raster('/projectnb/modislc/projects/landsat_sentinel/ARD/tifs/below5_sum.tif')
below5_vals <- getValues(below5)
# Three climate strata by months-below-5C count.
w1 <- which(below5_vals<=2)
w2 <- which(below5_vals>2 & below5_vals<6)
w3 <- which(below5_vals>=6)
setwd('/projectnb/modislc/projects/landsat_sentinel/ARD/tifs')
# One "cor*" layer per phenology model; stacked so rows align with below5.
in_dirs_tile <- list.files(path=getwd(),
                           pattern=glob2rx("cor*tif"),full.names=T,include.dirs=T,recursive=TRUE)
s <- stack(in_dirs_tile)
s_vals <- getValues(s)
s_vals1 <- s_vals[w1,]
s_vals2 <- s_vals[w2,]
s_vals3 <- s_vals[w3,]
# Model names for the legend (order must match the stacked layer order).
colnames(s_vals) <- c('Chill','Photo','SW Mar 17','SW Jan1 (Mean AGDD)')
# One density object per model column, per stratum.
dens1 <- apply(s_vals1, 2, density, na.rm=TRUE)
dens2 <- apply(s_vals2, 2, density, na.rm=TRUE)
dens3 <- apply(s_vals3, 2, density, na.rm=TRUE)
#x11(h=4,w=11)
# h/w partially match pdf()'s height/width; the path goes to `file`.
pdf(h=4,w=11,'/projectnb/modislc/projects/landsat_sentinel/ARD/figures/NCC/cor_density_all.pdf')
par(mfrow=c(1,3))
plot(NA, xlim=c(0,1), ylim=range(sapply(dens1, "[", "y")),xlab='Correlation Coefficient',ylab='',
     main=expression('0-2 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens1, col=1:length(dens1),lwd=4)
plot(NA, xlim=c(0,1), ylim=range(sapply(dens2, "[", "y")),xlab='Correlation Coefficient',ylab='',
     main=expression('3-5 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens2, col=1:length(dens2),lwd=4)
plot(NA, xlim=c(0,1), ylim=range(sapply(dens3, "[", "y")),xlab='Correlation Coefficient',ylab='',
     main=expression('6-8 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens3, col=1:length(dens3),lwd=4)
legend("topleft", legend=colnames(s_vals), fill=1:length(dens3),bty='n')
dev.off()
setwd('/projectnb/modislc/projects/landsat_sentinel/ARD/tifs')
in_dirs_tile <- list.files(path=getwd(),
pattern=glob2rx("RMSE*tif"),full.names=T,include.dirs=T,recursive=TRUE)
s <- stack(in_dirs_tile)
s_vals <- getValues(s)
s_vals1 <- s_vals[w1,]
s_vals2 <- s_vals[w2,]
s_vals3 <- s_vals[w3,]
colnames(s_vals) <- c('Chill','Photo','SW Mar 17','SW Jan1 (Mean AGDD)')
dens1 <- apply(s_vals1, 2, density, na.rm=TRUE)
dens2 <- apply(s_vals2, 2, density, na.rm=TRUE)
dens3 <- apply(s_vals3, 2, density, na.rm=TRUE)
#x11(h=4,w=11)
pdf(h=4,w=11,'/projectnb/modislc/projects/landsat_sentinel/ARD/figures/NCC/rmse_density_all.pdf')
par(mfrow=c(1,3))
plot(NA, xlim=c(2,14), ylim=range(sapply(dens1, "[", "y")),xlab='RMSE',ylab='',
main=expression('0-2 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens1, col=1:length(dens1),lwd=4)
plot(NA, xlim=c(2,14), ylim=range(sapply(dens2, "[", "y")),xlab='RMSE',ylab='',
main=expression('3-5 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens2, col=1:length(dens2),lwd=4)
plot(NA, xlim=c(2,14), ylim=range(sapply(dens3, "[", "y")),xlab='RMSE',ylab='',
main=expression('6-8 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens3, col=1:length(dens3),lwd=4)
legend("topright", legend=colnames(s_vals), fill=1:length(dens3),bty='n')
dev.off()
setwd('/projectnb/modislc/projects/landsat_sentinel/ARD/tifs')
in_dirs_tile <- list.files(path=getwd(),
pattern=glob2rx("slope*tif"),full.names=T,include.dirs=T,recursive=TRUE)
s <- stack(in_dirs_tile)
s_vals <- getValues(s)
s_vals1 <- s_vals[w1,]
s_vals2 <- s_vals[w2,]
s_vals3 <- s_vals[w3,]
colnames(s_vals) <- c('Chill','Photo','SW Mar 17','SW Jan1 (Mean AGDD)')
dens1 <- apply(s_vals1, 2, density, na.rm=TRUE)
dens2 <- apply(s_vals2, 2, density, na.rm=TRUE)
dens3 <- apply(s_vals3, 2, density, na.rm=TRUE)
#x11(h=4,w=11)
pdf(h=4,w=11,'/projectnb/modislc/projects/landsat_sentinel/ARD/figures/NCC/slope_density_all.pdf')
par(mfrow=c(1,3))
plot(NA, xlim=c(0,2), ylim=range(sapply(dens1, "[", "y")),xlab='RMA Slope',ylab='',
main=expression('0-2 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens1, col=1:length(dens1),lwd=4)
abline(v=1,lty=2)
plot(NA, xlim=c(0,2), ylim=range(sapply(dens2, "[", "y")),xlab='RMA Slope',ylab='',
main=expression('3-5 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens2, col=1:length(dens2),lwd=4)
abline(v=1,lty=2)
plot(NA, xlim=c(0,2), ylim=range(sapply(dens3, "[", "y")),xlab='RMA Slope',ylab='',
main=expression('6-8 months with T'[avg]*' < 5'~degree*'C'))
mapply(lines, dens3, col=1:length(dens3),lwd=4)
abline(v=1,lty=2)
legend("topleft", legend=colnames(s_vals), fill=1:length(dens3),bty='n')
dev.off()
|
b69572f68c6dc90f257e9e26c7fb3c63715d6cad
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/jvnVaR/examples/jMCPri.Rd.R
|
8a9fea3cabb480efcea7add2525dd7b4ec873698
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 221
|
r
|
jMCPri.Rd.R
|
library(jvnVaR)

## Example for jMCPri: Monte-Carlo price simulation
## (alias: jMCPri; keywords: jVaR, jPrice, jReturn)

initial_price <- 100  # s0: starting price of the asset
drift <- 0.02         # mu: expected return per period
volatility <- 0.1     # sigma: standard deviation of returns
n_paths <- 1000       # m: number of simulated paths

jMCPri(initial_price, drift, volatility, n_paths)
|
09794a1546b29700320fe099af47022a4566e4c4
|
9319d9ef8bea2c2f4f3e95be2303a1d8994ea4b5
|
/plot3.R
|
a5bf04391462a7fe0cfbc9f28e35c75d0ba31973
|
[] |
no_license
|
mlk/ExData_Plotting1
|
25b7795610f5859d66ae477cabc38678fc9d6783
|
4b2a90fcb155b4cd0d90424ca059b68f4f6d03d2
|
refs/heads/master
| 2021-01-15T14:50:15.126813
| 2015-06-07T06:55:01
| 2015-06-07T06:55:01
| 36,880,589
| 0
| 0
| null | 2015-06-04T15:57:31
| 2015-06-04T15:57:31
| null |
UTF-8
|
R
| false
| false
| 608
|
r
|
plot3.R
|
# plot3: energy sub-metering channels 1-3 over time, written to plot3.png.
# Relies on load_data() (defined in load_data.R) to return a data frame
# with a date_time column and the three Sub_metering_* columns.
source("load_data.R")

household <- load_data()

meters <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
meter_cols <- c("Black", "Red", "Blue")

png("plot3.png", width = 480, height = 480, units = "px", bg = "transparent")
# First channel establishes the axes; y-range must cover all three channels.
plot(household$date_time, household[[meters[1]]], col = meter_cols[1],
     type = "l",
     ylim = range(household[[meters[1]]], household[[meters[2]]],
                  household[[meters[3]]]),
     xlab = "", ylab = "Energy sub metering")
lines(household$date_time, household[[meters[2]]], col = meter_cols[2])
lines(household$date_time, household[[meters[3]]], col = meter_cols[3])
legend("topright", lty = 1, legend = meters, cex = 0.8, col = meter_cols)
dev.off()
|
55f4aaa5e4642f30c50bf9a509259cda87ac936f
|
364d3c7f7b87095baadbbacefb69a0a7c107c87c
|
/man/get_binary_corr_mat.Rd
|
e64b2c3b503cd6ee9508d21304a0a44e74299ec7
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
SchlossLab/mikropml
|
32c9d0cd7351d667b2fc7522eabdcfb73e28d699
|
3dcc9bc0c49e0e65714fd9a1e0045a749ada76e8
|
refs/heads/main
| 2023-06-11T15:23:19.409104
| 2023-04-15T17:02:49
| 2023-04-15T17:02:49
| 226,981,416
| 41
| 12
|
NOASSERTION
| 2023-08-21T15:44:37
| 2019-12-09T22:37:38
|
R
|
UTF-8
|
R
| false
| true
| 1,136
|
rd
|
get_binary_corr_mat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corr_feats.R
\name{get_binary_corr_mat}
\alias{get_binary_corr_mat}
\title{Identify correlated features as a binary matrix}
\usage{
get_binary_corr_mat(
features,
corr_thresh = 1,
group_neg_corr = TRUE,
corr_method = "spearman"
)
}
\arguments{
\item{features}{a dataframe with each column as a feature for ML}
\item{corr_thresh}{For feature importance, group correlations
above or equal to \code{corr_thresh} (range \code{0} to \code{1}; default: \code{1}).}
\item{group_neg_corr}{Whether to group negatively correlated features
together (e.g. c(0,1) and c(1,0)).}
\item{corr_method}{correlation method. Options are the same as those supported
by \code{stats::cor}: spearman, pearson, kendall. (default: spearman)}
}
\value{
A binary matrix of correlated features
}
\description{
Identify correlated features as a binary matrix
}
\examples{
\dontrun{
features <- data.frame(
a = 1:3, b = 2:4, c = c(1, 0, 1),
d = (5:7), e = c(5, 1, 4)
)
get_binary_corr_mat(features)
}
}
\author{
Kelly Sovacool, \email{sovacool@umich.edu}
}
\keyword{internal}
|
b313cb6cf4fc84125ac67f0c20873ee7537dc78f
|
4fc55e270331cf46982bf9cb8ee8502a36ec2a7b
|
/prediction.R
|
2df8d6599466b919ccf7df9bfc9d079deb653d6a
|
[] |
no_license
|
rggeorge/cfa
|
686a89096de7145c263b86f0462d4ad5443c9c76
|
9a84c48fc00e60f6dd56d46df2ba75fd7fd332e8
|
refs/heads/master
| 2016-08-05T09:46:31.649821
| 2014-07-24T05:11:10
| 2014-07-24T05:11:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 198
|
r
|
prediction.R
|
library(caret)
# BUG FIX: ymd_hms() below comes from lubridate, which was never loaded
# (caret does not provide it), so the script errored at dat$date <- ...
library(lubridate)

# Chicago-style violations export for 2012; violation_date is parsed
# into a POSIXct column for downstream use.
dat <- read.csv("Violations-2012.csv")
dat$date <- ymd_hms(dat$violation_date)

# Return the violation timestamps for one violation type.
#   viol_type - a violation_type string, e.g. "Refuse Accumulation"
# Reads the script-level `dat` data frame.
pred <- function(viol_type){
  # BUG FIX: the original ignored `viol_type` and hard-coded
  # "Refuse Accumulation"; filter by the requested type instead.
  dat$date[dat$violation_type == viol_type]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.