blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5c204446db3b07a6e14ed50f58855e94f1c32b64
|
5f3198e36d7c42b0ed15e1e364a7bc3b3e00652e
|
/man/plotTemporal.Rd
|
f8e1c4bed32b406f4ddc3b750e5a4abe0245b93a
|
[] |
no_license
|
cran/scalpel
|
f6a2eeca848d9f793810754400de0059ccaa5bda
|
5ed5b98cfd326688c8bdabfdd8d487a83e807768
|
refs/heads/master
| 2021-06-26T13:05:04.209432
| 2021-02-03T04:30:02
| 2021-02-03T04:30:02
| 84,911,225
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,565
|
rd
|
plotTemporal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SCALPEL_graphics.R
\name{plotTemporal}
\alias{plotTemporal}
\title{Plot temporal components from Step 3 of SCALPEL.}
\usage{
plotTemporal(
scalpelOutput,
neuronsToDisplay = NULL,
colVec = NULL,
ylab = "",
title = "",
fileName = NULL,
lambdaIndex = NULL
)
}
\arguments{
\item{scalpelOutput}{An object returned by one of these SCALPEL functions:
\code{\link{scalpel}} or \code{\link{scalpelStep3}}.}
\item{neuronsToDisplay}{Vector giving which neurons' temporal components to plot. The indices refer to which rows
of \code{scalpelOutput$Zhat} to plot. By default, all components are plotted. Users may also specify \code{"kept"},
which will exclude all dictionary elements discarded using a previous call to \code{\link{reviewNeurons}} or \code{\link{reviewNeuronsInteractive}}.}
\item{colVec}{Vector of colors to use, which are chosen automatically if the default value of NULL is used.}
\item{ylab}{Label for the y-axis.}
\item{title}{Label for the title.}
\item{fileName}{If provided, the plot will be saved to the specified location.}
\item{lambdaIndex}{Optional advanced user argument: Index of lambda value for which results will be plotted. Default is
to use lambda value of \code{scalpelOutput$lambda} but specifying this will use the lambda value of \code{scalpelOutput$lambdaSeq[lambdaIndex]}.}
}
\value{
None
}
\description{
We plot the temporal components, displaying the estimated fluorescence over time for each spatial component,
which result from running Step 3 of SCALPEL.
}
\details{
If \code{lambdaIndex} is \code{NULL}, each temporal component is scaled by its largest value. If
\code{lambdaIndex} is specified, each temporal component is scaled by its largest value across all of the lambda values.
Temporal components that were zeroed out in the sparse group lasso are omitted from the plot.
}
\examples{
\dontrun{
### many of the functions in this package are interconnected so the
### easiest way to learn to use the package is by working through the vignette,
### which is available at ajpete.com/software
#assumes you have run the example for the "scalpel" function
#simplest example with default parameters:
plotTemporal(scalpelOutput = scalpelOutput)
#example with optional parameters:
#plot only two of the neurons and add a title
plotTemporal(scalpelOutput = scalpelOutput, neuronsToDisplay = c(1,2),
title = "First two neurons")
}
}
\seealso{
\code{\link{scalpelStep3}}, \code{\link{scalpel}}, \code{\link{plotResults}}
}
|
7e519e67fc6604d8decf7c98f5224165bf76cd3b
|
3b361820e93c9cbaa7e740b6edbf13c03a1cfcce
|
/man/DCML.Rd
|
faacad9feb2a18c366835eacd4b926e8c9773ef2
|
[] |
no_license
|
msalibian/RobStatTM
|
f8dabc88197be2460f1ba4c95b595e95ff53c1e9
|
d542c29816d50889f25649817e3ae5de08946141
|
refs/heads/master
| 2023-05-14T08:42:27.747789
| 2023-05-09T17:08:37
| 2023-05-09T17:08:37
| 83,067,068
| 14
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,287
|
rd
|
DCML.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DCML.R
\name{DCML}
\alias{DCML}
\title{DCML regression estimator}
\usage{
DCML(x, y, z, z0, control)
}
\arguments{
\item{x}{design matrix}
\item{y}{response vector}
\item{z}{robust fit as returned by \code{\link{MMPY}} or \code{\link{SMPY}}}
\item{z0}{least squares fit as returned by \code{\link{lm.fit}}}
\item{control}{a list of control parameters as returned by \code{\link{lmrobdet.control}}}
}
\value{
a list with the following components
\item{coefficients}{the vector of regression coefficients}
\item{cov}{the estimated covariance matrix of the DCML regression estimator}
\item{residuals}{the vector of regression residuals from the DCML fit}
\item{scale}{a robust residual (M-)scale estimate}
\item{t0}{the mixing proportion between the least squares and robust regression estimators}
}
\description{
This function computes the DCML regression estimator. This function is used
internally by \code{\link{lmrobdetDCML}}, and not meant to be used
directly.
}
\references{
\url{http://www.wiley.com/go/maronna/robust}
}
\seealso{
\code{\link{lmrobdetDCML}}, \code{\link{MMPY}}, \code{\link{SMPY}}
}
\author{
Victor Yohai, \email{victoryohai@gmail.com}, Matias Salibian-Barrera, \email{matias@stat.ubc.ca}
}
|
a448de5b762f50fe3b6b4f726e990f567f031cc9
|
6ca362731c11fc3c6db6ff2bb0852be4c3f9fba3
|
/code/nc_matrix.R
|
0dfe8722c69422d035181ad531cca4d151713fb0
|
[] |
no_license
|
willofbigdata/real_estate
|
50d817b2e85cbc3a80edfb3f0f25bcde7d93877e
|
47ee4c967bc25bf8ea650b36560dcef163799347
|
refs/heads/master
| 2021-01-22T03:49:19.779268
| 2017-07-30T23:18:56
| 2017-07-30T23:18:56
| 92,406,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,834
|
r
|
nc_matrix.R
|
# Functions
# Load all dependencies
# load_all_deps(): attach every add-on package this script relies on.
# NOTE(review): library() errors immediately if a package is missing, so a
# failure here halts the script before any analysis runs — that appears
# intentional (fail fast on missing dependencies).
load_all_deps <- function(){
library(pryr) # memory usage
library(ggplot2) # visualization
library(pscl) # model diagnostics
library(ROCR) # classifier performance evaluation
library(caret) # training and evaluating models
library(nnet) # multinomial logistic regression
library(GDAtools) # Gaussian Discriminant Analysis
library(geosphere) # geodesic distances (distm)
library(RMySQL) # MySQL connection
}
# Multiple classifications (more than 2 classes)
# apply_cat_id: map each entry of 'category' to the ID whose position in
# 'ids' matches the entry's position in 'cat_names'. Entries with no
# matching name keep the fill value 0.
#
# Args:
#   category:  vector (or matrix) of category labels, one per observation.
#   cat_names: vector of recognized category labels.
#   ids:       vector of IDs, parallel to cat_names (must be same length).
# Returns a vector of length(category): matched IDs, 0 where unmatched.
apply_cat_id <- function(category, cat_names, ids){
  n_obs <- length(c(category))
  n_names <- length(cat_names)
  n_ids <- length(ids)
  cats_vec <- rep(0, n_obs)
  if (min(n_names, n_ids) > 0 && n_names == n_ids) {
    for (i in seq_len(n_names)) {
      cats_vec[which(category == cat_names[i])] <- ids[i]
    }
  } else {
    # FIX: signal misuse via warning() rather than the original print(),
    # which is silent in logs and easy to miss in non-interactive runs.
    warning("The number of category names does not match the number of IDs, ",
            "or cat_names/ids is empty.", call. = FALSE)
  }
  return(cats_vec)
}
# get_unique_cats
# Return the sorted distinct category values of cats_vec, optionally
# pooled with additional categories supplied via 'extra'.
get_unique_cats <- function(cats_vec, extra = NULL){
  pooled <- if (is.null(extra)) cats_vec else c(cats_vec, extra)
  sort(unique(pooled))
}
# get_cats_mat
# Build an observations-by-categories indicator matrix: entry (i, j) is
# TRUE when observation i of cats_vec belongs to category j. 'extra'
# supplies categories that may be absent from cats_vec, so the column set
# can be held fixed across calls.
get_cats_mat <- function(cats_vec, extra = NULL){
  categories <- get_unique_cats(cats_vec, extra)
  obs <- as.matrix(cats_vec)
  indicator <- as.matrix(apply(obs, 1, function(value) value == categories))
  # With more than one category, apply() stacks its results as
  # categories-by-observations; transpose back to rows = observations.
  if (length(categories) > 1) {
    indicator <- t(indicator)
  }
  return(indicator)
}
# get_count_mat
# For each observation and each category, count how many OTHER
# observations belong to that category (the observation itself excluded).
# Returns a numeric n_obs-by-n_cats matrix.
get_count_mat <- function(cats_vec, extra = NULL){
  cats_mat <- get_cats_mat(cats_vec, extra)
  # Each row equals the per-category totals minus the observation's own
  # indicator row. Identical values to the original
  # (1 - diag(n)) %*% cats_mat product, but O(n * k) time and no n-by-n
  # intermediate matrix.
  totals <- colSums(cats_mat)
  count_mat <- rep(1, nrow(cats_mat)) %o% totals - cats_mat
  return(count_mat)
}
# get_dist_mat
# Pairwise geodesic distances in meters between two point sets.
# Both inputs must be data.frame-like with 'lng' and 'lat' columns.
# Rows of the result correspond to to_points, columns to from_points.
get_dist_mat <- function(from_points, to_points){
  # geosphere::distm expects longitude/latitude column order.
  origin <- data.matrix(from_points[, c("lng", "lat")])
  destination <- data.matrix(to_points[, c("lng", "lat")])
  return(distm(destination, origin))
}
# filter_by_lng_lat
# Keep only the rows of to_points whose longitude and latitude both lie
# within the given half-widths of from_point. A NULL range disables that
# dimension's filter.
#
# Args:
#   from_point: single reference point with $lng and $lat.
#   to_points:  data.frame of candidate points with lng/lat columns.
#   lng_range:  maximum absolute longitude difference, or NULL (no filter).
#   lat_range:  maximum absolute latitude difference, or NULL (no filter).
# Returns the filtered data.frame.
filter_by_lng_lat <- function(from_point, to_points, lng_range = NULL, lat_range = NULL){
  if (!is.null(lng_range)) {
    to_points <- subset(to_points, abs(lng - from_point$lng) <= lng_range)
  }
  if (!is.null(lat_range)) {
    to_points <- subset(to_points, abs(lat - from_point$lat) <= lat_range)
  }
  # BUG FIX: the return() was commented out in the original, so whenever
  # lat_range was NULL the function's value was the trailing if() —
  # invisible NULL — and callers (e.g. nrow() checks) failed.
  return(to_points)
}
# Obtain the matrix of neighbourhood characteristics
# by filtering out latitude and longitude first
#
# get_nc_mat: for every row of from_points, compute one row of
# distance-weighted category counts over the to_points that (a) do not
# share its lng or lat and (b) fall within the lng/lat window. Weights
# are exp(-r * geodesic distance); r = 0 means unweighted counts.
# Returns an nrow(from_points)-by-n_cats matrix with "nc_<cat>" columns.
# NOTE(review): filter_by_lng_lat as written returns NULL when lat_range
# is NULL (its return() is commented out), which makes the nrow() check
# below fail — confirm both ranges are always supplied by callers.
get_nc_mat <- function(from_points,to_points,lng_range=NULL,lat_range=NULL,r=1){
n_from_points <- nrow(from_points)
# fix the category set from BOTH point sets so every row has the same columns
all_cats <- get_unique_cats(from_points$cat_id,extra=to_points$cat_id)
n_cats <- length(all_cats)
# create a variable that will store and return the final result
nc_mat <- matrix(0,nrow=0,ncol=n_cats)
for(i in 1:n_from_points){
# filter out points with the same coordinates as from_point[i]
to_points_temp <- subset(to_points,lng != from_points$lng[i])
to_points_temp <- subset(to_points_temp,lat != from_points$lat[i])
# filter for points that are within a certain amount of longitude and latitude
to_points_temp <- filter_by_lng_lat(from_points[i,],to_points_temp,
lng_range,lat_range)
# if(nrow(to_points_temp) == 0){
# print(i)
# print(dim(to_points_temp))
# }
# compute neighbourhood characteristics and store the results
# account for the case where to_points_temp has no row
if(nrow(to_points_temp) > 0){
# if(any(from_points$lng[i] == to_points_temp$lng)){
# print(paste("same lng at",i))
# print(from_points[i,])
# print(to_points_temp[which(from_points$lng[i] == to_points_temp$lng),])
# print(" ")
# }
#
# if(any(from_points$lat[i] == to_points_temp$lat)){
# print(paste("same lat at",i))
# print(from_points[i,])
# print(to_points_temp[which(from_points$lat[i] == to_points_temp$lat),])
# print(" ")
# }
if(r == 0){
# r == 0: every neighbour gets weight 1 (plain counts)
dist_mat <- matrix(1,nrow=nrow(to_points_temp),ncol=1)
}
else{
dist_mat <- get_dist_mat(from_points[i,],to_points_temp)
dist_mat <- exp(-r*dist_mat)
}
cats_mat <- get_cats_mat(to_points_temp$cat_id,extra=all_cats)
# row of weighted counts: 1-by-n weights times n-by-n_cats indicators
nc_mat <- rbind(nc_mat,t(dist_mat) %*% cats_mat)
}
else{
# no neighbours in the window: row of zeros keeps dimensions aligned
nc_mat <- rbind(nc_mat,rep(0,n_cats))
}
# progress report
if(i %% 1000 == 0){
print(paste(i,"points done."))
}
}
# Name the columns "nc_<category>"
cats <- get_unique_cats(to_points$cat_id,extra=from_points$cat_id)
colnames(nc_mat) <- paste(c("nc"),rep(sapply(cats,deparse)),sep="_")
return(nc_mat)
}
# multiplot: for visualization
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Returns nothing useful; called for its side effect of drawing to the
# current graphics device.
# NOTE(review): the 'file' argument is accepted but never used in the body
# — confirm whether saving to file was intended. Also, require(grid) only
# warns (rather than errors) if grid is missing; the later grid.newpage()
# call would then fail.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
require(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
# single plot: no grid machinery needed
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
|
7da7a0e40a73d51922ff53eb23bdb114e08880fc
|
dab62b87e34527f27179b2342268bcd86fddf0ee
|
/karban-sage-analysis.R
|
122bc6f65b71da0ab5642c0c38db521dfdc575de
|
[] |
no_license
|
wcwetzel/Karban-sage
|
81304878c64a28d0d3dfaa6c62f9debf4a68924e
|
4fe4a6e3f17710dd4e5b23922523ba6e8956f268
|
refs/heads/master
| 2021-01-22T06:48:54.838406
| 2012-08-02T14:14:52
| 2012-08-02T14:14:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,359
|
r
|
karban-sage-analysis.R
|
# Analysis of Karban's damage experiment
# 25 May 2012
#
# Section: package loading, data import, coordinate centering, spatial
# block assignment, derived response variables, and exploratory plots.
library(gstat)
library(sp)
library(lme4)
library(bbmle)
library(spdep)
#library(rgl)
library(arm)
library(rethinking)
library(RANN)
library(pgirmess)
# NOTE(review): hard-coded absolute path — this script only runs on the
# author's machine as written.
d = read.csv("/Users/will/Documents/Analysis for colleagues/Karban - Damage Neighbors/Sage99plants2011.csv")
# Center X AND Y!!!
d$y = d$y - mean(d$y)
d$x = d$x - mean(d$x)
# promote d to a sp SpatialPointsDataFrame, then copy coords back as columns
coordinates(d) = c('x', 'y')
d$x = coordinates(d)[,1]
d$y = coordinates(d)[,2]
# Spatial blocks with 3 blocks
# block 1 is lower right, 2 is upper right, 3 is left
d$spblock[d$receiver > 73] = 1
d$spblock[is.na(d$spblock)][d$x[is.na(d$spblock)] > 0] = 2
d$spblock[is.na(d$spblock)][d$x[is.na(d$spblock)] < 0] = 3
plot(y ~ x, data=d, type='n')
text(y ~ x, data=d, labels=d$spblock)
# Spatial blocks with 2 blocks
# block 1 is right side (lower herb), 2 is left side (high herb)
d$spblock2[d$receiver > 73] = 1
d$spblock2[is.na(d$spblock2)][d$x[is.na(d$spblock2)] > 0] = 1
d$spblock2[is.na(d$spblock2)][d$x[is.na(d$spblock2)] < 0] = 2
plot(y ~ x, data=d, type='n')
text(y~x, data=d, labels=d$spblock2)
hist(d$damage[d$spblock2==1], xlim=c(0,100), border='blue')
hist(d$damage[d$spblock2==2], xlim=c(0,100), add=TRUE, lty=2,
breaks=9, border='red')
# derived responses: healthy %, damage proportion, arcsine-sqrt transform
d$healthy = 100 - d$damage
d$damage.prop = d$damage/100
d$asdamage = asin(sqrt(d$damage.prop))
hist(d$asdamage)
hist(log(d$damage.prop))
hist(log(d$damage))
# some plotting to explore spatial patterns
plot(y ~ x, data=d, cex=log(d$damage), asp=1)
plot(y ~ x, data=d, cex=d$damage/50, asp=1)
plot(y ~ x, data=d, cex=d$r+1, asp=1)
# NOTE(review): plot3d is from rgl, but library(rgl) is commented out
# above — these six calls will fail unless rgl is attached manually.
plot3d(d$damage ~ d$x + d$y, type='s', size=0.5)
plot3d(d$damage ~ d$x + d$y, type='h', add=TRUE)
plot3d(d$damage ~ d$x + d$r, type='s', size=0.5)
plot3d(d$damage ~ d$x + d$r, type='h', add=TRUE)
plot3d(d$r ~ d$x + d$y, type='s', size=0.5)
plot3d(d$r ~ d$x + d$y, type='h', add=TRUE)
plot(damage ~ x, data=d, pch=as.character(d$spblock2))
plot(damage ~ r, data=d, pch=as.character(d$spblock2))
plot(asdamage ~ r, data=d)
plot(damage ~ spblock2, data=d)
spplot(d, 'damage', do.log=TRUE)
bubble(d, 'damage')
# distance matrix
dists = as.matrix(dist(coordinates(d)))
# autocovariate of damage
# weight for neighbour j of plant i is 1 / (dist_ij - 0.5); NOTE(review):
# this is negative (or near-singular) for neighbours closer than ~0.5 m —
# confirm the -0.5 offset is intentional.
d$autodam = numeric(65)
for(i in 1:nrow(d)){
d$autodam[i] = sum( d$damage[-i] / (-0.5 + dists[i,-i]) )
}
plot(damage ~ autodam, data=d)
# NOTE(review): corr() is not provided by any library() call above
# (base R has cor()) — presumably boot::corr; verify before running.
corr(cbind(d$damage, d$autodam))
summary(lm(damage ~ autodam, data=d))
# Maybe I don't care about spatial autocorrelation.
# Maybe there is no spatial autocorrelation once I take out the
# effect of the x dimension?
# So maybe I just need to include x in a model w/o autocorr.
#
# Section: candidate binomial GLM(M)s k1-k29 for damage out of 100 leaves,
# varying fixed effects (x, y, r, autodam, local G) and random-effect
# structures on receiver, then AICc/anova/DIC comparisons.
k1 = lm(log(damage) ~ I(x^2) + I(y^2) + I(x*y) + x + y, data=d)
k2 = glm(cbind(d$damage, d$healthy) ~ 1, family='binomial')
k3 = glm(cbind(d$damage, d$healthy) ~
I(x^2) + I(y^2) + I(x*y) + x + y, data=d, family='binomial')
k4 = glmer(cbind(d$damage, d$healthy) ~
I(x^2) + I(y^2) + I(x*y) + x + y + (1|receiver), data=d,
family='binomial', REML=FALSE)
k5 = glmer(cbind(d$damage, d$healthy) ~
I(x^2) + I(y^2) + I(x*y) + x + y + r + (1|receiver), data=d,
family='binomial', REML=FALSE)
k6 = glmer(cbind(d$damage, d$healthy) ~
x + r + (1|receiver), data=d,
family='binomial', REML=FALSE)
k7 = glmer(cbind(d$damage, d$healthy) ~
x + (1|receiver), data=d,
family='binomial', REML=FALSE)
k8 = glmer(cbind(d$damage, d$healthy) ~
r + (x|receiver), data=d,
family='binomial', REML=FALSE)
k9 = glmer(cbind(d$damage, d$healthy) ~
(x|receiver), data=d,
family='binomial', REML=FALSE)
k10 = glmer(cbind(d$damage, d$healthy) ~
r + (0+x|receiver), data=d,
family='binomial', REML=FALSE)
k11 = glmer(cbind(d$damage, d$healthy) ~
r + (1|receiver), data=d,
family='binomial', REML=FALSE)
k12 = glmer(cbind(d$damage, d$healthy) ~
(1|receiver), data=d,
family='binomial', REML=FALSE)
k13 = glmer(cbind(d$damage, d$healthy) ~
(1|receiver) + (x), data=d,
family='binomial', REML=FALSE)
# NOTE(review): rlistW is not created until the "neighbors and weight
# lists" section far below — running the script top-to-bottom fails here.
local = localG(d$damage, rlistW)
k14 = glmer(cbind(d$damage, d$healthy) ~
as.vector(local) + (1|receiver), data=d,
family='binomial', REML=FALSE)
k15 = glmer(cbind(d$damage, d$healthy) ~
r + as.vector(local) + (1|receiver), data=d,
family='binomial', REML=FALSE)
k16 = glmer(cbind(d$damage, d$healthy) ~
(1 + x|receiver), data=d,
family='binomial', REML=FALSE)
k17 = glmer(cbind(d$damage, d$healthy) ~
r + (1 + x|receiver), data=d,
family='binomial', REML=FALSE)
k18 = glmer(cbind(d$damage, d$healthy) ~
x + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
k19 = glmer(cbind(d$damage, d$healthy) ~
x + r + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
k20 = glmer(cbind(d$damage, d$healthy) ~
(x|receiver) + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
k21 = glmer(cbind(d$damage, d$healthy) ~
r + (x|receiver) + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
# note: k22/k23 are defined in the next section; numbering here skips to k24
k24 = glmer(cbind(d$damage, d$healthy) ~
(1 + r|receiver), data=d,
family='binomial', REML=FALSE)
k25 = glmer(cbind(d$damage, d$healthy) ~
r + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
k26 = glmer(cbind(d$damage, d$healthy) ~
(1 + autodam + r|receiver), data=d,
family='binomial', REML=FALSE)
k27 = glmer(cbind(d$damage, d$healthy) ~
r + (1 + autodam + r|receiver), data=d,
family='binomial', REML=FALSE)
k28 = glmer(cbind(d$damage, d$healthy) ~
autodam + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
k29 = glmer(cbind(d$damage, d$healthy) ~
r + autodam + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
# model comparisons: AICc tables, likelihood-ratio anovas, DIC differences
AICctab(k6, k7, k11, k12, nobs=65)
anova(k6, k7)
# DIC differences relative to k6 (the first line is the 0 baseline)
extractDIC(k6) - extractDIC(k6)
extractDIC(k7) - extractDIC(k6)
anova(k14, k15)
AICctab(k8, k9, nobs=65)
anova(k8, k9)
AICctab(k11, k12, nobs=65)
anova(k11, k12)
AICctab(k16, k17, nobs=65)
anova(k16, k17)
AICctab(k18, k19, nobs=65)
AICtab(k18, k19)
anova(k18, k19)
AICctab(k20, k21, nobs=65)
anova(k20, k21)
AICctab(k24, k25, nobs=65)
anova(k24, k25)
AICctab(k26, k27, nobs=65)
anova(k26,k27)
AICctab(k28, k29, nobs=65)
anova(k28,k29)
AICctab(k4,k5, k6, k7, k8, k9, k10, nobs=nrow(d))
plot(damage ~ r, data=d[-4,])
plot(resid(k5) ~ r, data=d)
plot(y ~ x, data=d, cex= (resid(k11)+1)/ (max(resid(k11) + 1)) *3 )
#############
# random and fixed effect of x, McElreath's suggestion
#
# Section: models k22-k37 exploring fixed/random effects of x and r, plus
# the "final" x0/xx/xr/xxr/xxrint model set compared by AICc weights.
k22 = glmer(cbind(d$damage, d$healthy) ~
x + (1 + x|receiver), data=d,
family='binomial', REML=FALSE)
k23 = glmer(cbind(d$damage, d$healthy) ~
r + x + (1 + x|receiver), data=d,
family='binomial', REML=FALSE)
k23.5 = glmer(cbind(d$damage, d$healthy) ~
r + x + r:x + (1 + x|receiver), data=d,
family='binomial', REML=FALSE)
extractDIC(k22) - extractDIC(k23.5)
# fixed effect of x
k30 = glmer(cbind(d$damage, d$healthy) ~
x + (1|receiver), data=d,
family='binomial', REML=FALSE)
k31 = glmer(cbind(d$damage, d$healthy) ~
r + x + (1|receiver), data=d,
family='binomial', REML=FALSE)
k31.5 = glmer(cbind(d$damage, d$healthy) ~
r + x + r:x + (1|receiver), data=d,
family='binomial', REML=FALSE)
# random slopes for r and random and fixed x
k32 = glmer(cbind(d$damage, d$healthy) ~
x + (1 + x + r|receiver), data=d,
family='binomial', REML=FALSE)
k33 = glmer(cbind(d$damage, d$healthy) ~
r + x + (1 + x + r|receiver), data=d,
family='binomial', REML=FALSE)
# random slopes for r and fixed effect for x
k34 = glmer(cbind(d$damage, d$healthy) ~
x + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
k35 = glmer(cbind(d$damage, d$healthy) ~
r + x + (1 + r|receiver), data=d,
family='binomial', REML=FALSE)
# no effect of x
k36 = glmer(cbind(d$damage, d$healthy) ~
(1|receiver), data=d,
family='binomial', REML=FALSE)
k37 = glmer(cbind(d$damage, d$healthy) ~
r + (1|receiver), data=d,
family='binomial', REML=FALSE)
AICctab(k22, k23, k23.5, nobs=65)
BICtab(k22, k23, nobs=65)
anova(k22,k23)
anova(k22, k23.5)
extractDIC(k22)
extractDIC(k23)
AICctab(k30, k31, k31.5, nobs=65)
# NOTE(review): this BICtab repeats k22/k23 — given the AICctab on the
# line above, k30/k31 may have been intended; verify.
BICtab(k22, k23, nobs=65)
anova(k30,k31)
extractDIC(k30)
extractDIC(k31)
AICctab(k32, k33, nobs=65)
anova(k32,k33)
extractDIC(k32)
extractDIC(k33)
AICctab(k34, k35, nobs=65)
anova(k34,k35)
extractDIC(k34)
extractDIC(k35)
BICtab(k32,k33, k22,k23, nobs=65)
extractDIC(k33)
extractDIC(k23)
AICctab(k36, k37, nobs=65)
anova(k36,k37)
extractDIC(k36)
extractDIC(k37)
AICctab(k22, k23, k23.5,k30, k31, k31.5, nobs=65)
#---------- final models with x instead of sp block --------------------------#
# fixed effect of x, no random effect of x
x0 = glmer(cbind(d$damage, d$healthy) ~
(1|receiver), data=d,
family='binomial', REML=FALSE)
xx = glmer(cbind(d$damage, d$healthy) ~
x + (1|receiver), data=d,
family='binomial', REML=FALSE)
xr = glmer(cbind(d$damage, d$healthy) ~
r + (1|receiver), data=d,
family='binomial', REML=FALSE)
xxr = glmer(cbind(d$damage, d$healthy) ~
r + x + (1|receiver), data=d,
family='binomial', REML=FALSE)
xxrint = glmer(cbind(d$damage, d$healthy) ~
r + x + r:x + (1|receiver), data=d,
family='binomial', REML=FALSE)
AICctab(x0, xx, xr, xxr, xxrint, weights=TRUE, nobs=65)
#------------ models with 3 spatial blocks (spblock) ------------#
# Section: models using the 3-level and 2-level spatial blocking factors,
# nested random effects (spblock/receiver), block x r interactions, and a
# posterior-predictive-style simulation check of the binomial GLMM.
d$spblock = as.factor(d$spblock)
d$receiver = as.factor(d$receiver)
sp0 = glmer(cbind(d$damage, d$healthy) ~
(1| spblock / receiver), data=d,
family='binomial', REML=FALSE)
sp1 = glmer(cbind(d$damage, d$healthy) ~
r + (1| spblock / receiver), data=d,
family='binomial', REML=FALSE)
AICctab(sp0, sp1, nobs=65)
anova(sp0, sp1)
sp0r = glmer(cbind(d$damage, d$healthy) ~
(1 + r| spblock / receiver), data=d,
family='binomial', REML=FALSE)
sp1r = glmer(cbind(d$damage, d$healthy) ~
r + (1 + r| spblock / receiver), data=d,
family='binomial', REML=FALSE)
AICctab(sp0r, sp1r, nobs=65)
anova(sp0r, sp1r)
sp0int = glmer(cbind(d$damage, d$healthy) ~
(1| spblock / receiver), data=d,
family='binomial', REML=FALSE)
sp0.5int = glmer(cbind(d$damage, d$healthy) ~
spblock + (1| spblock / receiver), data=d,
family='binomial', REML=FALSE)
sp1int = glmer(cbind(d$damage, d$healthy) ~
r + spblock + r:spblock + (1| spblock / receiver), data=d,
family='binomial', REML=FALSE)
AICctab(sp0int, sp1int, sp0.5int, nobs=65)
anova(sp0.5int, sp1int)
#------ explore effects of spatial blocks -----------------#
plot(damage.prop ~ r, data=d, pch=as.character(d$spblock),
col=d$spblock)
lines(loess.smooth(d$r[d$spblock==1], d$damage.prop[d$spblock==1]))
lines(loess.smooth(d$r[d$spblock==2], d$damage.prop[d$spblock==2]),
col='red')
lines(loess.smooth(d$r[d$spblock==3], d$damage.prop[d$spblock==3]),
col='green')
abline(lm(damage.prop ~ r, data=d, subset=d$spblock==1), lty=2)
abline(lm(damage.prop ~ r, data=d, subset=d$spblock==2), lty=2,
col='red')
abline(lm(damage.prop ~ r, data=d, subset=d$spblock==3), lty=2,
col='green')
summary(lm(damage.prop ~ r, data=d, subset=d$spblock==1))
#------------ models with 2 spatial blocks (spblock) ------------#
# NOTE(review): sp0/sp1/sp0r/sp1r/sp0int/sp0.5int/sp1int below silently
# overwrite the 3-block fits of the same names above.
d$spblock2 = as.factor(d$spblock2)
d$receiver = as.factor(d$receiver)
sp0 = glmer(cbind(d$damage, d$healthy) ~
(1| spblock2 / receiver), data=d,
family='binomial', REML=FALSE)
sp1 = glmer(cbind(d$damage, d$healthy) ~
r + (1| spblock2 / receiver), data=d,
family='binomial', REML=FALSE)
AICctab(sp0, sp1, nobs=65)
anova(sp0, sp1)
sp0r = glmer(cbind(d$damage, d$healthy) ~
(1 + r| spblock2 / receiver), data=d,
family='binomial', REML=FALSE)
sp1r = glmer(cbind(d$damage, d$healthy) ~
r + (1 + r| spblock2 / receiver), data=d,
family='binomial', REML=FALSE)
AICctab(sp0r, sp1r, nobs=65)
anova(sp0r, sp1r)
sp0int = glmer(cbind(d$damage, d$healthy) ~
(1| spblock2 / receiver), data=d,
family='binomial', REML=FALSE)
sp0.5int = glmer(cbind(d$damage, d$healthy) ~
spblock2 + (1| spblock2 / receiver), data=d,
family='binomial', REML=FALSE)
sp1int = glmer(cbind(d$damage, d$healthy) ~
r + spblock2 + r:spblock2 + (1| spblock2 / receiver), data=d,
family='binomial', REML=FALSE)
AICctab(sp0int, sp1int, sp0.5int, nobs=65)
anova(sp0.5int, sp1int)
sp0intx = glmer(cbind(d$damage, d$healthy) ~
(1|receiver), data=d,
family='binomial', REML=FALSE)
sp0.5intx = glmer(cbind(d$damage, d$healthy) ~
spblock2 + (1|receiver), data=d,
family='binomial', REML=FALSE)
spnoint2x = glmer(cbind(d$damage, d$healthy) ~
r + spblock2 + (1|receiver), data=d,
family='binomial', REML=FALSE)
spnoint1x = glmer(cbind(d$damage, d$healthy) ~
r + (1|receiver), data=d,
family='binomial', REML=FALSE)
sp1intx = glmer(cbind(d$damage, d$healthy) ~
r + spblock2 + r:spblock2 + (1|receiver), data=d,
family='binomial', REML=FALSE)
sp1intxmeans = glmer(cbind(d$damage, d$healthy) ~
r + spblock2 + r:spblock2 + (1|receiver) - 1, data=d,
family='binomial', REML=FALSE)
AICctab(sp0intx, sp1intx, sp0.5intx, spnoint1x, spnoint2x, nobs=65, weights=TRUE)
AICtab(sp0intx, sp1intx, sp0.5intx, spnoint1x, spnoint2x, weights=TRUE)
# DIC differences relative to sp1intx (first line is the 0 baseline)
extractDIC(sp1intx) - extractDIC(sp1intx)
extractDIC(sp0.5intx) - extractDIC(sp1intx)
extractDIC(spnoint2x) - extractDIC(sp1intx)
extractDIC(spnoint1x) - extractDIC(sp1intx)
extractDIC(sp0intx) - extractDIC(sp1intx)
AICctab(sp0intx, sp1intx, sp0.5intx, spnoint1x, spnoint2x, x0, xx, xr, xxr, xxrint, nobs=65, weights=TRUE)
AICctab(sp0intx, sp1intx, sp0.5intx, sp0int, sp1int, sp0.5int, nobs=65, weights=TRUE)
BICtab(sp0intx, sp1intx, sp0.5intx, sp0int, sp1int, sp0.5int, nobs=65)
anova(sp0.5intx, sp1intx)
anova(sp0intx, sp1intx, sp0.5intx, spnoint1x, spnoint2x, nobs=65)
extractDIC(sp0.5intx) - extractDIC(sp1intx)
AICctab(sp0intx, sp1intx, sp0.5intx, spnoint1x, spnoint2x, k22, k23, k23.5,
k30, k31, k31.5, nobs=65, weights=TRUE)
# simulate counts using ran eff estimate from sp1intx
# does it match the observed distribution?
par(mfrow=c(3,2))
hist(d$damage, xlim=c(0,100))
hist(rbinom(65, 100, mean(d$damage/100)), xlim=c(0,100))
# four replicate simulations with receiver-level logit-normal noise
# (sd = 1.0404, presumably the fitted random-effect SD — verify)
herbsim = rbinom(65, 100,
plogis( rnorm(65, mean = qlogis(mean(d$damage/100)), sd = 1.0404) ) )
hist(herbsim, xlim=c(0,100))
herbsim = rbinom(65, 100,
plogis( rnorm(65, mean = qlogis(mean(d$damage/100)), sd = 1.0404) ) )
hist(herbsim, xlim=c(0,100))
herbsim = rbinom(65, 100,
plogis( rnorm(65, mean = qlogis(mean(d$damage/100)), sd = 1.0404) ) )
hist(herbsim, xlim=c(0,100))
herbsim = rbinom(65, 100,
plogis( rnorm(65, mean = qlogis(mean(d$damage/100)), sd = 1.0404) ) )
hist(herbsim, xlim=c(0,100))
# looks close enough, mode could be closer to zero.
#------ explore effects of 2 spatial blocks -----------------#
# Section: per-block smooths/regressions, posterior prediction plot for
# the sp1intx interaction model, variograms, and construction of the
# spatial weights lists (rlistW, nelistW) used throughout.
plot(damage.prop ~ r, data=d, pch=as.character(d$spblock2),
col=d$spblock2)
lines(loess.smooth(d$r[d$spblock2==1], d$damage.prop[d$spblock2==1]))
lines(loess.smooth(d$r[d$spblock2==2], d$damage.prop[d$spblock2==2]),
col='red')
abline(lm(damage.prop ~ r, data=d, subset=d$spblock2==1), lty=2)
abline(lm(damage.prop ~ r, data=d, subset=d$spblock2==2), lty=2,
col='red')
summary(lm(damage.prop ~ r, data=d, subset=d$spblock2==1))
summary(lm(damage.prop ~ r, data=d, subset=d$spblock2==2))
#----------- plot prediction of spblock2 interaction x model-------#
# sample.naive.posterior/HPDI are from the rethinking package; columns of
# post.sp1intx are (intercept, r, block2, r:block2) per the sp1intx formula.
post.sp1intx = sample.naive.posterior(sp1intx)
newr = seq(-0.6, 0.8, length=100)
#newblock = sort(rep(1:2, 100))
pred.sp1intx.block1 = sapply(newr,
function(z) plogis(mean(post.sp1intx[,1] + post.sp1intx[,2] * z)))
ci.sp1intx.block1 = sapply(newr,
function(z) plogis(HPDI(post.sp1intx[,1] + post.sp1intx[,2] * z)))
pred.sp1intx.block2 = sapply(newr,
function(z) plogis(mean(post.sp1intx[,1] + post.sp1intx[,2] * z +
post.sp1intx[,3] + post.sp1intx[,4] * z)))
ci.sp1intx.block2 = sapply(newr,
function(z) plogis(HPDI(post.sp1intx[,1] + post.sp1intx[,2] * z +
post.sp1intx[,3] + post.sp1intx[,4] * z)))
plot(damage.prop ~ r, data=d, pch=20,col=d$spblock2,
ylab='Rate of herbivory on receiver',
xlab='Relatedness between volatile donor and receiver',
las=1)
lines(newr, pred.sp1intx.block1)
lines(newr, ci.sp1intx.block1[1,], lty=2)
lines(newr, ci.sp1intx.block1[2,], lty=2)
lines(newr, pred.sp1intx.block2, col='red')
lines(newr, ci.sp1intx.block2[1,], lty=2, col='red')
lines(newr, ci.sp1intx.block2[2,], lty=2, col='red')
legend(-0.15, 1, pch=20, lty=1, col=c(1,2),
legend=c('East block, low herbivory', 'West block, high herbivory'))
#----------- exploratory variogram analysis ----------------#
hscat(d$damage ~ 1, d, (0:9) * 5)
hscat(log(d$damage) ~ 1, d, (0:9) * 5)
plot(variogram(d$damage ~ 1, d, cloud=FALSE))
plot(variogram(log(d$damage) ~ 1, d, cloud=FALSE))
plot(d$x, d$y, cex=resid(k11)+1)
plot(resid(k11)+1 ~ d$x)
#------ neighbors and weight lists -----------#
# turn x-y coordinates into a square distance matrix
dists = as.matrix(dist(cbind(d$x,d$y), diag=TRUE, upper=TRUE))
### inverse distance weights ###
curve(1/(1+x), 0, max(dists))
rW = 1/(1+dists)
diag(rW) = 0 # ack, I think I've always had diag of weights matrix
# as 1, but I think it needs to be zero.
#plot(nelistW, coordinates(d))
rlistW = mat2listw(rW, row.names=d$receiver)
summary(rlistW)
### negative exponential ###
# convert distance matrix into weights matrix
# with negative exp decline in weight with distance
curve(exp(-x * 0.5), 0, max(dists))
nexpW = exp(-dists * 0.5)
diag(nexpW) = 0
# mat2listw converts a square spatial weights matrix into
# a spdep weights list object
nelistW = mat2listw(nexpW, row.names=d$receiver)
summary(nelistW)
### nearest neighbors ###
# NOTE(review): bare function name — this just prints the body of
# spdep::nb2listw; probably a leftover placeholder.
nb2listw
#------------- Moran's I -----------------------------#
# sim data with no spat autocorr
uncorr = rbinom(65, 100, mean(d$damage/100))
moran.test(d$damage, listw=nelistW)
moranI.damage.rW = moran.test(d$damage, listw=rlistW)
moranI.damage.rW$statistic
moran.test(uncorr, listw=nelistW)
moran.test(uncorr, listw=rlistW)
# bootstrap it
nboots = 999
moranIboot = numeric(nboots)
for(i in 1:nboots){
bootdata = sample(d$damage, length(d$damage), replace=TRUE)
mt = moran.test(bootdata, listw = rlistW)
moranIboot[i] = mt$statistic
}
hist(moranIboot, xlim=c(min(c(moranIboot, moranI.damage.rW$statistic)),
max(c(moranIboot, moranI.damage.rW$statistic))))
abline(v=moranI.damage.rW$statistic)
ci = HPDI(moranIboot)
abline(v=ci, lty=2, col='grey')
#------------------ Geary's C ------------------------#
gearyC.damage.rW = geary.test(d$damage, listw=rlistW)
geary.test(d$damage, listw=nelistW)
geary.test(uncorr, listw=nelistW)
# bootstrap it
nboots = 99
gearyCboot = numeric(nboots)
for(i in 1:nboots){
bootdata = sample(d$damage, length(d$damage), replace=TRUE)
mt = geary.test(bootdata, listw = rlistW)
gearyCboot[i] = mt$statistic
}
hist(gearyCboot, xlim=c(min(c(gearyCboot, gearyC.damage.rW$statistic)),
max(c(gearyCboot, gearyC.damage.rW$statistic))))
abline(v=gearyC.damage.rW$statistic)
ci = HPDI(gearyCboot)
abline(v=ci, lty=2, col='grey')
#------------------ Geary's C log(d$damage) ------------------------#
gearyC.log.damage.rW = geary.test(log(d$damage), listw=rlistW)
geary.test(log(d$damage), listw=nelistW)
geary.test(log(uncorr), listw=rlistW)
# bootstrap it
nboots = 99
gearyCboot = numeric(nboots)
for(i in 1:nboots){
bootdata = sample(log(d$damage), length(d$damage), replace=TRUE)
mt = geary.test(bootdata, listw = rlistW)
gearyCboot[i] = mt$statistic
}
hist(gearyCboot, xlim=c(min(c(gearyCboot, gearyC.log.damage.rW$statistic)),
max(c(gearyCboot, gearyC.log.damage.rW$statistic))))
abline(v=gearyC.log.damage.rW$statistic)
ci = HPDI(gearyCboot)
abline(v=ci, lty=2, col='grey')
#------------------ Global G ------------------------#
globalG.damage.rW = globalG.test(d$damage, listw=rlistW)
geary.test(d$damage, listw=nelistW)
geary.test(uncorr, listw=nelistW)
# bootstrap it
nboots = 99
globalGboot = numeric(nboots)
for(i in 1:nboots){
bootdata = sample(d$damage, length(d$damage), replace=TRUE)
mt = globalG.test(bootdata, listw = rlistW)
globalGboot[i] = mt$statistic
}
hist(globalGboot, xlim=c(min(c(globalGboot, globalG.damage.rW$statistic)),
max(c(globalGboot, globalG.damage.rW$statistic))))
abline(v=globalG.damage.rW$statistic)
ci = HPDI(globalGboot)
abline(v=ci, lty=2, col='grey')
#------------------ Global G log(d$damage) ------------------------#
globalG.damage.rW = globalG.test(log(d$damage), listw=rlistW)
globalG.test(log(d$damage), listw=nelistW)
# bootstrap it
nboots = 99
globalGboot = numeric(nboots)
for(i in 1:nboots){
bootdata = sample(log(d$damage), length(d$damage), replace=TRUE)
mt = globalG.test(bootdata, listw = rlistW)
globalGboot[i] = mt$statistic
}
hist(globalGboot, xlim=c(min(c(globalGboot, globalG.damage.rW$statistic)),
max(c(globalGboot, globalG.damage.rW$statistic))))
abline(v=globalG.damage.rW$statistic)
ci = HPDI(globalGboot)
abline(v=ci, lty=2, col='grey')
#------------- set up neighbor lists -------------------#
# nearest neighbor for each plant (spdep: knearneigh defaults to k = 1)
nblist = knn2nb(knearneigh(coordinates(d)))
# max, min, mean nearest neighbor distances (printed, not stored)
max(unlist(nbdists(nblist, coordinates(d))))
min(unlist(nbdists(nblist, coordinates(d))))
mean(unlist(nbdists(nblist, coordinates(d))))
# neighbors by distance
# 10m = smallest distance so that all plants have a neighbor
# NOTE(review): the call below uses an upper bound of 15, not 10 -- confirm
# which radius was intended
nb10<-dnearneigh(coordinates(d),0,15,row.names=d$receiver)
plot(nb10, coordinates(d))
#---------------- Correlograms -------------------------#
# Moran's I correlogram by neighbor order (spdep) and by distance class (ncf)
cor.nb10 = sp.correlogram(nblist, d$damage, order=10, method='I')
correl = correlog(coordinates(d), d$damage)
# log transformed
correllog = correlog(coordinates(d), log(d$damage))
# filled points (pch 19) mark the first two (significant) distance classes
plot(correl[,1], correl[,2], pch=(c(19,19, rep(21,11))), type='b')
abline(h=0, lty=2, col='grey')
# log transformed
plot(correllog[1:10,1], correllog[1:10,2], pch=(c(19,19, rep(21,8))), type='b',
xlab='Distance', ylab='Moran I', las=1)
abline(h=0, lty=2, col='grey')
#autocorrelation to about 11 m
# with residuals # (k11 is a previously fitted GLMM -- defined earlier in the file)
correl.glmm.r.res = correlog(coordinates(d), residuals(k11, type='pearson'))
plot(correl.glmm.r.res[,1], correl.glmm.r.res[,2], pch=(c(19,19, rep(21,11))), type='b')
abline(h=0, lty=2, col='grey')
#-------------- Spline correlogram ---------------------#
library(ncf)
# spline correlograms for raw, proportional, and log-transformed damage,
# plus Pearson residuals of the two candidate GLMMs (k11, k12)
spline.correlog.dam = spline.correlog(x = d$x, y = d$y, z = d$damage, xmax=50)
plot.spline.correlog(spline.correlog.dam)
summary(spline.correlog.dam)
spline.correlog.damprop = spline.correlog(x = d$x, y = d$y, z = d$damage.prop, xmax=50)
plot.spline.correlog(spline.correlog.damprop)
summary(spline.correlog.damprop)
spline.correlog.logdam = spline.correlog(x = d$x, y = d$y, z = log(d$damage), xmax=50)
plot.spline.correlog(spline.correlog.logdam)
summary(spline.correlog.logdam)
spline.correlog.glmm.intercept.res = spline.correlog(x = d$x, y = d$y,
z = residuals(k12, type='pearson'), xmax=50)
plot.spline.correlog(spline.correlog.glmm.intercept.res)
spline.correlog.glmm.r.res = spline.correlog(x = d$x, y = d$y,
z = residuals(k11, type='pearson'), xmax=50)
plot.spline.correlog(spline.correlog.glmm.r.res)
#--------------- local G ------------------------------#
# local G hot-spot statistic; point size scaled by the (shifted) statistic
local = localG(d$damage, rlistW)
plot(coordinates(d), cex=local-min(local), asp=1)
#-------------- Autocovariate regression ----------------#
library(spdep)
# autocovariate = distance-weighted mean damage of neighbors within nbs units
autocov = autocov_dist(d$damage, nbs=10, coordinates(d))
plot(autocov ~ d$autodam)
plot(d$damage ~ autocov)
abline(lm(d$damage ~ autocov))
# binomial GLMs with/without treatment (r) and with/without the autocovariate
ac0 = glm(cbind(damage, healthy) ~ autocov, data=d, family='binomial')
ac1 = glm(cbind(damage, healthy) ~ r + autocov, data=d, family='binomial')
nac0 = glm(cbind(damage, healthy) ~ 1, data=d, family='binomial')
nac1 = glm(cbind(damage, healthy) ~ r, data=d, family='binomial')
# GLMM versions with a random intercept per receiver (glmer from lme4)
acglmm0 = glmer(cbind(damage, healthy) ~ autocov + (1|receiver), data=d, family='binomial')
acglmm1 = glmer(cbind(damage, healthy) ~ r + autocov + (1|receiver), data=d, family='binomial')
# small-sample AIC comparisons (AICctab from bbmle); nobs = 65 plants
AICctab(ac0, ac1, nobs=65)
AICctab(nac0, nac1, nobs=65)
AICctab(acglmm0, acglmm1, nobs=65)
anova(ac0, ac1)
#----------- Transform and use autoregressive model ------------#
# log proportion damaged as a roughly Gaussian response
z = log(d$damage.prop)
plot(z ~ d$r)
lm1 = lm(z ~ r, data=d)
plot(residuals(lm1) ~ d$x)
# Moran test on OLS residuals using the inverse-distance weights
lm.morantest(lm1, rlistW)
# simultaneous (SAR) and conditional (CAR) autoregressive models
m0 = spautolm(z ~ 1, data=d, family='SAR', listw=rlistW)
m1 = spautolm(z ~ r, data=d, family='SAR', listw=rlistW)
AICtab(m0,m1)
m0 = spautolm(z ~ 1, data=d, family='CAR', listw=rlistW)
m1 = spautolm(z ~ r, data=d, family='CAR', listw=rlistW)
AICtab(m0,m1)
#-------------------- GAM on transformed data -----------------#
library(mgcv)
# z is taken from the global environment here (no data= argument)
gam0 = gam(z ~ 1)
gam1 = gam(z ~ r, data=d)
AICtab(gam0, gam1)
anova(gam0, gam1)
# smooth spatial terms: 2-D s(x,y) vs 1-D s(x)
sg0xy = gam(z ~ 1 + s(x,y), data=d)
sg1xy = gam(z ~ r + s(x,y), data=d)
sg0x = gam(z ~ 1 + s(x), data=d)
sg1x = gam(z ~ r + s(x), data=d)
AICtab(sg0xy, sg1xy, sg0x, sg1x)
#------------------- GAM binomial ----------------------------#
# same smooth-spatial-term comparison, but on the binomial counts directly
sg0xy = gam(cbind(damage, healthy) ~ 1 + s(x,y), data=d, family=binomial)
sg1xy = gam(cbind(damage, healthy) ~ r + s(x,y), data=d, family=binomial)
sg0x = gam(cbind(damage, healthy) ~ 1 + s(x), data=d, family=binomial)
sg1x = gam(cbind(damage, healthy) ~ r + s(x), data=d, family=binomial)
AICtab(sg0xy, sg1xy, sg0x, sg1x)
#--------------------- lme with spat autocorr -------------------------------#
library(MASS)
library(nlme)
# Gaussian spatial correlation structure on the x/y coordinates
sp1 = corSpatial(form = ~ x + y, type = 'gaussian')
scor = Initialize(sp1, as(d, "data.frame")[, c('x', 'y')], nugget=FALSE)
# linear mixed models on z with the spatial correlation; ML so AIC is comparable
m0 = lme(z ~ 1, random = ~1|receiver, data=as.data.frame(d),
correlation=scor, method='ML')
m1 = lme(z ~ r, random = ~1|receiver, data=as.data.frame(d),
correlation=scor, method='ML')
AICtab(m0,m1)
#--------------------- glmmPQL -------------------------------#
library(MASS)
library(nlme)
# re-initialized correlation structure (explicit range value 1 this time)
sp1 = corSpatial(1, form = ~ x + y, type = 'gaussian')
scor = Initialize(sp1, as(d, "data.frame")[, c('x', 'y')], nugget=FALSE)
ddd = as.data.frame(d)
# penalized quasi-likelihood binomial GLMM with spatial correlation
m1PQL = glmmPQL(cbind(damage, healthy) ~ r, random = ~1 | receiver, family=binomial,
data=as.data.frame(d), correlation=scor)
summary(m1PQL)
#------------------- check for spat autocorr within the 2 blocks --------#
# Moran's I computed separately within each spatial block (d$spblock2),
# each compared against an uncorrelated binomial simulation of matching
# size and mean damage proportion.
### first for block 1 ###
# turn x-y coordinates into a square distance matrix
dists1 = as.matrix(dist(cbind(d$x[d$spblock2==1],d$y[d$spblock2==1]), diag=TRUE, upper=TRUE))
## inverse distance weights ##
rW1 = 1/(1+dists1)
diag(rW1) = 0
rlistW1 = mat2listw(rW1, row.names=d$receiver[d$spblock2==1])
summary(rlistW1)
## negative exponential ##
nexpW1 = exp(-dists1 * 0.5)
diag(nexpW1) = 0
nelistW1 = mat2listw(nexpW1, row.names=d$receiver[d$spblock2==1])
summary(nelistW1)
## Moran's I ##
# sim data with no spat autocorr (35 plants in block 1)
uncorr1 = rbinom(35, 100, mean(d$damage[d$spblock2==1]/100))
moran.test(d$damage[d$spblock2==1], listw=nelistW1)
moran.test(log(d$damage[d$spblock2==1]), listw=nelistW1)
moran.test(d$damage[d$spblock2==1], listw=rlistW1)
moran.test(log(d$damage[d$spblock2==1]), listw=rlistW1)
moran.test(uncorr1, listw=nelistW1)
moran.test(uncorr1, listw=rlistW1)
### second for block 2 ###
dists2 = as.matrix(dist(cbind(d$x[d$spblock2==2],d$y[d$spblock2==2]), diag=TRUE, upper=TRUE))
## inverse distance weights ##
rW2 = 1/(1+dists2)
diag(rW2) = 0
rlistW2 = mat2listw(rW2, row.names=d$receiver[d$spblock2==2])
# FIX: was summary(rlistW1) -- copy-paste error, summarize the block-2 weights
summary(rlistW2)
## negative exponential ##
nexpW2 = exp(-dists2 * 0.5)
diag(nexpW2) = 0
nelistW2 = mat2listw(nexpW2, row.names=d$receiver[d$spblock2==2])
# FIX: was summary(nelistW1) -- copy-paste error
summary(nelistW2)
## Moran's I ##
# sim data with no spat autocorr (30 plants in block 2)
uncorr2 = rbinom(30, 100, mean(d$damage[d$spblock2==2]/100))
moran.test(d$damage[d$spblock2==2], listw=nelistW2)
moran.test(log(d$damage[d$spblock2==2]), listw=nelistW2)
moran.test(d$damage[d$spblock2==2], listw=rlistW2)
moran.test(log(d$damage[d$spblock2==2]), listw=rlistW2)
# FIX: these two re-ran the block-1 simulation against block-1 weights;
# the block-2 null simulation must be tested with the block-2 weights
moran.test(uncorr2, listw=nelistW2)
moran.test(uncorr2, listw=rlistW2)
|
f478fd1f51397cb50525483b36936968276eee55
|
cd0b0c9d78902b7721836626c5bb4a244903c7da
|
/man/ps_adjust_one.Rd
|
f021e11718883eb967d6b8a5df099ed2408da743
|
[] |
no_license
|
Zoe187419/adapt4pv
|
1ebd7dad2a91dbff9ad3f77dc60b2334a6947537
|
f1efa05aed71803b315b5810840a66a1b1b99499
|
refs/heads/master
| 2022-11-10T18:07:11.167386
| 2020-06-24T08:40:07
| 2020-06-24T08:40:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,374
|
rd
|
ps_adjust_one.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ps_adjust_one.R
\name{ps_adjust_one}
\alias{ps_adjust_one}
\title{adjustment on propensity score for one drug exposure}
\usage{
ps_adjust_one(ps_est, y)
}
\arguments{
\item{ps_est}{An object of class \code{"ps", "*"} where \code{"*"} is
\code{"bic"}, \code{"hdps"} or \code{"xgb"} according on how the
score was estimated, respective outputs of internal functions
\code{est_ps_bic}, \code{est_ps_hdps}, \code{est_ps_xgb}.
It is a list with the following elements :
* score_type: character, name of the drug exposure for which the PS was
estimated.
* indicator_expo: indicator of the drugs exposure for which the
PS was estimated. One-column Matrix object.
* score_variables: Character vector, names of covariate(s) selected
to include in the PS estimation model. Could be empty.
* score: One-column Matrix object, the estimated score.}
\item{y}{Binary response variable, numeric.}
}
\value{
An object with S3 class \code{"ps", "adjust"}
\item{expo_name}{Character, name of the drug exposure for which the PS
was estimated.}
\item{estimate}{Regression coefficient associated with the drug exposure
in adjustment on PS.}
\item{pval_1sided}{One sided p-value associated with the drug exposure
in adjustment on PS.}
\item{pval_2sided}{Two sided p-value associated with the drug exposure
in adjustment on PS.}
Could return NA if the adjustment on the PS did not converge.
}
\description{
Implement the adjustment on propensity score for one drug exposure.
The binary outcome is regressed on the drug exposure of interest and
its estimated PS.
Internal function, not supposed to be used directly.
}
\details{
The PS could be estimated in different ways: using lasso-bic approach,
the hdPS algorithm or gradient tree boosting using functions
\code{est_ps_bic}, \code{est_ps_hdps} and \code{est_ps_xgb}
respectively.
}
\examples{
set.seed(15)
drugs <- matrix(rbinom(100*20, 1, 0.2), nrow = 100, ncol = 20)
colnames(drugs) <- paste0("drugs",1:ncol(drugs))
ae <- rbinom(100, 1, 0.3)
pshdps2 <- est_ps_hdps(idx_expo = 2, x = drugs, y = ae, keep_total = 10)
adjps2 <- ps_adjust_one(ps_est = pshdps2, y = ae)
adjps2$estimate #estimated strength of association between drug_2 and the outcome by PS adjustment
}
\author{
Emeline Courtois \cr Maintainer: Emeline Courtois
\email{emeline.courtois@inserm.fr}
}
|
2633c2681a9cc0079c75aeebe7abe769e8ede33d
|
63aed5902db665db34aee375a66723d76aae939e
|
/DDPcourseProject/ui.R
|
d0bac3f1f510d8f995f10cb0cc053150051cce12
|
[] |
no_license
|
dudinhadnm/DevelopingDataProductsProject
|
ae21d4c8d9afbb9e638922a3f75e06f467b68f17
|
d54c34451454c07db990763b75d68e685c0a4df8
|
refs/heads/master
| 2023-07-08T23:10:12.911261
| 2021-07-22T18:07:38
| 2021-07-22T18:07:38
| 388,553,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,142
|
r
|
ui.R
|
# Shiny UI for the Developing Data Products course project:
# a slider for diamond weight (carats) feeding a price prediction panel,
# with a second tab holding the app documentation.
library(shiny)
shinyUI(fluidPage(
# Custom CSS: page background plus heading/input colors
tags$head(
# Note the wrapping of the string in HTML()
tags$style(HTML("
body {
background-color: #D3D4D9;
color: black;
}
h2 {
font-family: 'Yusei Magic', sans-serif;
color: #A0097D
}
h3 {
font-family: 'Yusei Magic', sans-serif;
color: #A0097D
}
h4 {
font-family: 'Yusei Magic', sans-serif;
color: #A0097D
}
.shiny-input-container {
color: #00928B;
}"))
),
titlePanel("Developing data products Course project"),
sidebarLayout(
# Input: carat slider (0-3 in 0.1 steps); submitButton defers updates
# until the user clicks Submit
sidebarPanel(
sliderInput("sliderCarat", "How many carats in your diamond?", 0, 3, value = 0.4, step = 0.1),
submitButton("Submit")
),
mainPanel(
tabsetPanel(type = "tabs",
# Tab 1: plot plus the echoed carat value and predicted price
# (outputs "plot1", "out1", "pred1" are rendered in server.R)
tabPanel("App page",
br(),
plotOutput("plot1"),
br(),
h4("Carat:"),
textOutput("out1"),
br(),
h4("Predicted price from Model:"),
textOutput("pred1")),
# Tab 2: static usage documentation and links to the source code
tabPanel("Documentation",
br(),
h3("Documentation"),
h4("How to use this shiny app:"),
p("Use the slider on the left side bar to
input a value for the weight of a diamond
(in carat), and click the Submit button to
see how much that diamond would cost according
to a linear regression calculated used from the
'diamonds' dataset"),
h4("Model"),
p("It calculates the price of a diamond with weight
x in carats, based on the diamonds database. The model
used is a simple linear regression:"),
code("model <- lm(price ~ carat, diamonds)"),
p("Given the model and the
value of carats that was input in the slider
on the side bar, the price is predicted as shown:"),
code("predict(model, newdata = data.frame(carat = caratInput))"),
h4("Other material"),
tags$a(href="https://github.com/dudinhadnm/DevelopingDataProductsProject/blob/master/DDPcourseProject/ui.R", "ui.R code on GitHub"),
br(),
tags$a(href="https://github.com/dudinhadnm/DevelopingDataProductsProject/blob/master/DDPcourseProject/server.R", "server.R code on GitHub"))
)
)
)
))
|
17d826232a37c9fa71713ced31693566783cba1c
|
dbd7eca50cbbd581ed88e85fc7a66e283ee05e61
|
/R/figures_report.R
|
05a3e73a37bd3e39b1e4f8de49fb1428f523fa12
|
[] |
no_license
|
timriffe/covid_eus
|
d06c71d1ae9eef69a340225c4d760bc90e6fd7a0
|
9dd2ca513a2b24b0cd94903682478cf141699bfb
|
refs/heads/master
| 2023-06-20T00:25:50.077197
| 2021-07-14T08:51:31
| 2021-07-14T08:51:31
| 368,883,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,926
|
r
|
figures_report.R
|
# packages needed
library(tidyverse)
library(ggridges)
library(lubridate)
library(cowplot)
# some data prep for the visualizations:
# keep observed rates up to the reporting cutoff and accumulate them per series
covid <- readRDS("Data/data_ccaa.rds") %>%
filter(!is.na(tasa),
# data cutoff declared for reporting
fecha <= dmy("30-06-2021")) %>%
arrange(CCAA, variable, sexo, edad, year_iso, week_iso) %>%
group_by(CCAA, variable, sexo, edad) %>%
mutate(tasa_cumul = cumsum(tasa)) %>%
ungroup()
# the standardization: age/sex-standardized rates per region-week-variable,
# weighting by the national standard population (stand_nac)
covid_st <-
covid %>%
group_by(CCAA, CCAA_iso, year_iso, week_iso, fecha, variable) %>%
summarize(tasa_st = sum(tasa * stand_nac),
tasa_st_cumul = sum(tasa_cumul * stand_nac),
.groups = "drop") %>%
# fixed (reversed alphabetical) factor order so regions plot top-to-bottom;
# note "Catalunya " keeps a trailing space to match the source data
mutate(CCAA = factor(CCAA, levels =rev(c("Andalucía", "Aragón", "Asturias", "Canarias", "Cantabria",
"Castilla y León", "Castilla-La Mancha", "Catalunya ", "Extremadura",
"Galicia", "Illes Balears", "C. Madrid", "Murcia", "Navarra",
"País Vasco", "La Rioja", "C. Valenciana"))))
############################################################
# Ridge plots (scale is relative, no need to annualize!) #
############################################################
# plot objects: one ridge figure per outcome variable, saved to FigData/
variables <- c("casos","hosp","uci","def","exceso")
variable_names <- c("Casos","Hospitalizaciones","Ingresos UCI",
"Defunciones COVID-19","Exceso mortalidad")
names(variable_names) <- variables
CCAA_isov <- covid_st %>% pull(CCAA) %>% unique() %>% sort()
# grey for all regions except the two highlighted ones
CCAA_cols <- rep(gray(.5),length(CCAA_isov))
CCAA_cols[CCAA_isov == "Navarra"] <- "#d4204d"
CCAA_cols[CCAA_isov == "País Vasco"] <- "#20ab3a"
# per-variable vertical scaling so the ridges are visually comparable
heights <- c(casos = 250, hosp =3000 , uci = 40000, def = 8000, exceso = 8000 )
# heights <- c(casos = 250, hosp =3000 , uci = 50000, def = 10000, exceso = 10000 )
date_breaks <- as_date(
c("2020-01-01","2020-04-01","2020-07-01","2020-10-01","2021-01-01","2021-04-01","2021-06-30"))
for (i in variables){
p <-
covid_st %>%
dplyr::filter(variable == i,
fecha <= dmy("30-06-2021")) %>%
ggplot(aes(x = fecha,
#y = reorder(CCAA_iso, tasa_st_nac, na.rm=TRUE, FUN = var),
y = CCAA,
height = tasa_st,
fill = CCAA)) +
# geom_text(data = label_df,
# mapping = aes(x = lx, y = ly, label = label)) +
geom_ridgeline(color = gray(.2), alpha = .7,min_height = -2,scale = heights[i]) +
# theme_bw() +
# NOTE(review): "legend.pos" relies on partial matching of
# "legend.position" -- consider spelling it out
theme(legend.pos = "none",
axis.text=element_text(size=10),
axis.title=element_text(size=14),
plot.margin = margin(t=2.5, r=3, b=1, l=2, "lines"),
plot.title.position = "plot",
panel.background = element_blank(),
panel.grid.major = element_line(color = "#BBBBBB50"),
axis.ticks = element_blank()) +
labs(y=element_blank(),
x=element_blank()) +
scale_fill_manual(values = CCAA_cols, labels = CCAA_isov) +
scale_x_date(breaks = date_breaks,
date_labels = "%d/%m/%Y") +
coord_cartesian(clip = "off") +
# annotations: vertical reference lines for key policy dates
annotate("segment",
x = dmy("14/03/2020"),
xend = dmy("14/03/2020"),
y = .5,
yend = 19.5,
color = "#4b8ead"
) +
annotate("segment",
x = dmy("05/05/2020"),
xend = dmy("05/05/2020"),
y = .5,
yend =20.5,
color = "#4b8ead"
) +
annotate("segment",
x = dmy("25/10/2020"),
xend = dmy("25/10/2020"),
y = .5,
yend =19.5,
color = "#4b8ead"
) +
annotate("segment",
x = dmy("23/12/2020"),
xend = dmy("23/12/2020"),
y = .5,
yend =20.5,
color = "#4b8ead"
) +
annotate("segment",
x = dmy("9/05/2021"),
xend = dmy("9/05/2021"),
y = .5,
yend =20.5,
color = "#4b8ead"
)
# overlay the policy-date labels in figure coordinates (cowplot)
pp <-
ggdraw(p) +
draw_label(label="CCAA",x=.13,y=.84,size=16,fontface="bold") +
draw_label(label="1º Estado de Alarma\n(14/03/2020)",x=.25,y=.88,size=10) +
draw_label(label="Desescalada\n(05/05/2020)",x=.37,y=.95,size=10) +
draw_label(label="2º Estado de Alarma\n(25/10/2020)",x=.535,y=.88,size=10)+
draw_label(label="Apertura Navidad\n(23/12/2020)",x=.66,y=.95,size=10)+
# draw_label(label="fin movilidad\n(6/01/2021)",x=.75,y=.80)+
draw_label(label="Fin 2º Estado de Alarma\n(09/05/2021)",x=.84,y=.95,size=10)
ggsave(here::here("FigData",paste0(i,".png")),pp,width = 3000, height = 1700, units = "px")
}
############################################################
# Dot plot of cumulative values #
# (discount to annualize, labels in per 1000, log scale) #
############################################################
# one color per outcome variable
cols <- c("Casos" = "#4daf4a",
"Hospitalizaciones" = "#984ea3",
"Ingresos UCI" = "#377eb8",
"Defunciones COVID-19" = "#e41a1c",
"Exceso mortalidad" = "#ff7f00")
DT <-
covid_st %>%
filter(fecha <= ymd("2021-06-14")) %>%
# k = elapsed fraction of a year up to the snapshot date (annualization factor)
mutate(k = range(fecha) %>% diff() %>% as.numeric() * 1 / 365.25) %>%
filter(fecha == ymd("2021-06-14")) %>%
# per-1000, annualized cumulative rate; highlight colors for two regions
mutate(tasa_st_cumul = tasa_st_cumul * 1000 / k,
lab_color = case_when(CCAA == "País Vasco" ~ "#20ab3a",
CCAA == "Navarra" ~ "#d4204d",
TRUE ~ "#000000")) %>%
group_by(CCAA) %>%
# deaths column used only to order regions on the y-axis
# NOTE(review): the result stays grouped by CCAA after this pipeline
mutate(deaths = tasa_st_cumul[variable == "def"]) %>%
left_join(tibble(variable_names, variable = names(variable_names)), by = "variable")
pc <-
DT %>%
ggplot(aes(y = reorder(CCAA,deaths),
x = tasa_st_cumul,
color = variable_names)) +
geom_point(size = 4) +
scale_x_log10() +
# labs(title = "Tasas acumuladas estandarizadas (14 junio 2021)") +
ylab("") +
xlab("Tasa acumulada estandarizada per mil (log)") +
theme(axis.text=element_text(size=10),
title = element_text(size=14),
plot.title.position = "plot",
legend.title=element_blank(),
legend.text = element_text(size = 10),
panel.background = element_blank(),
panel.grid.major = element_line(color = "#BBBBBB50"),
axis.ticks = element_blank()) +
scale_color_manual(values = cols) +
# translucent horizontal bands highlighting the two focal regions' rows
geom_hline(data = NULL, yintercept = 11, color = "#20ab3a", size = 5, alpha = .2) +
geom_hline(data = NULL, yintercept = 10, color = "#d4204d", size = 5, alpha = .2) +
# redraw points on top of the bands
geom_point(size = 4)
ggsave(here::here("FigData","cumul.png"),pc,width = 3000, height = 1700, units = "px")
#############################
# Rank plot of cumulative #
#############################
# within-variable ranks of standardized cumulative rates per region
covid_ranks2 <-
covid %>%
group_by(CCAA, variable,sexo,edad, stand_nac) %>%
summarize(tasa = sum(tasa, na.rm = TRUE), .groups = "drop") %>%
group_by(CCAA, variable) %>%
summarize(tasa_st_nac = sum(tasa * stand_nac, na.rm=TRUE),
.groups = "drop") %>%
group_by(variable) %>%
# Rank: 1 = lowest rate; RelRank scaled to the 17 regions (currently unused)
mutate(Rank = rank(tasa_st_nac),
RelRank = tasa_st_nac / max(tasa_st_nac, na.rm=TRUE) * 17) %>%
ungroup() %>%
left_join(tibble(variable_name = variable_names,
variable = names(variable_names)),
by = "variable") %>%
mutate(variable_name = factor(variable_name, levels = c("Casos","Hospitalizaciones","Ingresos UCI","Defunciones COVID-19","Exceso mortalidad")))
# bump-chart style plot: lines connect ranks across variables,
# region labels drawn at both ends (first and last variable columns)
ranks <-
covid_ranks2 %>%
ggplot(aes(x = variable_name, y = Rank, color = CCAA, group = CCAA)) +
geom_point(size = 4) +
geom_line(data = dplyr::filter(covid_ranks2, variable_name != "Exceso mortalidad")) +
geom_text(data = dplyr::filter(covid_ranks2,
variable_name == "Exceso mortalidad"),
mapping = aes(x = 5.1, y = Rank, label = CCAA),
size = 4, hjust = 0) +
geom_text(data = dplyr::filter(covid_ranks2,
variable_name == "Casos"),
mapping = aes(x = .9, y = Rank, label = CCAA),
size = 4, hjust = 1) +
#annotate("text",x = rep(.6,17), y = 1:17, label = 17:1) +
#annotate("text", x = .6, y= 17.9, label = "Rel. Rank", size = 6) +
# annotate("text", x = 5.2, y= 19, label = "CCAA", size = 6) +
labs(x = "") +
theme(legend.position = "none",
axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
panel.background = element_blank(),
panel.grid.major = element_line(color = "#BBBBBB50"),
axis.ticks = element_blank(),
plot.margin = margin(t=1, r=5, b=1, l=5, "lines"),
axis.text=element_text(size=10))+
coord_cartesian(clip = "off")
ggsave(here::here("FigData","ranks.png"), ranks, width = 3000, height = 1700, units = "px")
################################
# library(tidyverse)
# library(readxl)
# dat <- read_excel("Data/Gráficos.xlsx", sheet = "table")
#
#
#
# p <-
# dat %>%
# ggplot(aes(y = origen,
# x = Valor,
# group = interaction(origen, model),
# color = model)) +
# geom_vline(data = NULL,aes(xintercept = 1), col = gray(.5))+
# geom_point(position = position_dodge2(width = .4, reverse = TRUE), size = 2) +
# scale_x_log10(breaks = c(.8,1.25,2,4)) +
# geom_pointrange(aes(xmin = lower, xmax = upper),
# position = position_dodge2(width = .4, reverse = TRUE)) +
# ylab("") +
# xlab("log prevalence ratio") +
# theme_minimal() +
# theme(axis.text = element_text(size=12),
# axis.title = element_text(size=14))
#
# p
# ggsave("FigData/prevratio.svg",p)
|
de3a3ab8f63d7e24fbb56cc516c2866e0587099d
|
8a45e52ffb27ba07fcd00211b5c7794b718423d0
|
/tests/testthat/test-package.R
|
5453836a5a68177adb7d514645bc917c1ae74807
|
[
"MIT"
] |
permissive
|
patrickgtwalker/dust
|
4aaf6c5918421e721eae4bc8d26c96bc44304e88
|
e27c0d54e9bc61b4af07a34fb01c532ed88e09bd
|
refs/heads/master
| 2023-07-13T22:19:23.432203
| 2021-08-31T14:25:30
| 2021-08-31T14:25:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,944
|
r
|
test-package.R
|
context("package")
test_that("validate package", {
skip_if_not_installed("pkgload")
skip_on_cran()
path <- create_test_package()
## ensure we create these as needed
file.remove(file.path(path, "R"))
file.remove(file.path(path, "src"))
path <- dust_package(path)
expect_false(any(grepl("##'", readLines(file.path(path, "R", "dust.R")))))
expect_true(file.exists(file.path(path, "src/Makevars")))
expect_equal(
readLines(file.path(path, "src/Makevars")),
readLines(dust_file("template/Makevars.pkg")))
pkgbuild::compile_dll(path, quiet = TRUE)
res <- pkgload::load_all(path)
w <- res$env$walk$new(list(sd = 1), 0, 100)
expect_equal(w$state(), matrix(0, 1, 100))
expect_equal(w$run(0), matrix(0, 1, 100))
w$set_state(pi)
expect_equal(w$run(0), matrix(pi, 1, 100))
w$set_index(integer(0))
expect_equal(w$run(0), matrix(0, 0, 100))
rm(w)
gc()
})
test_that("validate package dependencies", {
deps <- data.frame(
type = c("LinkingTo", "LinkingTo"),
package = c("cpp11", "dust"),
version = "*",
stringsAsFactors = FALSE)
expect_silent(package_validate_has_dep(deps, "cpp11", "LinkingTo"))
expect_error(
package_validate_has_dep(deps, "cpp11", "Depends"),
"Expected package 'cpp11' as 'Depends' in DESCRIPTION")
expect_error(
package_validate_has_dep(deps, "other", "Imports"),
"Expected package 'other' as 'Imports' in DESCRIPTION")
})
test_that("validate destination notices existing C++ code", {
msg <- "File '.+\\.cpp' does not look like it was created by dust - stopping"
path <- create_test_package()
path_cpp <- file.path(path, "src", "walk.cpp")
file.create(path_cpp)
expect_error(
package_validate_destination(path, c("sir.cpp", "walk.cpp")),
msg)
writeLines("// some actual content", path_cpp)
expect_error(
package_validate_destination(path, c("sir.cpp", "walk.cpp")),
msg)
writeLines("// Generated by dust", path_cpp)
expect_silent(
package_validate_destination(path, c("sir.cpp", "walk.cpp")))
})
test_that("validate destination notices existing R code", {
msg <- "File '.+\\.R' does not look like it was created by dust - stopping"
path <- create_test_package()
path_r <- file.path(path, "R", "dust.R")
file.create(path_r)
expect_error(
package_validate_destination(path, character()),
msg)
writeLines("## some actual content", path_r)
expect_error(
package_validate_destination(path, character()),
msg)
writeLines("## Generated by dust", path_r)
expect_silent(
package_validate_destination(path, character()))
})
test_that("Fail to run if no dust files found", {
path <- create_test_package()
unlink(dir(file.path(path, "inst", "dust"), full.names = TRUE))
expect_error(
dust_package(path),
"No dust files found in '.+/inst/dust'")
})
test_that("Fail to run if NAMESPACE missing", {
path <- create_test_package()
unlink(file.path(path, "NAMESPACE"))
expect_error(
package_validate(path),
"Expected a file 'NAMESPACE' at path '.+'")
})
test_that("Fail to run if DESCRIPTION missing", {
path <- create_test_package()
unlink(file.path(path, "DESCRIPTION"))
expect_error(
package_validate(path),
"Expected a file 'DESCRIPTION' at path '.+'")
})
test_that("Validate NAMESPACE has correct useDynLib call", {
path <- create_test_package()
path_ns <- file.path(path, "NAMESPACE")
expect_null(package_validate_namespace(path_ns, "pkg"))
expect_error(
package_validate_namespace(path_ns, "other"),
"Found a useDynLib call but not for 'other'")
txt <- readLines(path_ns)
writeLines(gsub('"', "", txt), path_ns)
expect_null(package_validate_namespace(path_ns, "pkg"))
expect_error(
package_validate_namespace(path_ns, "other"),
"Found a useDynLib call but not for 'other'")
file.create(path_ns)
expect_error(
package_validate_namespace(path_ns, "other"),
"Did not find a useDynLib call in NAMESPACE")
})
test_that("Validate NAMESPACE from dust_package", {
path <- create_test_package()
file.create(file.path(path, "NAMESPACE"))
expect_error(
dust_package(path),
"Did not find a useDynLib call in NAMESPACE")
})
test_that("Validate openmp support", {
text <- read_lines(dust_file("template/Makevars.pkg"))
expect_silent(package_validate_makevars_openmp(text))
msg <- "Package has a 'src/Makevars' but no openmp flags support"
expect_error(
package_validate_makevars_openmp(""),
msg)
expect_error(
package_validate_makevars_openmp("PKG_CXXFLAGS=$(SHLIB_OPENMP_CXXFLAGS)"),
msg)
expect_error(
package_validate_makevars_openmp("PKG_LIBS=$(SHLIB_OPENMP_CXXFLAGS)"),
msg)
})
test_that("Validate openmp support in real package", {
path <- create_test_package()
file.create(file.path(path, "src/Makevars"))
expect_error(
dust_package(path),
"Package has a 'src/Makevars' but no openmp flags support")
})
|
91b9653ec6d9bc0c4330f9b553343d02eedd7d29
|
da7ffd8f9260945bcaae8ca247a1bd3e94bf66ca
|
/R/create_last_level.R
|
93f52dd8fb66503f43277c661307571cdeb8909c
|
[] |
no_license
|
cran/cat.dt
|
9b1ab82a050191b25f6dffe551639227796141ac
|
df3fd18a53163c30be4d50a7d56dcc73d3cddfbc
|
refs/heads/master
| 2021-07-10T22:00:13.440144
| 2021-03-31T11:20:06
| 2021-03-31T11:20:06
| 236,568,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,021
|
r
|
create_last_level.R
|
#' CAT decision tree last level generator
#'
#' Generates a list of node lists for the last level of the CAT decision tree.
#' Only father nodes whose standard error is still above \code{SE} are
#' expanded; each possible response to the father's item spawns one child.
#'
#' @param nodes_prev list of node lists of the nodes from the previous level
#' @param nres vector of number of possible responses for every item
#' @param level last-level number (equals the length of the test plus one)
#' @param prob_array 3-D array of probability responses. Dim 1 represent items,
#' dim 2 represent evaluated ability levels and dim 3 represent possible
#' responses
#' @param SE minimum standard error of the ability level
#' @return A list of lists. Each of these lists represent a node of the
#' last level of the decision tree
#' @author Javier Rodríguez-Cuadrado
#'
#' @export
create_last_level = function(nodes_prev, nres, level, prob_array, SE) {

  #Initialise the list of last-level nodes and the running index into it
  nodes = list()
  indx = 0

  #Add known information to the level nodes.
  #seq_along/seq_len (rather than 1:length(...)) so empty inputs yield no
  #iterations instead of the spurious c(1, 0) sequence.
  for (i in seq_along(nodes_prev)) {
    if (nodes_prev[[i]]$SE > SE) {
      it = nodes_prev[[i]]$item #Item administered at the father node
      for (j in seq_len(nres[it])) {
        indx = indx+1 #Update auxiliary variable
        #A posteriori density function values calculus (update of the ability
        #density given response j to item 'it')
        apos = a_posteriori(nodes_prev[[i]]$dens_vec, prob_array[it, , j])
        #Add information
        est = estimate(apos) #Calculate the estimation and the SE
        #Node IDs encode the level in the leading digits: level*10000 + index
        nodes[[indx]] = create_node(level*10000+indx, apos, NA,
                                    c(it, nodes_prev[[i]]$item_prev),
                                    est[[1]], est[[2]], NA, NA, NA)
        #Add the ID of the father node and the response to that node that
        #leaded to the current node
        nodes[[indx]][[10]] = nodes_prev[[i]]$ID
        nodes[[indx]][[11]] = j
        nodes[[indx]][[12]] = 1
      }
    }
  }

  return(nodes) #Return the list of node lists
}
|
736696beccc217eb350e050a01b5997c0ae810da
|
ab651c03d51c95fb1847986d9b341cd658b7fc1e
|
/src/dataVisualization.R
|
4624254f2e504ab51ec234db45418907e7d06fc3
|
[] |
no_license
|
Peterliao96/soccer-rating-prediction
|
8ab68982e39755dcb3957a1bd9517e70b9de2937
|
3cf84d4c1265cf29216d5c1c210839b6835e5399
|
refs/heads/master
| 2020-06-15T12:42:24.055594
| 2019-08-10T23:54:11
| 2019-08-10T23:54:11
| 195,302,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 955
|
r
|
dataVisualization.R
|
# ---
# title: "Soccer Player Rating Prediction"
# author:
#   Junqi Liao 20650701
#   Raymond Tan
#
#
# date: "31 July 2019"
# output: data plot
# ---
#
# Visualize the data plot and see if each player attribute has correlation to each other
library(RSQLite)
library(dplyr)
# extract the zip database file in the data folder and set your own db path
# NOTE(review): absolute user-specific path -- must be edited per machine
con <- dbConnect(SQLite(), dbname="/Users/peterliao/Desktop/stat/stat444/project1/data/database.sqlite")
dbListTables(con)
# pull the rating_potential table into a tibble
rating_potential<- tbl_df(dbGetQuery(con,"SELECT * FROM rating_potential"))
# correlation plot over the numeric attribute columns (dropping id column 1
# and column 32)
library(corrplot)
corrplot(cor(rating_potential[,-c(1,32)]))
# since short_passing, long_passing, reactions, vision have a higher correlation,
# we pair them with the response variable overall_rating and see the scatterplot
# matrix
pairs(overall_rating~short_passing + long_passing+reactions+
vision,data=rating_potential,
main="Simple Scatterplot Matrix")
|
a888c24bda4a4ab3c2b32895792456c4e63107ba
|
6eeb13bde50030420c87ca1197edb6609aab7b03
|
/R/3rd class/class3_3.R
|
38ea53b5a07cd61abaafc0b1dd6cf79d968c6bfb
|
[] |
no_license
|
genie3933/course-materials
|
81a98e206c125eb5b9e9176572c7e3585b803307
|
bd99c17d1278c91ace2891bdcd6817bbcaafc970
|
refs/heads/main
| 2023-07-10T11:08:59.163542
| 2021-08-23T05:01:53
| 2021-08-23T05:01:53
| null | 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 5,882
|
r
|
class3_3.R
|
# 정규분포의 R 함수
# 확률밀도함수 (mean=기댓값, sd=표준편차)
# mean, sd를 생략하면 표준정규분포로 계산함 (mean = 0, sd = 1)
dnorm(x, mean = 0, sd = 1)
# Excel 함수 = NORM.DIST(x, mean, sd, FALSE)
# 누적분포함수 (q=분위수)
pnorm(q, mean = 0, sd = 1, lower.tail = TRUE)
# Excel 함수 = NORM.DIST(x, mean, sd, TRUE)
# 분위수 (p=누적확률)
qnorm(p, mean = 0, sd = 1, lower.tail = TRUE)
# Excel 함수 = NORM.INV(p, mean, sd)
# 정규 확률변수 (n=난수의 개수)
rnorm(n, mean = 0, sd = 1)
# Excel 함수 = NORM.INV(RAND( ), mean, sd) ⇒ 난수 1개 생성
# 정규분포 모수 지정 및 함수 cont.spdf() 실행
mu = c(0,0,2,2); sig=c(1,2,1,2)
cont.spdf("norm", -7, 7, mu, sig, xp=mu)
# 표준정규분포 사용
pnorm(1); pnorm(-1.5)
pnorm(1)-pnorm(-1.5)
# 정규분포 모수, 확률계산 영역 설정
mu = 2; sig = 2; a = -1; b = 4
# 함수 norm.trans() 실행
norm.trans(mu, sig, a, b)
# 표준정규분포 (0 ~ 2.99), 0.01 단위, 10개 열
pv <- matrix(pnorm(0:299/100), ncol=10, byrow=T)
colnames(pv) <- 0:9/100
rownames(pv) <- 0:29/10
print(round(pv, 4))
# 표준정규분포 누적확률 그래프에서 표시할 점 지정
zp = seq(-2, 2, by=0.5)
# 누적확률 그래프 작성 함수 snorm.cdf() 실행
snorm.cdf(zp)
# 누적확률 표시 점 지정 및 함수 snorm.prob() 실행
zp = 1:4; snorm.prob(zp)
# 분위수 계산/표시 누적확률 지정
pv = c(0.005, 0.01, 0.025, 0.05, 1:9/10, 0.95, 0.975, 0.99, 0.995)
# 표준정규 분위수 작성 함수 snorm.quant() 실행
snorm.quant(pv, pv)
# pnorm(x, mean, sd) 사용
pnorm(185, 175, 8) - pnorm(180, 175, 8)
# qnorm(p, mean, sd) 사용
qnorm(0.05, 10, 1.5)
# 표준화 사용
10+qnorm(0.05)*1.5
# pnorm(p, mean, sd) 사용
pnorm(0, 21.5-20, sqrt(0.4^2 +0.3^2))
# pnorm(x, mean, sd) 사용
pnorm(60, 50, 4)-pnorm(40, 50, 4)
# 정확한 계산 a pbinom(x, n, p) 사용
pbinom(4, 25, 0.2)
# 정규근사 a pnorm(x, mean, sd) 사용
pnorm(4, 5, 2)
# 연속성 보정
pnorm(4.5, 5, 2)
# 정확한 계산
pbinom(45, 100, 0.5)-pbinom(39, 100, 0.5)
# 정규근사 (연속성 보정)
pnorm(-0.9)-pnorm(-2.1); pnorm(45.5, 50, 5) - pnorm(39.5, 50, 5)
# 카이제곱분포의 R 함수
# 카이제곱분포의 확률밀도함수 (df=자유도, ncp=비중심모수(미사용))
dchisq(x, df, ncp = 0)
# Excel 함수 = CHISQ.DIST(x, df, FALSE)
# 카이제곱분포의 누적분포함수 F(x)
pchisq(x, df, ncp = 0, lower.tail = TRUE)
# Excel 함수 = CHISQ.DIST(x, df, TRUE)
# 카이제곱분포의 분위수 (p=누적확률)
qchisq(p, df, ncp = 0, lower.tail = TRUE)
# Excel 함수 = CHISQ.INV(p, df)
# 카이제곱분포의 확률변수 (n=난수의 개수)
rchisq(n, df, ncp = 0)
# Excel 함수 = CHISQ.INV(RAND( ), df) ⇒ 한 개의 난수 생성
# 누적확률 표시 점 지정 및 함수 chi.prob() 실행
k= 1:10; nu= 5; chi.prob(nu, k)
nu = 5
# 표에서 나타낼 값
pv = c(0.005, 0.01, 0.025, 0.05, 1:9/10, 0.95, 0.975, 0.99, 0.995)
# 그래프에서 나타낼 값, chi.quant() 실행
pv2 = c(0.005, 0.01, 0.025, 0.05, 0.5, 0.95, 0.975, 0.99, 0.995)
chi.quant(nu, pv, pv2)
# 자유도 입력 및 x-축 값 생성
nu = c(5, 10, 15, 20); up = qchisq(0.99, max(nu))
# 함수 cont.spdf() 실행
cont.spdf("chi", 0, up, para=nu, xp=nu)
# 자유도, 누적확률 입력
nu <- c(5, 10, 30, 100)
p <- 0.95
# 분위수 (상수 a)
qchisq(p, nu)
# 기댓값 대비 95% 분위수 비율
qchisq(p, nu) / nu
# t-분포의 확률밀도함수 f(x) (df=자유도, ncp=비중심모수(미사용))
dt(x, df, ncp)
# Excel 함수 = T.DIST(x, df, FALSE)
# t-분포의 누적분포함수 F(x)
pt(x, df, ncp, lower.tail = TRUE)
# Excel 함수 = T.DIST(x, df, TRUE)
# t-분포의 분위수 (p=누적확률)
qt(p, df, ncp, lower.tail = TRUE)
# Excel 함수 = T.INV(p, df)
# t-분포의 확률변수 (n=난수의 개수)
rt(n, df, ncp)
# Excel 함수 = T.INV(RAND( ), df) ⇒ 한 개의 난수 생성
# 자유도 지정
nu = c(1, 5, 10, 30)
# 확률밀도 비교 함수 tnorm.comp() 실행
tnorm.comp(nu)
# 표시 확률 지정
p = c(5:9/10, 0.95, 0.975, 0.99, 0.995, 0.999, 0.9995); nc = length(p)
# 자유도 지정
df = c(1:40, 50, 100, Inf); nr = length(df)
# 분위수 생성
qv = array(0, dim=c(nr, nc))
colnames(qv) = p; rownames(qv) = df
for (i in 1:nc) qv[,i] = qt(p[i], df)
print(round(qv, 3))
# F-분포의 R 함수
# F-분포의 확률밀도함수 (df1, df2 =자유도, ncp=비중심모수(사용하지 않음))
df(x, df1, df2, ncp)
# Excel 함수 = F.DIST(x, df1, df2, FALSE)
# F-분포의 누적분포함수 F(x)
pf(x, df1, df2, ncp, lower.tail = TRUE)
# Excel 함수 = F.DIST(x, df1, df2, TRUE)
# F-분포의 분위수 (p=누적확률)
qf(p, df1, df2, ncp, lower.tail = TRUE)
# Excel 함수 = F.INV(p, df1, df2)
# F-분포의 확률변수 (n=난수의 개수)
rf(n, df1, df2, ncp)
# Excel 함수 = F.INV(RAND( ), df1, df2)
# (1) F(8,5)-분포에 대해 함수 fdist.sim() 실행
fdist.sim(nu1=8, nu2=5)
# 누적확률 표시 점 및 자유도 지정
k= 1:7; nu1 = 8; nu2= 5
# 함수 f.prob() 실행
f.prob(nu1, nu2, k)
# 자유도 및 F-분포 계산/표시 누적확률 지정
nu1 = 8; nu2 = 5
pv = c(0.005, 0.01, 0.025, 0.05, 1:9/10, 0.95, 0.975, 0.99, 0.995)
pv2 = c(0.05, 0.5, 0.95, 0.975, 0.99, 0.995)
# 함수 f.quant() 실행
f.quant(nu1, nu2, pv, pv2)
# 자유도 지정 및 x-축 값 생성
nu1 = c(3, 3, 20, 20); nu2 = c(3, 20, 3, 20)
up = max(qf(0.95, nu1, nu2))
# 함수 cont.spdf() 실행
cont.spdf("f", 0, up, nu1, nu2, xp=nu2/(nu2-2))
|
4f74377c22e5975c1c59d0de009747a2894a3f32
|
8d88c70a55e053e31c5fb7c6f8e095a954b8737d
|
/Logistic_regression(Affairs_data).R
|
2010d6aafde326d2ce5d242f2fc5ccad6744ad5c
|
[] |
no_license
|
amitdivekar30/Logistic_Regresssion_by_R
|
1a9baed53715a5aab0d5a1fcd90edf6f9ac3375c
|
bbc142e0599a3e2fa7660ebac5af369b22b30fff
|
refs/heads/master
| 2021-04-11T11:15:07.897053
| 2020-03-21T16:29:54
| 2020-03-21T16:29:54
| 249,014,936
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,807
|
r
|
Logistic_regression(Affairs_data).R
|
# Assignment on Logistic Regression
# Goal: classify whether a person had an affair or not (binary outcome).
# Pipeline: explore the data -> encode categoricals -> scale numerics ->
# backward-select a glm via stepAIC -> evaluate with a confusion matrix & ROC.
Affairs<-read.csv("affairs.csv")
summary(Affairs)
View(Affairs)
#attach(Affairs)
# display first 10 rows of data
head(Affairs, n=10)
# display the dimensions of the dataset
dim(Affairs)
str(Affairs)
# list types for each attribute
sapply(Affairs, class)
# standard deviations and mean for each numeric column
# (columns 3,4,6:9 are the numeric features; column 5 is skipped)
sapply(Affairs[,c(3,4,6:9)], mean)
sapply(Affairs[,c(3,4,6:9)], sd)
xn<-colnames(Affairs[,c(3,4,6:9)])
x<-c(c(3,4,6:9))
z<-sapply(Affairs[,c(3,4,6:9)], mean)
# bar charts of per-feature means and standard deviations
# (NOTE(review): "Devaition" typo kept as-is in the plot titles)
barplot(z, main = "Average Value of Feature",
        xlab = "Feature Name",
        ylab = "Average Value")
barplot(sapply(Affairs[,c(3,4,6:9)], sd), main = "Standard Devaition of Feature",
        xlab = "Feature Name",
        ylab = "Standard Devaition")
# table of output (affair count) for Affairs
table(Affairs$affairs)
# table of proportions of entries in the dataset, in percent
round(prop.table(table(Affairs$affairs))*100,1)
# distribution of class variable
z <- as.factor(Affairs$affairs)
cb <- cbind(freq=table(z), percentage=prop.table(table(z))*100)
barplot(table(z), main = "Frequency Distribution of All Classes",
        xlab = "Class Name",
        ylab = "Number of Data Points", legend = TRUE)
# calculate a correlation matrix for numeric variables
library(corrplot)
correlations <- cor(Affairs[,c(3,4,6:9)])
# display the correlation matrix
print(correlations)
corrplot(correlations, method = "circle")
# Encoding categorical data:
# collapse the affair count to a 0/1 indicator, then recode the
# two binary text columns (children, gender) to 0/1 factors.
Affairs$affairs<-ifelse(Affairs$affairs>0, 1, 0)
Affairs$affairs<-factor(Affairs$affairs,
                        levels = c(0,1),
                        labels = c(0, 1))
Affairs$children<-factor(Affairs$children,
                         levels = c('no','yes'),
                         labels = c(0, 1))
Affairs$gender<-factor(Affairs$gender,
                       levels = c('female','male'),
                       labels = c(0, 1))
summary(Affairs)
attach(Affairs)
# Feature Scaling: standardize the numeric predictors in place
Affairs[, c(3,4,7,8,9)] = scale(Affairs[, c(3,4,7,8,9)])
# fitting model1 (all predictors) on Affairs data
model1<-glm(affairs~gender+age+yearsmarried+children+religiousness+education+occupation+rating,
            data = Affairs,family = "binomial")
summary(model1)
library("MASS")
stepAIC(model1)
# fitting model after removing feature occupation
model2<-glm(affairs~gender+age+yearsmarried+children+religiousness+education+rating,
            data = Affairs,family = "binomial")
summary(model2)
# fitting model after removing features occupation and education
model3<-glm(factor(affairs)~factor(gender)+age+yearsmarried+factor(children)+religiousness+rating,
            data = Affairs,family = "binomial")
summary(model3)
stepAIC(model3)
# fitting model after removing features occupation, children and education
model4<-glm(factor(affairs)~factor(gender)+age+yearsmarried+religiousness+rating,
            data = Affairs,family = "binomial")
summary(model4)
stepAIC(model4)
# odds ratios for the final model's coefficients
exp(coef(model4))
table(Affairs$affairs)
# Confusion matrix table
# NOTE(review): predict.glm has no 'data' argument -- it is silently ignored
# here, so these are fitted values on the TRAINING data ('newdata=' would be
# needed for out-of-sample prediction).
prob <- predict(model4,type=c("response"),data = Affairs[-1])
prob
y_pred<-ifelse(prob>0.5,1,0)
confusion<-table(prob>0.5,Affairs$affairs)
confusion
# Model Performance
# NOTE(review): confusion[1..4] indexes the 2x2 table column-major with
# rows = predicted (FALSE/TRUE) and cols = actual (0/1), so:
# [1]=TN, [2]=FP, [3]=FN, [4]=TP.
Accuracy<-sum(diag(confusion)/sum(confusion))
Accuracy
Error <- 1-Accuracy
Error
Recal<-confusion[4]/(confusion[3]+confusion[4]) #TPR
Recal
Precision<-confusion[4]/(confusion[2]+confusion[4])
Precision
F1_score<-2*Precision*Recal/(Precision+Recal)
F1_score
Specificity<-confusion[1]/(confusion[1]+confusion[2])#TNR
Specificity
Precision
# ROC Curve
library(ROCR)
rocrpred<-prediction(prob,Affairs$affairs)
rocrperf<-performance(rocrpred,'tpr','fpr')
plot(rocrperf,colorize=T,text.adj=c(-0.2,1.7))
|
7d7ee9639fdc2833594c8799fa0555f527c64988
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/bpcs/R/data.R
|
d98f6a2f61e218d19090fc66dd47a5847ebacf15
|
[
"MIT"
] |
permissive
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,742
|
r
|
data.R
|
#' This is the expansion of the tennis data from Agresti (2003) p.449
#' This data refers to matches for several women tennis players during 1989 and 1990
#' @name tennis_agresti
#' @docType data
#' @format This is the expansion of the data where each row contains 1 match only
#' * player0: name of player0
#' * player1: name of player1
#' * y: corresponds to the result of the match: 0 if player0 won, 1 if player1 won.
#' * id: is a column to make each row unique in the data. It does not have any particular interpretation
#' @source Agresti, Alan. Categorical data analysis. Vol. 482. John Wiley & Sons, 2003.
#' @keywords data
"tennis_agresti"
#' This is a dataset with the results of matches from the first league of the Brazilian soccer championship from 2017-2019.
#' It was reduced and translated from the adaoduque/Brasileirao_Dataset repository
#' @name brasil_soccer_league
#' @docType data
#' @format Data frame that contains 1140 matches and 9 Columns from the Brazilian soccer championship
#' * Time: time of the day in 24h format
#' * DayWeek: day of the week
#' * Date: date YY-MM-DD
#' * HomeTeam: name of the team playing home
#' * VisitorTeam: name of the team playing visitor
#' * Round: Round number of the championship
#' * Stadium: Name of the stadium where the game was played
#' * ScoreHomeTeam: number of goals for the home team
#' * ScoreVisitorTeam: number of goals for the visitor
#' @source \url{https://github.com/adaoduque/Brasileirao_Dataset}
#' @keywords data
"brasil_soccer_league"
#' Dataset containing an example of the performance of different optimization algorithms against different benchmark functions.
#' This is a reduced version of the dataset presented at the paper: "Statistical Models for the Analysis of Optimization Algorithms with Benchmark Functions.".
#' For details on how the data was collected we refer to the paper.
#' @name optimization_algorithms
#' @docType data
#' @format Data frame where each row contains one simulation of an algorithm on a benchmark function
#' * Algorithm: name of algorithm
#' * Benchmark: name of the benchmark problem
#' * TrueRewardDifference: Difference between the minimum function value obtained by the algorithm and the known global minimum
#' * Ndimensions: Number of dimensions of the benchmark problem
#' * MaxFevalPerDimensions: Maximum allowed budget for the algorithm per dimensions of the benchmark problem
#' * simNumber: id of the simulation. Indicates the repeated measures of each algorithm in each benchmark
#' @source Mattos, David Issa, Jan Bosch, and Helena Holmstrom Olsson. Statistical Models for the Analysis of Optimization Algorithms with Benchmark Functions. arXiv preprint arXiv:2010.03783 (2020).
#' @keywords data
"optimization_algorithms"
|
f79a2283fd96527374cf2f51d361a4a58af3bfcf
|
97a1e76db46f9876e4703fe17f1e051ce92f5a61
|
/man/IGHV_Epitopes.Rd
|
4f83cc4bf0706c5bcd669a83fe5c3a8fa71ee2a6
|
[
"MIT"
] |
permissive
|
snaketron/IgGeneUsage
|
032a6bb379704588278c693fcc4bff3183c2c897
|
61b59ca85d612b56372aef44f2679d25c93fded6
|
refs/heads/master
| 2023-08-17T00:24:15.048356
| 2023-08-09T21:35:49
| 2023-08-09T21:35:49
| 193,404,464
| 15
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,070
|
rd
|
IGHV_Epitopes.Rd
|
\name{CDR3_Epitopes}
\alias{CDR3_Epitopes}
\docType{data}
\title{Net charge usage in CDR3 sequences of T-cell receptor repertoires
disturbed by Influenza-A and CMV}
\description{
Data of CDR3 sequence from human T-cells receptors (TRB-chain) downloaded from
VDJdb. CDR3 sequences annotated to epitopes in Influenza-A and CMV were selected
from different publications, as long as the publication contains at least 100
CDR3 sequences. Each publication is considered as a repertoire (sample).
To compute the net CDR3 sequence charge, we consider the amino acids K, R and
H as +1 charged, while D and E as -1 charged. Thus, we computed the net charge
of a CDR3 sequence by adding up the individual residue charges.
}
\usage{data("CDR3_Epitopes")}
\format{
A data frame with 4 columns: "sample_id", "condition", "gene_name" and
"gene_usage_count". The format of the data is suitable to be used as input in
IgGeneUsage
gene_name = net charge group
}
\source{
https://vdjdb.cdr3.net/
}
\examples{
data(CDR3_Epitopes)
head(CDR3_Epitopes)
}
\keyword{CDR3_Epitopes}
|
39a9d8913da49952bca2bd003fbe86cbe39f50e1
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610035961-test.R
|
2fe6b36428ec9cea124b7deedb5069e177e84a95
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
r
|
1610035961-test.R
|
# Auto-generated fuzzer regression input for distr6:::C_EmpiricalMVPdf.
# 'data' is a degenerate 1x1 empirical sample; 'x' is a 7x5 matrix of
# evaluation points (mostly zeros plus a few extreme double values).
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(3.79212874880738e+146, 0, 4.90512589555316e-312, 7.73866088151611e-317, 7.94285525380525e-275, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 5L)))
# Invoke the compiled routine with the fuzzed arguments and show the result
# structure; the test passes if this does not crash under valgrind.
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result)
|
c3db84ad56215c848ed17af4473c1a3b30ba770a
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/dmt/R/set.M.R
|
e10d1bfee9036c1392ba0864cf333605bbb56651
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,486
|
r
|
set.M.R
|
# (C) 2008-2011 Leo Lahti and Olli-Pekka Huovilainen
# All rights reserved.
# FreeBSD License (keep this notice)
# "Do not worry about your difficulties in Mathematics. I can assure you
# mine are still greater."
# - Albert Einstein
set.M.isotropic <- function(wtw, sigma.sq, dx) {
  # (C) 2008-2011 Leo Lahti and Olli-Pekka Huovilainen
  # All rights reserved.
  # FreeBSD License (keep this notice)
  #
  # Inverts (W'W + sigma.sq * I) for the isotropic noise model.
  # wtw: precomputed dx-by-dx crossproduct W'W; sigma.sq: scalar noise
  # variance; dx: dimension of the identity block.
  ridge <- diag(sigma.sq, dx, dx)
  solve(wtw + ridge)
}
set.M <- function(W, phi) {
  # (C) 2008-2011 Leo Lahti and Olli-Pekka Huovilainen
  # All rights reserved.
  # FreeBSD License (keep this notice)
  #
  # Inverts (W'W / phi + I) where phi is a scalar variance and the
  # identity has dimension ncol(W).
  scaled_gram <- crossprod(W) / phi
  solve(scaled_gram + diag(ncol(W)))
}
set.M.full <- function(W, phi.inv) {
  # (C) 2008-2011 Leo Lahti and Olli-Pekka Huovilainen
  # All rights reserved.
  # FreeBSD License (keep this notice)
  #
  # Variant for a full (non-diagonal) marginal covariance:
  # inverts (W' * phi.inv * W + I) with identity of size ncol(W).
  weighted_gram <- t(W) %*% phi.inv %*% W
  solve(weighted_gram + diag(ncol(W)))
}
set.M.full2 <- function(W, phi.inv) {
  # (C) 2008-2011 Leo Lahti and Olli-Pekka Huovilainen
  # All rights reserved.
  # FreeBSD License (keep this notice)
  #
  # Modified from G in Bishop's book. When phi$total is block-diagonal
  # (blocks X and Y), the full-covariance result decomposes into a sum of
  # the two per-block terms; this is equivalent to
  #   M <- set.M.full(W$total, phi.inv$total)  # non-matched case
  # but should be faster in general.
  block_x <- t(W$X) %*% phi.inv$X %*% W$X
  block_y <- t(W$Y) %*% phi.inv$Y %*% W$Y
  solve(block_x + block_y + diag(ncol(W$X)))
}
|
c708a3c4c0f7bda883ae7fcad1ad5bc7be38f681
|
8258ac64f4b1a6afe35c8b86e526148ba15ce4ce
|
/Master Thesis/R - Caprino/Caprino - Others/7. C - Rendimenti - AE.R
|
f82ae8df00f7ce04277530f6bddafd4af4d9b0e6
|
[] |
no_license
|
maricorsi17/University-Projects
|
212bba7462068ad0da5140000acd8a24c965cc57
|
f5e9e044ff17dfc47f2002759e19d8c72108f145
|
refs/heads/master
| 2020-04-23T17:39:21.037187
| 2019-03-02T19:16:18
| 2019-03-02T19:16:18
| 171,339,185
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 20,035
|
r
|
7. C - Rendimenti - AE.R
|
# Install (once) and load the required packages
#install.packages("xts")
library("xts")
#install.packages("TTR")
library("TTR")
library("plotrix")
library("hydroTSM")
library("zoo")
library("RColorBrewer")
# Load the plant monitoring data (semicolon-separated CSV, decimal comma)
Caprino<-read.csv2(file="c:/Users/Marianna/Documents/Universita/Tesi/R - Caprino/Caprino.CSV", header=TRUE, sep=";")
# Create the time series
## Date vector covering the whole monitoring period (1277 daily dates
## starting 2015-01-01, i.e. through mid-2018)
datestotal<-seq(as.Date("2015-01-01"), length=1277, by="days")
# BOD, whole period: daily load series = column 15/16 (in/out, presumably
# mg/l -- confirm against the data sheet) * column 6 (flow) / 1000;
# then mean removal efficiency [%] and population equivalents
# (/60*1000, i.e. assuming 60 g BOD5 per AE per day -- verify).
tsCaricoBODINtotal<-(na.omit(xts(x=Caprino[,15], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoBODINtotal<-mean(tsCaricoBODINtotal)
tsCaricoBODOUTtotal<-(na.omit(xts(x=Caprino[,16], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoBODOUTtotal<-mean(tsCaricoBODOUTtotal)
rendBODtotal<-(mediaCaricoBODINtotal - mediaCaricoBODOUTtotal)/mediaCaricoBODINtotal*100
aeBODtotal<-mediaCaricoBODINtotal/60*1000
# BOD 2015 (yearly subsets use xts date-range subsetting, e.g. ["2015"])
tsCaricoBODIN2015<-tsCaricoBODINtotal["2015"]
mediaCaricoBODIN2015<-mean(tsCaricoBODIN2015)
tsCaricoBODOUT2015<-tsCaricoBODOUTtotal["2015"]
mediaCaricoBODOUT2015<-mean(tsCaricoBODOUT2015)
rendBOD2015<-(mediaCaricoBODIN2015 - mediaCaricoBODOUT2015)/mediaCaricoBODIN2015*100
aeBOD2015<-mediaCaricoBODIN2015/60*1000
percentileBOD2015<-quantile(tsCaricoBODIN2015/60*1000,c(0.90))
# BOD 2016
tsCaricoBODIN2016<-tsCaricoBODINtotal["2016"]
mediaCaricoBODIN2016<-mean(tsCaricoBODIN2016)
tsCaricoBODOUT2016<-tsCaricoBODOUTtotal["2016"]
mediaCaricoBODOUT2016<-mean(tsCaricoBODOUT2016)
rendBOD2016<-(mediaCaricoBODIN2016 - mediaCaricoBODOUT2016)/mediaCaricoBODIN2016*100
aeBOD2016<-mediaCaricoBODIN2016/60*1000
percentileBOD2016<-quantile(tsCaricoBODIN2016/60*1000,c(0.90))
# BOD 2017
tsCaricoBODIN2017<-tsCaricoBODINtotal["2017"]
mediaCaricoBODIN2017<-mean(tsCaricoBODIN2017)
tsCaricoBODOUT2017<-tsCaricoBODOUTtotal["2017"]
mediaCaricoBODOUT2017<-mean(tsCaricoBODOUT2017)
rendBOD2017<-(mediaCaricoBODIN2017 - mediaCaricoBODOUT2017)/mediaCaricoBODIN2017*100
aeBOD2017<-mediaCaricoBODIN2017/60*1000
percentileBOD2017<-quantile(tsCaricoBODIN2017/60*1000,c(0.90))
# BOD 2018
tsCaricoBODIN2018<-tsCaricoBODINtotal["2018"]
mediaCaricoBODIN2018<-mean(tsCaricoBODIN2018)
tsCaricoBODOUT2018<-tsCaricoBODOUTtotal["2018"]
mediaCaricoBODOUT2018<-mean(tsCaricoBODOUT2018)
rendBOD2018<-(mediaCaricoBODIN2018 - mediaCaricoBODOUT2018)/mediaCaricoBODIN2018*100
aeBOD2018<-mediaCaricoBODIN2018/60*1000
percentileBOD2018<-quantile(tsCaricoBODIN2018/60*1000,c(0.90))
# Collect yearly efficiencies [%] and AE values for later plotting
rendimentiBOD<-c(rendBOD2015,rendBOD2016,rendBOD2017,rendBOD2018)
aeBOD<-c(aeBOD2015,aeBOD2016,aeBOD2017,aeBOD2018)
# COD, whole period: same scheme as BOD but columns 17/18 and
# /120*1000 for AE (presumably 120 g COD per AE per day -- verify).
tsCaricoCODINtotal<-(na.omit(xts(x=Caprino[,17], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoCODINtotal<-mean(tsCaricoCODINtotal)
tsCaricoCODOUTtotal<-(na.omit(xts(x=Caprino[,18], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoCODOUTtotal<-mean(tsCaricoCODOUTtotal)
rendCODtotal<-(mediaCaricoCODINtotal - mediaCaricoCODOUTtotal)/mediaCaricoCODINtotal*100
aeCODtotal<-mediaCaricoCODINtotal/120*1000
# COD 2015
tsCaricoCODIN2015<-tsCaricoCODINtotal["2015"]
mediaCaricoCODIN2015<-mean(tsCaricoCODIN2015)
tsCaricoCODOUT2015<-tsCaricoCODOUTtotal["2015"]
mediaCaricoCODOUT2015<-mean(tsCaricoCODOUT2015)
rendCOD2015<-(mediaCaricoCODIN2015 - mediaCaricoCODOUT2015)/mediaCaricoCODIN2015*100
aeCOD2015<-mediaCaricoCODIN2015/120*1000
percentileCOD2015<-quantile(tsCaricoCODIN2015/120*1000,c(0.92))
# COD 2016
tsCaricoCODIN2016<-tsCaricoCODINtotal["2016"]
mediaCaricoCODIN2016<-mean(tsCaricoCODIN2016)
tsCaricoCODOUT2016<-tsCaricoCODOUTtotal["2016"]
mediaCaricoCODOUT2016<-mean(tsCaricoCODOUT2016)
rendCOD2016<-(mediaCaricoCODIN2016 - mediaCaricoCODOUT2016)/mediaCaricoCODIN2016*100
aeCOD2016<-mediaCaricoCODIN2016/120*1000
percentileCOD2016<-quantile(tsCaricoCODIN2016/120*1000,c(0.92))
# COD 2017
tsCaricoCODIN2017<-tsCaricoCODINtotal["2017"]
mediaCaricoCODIN2017<-mean(tsCaricoCODIN2017)
tsCaricoCODOUT2017<-tsCaricoCODOUTtotal["2017"]
mediaCaricoCODOUT2017<-mean(tsCaricoCODOUT2017)
rendCOD2017<-(mediaCaricoCODIN2017 - mediaCaricoCODOUT2017)/mediaCaricoCODIN2017*100
aeCOD2017<-mediaCaricoCODIN2017/120*1000
percentileCOD2017<-quantile(tsCaricoCODIN2017/120*1000,c(0.92))
# COD 2018
tsCaricoCODIN2018<-tsCaricoCODINtotal["2018"]
mediaCaricoCODIN2018<-mean(tsCaricoCODIN2018)
tsCaricoCODOUT2018<-tsCaricoCODOUTtotal["2018"]
mediaCaricoCODOUT2018<-mean(tsCaricoCODOUT2018)
rendCOD2018<-(mediaCaricoCODIN2018 - mediaCaricoCODOUT2018)/mediaCaricoCODIN2018*100
aeCOD2018<-mediaCaricoCODIN2018/120*1000
percentileCOD2018<-quantile(tsCaricoCODIN2018/120*1000,c(0.92))
# Collect yearly efficiencies [%] and AE values for later plotting
rendimentiCOD<-c(rendCOD2015,rendCOD2016,rendCOD2017,rendCOD2018)
aeCOD<-c(aeCOD2015,aeCOD2016,aeCOD2017,aeCOD2018)
# Total nitrogen (Ntot), whole period: columns 29/30 and /12*1000 for AE
# (presumably 12 g N per AE per day -- verify).
tsCaricoNtotINtotal<-(na.omit(xts(x=Caprino[,29], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoNtotINtotal<-mean(tsCaricoNtotINtotal)
tsCaricoNtotOUTtotal<-(na.omit(xts(x=Caprino[,30], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoNtotOUTtotal<-mean(tsCaricoNtotOUTtotal)
rendNtottotal<-(mediaCaricoNtotINtotal - mediaCaricoNtotOUTtotal)/mediaCaricoNtotINtotal*100
aeNtottotal<-mediaCaricoNtotINtotal/12*1000
# Ntot 2015
tsCaricoNtotIN2015<-tsCaricoNtotINtotal["2015"]
mediaCaricoNtotIN2015<-mean(tsCaricoNtotIN2015)
tsCaricoNtotOUT2015<-tsCaricoNtotOUTtotal["2015"]
mediaCaricoNtotOUT2015<-mean(tsCaricoNtotOUT2015)
rendNtot2015<-(mediaCaricoNtotIN2015 - mediaCaricoNtotOUT2015)/mediaCaricoNtotIN2015*100
aeNtot2015<-mediaCaricoNtotIN2015/12*1000
percentileN2015<-quantile(tsCaricoNtotIN2015/12*1000,c(0.92))
# Ntot 2016
tsCaricoNtotIN2016<-tsCaricoNtotINtotal["2016"]
mediaCaricoNtotIN2016<-mean(tsCaricoNtotIN2016)
tsCaricoNtotOUT2016<-tsCaricoNtotOUTtotal["2016"]
mediaCaricoNtotOUT2016<-mean(tsCaricoNtotOUT2016)
rendNtot2016<-(mediaCaricoNtotIN2016 - mediaCaricoNtotOUT2016)/mediaCaricoNtotIN2016*100
aeNtot2016<-mediaCaricoNtotIN2016/12*1000
percentileN2016<-quantile(tsCaricoNtotIN2016/12*1000,c(0.92))
# Ntot 2017
tsCaricoNtotIN2017<-tsCaricoNtotINtotal["2017"]
mediaCaricoNtotIN2017<-mean(tsCaricoNtotIN2017)
tsCaricoNtotOUT2017<-tsCaricoNtotOUTtotal["2017"]
mediaCaricoNtotOUT2017<-mean(tsCaricoNtotOUT2017)
rendNtot2017<-(mediaCaricoNtotIN2017 - mediaCaricoNtotOUT2017)/mediaCaricoNtotIN2017*100
aeNtot2017<-mediaCaricoNtotIN2017/12*1000
percentileN2017<-quantile(tsCaricoNtotIN2017/12*1000,c(0.92))
# Ntot 2018
tsCaricoNtotIN2018<-tsCaricoNtotINtotal["2018"]
mediaCaricoNtotIN2018<-mean(tsCaricoNtotIN2018)
tsCaricoNtotOUT2018<-tsCaricoNtotOUTtotal["2018"]
mediaCaricoNtotOUT2018<-mean(tsCaricoNtotOUT2018)
rendNtot2018<-(mediaCaricoNtotIN2018 - mediaCaricoNtotOUT2018)/mediaCaricoNtotIN2018*100
aeNtot2018<-mediaCaricoNtotIN2018/12*1000
percentileN2018<-quantile(tsCaricoNtotIN2018/12*1000,c(0.92))
# Collect yearly efficiencies [%] and AE values for later plotting
rendimentiNtot<-c(rendNtot2015,rendNtot2016,rendNtot2017,rendNtot2018)
aeNtot<-c(aeNtot2015,aeNtot2016,aeNtot2017,aeNtot2018)
# Total phosphorus (Ptot), whole period: columns 27/28 and /1.2*1000 for AE
# (presumably 1.2 g P per AE per day -- verify).
tsCaricoPtotINtotal<-(na.omit(xts(x=Caprino[,27], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoPtotINtotal<-mean(tsCaricoPtotINtotal)
tsCaricoPtotOUTtotal<-(na.omit(xts(x=Caprino[,28], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoPtotOUTtotal<-mean(tsCaricoPtotOUTtotal)
rendPtottotal<-(mediaCaricoPtotINtotal - mediaCaricoPtotOUTtotal)/mediaCaricoPtotINtotal*100
aePtottotal<-mediaCaricoPtotINtotal/1.2*1000
# Ptot 2015
tsCaricoPtotIN2015<-tsCaricoPtotINtotal["2015"]
mediaCaricoPtotIN2015<-mean(tsCaricoPtotIN2015)
tsCaricoPtotOUT2015<-tsCaricoPtotOUTtotal["2015"]
mediaCaricoPtotOUT2015<-mean(tsCaricoPtotOUT2015)
rendPtot2015<-(mediaCaricoPtotIN2015 - mediaCaricoPtotOUT2015)/mediaCaricoPtotIN2015*100
aePtot2015<-mediaCaricoPtotIN2015/1.2*1000
percentileP2015<-quantile(tsCaricoPtotIN2015/1.2*1000,c(0.92))
# Ptot 2016
tsCaricoPtotIN2016<-tsCaricoPtotINtotal["2016"]
mediaCaricoPtotIN2016<-mean(tsCaricoPtotIN2016)
tsCaricoPtotOUT2016<-tsCaricoPtotOUTtotal["2016"]
mediaCaricoPtotOUT2016<-mean(tsCaricoPtotOUT2016)
rendPtot2016<-(mediaCaricoPtotIN2016 - mediaCaricoPtotOUT2016)/mediaCaricoPtotIN2016*100
aePtot2016<-mediaCaricoPtotIN2016/1.2*1000
percentileP2016<-quantile(tsCaricoPtotIN2016/1.2*1000,c(0.92))
# Ptot 2017
tsCaricoPtotIN2017<-tsCaricoPtotINtotal["2017"]
mediaCaricoPtotIN2017<-mean(tsCaricoPtotIN2017)
tsCaricoPtotOUT2017<-tsCaricoPtotOUTtotal["2017"]
mediaCaricoPtotOUT2017<-mean(tsCaricoPtotOUT2017)
rendPtot2017<-(mediaCaricoPtotIN2017 - mediaCaricoPtotOUT2017)/mediaCaricoPtotIN2017*100
aePtot2017<-mediaCaricoPtotIN2017/1.2*1000
percentileP2017<-quantile(tsCaricoPtotIN2017/1.2*1000,c(0.92))
# Ptot 2018
tsCaricoPtotIN2018<-tsCaricoPtotINtotal["2018"]
mediaCaricoPtotIN2018<-mean(tsCaricoPtotIN2018)
tsCaricoPtotOUT2018<-tsCaricoPtotOUTtotal["2018"]
mediaCaricoPtotOUT2018<-mean(tsCaricoPtotOUT2018)
rendPtot2018<-(mediaCaricoPtotIN2018 - mediaCaricoPtotOUT2018)/mediaCaricoPtotIN2018*100
aePtot2018<-mediaCaricoPtotIN2018/1.2*1000
percentileP2018<-quantile(tsCaricoPtotIN2018/1.2*1000,c(0.92))
# Collect yearly efficiencies [%] and AE values for later plotting
rendimentiPtot<-c(rendPtot2015,rendPtot2016,rendPtot2017,rendPtot2018)
aePtot<-c(aePtot2015,aePtot2016,aePtot2017,aePtot2018)
#percentili<-c(percentileBOD2015,percentileCOD2015,percentileN2015,percentileP2015,percentileBOD2016,percentileCOD2016,percentileN2016,percentileP2016,percentileBOD2017,percentileCOD2017,percentileN2017,percentileP2017,percentileBOD2018,percentileCOD2018,percentileN2018,percentileP2018)
# Percentile vectors padded with NA so they align with the 4x4 grouped
# barplot positions (one value per 4-bar group, at the matching bar).
percentiliBOD<-c(percentileBOD2015,NA,NA,NA,percentileBOD2016,NA,NA,NA,percentileBOD2017,NA,NA,NA,percentileBOD2018,NA,NA,NA)
percentiliCOD<-c(NA,percentileCOD2015,NA,NA,NA,percentileCOD2016,NA,NA,NA,percentileCOD2017,NA,NA,NA,percentileCOD2018,NA,NA)
# #SST totale
# tsCaricoSSTINtotal<-(na.omit(xts(x=Caprino[,13], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
# mediaCaricoSSTINtotal<-mean(tsCaricoSSTINtotal)
# tsCaricoSSTOUTtotal<-(na.omit(xts(x=Caprino[,14], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
# mediaCaricoSSTOUTtotal<-mean(tsCaricoSSTOUTtotal)
# rendSSTtotal<-(mediaCaricoSSTINtotal - mediaCaricoSSTOUTtotal)/mediaCaricoSSTINtotal*100
# #SST 2015
# tsCaricoSSTIN2015<-tsCaricoSSTINtotal["2015"]
# mediaCaricoSSTIN2015<-mean(tsCaricoSSTIN2015)
# tsCaricoSSTOUT2015<-tsCaricoSSTOUTtotal["2015"]
# mediaCaricoSSTOUT2015<-mean(tsCaricoSSTOUT2015)
# rendSST2015<-(mediaCaricoSSTIN2015 - mediaCaricoSSTOUT2015)/mediaCaricoSSTIN2015*100
# #SST 2016
# tsCaricoSSTIN2016<-tsCaricoSSTINtotal["2016"]
# mediaCaricoSSTIN2016<-mean(tsCaricoSSTIN2016)
# tsCaricoSSTOUT2016<-tsCaricoSSTOUTtotal["2016"]
# mediaCaricoSSTOUT2016<-mean(tsCaricoSSTOUT2016)
# rendSST2016<-(mediaCaricoSSTIN2016 - mediaCaricoSSTOUT2016)/mediaCaricoSSTIN2016*100
# #SST 2017
# tsCaricoSSTIN2017<-tsCaricoSSTINtotal["2017"]
# mediaCaricoSSTIN2017<-mean(tsCaricoSSTIN2017)
# tsCaricoSSTOUT2017<-tsCaricoSSTOUTtotal["2017"]
# mediaCaricoSSTOUT2017<-mean(tsCaricoSSTOUT2017)
# rendSST2017<-(mediaCaricoSSTIN2017 - mediaCaricoSSTOUT2017)/mediaCaricoSSTIN2017*100
# #SST 2018
# tsCaricoSSTIN2018<-tsCaricoSSTINtotal["2018"]
# mediaCaricoSSTIN2018<-mean(tsCaricoSSTIN2018)
# tsCaricoSSTOUT2018<-tsCaricoSSTOUTtotal["2018"]
# mediaCaricoSSTOUT2018<-mean(tsCaricoSSTOUT2018)
# rendSST2018<-(mediaCaricoSSTIN2018 - mediaCaricoSSTOUT2018)/mediaCaricoSSTIN2018*100
#
# rendimentiSST<-c(rendSST2015,rendSST2016,rendSST2017,rendSST2018)
# Nitrification (NIT), whole period: TKN in (column 31), ammonium-N out
# (column 20 scaled by 0.78 -- presumably NH4 -> N conversion, verify),
# and N assimilated into sludge estimated as 5% of the BOD removed.
tsCaricoTKNINtotal<-(na.omit(xts(x=Caprino[,31], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoTKNINtotal<-mean(tsCaricoTKNINtotal)
tsCaricoNNH4OUTtotal<-(na.omit(xts(x=Caprino[,20], order.by=datestotal))*0.78*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoNNH4OUTtotal<-mean(tsCaricoNNH4OUTtotal)
tsCaricoNASStotal<-0.05*(tsCaricoBODINtotal - tsCaricoBODOUTtotal)
mediaCaricoNASStotal<-mean(tsCaricoNASStotal)
rendNITtotal<-(mediaCaricoTKNINtotal - mediaCaricoNNH4OUTtotal - mediaCaricoNASStotal)/(mediaCaricoTKNINtotal - mediaCaricoNASStotal)*100
# NIT 2015
tsCaricoTKNIN2015<-tsCaricoTKNINtotal["2015"]
mediaCaricoTKNIN2015<-mean(tsCaricoTKNIN2015)
tsCaricoNNH4OUT2015<-tsCaricoNNH4OUTtotal["2015"]
mediaCaricoNNH4OUT2015<-mean(tsCaricoNNH4OUT2015)
tsCaricoNASS2015<-0.05*(tsCaricoBODIN2015 - tsCaricoBODOUT2015)
mediaCaricoNASS2015<-mean(tsCaricoNASS2015)
rendNIT2015<-(mediaCaricoTKNIN2015 - mediaCaricoNNH4OUT2015 - mediaCaricoNASS2015)/(mediaCaricoTKNIN2015 - mediaCaricoNASS2015)*100
# NIT 2016
tsCaricoTKNIN2016<-tsCaricoTKNINtotal["2016"]
mediaCaricoTKNIN2016<-mean(tsCaricoTKNIN2016)
tsCaricoNNH4OUT2016<-tsCaricoNNH4OUTtotal["2016"]
mediaCaricoNNH4OUT2016<-mean(tsCaricoNNH4OUT2016)
tsCaricoNASS2016<-0.05*(tsCaricoBODIN2016 - tsCaricoBODOUT2016)
mediaCaricoNASS2016<-mean(tsCaricoNASS2016)
rendNIT2016<-(mediaCaricoTKNIN2016 - mediaCaricoNNH4OUT2016 - mediaCaricoNASS2016)/(mediaCaricoTKNIN2016 - mediaCaricoNASS2016)*100
# NIT 2017
tsCaricoTKNIN2017<-tsCaricoTKNINtotal["2017"]
mediaCaricoTKNIN2017<-mean(tsCaricoTKNIN2017)
tsCaricoNNH4OUT2017<-tsCaricoNNH4OUTtotal["2017"]
mediaCaricoNNH4OUT2017<-mean(tsCaricoNNH4OUT2017)
tsCaricoNASS2017<-0.05*(tsCaricoBODIN2017 - tsCaricoBODOUT2017)
mediaCaricoNASS2017<-mean(tsCaricoNASS2017)
rendNIT2017<-(mediaCaricoTKNIN2017 - mediaCaricoNNH4OUT2017 - mediaCaricoNASS2017)/(mediaCaricoTKNIN2017 - mediaCaricoNASS2017)*100
# NIT 2018
tsCaricoTKNIN2018<-tsCaricoTKNINtotal["2018"]
mediaCaricoTKNIN2018<-mean(tsCaricoTKNIN2018)
tsCaricoNNH4OUT2018<-tsCaricoNNH4OUTtotal["2018"]
mediaCaricoNNH4OUT2018<-mean(tsCaricoNNH4OUT2018)
tsCaricoNASS2018<-0.05*(tsCaricoBODIN2018 - tsCaricoBODOUT2018)
mediaCaricoNASS2018<-mean(tsCaricoNASS2018)
rendNIT2018<-(mediaCaricoTKNIN2018 - mediaCaricoNNH4OUT2018 - mediaCaricoNASS2018)/(mediaCaricoTKNIN2018 - mediaCaricoNASS2018)*100
# Collect yearly nitrification efficiencies [%]
rendimentiNIT<-c(rendNIT2015,rendNIT2016,rendNIT2017,rendNIT2018)
# Denitrification (DEN), whole period: nitrate-N out (column 24) and
# nitrite-N out (column 22); efficiency computed on the nitrified fraction
# (Ntot in, minus NH4-N out and N assimilated into sludge).
tsCaricoNNO3OUTtotal<-(na.omit(xts(x=Caprino[,24], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoNNO3OUTtotal<-mean(tsCaricoNNO3OUTtotal)
tsCaricoNNO2OUTtotal<-(na.omit(xts(x=Caprino[,22], order.by=datestotal))*xts(x=Caprino[,6],order.by=datestotal)/1000)
mediaCaricoNNO2OUTtotal<-mean(tsCaricoNNO2OUTtotal)
rendDENtotal<-(mediaCaricoNtotINtotal - mediaCaricoNNH4OUTtotal-mediaCaricoNNO3OUTtotal-mediaCaricoNNO2OUTtotal - mediaCaricoNASStotal)/(mediaCaricoNtotINtotal - mediaCaricoNNH4OUTtotal - mediaCaricoNASStotal)*100
# DEN 2015
tsCaricoNNO3OUT2015<-tsCaricoNNO3OUTtotal["2015"]
mediaCaricoNNO3OUT2015<-mean(tsCaricoNNO3OUT2015)
tsCaricoNNO2OUT2015<-tsCaricoNNO2OUTtotal["2015"]
mediaCaricoNNO2OUT2015<-mean(tsCaricoNNO2OUT2015)
rendDEN2015<-(mediaCaricoNtotIN2015 - mediaCaricoNNH4OUT2015-mediaCaricoNNO3OUT2015-mediaCaricoNNO2OUT2015 - mediaCaricoNASS2015)/(mediaCaricoNtotIN2015 - mediaCaricoNNH4OUT2015 - mediaCaricoNASS2015)*100
# DEN 2016
tsCaricoNNO3OUT2016<-tsCaricoNNO3OUTtotal["2016"]
mediaCaricoNNO3OUT2016<-mean(tsCaricoNNO3OUT2016)
tsCaricoNNO2OUT2016<-tsCaricoNNO2OUTtotal["2016"]
mediaCaricoNNO2OUT2016<-mean(tsCaricoNNO2OUT2016)
rendDEN2016<-(mediaCaricoNtotIN2016 - mediaCaricoNNH4OUT2016-mediaCaricoNNO3OUT2016-mediaCaricoNNO2OUT2016 - mediaCaricoNASS2016)/(mediaCaricoNtotIN2016 - mediaCaricoNNH4OUT2016 - mediaCaricoNASS2016)*100
# DEN 2017
tsCaricoNNO3OUT2017<-tsCaricoNNO3OUTtotal["2017"]
mediaCaricoNNO3OUT2017<-mean(tsCaricoNNO3OUT2017)
tsCaricoNNO2OUT2017<-tsCaricoNNO2OUTtotal["2017"]
mediaCaricoNNO2OUT2017<-mean(tsCaricoNNO2OUT2017)
rendDEN2017<-(mediaCaricoNtotIN2017 - mediaCaricoNNH4OUT2017-mediaCaricoNNO3OUT2017-mediaCaricoNNO2OUT2017 - mediaCaricoNASS2017)/(mediaCaricoNtotIN2017 - mediaCaricoNNH4OUT2017 - mediaCaricoNASS2017)*100
# DEN 2018
tsCaricoNNO3OUT2018<-tsCaricoNNO3OUTtotal["2018"]
mediaCaricoNNO3OUT2018<-mean(tsCaricoNNO3OUT2018)
tsCaricoNNO2OUT2018<-tsCaricoNNO2OUTtotal["2018"]
mediaCaricoNNO2OUT2018<-mean(tsCaricoNNO2OUT2018)
rendDEN2018<-(mediaCaricoNtotIN2018 - mediaCaricoNNH4OUT2018-mediaCaricoNNO3OUT2018-mediaCaricoNNO2OUT2018 - mediaCaricoNASS2018)/(mediaCaricoNtotIN2018 - mediaCaricoNNH4OUT2018 - mediaCaricoNASS2018)*100
# Collect yearly denitrification efficiencies [%]
rendimentiDEN<-c(rendDEN2015,rendDEN2016,rendDEN2017,rendDEN2018)
# Assemble all efficiencies into a data frame:
# rows = parameter (BOD, COD, Ntot, Ptot, NIT, DEN), columns = years.
df<-setNames(data.frame(matrix(ncol=4,nrow=0)),c(2015,2016,2017,2018))
df[1,]<-rendimentiBOD
df[2,]<-rendimentiCOD
df[3,]<-rendimentiNtot
df[4,]<-rendimentiPtot
df[5,]<-rendimentiNIT
df[6,]<-rendimentiDEN
# Plot 1: grouped bar chart of yearly removal efficiencies per parameter.
# NOTE(review): 'ad=T' relies on partial argument matching for 'add=TRUE';
# spell it out if this script is refactored.
windows(width = 16,height = 9)
par(mar=c(6,6,4,6),mgp=c(4,1,0)) # margins and axis-label distance
options(OutDec = ",")
bp<-barplot(as.matrix(df),beside=T,ylim=c(0,100),col=brewer.pal(6,"Set3"),las=1,yaxt="n",xaxt="n")
grid(nx=NA,ny=5,col="grey")
barplot(as.matrix(df),beside=T,ylim=c(0,100),col=brewer.pal(6,"Set3"),ylab="Rendimento [%]",ad=T,las=1)
abline(h=100,lty=3,col="grey")
abline(h=0)
text(bp,as.matrix(df)/2,as.character(as.matrix(format(round(df,digits=1))),nsmall=1),srt=90)
legend("bottom",ncol=4, c(expression("BOD"[5]),"COD",expression("N"[tot]),expression("P"[tot]),"Nitrificazione","Denitrificazione"),pch=c(22,22,22,22,22,22),pt.bg=brewer.pal(6,"Set3"),pt.cex = 2,bg="white")
# Plot 2: horizontal bar chart of whole-period efficiencies (reversed so
# BOD appears on top).
windows(width = 16,height = 9)
par(mar=c(6,6,4,6),mgp=c(3,1,0)) # margins and axis-label distance
rendimentitotal<-rev(c(rendBODtotal,rendCODtotal,rendNtottotal,rendPtottotal,rendNITtotal,rendDENtotal))
bpt<-barplot(rendimentitotal,horiz = T, xlim=c(0,100),space=c(0,0),xlab="Rendimento [%]",ylab="2015 - 2018",col=rev(brewer.pal(6,"Set3")))
text(rendimentitotal/2,bpt,as.character(format(round(rendimentitotal,digits=1)),nsmall=1))
legend("left", c(expression("BOD"[5]),"COD",expression("N"[tot]),expression("P"[tot]),"Nitrificazione","Denitrificazione"),pch=c(22,22,22,22,22,22),pt.bg=brewer.pal(6,"Set3"),pt.cex = 2,bg="white")
abline(v=0)
#AE
df1<-setNames(data.frame(matrix(ncol=4,nrow=0)),c(2015,2016,2017,2018))
df1[1,]<-aeBOD
df1[2,]<-aeCOD
df1[3,]<-aeNtot
df1[4,]<-aePtot
windows(width = 16,height = 9)
par(mar=c(6,6,4,6),mgp=c(4,1,0)) #margini e distanza etichette-asse
bp1<-barplot(as.matrix(df1),beside=T,ylim=c(0,35000),col=brewer.pal(4,"Set3"),las=1,yaxt="n",xaxt="n")
grid(nx=NA,ny=7,col="grey")
barplot(as.matrix(df1),beside=T,ylim=c(0,35000),col=brewer.pal(4,"Set3"),yaxt="n",ylab=expression(paste("BOD"[5-IN],", COD"[IN],", N"[tot-IN],", P"[tot-IN]," [AE]")),ad=T,las=1)
arrows(bp1, percentiliBOD, bp1, unlist(df1), angle = 90, code = 1, length = 0.05)
arrows(bp1, percentiliCOD, bp1, unlist(df1), angle = 90, code = 1, length = 0.05,col="blue")
axis(side=2,at=seq(from = 0, to = 35000, by = 5000), las=2,format(seq(from = 0,to = 35000,by = 5000), big.mark = ".", decimal.mark = ","))
abline(h=35000,lty=3,col="grey")
abline(h=0)
text(bp1,as.matrix(df1)/2,as.character(as.matrix(format(round(df1),big.mark=".", decimal.mark = ","))),srt=90)
text(bp1,percentiliBOD+2000,as.character(format(round(percentiliBOD),big.mark=".", decimal.mark = ",")),srt=90)
text(bp1,percentiliCOD+2000,as.character(format(round(percentiliCOD),big.mark=".", decimal.mark = ",")),srt=90)
legend(x=9,y=32500,ncol=3, c(expression("BOD"[5-IN]),expression("COD"[IN]),expression("N"[tot-IN]),expression("P"[tot-IN]),"90° percentile","92° percentile"),pch=c(22,22,22,22,NA,NA),col=c(NA,NA,NA,NA,"black","blue"),lty=c(NA,NA,NA,NA,1,1),pt.bg=brewer.pal(4,"Set3"),pt.cex = 2,bg="white")
windows(width = 16,height = 9)
par(mar=c(6,6,4,6),mgp=c(3,1,0)) #margini e distanza etichette-asse
aetotal<-rev(c(aeBODtotal,aeCODtotal,aeNtottotal,aePtottotal))
bpt<-barplot(aetotal,horiz = T, xlim=c(0,15000),space=c(0,0),xaxt="n",xlab=expression(paste("BOD"[5-IN],", COD"[IN],", N"[tot-IN],", P"[tot-IN]," [AE]")),ylab="2015 - 2018",col=rev(brewer.pal(4,"Set3")))
axis(side=1,at=seq(from = 0, to = 14000, by = 2000), las=1,format(seq(from = 0,to = 14000,by = 2000), big.mark = ".", decimal.mark = ","))
text(aetotal/2,bpt,as.character(format(round(aetotal), big.mark = ".", decimal.mark = ",")))
legend(x=12500,y=3.5, c(expression("BOD"[5-IN]),expression("COD"[IN]),expression("N"[tot-IN]),expression("P"[tot-IN])),pch=c(22,22,22,22),pt.bg=brewer.pal(4,"Set3"),pt.cex = 2,bg="white")
abline(v=0)
|
3c70ce6e1c5681eafc7191ef266f82a78216ad3f
|
f0ad9522c67f563680f853bf1189f2cce9ee1375
|
/R/cess.R
|
0454e9dfd4ffab0951e17ed385f1ec00b6377271
|
[] |
no_license
|
LiangCZhang/Rcess
|
832ae6922c2b3212f5d3768bb5099b917714e14b
|
675e65f33548a36f77b73719eb514fdf965d6b40
|
refs/heads/master
| 2021-01-12T10:21:13.776370
| 2018-12-03T08:12:57
| 2018-12-03T08:12:57
| 76,426,857
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,494
|
r
|
cess.R
|
#' Calculate estimates of costs and economies of scale and scope from 25 to 200 \% levels at means
#'
#' @param data The data used for calculating the estimates.
#' @param outputName A vector of strings containing the names of the independent (output) variables.
#' @param priceName A vector of strings containing the names of the independent (price) variables.
#' @param controlName A vector of strings containing the names of the control variables.
#' @param form A character string specifying the functional form of the cost function (e.g. "FFCQ-M"), as used by \code{costFunction} when building the model.
#' @param model The estimated model(nls class object).
#' @param vcovCL A variance matrix provided by clusterEst function
#' @return Estimates of scale and scope economies, including their standard
#' errors (SE), lower confidence bounds (Lo) and upper confidence bounds (Hi)
#' @author
#' Liang-Cheng Zhang
#' @references
#' Zhang, L.-C., Worthington, A. C., & Hu, M. (in press). Cost economies in the provision of
#' higher education for international students: Australian evidence. Higher Education. doi: \href{http://dx.doi.org/10.1007/s10734-016-0078-9}{10.1007/s10734-016-0078-9}
#'
#' Zhang, L.-C., & Worthington, A. C. (2015). Evaluating the accuracy of scope economies: comparisons among
#' delta method, bootstrap, and Bayesian approach. Paper presented at Australian Conference of Economists
#' PhD Colloquium. Retrieved from \href{http://www98.griffith.edu.au/dspace/handle/10072/69409}{http://www98.griffith.edu.au/dspace/handle/10072/69409}
#' @examples
#' ##Reproduce results of Zhang et al. (in press)
#' data(unidat)
#' data = unidat
#' library(minpack.lm)
#' model <- nlsLM(costFunction(costName = colnames(unidat)[3], outputName = colnames(unidat)[7:11],
#' priceName = colnames(unidat)[4:6], controlName = colnames(unidat)[12:24],
#' form = "FFCQ-M"), start = list(b0 = 600, b1 = 0, b2 = 0,
#' b3 = 0, b4 = 0, b5 = 0, b11 = 0, b22 = 0, b33 = 0, b44 = 0,
#' b55 = 0, b12 = 0, b13 = 0, b14 = 0, b15 = 0, b23 = 0, b24 = 0,
#' b25 = 0, b34 = 0, b35 = 0, b45 = 0, bp2 = 0, bp3 = 0, bz1 = 0,
#' bz2 = 0, bz3 = 0, bz4 = 0, bz5 = 0, bz6 = 0, bz7 = 0, bz8 = 0,
#' bz9 = 0, bz10 = 0, bz11 = 0, bz12 = 0, bz13 = 0), data = unidat,
#' trace = F)
#' vcovCL <- clusterEst(model = model , cluster = unidat$unicode)$vcovCL
#' cess(data=data, outputName = colnames(unidat)[7:11],priceName = colnames(unidat)[4:6],
#' controlName = colnames(unidat)[12:24], model=model, vcovCL=vcovCL)
#'
#' @import car
#' @export
# Compute cost-economy estimates (marginal/incremental costs, ray and
# product-specific scale economies, global and product-specific scope
# economies) at output levels from 25% to 200% of the sample means.
# NOTE(review): side effects -- the function assigns helper values
# (mw*, mz*, bp*, y*) into the global environment (car::deltaMethod
# evaluates the formula strings there) and accumulates per-level results
# in a temporary CSV in the working directory, read back and deleted at
# the end.
cess <- function(data, outputName, priceName, controlName, model, vcovCL=clusterEst(model = model , cluster = unidat$unicode)$vcovCL) {
  # NOTE(review): the default for `vcovCL` references the global object
  # `unidat`; pass `vcovCL` explicitly unless that dataset is available.
  model <- model    # self-assignments force evaluation of the promises
  vcovCL <- vcovCL
  mOutput <- list()
  coefPrice <- vector()
  # Estimated input-price coefficients bp2..bpK; bp1 is then recovered
  # from linear homogeneity in prices (coefficients sum to one), since the
  # delta method cannot extract it from the model directly.
  for (i in 2:length(priceName)) { # starting from 2 since w1 is the numeraire price
    coefPrice[i] <- coef(model)[sprintf("%s",paste("bp",i, sep = ""))]
  }
  coefPrice[1] <- 1 - sum(as.numeric(coefPrice[-1]))
  cPrice <- as.numeric(coefPrice)   # NOTE(review): `cPrice` is never used below
  # Sample means of outputs, prices and controls. The priceName/controlName
  # vectors are overwritten in place with the (character) means and
  # converted to numeric just after.
  for (i in 1:length(outputName)) {
    mOutput[i] <- mean(data[,outputName[i]])
  }
  for (i in 1:length(priceName)) {
    priceName[i] <- mean(data[,priceName[i]])
  }
  for (i in 1:length(controlName)) {
    controlName[i] <- mean(data[,controlName[i]])
  }
  mOutput <- as.numeric(mOutput)
  mPrice <- as.numeric(priceName)
  mControl <- as.numeric(controlName)
  # Build the textual pieces of the quadratic cost function --------------
  control <- list() # control terms "bz_i*mz_i"
  price <- list()   # price terms "mw_i^bp_i"
  single <- list()  # linear output terms "b_i*<output_i>"
  square <- list()  # quadratic output terms "b_ii*0.5*<output_i>^2"
  for (i in 1:length(priceName)) {
    price[[i]] <- paste(paste("mw",i, sep = ""), paste("bp",i, sep = ""), sep = "^")
  }
  for (i in 1:length(controlName)) {
    control[[i]] <- paste(paste("bz",i, sep = ""),paste("mz",i, sep = ""), sep = "*")
  }
  for (i in 1:length(outputName)) {
    single[[i]] <- paste(paste("b",i, sep = ""),outputName[i], sep = "*")
  }
  for (i in 1:length(outputName)) {
    square[[i]] <- paste(paste("b",i,i, sep = ""),0.5,paste(outputName[i],"^2",sep = ""), sep = "*")
  }
  # All pairwise output-interaction terms "b_ij*<output_i>*<output_j>"
  Y = combn(outputName,2)
  B = combn(length(outputName),2)
  Multi <- list()
  for (i in 1:ncol(Y)) {
    Multi[[i]] <- paste(paste("b",B[1,i],B[2,i], sep = ""),Y[1,i],Y[2,i], sep = "*")
  }
  # Quadratic cost function as an unevaluated expression so that D() can
  # differentiate it symbolically with respect to each output.
  # NOTE(review): the output columns appear to need names y1, y2, ... for
  # the assign()ed y* values below to be picked up -- confirm with data.
  QCF <- do.call("paste",c("b0",single, square, Multi, sep = "+"))
  QCF <- parse(text = QCF)
  # Publish mean prices/controls and price coefficients where deltaMethod
  # evaluates the formulas. NOTE(review): pollutes the global workspace.
  for (i in 1:length(priceName)) {
    assign( paste("mw",i,sep = ""), mPrice[i],pos = .GlobalEnv)
  }
  for (i in 1:length(controlName)) {
    assign( paste("mz",i,sep = ""), mControl[i],pos = .GlobalEnv)
  }
  for (i in 1:length(priceName)) {
    assign( paste("bp",i,sep = ""), coefPrice[i],pos = .GlobalEnv)
  }
  # Loop over output levels 25%, 50%, ..., 200% of the sample means ------
  p <- 0
  counter <- 0   # NOTE(review): `counter` is never used
  repeat {
    counter <- counter + 1
    p <- p + 0.25
    if (p > 2) break;
    m.Output <- mOutput*p # percentage of output mean; p = 1 means 100% of output mean
    for (i in 1:length(outputName)) {
      assign( paste("y",i,sep = ""), m.Output[i],pos = .GlobalEnv)
    }
    # Cost expressions evaluated at the current output level --------------
    #_ Tcost: total cost expression
    Tcost <- do.call("paste",
                     c(price,
                       sprintf("exp(%s)",do.call("paste",c(control, sep = "+"))),
                       sprintf("(%s)",do.call("paste",c("b0",single, square, Multi, sep = "+"))),
                       sep = "*")) # Total costs
    #_ mcy: marginal cost of each output (symbolic derivative of QCF)
    mcy <- list()
    for (i in 1:length(outputName)) {
      DF <- D(QCF, outputName[i])
      F1 <- as.character(DF)
      # F1[2] and F1[3] are the two operands of the derivative "a + b"
      mcy[[i]] <- do.call("paste",
                          c(price,
                            sprintf("exp(%s)",do.call("paste",c(control, sep = "+"))),
                            sprintf("(%s)",paste(F1[2],F1[3], sep = "+")),
                            sep = "*"))
    }
    # _ ICy: incremental cost of each output ------------------------------
    # using formula (2) of the international cost-economies paper
    ICy <- list()
    Multi2 <- list()
    Yij <- list()
    for (i in 1:length(outputName)) {
      # all interaction index pairs that involve output i
      Yi <- B[,which(B[1,] == i)]
      Yj <- B[,which(B[2,] == i)]
      Yij[[i]] <- cbind(Yi, Yj)
      Multi2[[i]] <- i }
    for (i in 1:length(outputName)) {
      for (j in 1:ncol(Yij[[i]])) {
        Multi2[[i]][j] <- paste(paste("b",Yij[[i]][1,j],Yij[[i]][2,j], sep = ""),paste("y",Yij[[i]][1,j],sep = ""),paste("y",Yij[[i]][2,j], sep = ""), sep = "*")
        ICy[[i]] <- do.call("paste",
                            c(price,
                              sprintf("exp(%s)",do.call("paste",c(control, sep = "+"))),
                              sprintf("(%s)",do.call("paste",c(single[i],square[i],Multi2[[i]], sep = "+"))),
                              sep = "*"))
      }
    }
    #_ Cy: stand-alone cost of producing output i alone -------------------
    Cy <- list()
    for (i in 1:length(outputName)) {
      Cy[[i]] <- do.call("paste",
                         c(price,
                           sprintf("exp(%s)",do.call("paste",c(control, sep = "+"))),
                           sprintf("(%s)",paste("b0",single[[i]],square[[i]], sep = "+")),
                           sep = "*"))
    }
    #_ Cy_: cost of producing all outputs except output i -----------------
    Cy_ <- list()
    Multi2 <- list()
    Yij <- list()
    for (i in 1:length(outputName)) {
      # interaction pairs that do NOT involve output i
      B1 <- B[,which(B[1,] != i)]
      Yij[[i]] <- B1[,which(B1[2,] != i)]
      Multi2[[i]] <- i
    }
    for (i in 1:length(outputName)) {
      for (j in 1:ncol(Yij[[i]])) {
        Multi2[[i]][j] <- paste(paste("b",Yij[[i]][1,j],Yij[[i]][2,j], sep = ""),paste("y",Yij[[i]][1,j],sep = ""),paste("y",Yij[[i]][2,j], sep = ""), sep = "*")
        Cy_[[i]] <- do.call("paste",
                            c(price,
                              sprintf("exp(%s)",do.call("paste",c(control, sep = "+"))),
                              sprintf("(%s)",do.call("paste",c("b0",single[-i],square[-i],Multi2[[i]], sep = "+"))),
                              sep = "*"))
      }
    }
    # Delta-method estimates of scale/scope measures ----------------------
    parameterNames = list(paste("b", 0:(11*length(outputName)), sep = ""),paste("bp", 1:length(priceName), sep = ""),paste("bz", 1:length(controlName), sep = ""))
    #_ marginal cost for each output
    for (i in 1:length(outputName)) {
      assign( paste("MCy",i,sep = ""), car::deltaMethod(coef(model), mcy[[i]], parameterNames = parameterNames, vcov. = vcovCL ))
    }
    #_ average incremental cost for each output: IC_i / y_i
    for (i in 1:length(outputName)) {
      assign( paste("AICy",i,sep = ""), car::deltaMethod(coef(model), paste(ICy[[i]],"/",m.Output[[i]], sep = ""), parameterNames = parameterNames, vcov. = vcovCL ))
    }
    #_ ray scale economies: TC / sum_i(y_i * MC_i)
    mymcy<-list()
    for (i in 1:length(outputName)) {
      mymcy[[i]] <- paste(paste("y",i,sep = ""),mcy[i], sep = "*")
    }
    SRAY <- car::deltaMethod(coef(model), paste(Tcost,"/(",do.call("paste",c(mymcy, sep = "+")),")", sep = ""), parameterNames = parameterNames, vcov. = vcovCL)
    K = sum(lengths(parameterNames))   # NOTE(review): `K` is never used
    #_ product-specific scale economies: IC_i / (y_i * MC_i)
    for (i in 1:length(outputName)) {
      assign( paste("PSCEy",i,sep = ""), car::deltaMethod(coef(model), paste(ICy[[i]],"/(",m.Output[[i]],"*",mcy[[i]],")", sep = ""), parameterNames = parameterNames, vcov. = vcovCL ))
    }
    #_ global scope economies: (sum_i C(y_i) - TC) / TC
    GSE <- car::deltaMethod(coef(model), paste("((",do.call("paste",c(Cy, sep = "+")),")-(",Tcost,"))/(",Tcost,")", sep = ""), parameterNames = parameterNames, vcov. = vcovCL)
    #_ product-specific scope economies: (C(y_i) + C(y_-i) - TC) / TC
    for (i in 1:length(outputName)) {
      assign( paste("PSOEy",i,sep = ""), car::deltaMethod(coef(model), paste("(",Cy[[i]],"+",Cy_[[i]],"-",Tcost,")/(",Tcost,")", sep = ""), parameterNames = parameterNames, vcov. = vcovCL ))
    }
    # Flatten estimate/SE/Lo/Hi of all measures into one named row --------
    pointEstimates <- list()
    for (i in 1:length(outputName)) {
      pointEstimates[[i ]] <- get(paste("PSCEy",i,sep = ""))$Estimate
      pointEstimates[[i + 1*length(outputName)]] <- get(paste("PSCEy",i,sep = ""))[[2]]
      pointEstimates[[i + 2*length(outputName)]] <- get(paste("PSCEy",i,sep = ""))[[3]]
      pointEstimates[[i + 3*length(outputName)]] <- get(paste("PSCEy",i,sep = ""))[[4]]
      pointEstimates[[i + 4*length(outputName)]] <- get(paste("PSOEy",i,sep = ""))$Estimate
      pointEstimates[[i + 5*length(outputName)]] <- get(paste("PSOEy",i,sep = ""))[[2]]
      pointEstimates[[i + 6*length(outputName)]] <- get(paste("PSOEy",i,sep = ""))[[3]]
      pointEstimates[[i + 7*length(outputName)]] <- get(paste("PSOEy",i,sep = ""))[[4]]
      # NOTE(review): slot `2 + 8*length(outputName)` is assigned twice
      # (SE_SRAY then Lo_SRAY), so SE_SRAY is lost; the same happens for
      # slot `5 + 8*length(outputName)` (SE_GSE overwritten by Lo_GSE) and
      # for the matching names() below. Likely an off-by-one -- verify.
      pointEstimates[[1 + 8*length(outputName)]] <- SRAY$Estimate
      pointEstimates[[2 + 8*length(outputName)]] <- SRAY[[2]]
      pointEstimates[[2 + 8*length(outputName)]] <- SRAY[[3]]
      pointEstimates[[3 + 8*length(outputName)]] <- SRAY[[4]]
      pointEstimates[[4 + 8*length(outputName)]] <- GSE$Estimate
      pointEstimates[[5 + 8*length(outputName)]] <- GSE[[2]]
      pointEstimates[[5 + 8*length(outputName)]] <- GSE[[3]]
      pointEstimates[[6 + 8*length(outputName)]] <- GSE[[4]]
      names(pointEstimates[[i ]]) <- paste("PSCEy",i,sep = "")
      names(pointEstimates[[i + 1*length(outputName)]]) <- paste("SE_PSCEy",i,sep = "")
      names(pointEstimates[[i + 2*length(outputName)]]) <- paste("Lo_PSCEy",i,sep = "")
      names(pointEstimates[[i + 3*length(outputName)]]) <- paste("Hi_PSCEy",i,sep = "")
      names(pointEstimates[[i + 4*length(outputName)]]) <- paste("PSOEy",i,sep = "")
      names(pointEstimates[[i + 5*length(outputName)]]) <- paste("SE_PSOEy",i,sep = "")
      names(pointEstimates[[i + 6*length(outputName)]]) <- paste("Lo_PSOEy",i,sep = "")
      names(pointEstimates[[i + 7*length(outputName)]]) <- paste("Hi_PSOEy",i,sep = "")
      names(pointEstimates[[1 + 8*length(outputName)]]) <- "SRAY"
      names(pointEstimates[[2 + 8*length(outputName)]]) <- "SE_SRAY"
      names(pointEstimates[[2 + 8*length(outputName)]]) <- "Lo_SRAY"
      names(pointEstimates[[3 + 8*length(outputName)]]) <- "Hi_SRAY"
      names(pointEstimates[[4 + 8*length(outputName)]]) <- "GSE"
      names(pointEstimates[[5 + 8*length(outputName)]]) <- "SE_GSE"
      names(pointEstimates[[5 + 8*length(outputName)]]) <- "Lo_GSE"
      names(pointEstimates[[6 + 8*length(outputName)]]) <- "Hi_GSE"
    }
    PointEstimates <- t(do.call(rbind, lapply(pointEstimates, data.frame, stringsAsFactors = FALSE)))
    # Append this level's row to a temp CSV; the row name carries the
    # output level p, which becomes the `meanLevels` column when read back.
    write.table(PointEstimates, file = "point Estimates of scale and scope economies.csv", row.names = p, col.names = NA, sep = ",",append = TRUE)
  }
  # Read back the accumulated rows. Each append wrote a header line plus a
  # data line; after read.csv consumes the first header, the data sit on
  # odd rows 1, 3, ..., 15 (8 levels).
  # NOTE(review): the `to = 15` is hard-coded for exactly 8 levels.
  Estimates <- read.csv("point Estimates of scale and scope economies.csv", header = T,stringsAsFactors = FALSE)
  Estimates <- Estimates[seq(from = 1,to = 15,by = 2),]
  colnames(Estimates) <- c("meanLevels",colnames(Estimates)[2:(7 + 8*length(outputName))])
  Estimates <- as.data.frame(apply(Estimates,2,as.numeric))
  file.remove("point Estimates of scale and scope economies.csv")
  return(Estimates)
}
|
3400470ae970c401b40d544c446890e3c65a4ad5
|
59f5b12829c296943d9aa4e14ec58ec151fefc95
|
/fx_stock_trading_sandbox.R
|
07cc328f4c2ddf2ec1aff6d2d60d875e7c76fe0d
|
[] |
no_license
|
BenjamS/Quanfin
|
b55cfe187bfe8fdb2ce971f8add5cd010f181578
|
4adb2f6e3d04dee95ae09035d17f4714eec36130
|
refs/heads/master
| 2023-08-04T08:44:20.739840
| 2023-07-24T20:47:32
| 2023-07-24T20:47:32
| 122,140,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,039
|
r
|
fx_stock_trading_sandbox.R
|
library(tidyverse)
library(tidyquant)
library(patchwork)
library(lubridate)
#--------------------------------------------------------------------------------
this_folder <- "C:/Users/bensc/OneDrive/Documents/Data/Trading/"
#=============================================================================
#=============================================================================
# Define fns
#=============================================================================
#=============================================================================
getTsTrendInds <- function(in_df, thresh_pct_uptrend = 0.7, thresh_pct_dntrend = -0.7){
  # Capture sustained up/down trends of a time series by finding runs of
  # consecutive steps where the detrended series (slope proxy) is positive
  # (uptrends) or non-positive (downtrends).
  #
  # in_df: data.frame(date = Date-like, p = numeric, dt = numeric), where
  #   `dt` is the detrended series (e.g. price minus a short 3-13 period
  #   EMA) -- essentially the first derivative (slope) of the series.
  # thresh_pct_uptrend / thresh_pct_dntrend: %-change cutoffs; captured
  #   up (down) runs whose total % change falls short of (exceeds) the
  #   cutoff are flagged in the `False trend` column.
  #   FIX: the uptrend threshold argument was previously shadowed by a
  #   leftover `thresh_pct_uptrend <- 0` inside the body, so the argument
  #   was silently ignored; it is now honoured.
  #
  # Returns list(df_upTrends, df_dnTrends): one row per trend with start/
  # stop values and dates, change, % change, duration (in steps), a
  # duration-adjusted % change and the false-trend flag.
  #=======================================================
  # Initial capture based on the sign of the slope
  ind_uptrnd <- which(in_df$dt > 0)
  ind_dntrnd <- which(in_df$dt <= 0)
  #---------------------
  # Trend start/finish points: a run boundary is where consecutive indices
  # jump (diff != 1); `+ 1` picks the first index after each break.
  ind_upFin <- ind_dntrnd[which(diff(ind_dntrnd) != 1) + 1]
  ind_upBeg <- ind_uptrnd[which(diff(ind_uptrnd) != 1) + 1]
  ind_dnFin <- ind_upBeg
  ind_dnBeg <- ind_upFin
  # If necessary, trim unmatched endpoints so every start index has one
  # finish index (a complete set of start/finish pairs)
  n_upBeg <- length(ind_upBeg)
  n_upFin <- length(ind_upFin)
  n_upBeg_raw <- n_upBeg
  n_upFin_raw <- n_upFin
  n_dnBeg <- length(ind_dnBeg)
  n_dnFin <- length(ind_dnFin)
  n_dnBeg_raw <- n_dnBeg
  n_dnFin_raw <- n_dnFin
  if(ind_upBeg[1] > ind_upFin[1]){ind_upFin <- ind_upFin[-1]; n_upFin <- length(ind_upFin)}
  if(ind_upBeg[n_upBeg] > ind_upFin[n_upFin]){ind_upBeg <- ind_upBeg[-n_upBeg]; n_upBeg <- length(ind_upBeg)}
  if(sum(ind_upFin - ind_upBeg < 0) > 0){print("Problem with uptrends")}
  if(ind_dnBeg[1] > ind_dnFin[1]){ind_dnFin <- ind_dnFin[-1]; n_dnFin <- length(ind_dnFin)}
  if(ind_dnBeg[n_dnBeg] > ind_dnFin[n_dnFin]){ind_dnBeg <- ind_dnBeg[-n_dnBeg]; n_dnBeg <- length(ind_dnBeg)}
  if(sum(ind_dnFin - ind_dnBeg < 0) > 0){print("Problem with downtrends")}
  #=================================
  # Uptrends: pair up the rows at trend starts and finishes
  df_a <- in_df
  colnames(df_a)[2:ncol(df_a)] <- paste("Start", colnames(df_a)[2:ncol(df_a)])
  df_a$`Start date` <- NA
  df_a$`Start date`[ind_upBeg] <- as.character(df_a$date[ind_upBeg])
  df_a <- subset(df_a, is.na(`Start date`) != T)
  df_a$Date <- NULL
  df_b <- in_df
  colnames(df_b)[2:ncol(df_b)] <- paste("Stop", colnames(df_b)[2:ncol(df_b)])
  df_b$`Stop date` <- NA
  df_b$`Stop date`[ind_upFin] <- as.character(df_b$date[ind_upFin])
  df_b <- subset(df_b, is.na(`Stop date`) != T)
  df_b$Date <- NULL
  #--
  df_a$date <- NULL
  df_b$date <- NULL
  df_upTrends <- cbind(df_a, df_b)
  #--
  df_upTrends$Change <- df_upTrends$`Stop p` - df_upTrends$`Start p`
  df_upTrends$`Pct. Change` <- 100 * df_upTrends$Change / df_upTrends$`Start p`
  df_upTrends$Duration <- ind_upFin - ind_upBeg
  # Duration-adjusted % change: exponentially discounts longer trends
  #df_upTrends$`Pct. Change/Time` <- df_upTrends$`Pct. Change` / df_upTrends$Duration
  df_upTrends$`Pct. Change/Time` <- df_upTrends$`Pct. Change` * exp(-0.05 * df_upTrends$Duration)
  df_upTrends$`False trend` <- ifelse(df_upTrends$`Pct. Change` < thresh_pct_uptrend, 1, 0)
  df_upTrends$`Start date` <- as.Date(df_upTrends$`Start date`)
  df_upTrends$`Stop date` <- as.Date(df_upTrends$`Stop date`)
  #----------------------
  # Downtrends: same pairing with the opposite boundary indices
  df_a <- in_df
  colnames(df_a)[2:ncol(df_a)] <- paste("Start", colnames(df_a)[2:ncol(df_a)])
  df_a$`Start date` <- NA
  df_a$`Start date`[ind_dnBeg] <- as.character(df_a$date[ind_dnBeg])
  df_a <- subset(df_a, is.na(`Start date`) != T)
  df_a$Date <- NULL
  df_b <- in_df
  colnames(df_b)[2:ncol(df_b)] <- paste("Stop", colnames(df_b)[2:ncol(df_b)])
  df_b$`Stop date` <- NA
  df_b$`Stop date`[ind_dnFin] <- as.character(df_b$date[ind_dnFin])
  df_b <- subset(df_b, is.na(`Stop date`) != T)
  df_b$Date <- NULL
  #--
  df_a$date <- NULL
  df_b$date <- NULL
  df_dnTrends <- cbind(df_a, df_b)
  #--
  df_dnTrends$Change <- df_dnTrends$`Stop p` - df_dnTrends$`Start p`
  df_dnTrends$`Pct. Change` <- 100 * df_dnTrends$Change / df_dnTrends$`Start p`
  df_dnTrends$Duration <- ind_dnFin - ind_dnBeg
  df_dnTrends$`Pct. Change/Time` <- df_dnTrends$`Pct. Change` / df_dnTrends$Duration
  df_dnTrends$`False trend` <- ifelse(df_dnTrends$`Pct. Change` > thresh_pct_dntrend, 1, 0)
  df_dnTrends$`Start date` <- as.Date(df_dnTrends$`Start date`)
  df_dnTrends$`Stop date` <- as.Date(df_dnTrends$`Stop date`)
  #=======================================================
  outlist <- list(df_upTrends, df_dnTrends)
  return(outlist)
}
#-----------------------------------------------------------------------------
# For consolidating more than two back-to-back trends
# For consolidating more than two back-to-back trends
consolidateTrnds <- function(df, thresh_timeBetwn = 21, show_seqs = F){
  # Merge chains of back-to-back trend rows into single trends. Two
  # consecutive rows are "back-to-back" when the gap between one row's
  # `Stop date` and the next row's `Start date` is <= thresh_timeBetwn
  # (same units as the date columns, e.g. days).
  #
  # df: one trend per row, as produced by getTsTrendInds() (needs the
  #     `Start date`, `Stop date`, `Start p`, `Stop p`, `Pct. Change`,
  #     `Duration` and `Pct. Change/Time` columns).
  #     NOTE(review): also touches a `Start date_chr` column that
  #     getTsTrendInds() does not create -- confirm upstream adds it.
  # show_seqs: if TRUE, print the consolidated index chains (debug aid).
  # Returns df with each chain collapsed to one row.
  df$`TimeBetwn` <- c(NA, df$`Start date`[-1] - df$`Stop date`[-nrow(df)])
  ind <- which(df$TimeBetwn <= thresh_timeBetwn)   # rows close to their predecessor
  #ind <- c(3, 5, 8, 15:17, 22, 26, 30:33, 36:37, 55, 59, 64, 73:78, 90, 93, 105)
  # Find unbroken stretches of such rows (chains of adjacent trends)
  diffInd <- diff(ind)
  indDiffDiff1 <- which(diffInd == 1)
  seqUnbrok <- ind[indDiffDiff1]
  diffSeq <- diff(seqUnbrok)
  indBreaks <- which(diffSeq != 1)
  indBreaks <- c(indBreaks, length(seqUnbrok))   # last chain always ends at the tail
  seqEndPoints <- seqUnbrok[indBreaks]
  n_seq <- length(seqEndPoints)
  list_seq <- list()     # full index chain for each consolidated run
  list_indrm <- list()   # rows to drop (all of a chain except its last row)
  this_break <- 0
  for(i in 1:n_seq){
    #this_endPoint <- seqEndPoints[i]
    indStart <- this_break + 1
    this_break <- indBreaks[i]
    this_seq <- seqUnbrok[indStart:this_break]
    this_seq <- c(this_seq, this_seq[length(this_seq)] + 1)   # include the row after the chain
    list_seq[[i]] <- this_seq
    list_indrm[[i]] <- setdiff(this_seq, this_seq[length(this_seq)])
  }
  ind_rm <- do.call(c, list_indrm)
  # Mark the qualifying rows so they can be located again after removal
  df$Mark <- NA
  df$Mark[ind] <- 1
  df <- df[-ind_rm, ]
  ind <- which(df$Mark == 1)
  # Each surviving marked row inherits the start of the row directly above
  df$`Start date`[ind] <- df$`Start date`[ind - 1]
  df$`Start date_chr`[ind] <- df$`Start date_chr`[ind - 1]
  df$`Start p`[ind] <- df$`Start p`[ind - 1]
  # Re-derive the summary statistics for the merged trends
  df$`Pct. Change`[ind] <- 100 * (df$`Stop p`[ind] / df$`Start p`[ind] - 1)
  df$Duration[ind] <- df$`Stop date`[ind] - df$`Start date`[ind]
  df$`Pct. Change/Time`[ind] <- df$`Pct. Change`[ind] / df$Duration[ind]
  df <- df[-(ind - 1), ]   # drop the now-absorbed predecessor rows
  if(show_seqs){
    print(list_seq)
  }
  return(df)
}
#-----------------------------------------------------------------------------
# For percentile oscillator
pctileFun <- function(x){
  # Percentile rank of the most recent value in the window `x`: evaluate
  # the empirical CDF of the window at its last element. Used as a rolling
  # percentile oscillator.
  latest <- x[length(x)]
  cdf <- ecdf(x)
  cdf(latest)
}
#-----------------------------------------------------------------------------
# Days (t-steps) above/below x percentile
# Days (t-steps) above/below x percentile
daysAbovePctile <- function(df_pctile, thresh_pctiles = c(0.05, 0.95)){
  # Annotate a data.frame holding a `Pctile` column with run-length
  # counters: within each unbroken run of rows whose percentile exceeds
  # the upper threshold, `T-steps above thresh` counts 1, 2, 3, ...;
  # rows outside any run get 0, and rows with a missing percentile get NA.
  # `T-steps below thresh` does the same for the lower threshold.
  #
  # df_pctile: data.frame with a numeric `Pctile` column (may contain NA).
  # thresh_pctiles: c(lower, upper) percentile cutoffs.
  # Returns df_pctile with the two counter columns appended.
  #
  # FIX: when no row crossed a threshold, the original code indexed an
  # empty vector (`indUp[1:0]` -> NA) and crashed on the subscripted
  # assignment; both run loops are now guarded against empty index sets.
  ind_na <- which(is.na(df_pctile$Pctile))
  thresh_up <- thresh_pctiles[2]
  thresh_lo <- thresh_pctiles[1]
  #--- Runs above the upper threshold ---
  df_pctile$`T-steps above thresh` <- 0
  df_pctile$`T-steps above thresh`[ind_na] <- NA
  indUp <- which(df_pctile$Pctile > thresh_up)
  if (length(indUp) > 0) {
    # positions in indUp where a run of consecutive indices ends
    breaks_indUp <- which(diff(indUp) != 1)
    breaks_indUp <- c(breaks_indUp, length(indUp))
    n_runs <- length(breaks_indUp)
    this_break <- 0
    for (i in 1:n_runs) {
      indStart_indUp <- this_break + 1
      this_break <- breaks_indUp[i]
      ind_thisRun <- indUp[indStart_indUp:this_break]
      numTstepsOverThresh <- 1:length(ind_thisRun)
      df_pctile$`T-steps above thresh`[ind_thisRun] <- numTstepsOverThresh
    }
  }
  #--- Runs below the lower threshold ---
  df_pctile$`T-steps below thresh` <- 0
  df_pctile$`T-steps below thresh`[ind_na] <- NA
  indLo <- which(df_pctile$Pctile < thresh_lo)
  if (length(indLo) > 0) {
    breaks_indLo <- which(diff(indLo) != 1)
    breaks_indLo <- c(breaks_indLo, length(indLo))
    n_runs <- length(breaks_indLo)
    this_break <- 0
    for (i in 1:n_runs) {
      indStart_indLo <- this_break + 1
      this_break <- breaks_indLo[i]
      ind_thisRun <- indLo[indStart_indLo:this_break]
      numTstepsOverThresh <- 1:length(ind_thisRun)
      df_pctile$`T-steps below thresh`[ind_thisRun] <- numTstepsOverThresh
    }
  }
  return(df_pctile)
}
#-----------------------------------------------------------------------------
# Visual inspection function
# Visual inspection function
visuallyInspect <- function(df_plot, n_cols = 5){
  # Quick faceted line plot of every Item's Value over time, one panel per
  # Item with free y scales. Dates are drawn on a discrete axis with about
  # five labelled breaks; the plot is printed as a side effect.
  df_plot$date_chr <- as.character(df_plot$date)
  axis_breaks <- df_plot$date_chr[seq.int(1, length(df_plot$date_chr), length.out = 5)]
  fig <- ggplot(df_plot, aes(x = date_chr, y = Value, group = 1)) +
    geom_line() +
    scale_x_discrete(breaks = axis_breaks) +
    facet_wrap(~Item, ncol = n_cols, scales = "free_y") +
    theme(axis.text.x = element_text(angle = 60, hjust = 1))
  print(fig)
}
#-----------------------------------------------------------------------------
# Get principal components loadings function
# Get principal components loadings function
get_S_and_corrXS <- function(mat_X_in){
  # PCA of the columns of `mat_X_in` via eigendecomposition of the sample
  # covariance matrix. Eigenvector signs are first oriented so the first
  # component has positive mean, then flipped per component so each signal
  # best matches the average of the items it correlates with most.
  #
  # mat_X_in: numeric matrix, observations in rows, items in columns.
  # Returns list(mat_S_all, cormat_XS, eig_values, mat_P):
  #   mat_S_all  - component scores ("signals"), centered X %*% P
  #   cormat_XS  - item-signal correlation (loading) matrix D^-1 P G^(1/2)
  #   eig_values - eigenvalues of cov(X), descending
  #   mat_P      - final (sign-adjusted) eigenvector matrix
  #
  # FIX: removed a stray debugging expression
  # `mat_X_in[which(is.na(mat_X_in[, 15])), 15]` that crashed with
  # "subscript out of bounds" whenever ncol(mat_X_in) < 15.
  mat_X_centered <- scale(mat_X_in, scale = F)
  # Eigendecomposition computed once (the original called eigen() twice)
  eig_out <- eigen(cov(mat_X_in))
  mat_P <- eig_out$vectors
  eig_values <- eig_out$values
  if(mean(mat_P[, 1]) < 0){mat_P <- -mat_P}
  mat_G <- diag(eig_values)
  #---------------------------------------------
  # Loadings: correlations between items and signals, D^-1 P G^(1/2)
  sd_X <- apply(mat_X_in, 2, sd)
  D_sdX_inv <- diag(1 / sd_X)
  cormat_XS <- D_sdX_inv %*% mat_P %*% sqrt(mat_G)
  row.names(cormat_XS) <- colnames(mat_X_in)
  mat_L <- cormat_XS
  #---------------------------------------------------------
  # Set the sign of each eigenvector so its signal best conforms to the
  # average of its most highly correlated items (|corr| >= corrThresh,
  # falling back to the single most correlated item when none qualify).
  corrThresh <- 0.55
  n_signals <- ncol(mat_L)
  list_X_hiCorr_avg <- list()
  for(i in 1:n_signals){
    this_loadvec <- mat_L[, i]
    ind_tracks <- which(abs(this_loadvec) >= corrThresh)
    if(length(ind_tracks) == 0){
      ind_tracks <- which(abs(this_loadvec) == max(abs(this_loadvec)))
    }
    if(length(ind_tracks) == 1){
      list_X_hiCorr_avg[[i]] <- mat_X_centered[, ind_tracks]
    }else{
      list_X_hiCorr_avg[[i]] <- rowMeans(mat_X_centered[, ind_tracks])
    }
  }
  mat_X_hiCorr_avg <- do.call(cbind, list_X_hiCorr_avg)
  mat_S_all <- mat_X_centered %*% mat_P
  for(i in 1:n_signals){
    this_S <- mat_S_all[, i]
    this_X_hiCorr_avg <- mat_X_hiCorr_avg[, i]
    # keep whichever orientation is closer (in MSE) to the reference average
    mse <- mean((this_S - this_X_hiCorr_avg)^2)
    mse_neg <- mean((-this_S - this_X_hiCorr_avg)^2)
    if(mse_neg < mse){
      mat_P[, i] <- -mat_P[, i]
    }
  }
  # Recompute loadings and signals with the final eigenvector signs
  cormat_XS <- D_sdX_inv %*% mat_P %*% sqrt(mat_G)
  row.names(cormat_XS) <- colnames(mat_X_in)
  mat_S_all <- mat_X_centered %*% mat_P
  list_out <- list(mat_S_all, cormat_XS, eig_values, mat_P)
  return(list_out)
}
#-----------------------------------------------------------------------------
# Function to plot signal-item correlations (loadings)
# Function to plot signal-item correlations (loadings)
# mat_L: loadings matrix (items in rows, signals in columns), e.g. the
#   cormat_XS returned by get_S_and_corrXS().
# group_info: optional list(list_groups, group_names, group_colors) as
#   consumed by group_fn(); when supplied, bars are filled by group.
# xAxis_title: optional label for the correlation axis.
# sigNames: optional facet names; defaults to "Signal 1", "Signal 2", ...
# Returns (invisibly prints) a ggplot of horizontal bars, one facet per
# signal, correlations bounded in [-1, 1].
# NOTE(review): uses the deprecated tidyr::gather_() -- prefer
# pivot_longer() when modernising.
plot_corrXS_barchart <- function(mat_L, group_info = NULL, xAxis_title = NULL, sigNames = NULL){
  n_signals <- ncol(mat_L)
  df_plot <- data.frame(Item = row.names(mat_L), mat_L)
  df_plot$Item <- as.character(df_plot$Item)
  #-------------------------------------------------------
  # Signal column headers double as facet labels
  if(is.null(sigNames)){
    signal_id <- paste("Signal", 1:n_signals)
  }else{
    #signal_id <- paste("Signal", 1:n_signals, "\n", sigNames)
    signal_id <- sigNames
  }
  colnames(df_plot)[2:(n_signals + 1)] <- signal_id
  #-------------------------------------------------------
  # Long format: one row per (Item, Signal); factor keeps signal order
  gathercols <- as.character(signal_id)
  df_plot <- gather_(df_plot, "Signal", "Correlation", gathercols)
  df_plot <- transform(df_plot,
                       Signal = factor(Signal, levels = gathercols))
  if(!is.null(group_info)){
    # Colour bars by group, ordering items within their groups
    outlist <- group_fn(group_info)
    cols_ordered_by_group <- outlist[[1]]
    group_color_vec <- outlist[[2]]
    group_vec_ordered <- outlist[[3]]
    df_match_group <- data.frame(Item = cols_ordered_by_group, Group = group_vec_ordered)
    df_plot <- merge(df_plot, df_match_group, by = "Item")
    df_plot <- df_plot[order(df_plot$Group), ]
    df_plot$Item <- factor(df_plot$Item, levels = unique(df_plot$Item))
    gg <- ggplot(df_plot, aes(x = Item, y = Correlation, fill = Group))
    gg <- gg + scale_fill_manual(values = unique(group_color_vec))
  }else{
    # No grouping: keep original item order (reversed so the first item
    # appears at the top after coord_flip)
    df_plot$Item <- factor(df_plot$Item,
                           levels = rev(unique(df_plot$Item)))
    gg <- ggplot(df_plot, aes(x = Item, y = Correlation))
  }
  gg <- gg + geom_bar(stat = "identity", color = "black", position = "dodge")
  gg <- gg + ylim(limits = c(-1, 1))   # correlations are bounded in [-1, 1]
  gg <- gg + facet_wrap(~ Signal, nrow = 1)
  if(!is.null(xAxis_title)){
    gg <- gg + labs(y = xAxis_title)
  }
  gg <- gg + theme(axis.text = element_text(size = 7),
                   axis.title.x = element_text(size = 7),
                   axis.title.y = element_blank(),
                   legend.title = element_blank(),
                   legend.text = element_text(size = 7),
                   strip.text = element_text(size = 7))
  gg <- gg + coord_equal()
  gg <- gg + coord_flip()   # NOTE(review): replaces coord_equal just above
  gg
}
#-----------------------------------------------------------------------------
# Define function to order data by group
# Define function to order data by group
group_fn <- function(group_info){
  # Order variable names by group and attach one colour per group.
  #
  # group_info: list of three elements --
  #   [[1]] list_groups: list of character vectors, one vector of variable
  #         names per group (their concatenation is the variable universe);
  #   [[2]] group_names: character vector, one label per group;
  #   [[3]] group_colors: character vector of colours, or NA to generate a
  #         distinct palette automatically (requires randomcoloR).
  # Returns list(cols_ordered_by_group, group_color_vec, group_vec_ordered,
  #   ind_ordered_cols, group_vec).
  list_groups <- group_info[[1]]
  group_names <- group_info[[2]]
  group_colors <- group_info[[3]]
  varNames_ordered <- do.call(c, list_groups)
  n_groups <- length(group_names)
  n_items <- length(varNames_ordered)
  # FIX: `if (is.na(group_colors))` produced a length > 1 condition when a
  # colour vector was supplied (an error since R 4.2, silently used only
  # the first element before); test the whole vector instead.
  if(all(is.na(group_colors))){
    # Over-generate distinct colours and sample n_groups of them
    bag_of_colors <- randomcoloR::distinctColorPalette(k = 5 * n_groups)
    group_colors <- sample(bag_of_colors, n_groups)
    #group_colors <- viridis::viridis_pal(option = "D")(length(group_names))
  }
  # Assign each variable its group label and colour
  group_vec <- rep(NA, n_items)
  group_color_vec <- rep(NA, n_items)
  for(i in 1:n_groups){
    this_group_vec <- list_groups[[i]]
    this_group_name <- group_names[i]
    this_group_color <- group_colors[i]
    group_vec[which(varNames_ordered %in% this_group_vec)] <- this_group_name
    group_color_vec[which(varNames_ordered %in% this_group_vec)] <- this_group_color
  }
  # Order variables by group label (stable within groups)
  ind_ordered_cols <- order(factor(group_vec))
  cols_ordered_by_group <- as.character(varNames_ordered[ind_ordered_cols])
  group_color_vec <- group_color_vec[ind_ordered_cols]
  group_vec_ordered <- group_vec[ind_ordered_cols]
  out_list <- list(cols_ordered_by_group, group_color_vec, group_vec_ordered, ind_ordered_cols, group_vec)
  return(out_list)
}
#=============================================================================
#=============================================================================
# End definition of functions
#=============================================================================
#=============================================================================
# --- Load weekly FX/futures data ---------------------------------------------
# Read the raw price/volume/high-low data, drop the XBI ticker, and aggregate
# each Item/Element series to weekly means via tidyquant's tq_transmute().
# NOTE(review): depends on `this_folder` and `visuallyInspect()` being defined
# earlier in this file — confirm before running this section standalone.
this_filename <- "fxFutData.csv"
this_filepath <- paste0(this_folder, this_filename)
df_raw <- read.csv(this_filepath, stringsAsFactors = F)
df_raw <- subset(df_raw, Item != "XBI")
df_raw$date <- as.POSIXct(df_raw$date, format = "%Y-%m-%d")
df_raw <- df_raw %>% group_by(Item, Element) %>%
tq_transmute(select = Value,
mutate_fun = apply.weekly,
FUN = mean)
# Split the long table into one data frame per measured element.
df_p <- subset(df_raw, Element == "p")
df_vol <- subset(df_raw, Element == "Volume")
df_dif <- subset(df_raw, Element == "diffHiLo")
df_p$Element <- NULL
df_vol$Element <- NULL
df_dif$Element <- NULL
#--------------------------------------------------------------------------------
# Detrend price: subtract a 21-period EMA from each price series.
# Rows lost to the EMA warm-up, plus leading NA rows in the sparse CC=F
# series, are removed before the subtraction.
df_dt <- df_p %>% spread(Item, Value)
df_dt[, -1] <- as.data.frame(na.approx(df_dt[, -1]))
indNA <- which(is.na(df_dt$`CC=F`))
df_dt <- df_dt[-indNA, ]
per_ema_for_detrend <- 21
df_dt[, -1] <- as.data.frame(apply(df_dt[, -1], 2, EMA, per_ema_for_detrend))
df_dt <- df_dt[-c(1:(per_ema_for_detrend - 1)), ]
gathercols <- colnames(df_dt)[-1]
df_dt <- df_dt %>% gather_("Item", "ema", gathercols)
# Join the EMA back onto the raw prices; df_dt$Value becomes price - EMA.
df_dt <- merge(df_dt, df_p, by = c("date", "Item"))
df_dt$Value <- df_dt$Value - df_dt$ema
df_dt$ema <- NULL
#--------------------------------------------------------------------------------
# Visually inspect each element as a faceted panel of time series.
# Price
visuallyInspect(df_p, n_cols = 6)
# Price detrended
visuallyInspect(df_dt, n_cols = 6)
# High-low difference
visuallyInspect(df_dif, n_cols = 6)
# Volume
visuallyInspect(df_vol, n_cols = 4)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# Get trends
# Label up/down trends in the weekly usd/eur price from its deviation around a
# 21-period EMA, consolidate and filter them, then build a three-panel figure:
# price with trend shading, a days-above/below-threshold oscillator, and a
# rolling percentile oscillator.
# NOTE(review): getTsTrendInds(), consolidateTrnds(), pctileFun() and
# daysAbovePctile() are project helpers defined elsewhere in this file.
df <- subset(df_p, Item == "usd/eur")
df$date_chr <- as.character(df$date)
df <- df[, c("date", "date_chr", "Value")]
colnames(df)[ncol(df)] <- "p"
per_ema_for_detrend <- 21
df$ema <- EMA(df$p, per_ema_for_detrend)
df$dt <- df$p - df$ema
# Detect candidate trends from the detrended series; element 1 = uptrends,
# element 2 = downtrends.
indList <- getTsTrendInds(df, thresh_pct_uptrend = 0.7, thresh_pct_dntrend = -0.7)
df_up <- indList[[1]]
df_down <- indList[[2]]
# Keep only non-false trends and the columns needed downstream.
these_cols <- c("Start date_chr", "Stop date_chr", "Start date", "Stop date",
"Start p", "Stop p", "Pct. Change", "Duration", "Pct. Change/Time", "False trend")
df_up <- subset(df_up[, these_cols], `False trend` == 0)
these_cols <- c("Start date_chr", "Stop date_chr", "Start date", "Stop date",
"Start p", "Stop p", "Pct. Change", "Duration", "Pct. Change/Time", "False trend")
df_down <- subset(df_down[, these_cols], `False trend` == 0)
#-----------------------------------------------------------------------------
# Consolidate trends: merge segments closer than thresh_timeBetwn time steps,
# then drop trends with less than a 2% absolute move.
thresh_timeBetwn <- 21
df_down <- consolidateTrnds(df_down, thresh_timeBetwn, show_seqs = F)
df_up <- consolidateTrnds(df_up, thresh_timeBetwn, show_seqs = F)
df_down <- subset(df_down, `Pct. Change` <= -2)
df_up <- subset(df_up, `Pct. Change` >= 2)
#-----------------------------------------------------------------------------
# Visually inspect trend capture
# hist(df_up$`Pct. Change`)
# hist(df_down$`Pct. Change`)
# hist(df_down$`Pct. Change/Time`)
# hist(df_up$`Pct. Change/Time`)
# Panel 1: price and its EMA, with trend windows shaded by % change.
df_plot <- df[, c("date", "date_chr", "p", "ema")]
colnames(df_plot)[ncol(df_plot)] <- paste("ema", per_ema_for_detrend)
gathercols <- colnames(df_plot)[3:ncol(df_plot)]
df_plot <- df_plot %>% gather_("Type", "p", gathercols)
# Evenly spaced x-axis breaks (dates are plotted as a discrete character axis).
my_breaks <- df_plot$date_chr[seq.int(1, length(df_plot$date_chr), length.out = 30)]
gg <- ggplot()
# gg <- gg + geom_rect(data = df_upTrnds, aes(xmin = `Start date`, xmax = `Stop date`,
#                              ymin = -Inf, ymax = Inf, fill = factor(`True uptrend`)), alpha = 0.7)
# gg <- gg + scale_fill_manual(values = c("magenta", "green"))
gg <- gg + geom_rect(data = df_up, aes(xmin = `Start date_chr`, xmax = `Stop date_chr`,
ymin = -Inf, ymax = Inf, fill = `Pct. Change`), alpha = 0.7)
gg <- gg + geom_rect(data = df_down, aes(xmin = `Start date_chr`, xmax = `Stop date_chr`,
ymin = -Inf, ymax = Inf, fill = `Pct. Change`), alpha = 0.7)
gg <- gg + scale_fill_gradient2(low = "darkmagenta", mid = "khaki2", high = "green")
gg <- gg + geom_line(data = df_plot, aes(x = date_chr, y = p, group = Type, color = Type))
gg <- gg + scale_x_discrete(breaks = my_breaks)
gg <- gg + theme(legend.position = "none",
axis.title.x = element_blank(),
axis.text.x = element_blank(),
legend.title = element_blank())
gg <- gg + guides(fill = guide_colorbar(title.position="top",
title.hjust = 0.5),
color = "none")
gg_p <- gg
#-----------------------------------------------------------------------------
# Panel 3 input: 144-period rolling percentile of price.
rollWind <- 144
df$Pctile <- rollapply(df$p, rollWind, pctileFun, fill = NA, align = "right")
df_pctile <- df[, c("date", "date_chr", "Pctile")]
# Count time steps spent above/below the extreme percentile thresholds, then
# take the rolling percentile of those counts; pctileDiff > 0 means more time
# below the lower threshold than above the upper one.
thresh_pctiles <- c(0.05, 0.95)
df_pctileDays <- daysAbovePctile(df_pctile, thresh_pctiles)
df_pctileDays$Pctile <- NULL
#df_pctileDays <- df_pctileDays %>% gather(Item, Value, `T-steps above thresh`:`T-steps below thresh`)
df_pctileDays$`Pctile days above` <- rollapply(df_pctileDays$`T-steps above thresh`, rollWind, pctileFun, fill = NA, align = "right")
df_pctileDays$`Pctile days below` <- rollapply(df_pctileDays$`T-steps below thresh`, rollWind, pctileFun, fill = NA, align = "right")
df_pctileDays$`T-steps above thresh` <- NULL
df_pctileDays$`T-steps below thresh` <- NULL
#df_pctileDays <- df_pctileDays %>% gather(Item, Value, `Pctile days above`:`Pctile days below`)
df_pctileDays$pctileDiff <- df_pctileDays$`Pctile days below` - df_pctileDays$`Pctile days above`
# Panel 3: rolling price percentile with the same trend shading.
df_plot <- df_pctile
gg <- ggplot()
gg <- gg + geom_rect(data = df_up, aes(xmin = `Start date_chr`, xmax = `Stop date_chr`,
ymin = -Inf, ymax = Inf, fill = `Pct. Change`), alpha = 0.7)
gg <- gg + geom_rect(data = df_down, aes(xmin = `Start date_chr`, xmax = `Stop date_chr`,
ymin = -Inf, ymax = Inf, fill = `Pct. Change`), alpha = 0.7)
gg <- gg + scale_fill_gradient2(low = "darkmagenta", mid = "khaki2", high = "green",
guide = "none")
gg <- gg + geom_line(data = df_plot, aes(x = date_chr, y = Pctile, group = 1))
gg <- gg + scale_x_discrete(breaks = my_breaks)
gg <- gg + theme(axis.text.x = element_text(angle = 60, hjust = 1),
axis.title.x = element_blank(),
legend.position = "none")
gg_pctile <- gg
# Panel 2: percentile-days difference oscillator with a zero reference line.
df_plot <- df_pctileDays
gg <- ggplot()
gg <- gg + geom_rect(data = df_up, aes(xmin = `Start date_chr`, xmax = `Stop date_chr`,
ymin = -Inf, ymax = Inf, fill = `Pct. Change`), alpha = 0.7)
gg <- gg + geom_rect(data = df_down, aes(xmin = `Start date_chr`, xmax = `Stop date_chr`,
ymin = -Inf, ymax = Inf, fill = `Pct. Change`), alpha = 0.7)
gg <- gg + scale_fill_gradient2(low = "darkmagenta", mid = "khaki2", high = "green",
guide = "none")
gg <- gg + geom_line(data = df_plot, aes(x = date_chr, y = pctileDiff, group = 1))#,
#y = Value, group = Item, color = Item))
gg <- gg + geom_hline(yintercept = 0, color = "red")
gg <- gg + scale_color_manual(values = c("orange", "black"))
gg <- gg + scale_x_discrete(breaks = my_breaks)
gg <- gg + theme(axis.text.x = element_blank(),
axis.title.x = element_blank(),
legend.title = element_blank())
gg_pctileDays <- gg
#-----------------------------------------------------------------------------
# Stack the three panels with patchwork (price panel gets 3x the height).
gg_p + gg_pctileDays + gg_pctile +
plot_layout(ncol = 1, heights = c(1, 1 / 3, 1 / 3), guides = "collect") &
theme(legend.position = "top")
#=============================================================================
rm(df_pctile, df_pctileDays)
#=============================================================================
#=============================================================================
# Calculate price percentile oscillator for all items: 144-period rolling
# percentile of each Item's weekly price.
rollWind <- 144
df_pctl <- df_p
df_pctl$Element <- NULL
df_pctl <- df_pctl %>% group_by(Item) %>%
mutate(Value = rollapply(Value, rollWind, pctileFun, fill = NA, align = "right")) %>%
as.data.frame()
#-----------------------------------------------------------------------------
# Calculate days above/below threshold percentile oscillator for all items,
# applying daysAbovePctile() per Item and recombining.
df_pctlDays <- df_pctl
df_pctlDays$Element <- NULL
colnames(df_pctlDays)[ncol(df_pctlDays)] <- "Pctile"
thresh_pctiles <- c(0.05, 0.95)
item_vec <- unique(df_pctlDays$Item); n_items <- length(item_vec)
list_df <- list()
for(i in 1:n_items){
list_df[[i]] <- subset(df_pctlDays, Item == item_vec[i])
}
list_out <- lapply(list_df, daysAbovePctile, thresh_pctiles)
df_pctlDays <- as.data.frame(do.call(rbind, list_out))
# Rolling percentile of the above/below counts, per Item.
df_pctlDays <- df_pctlDays %>% group_by(Item) %>%
mutate(`Pctile days above` = rollapply(`T-steps above thresh`, rollWind, pctileFun, fill = NA, align = "right")) %>%
as.data.frame()
df_pctlDays <- df_pctlDays %>% group_by(Item) %>%
mutate(`Pctile days below` = rollapply(`T-steps below thresh`, rollWind, pctileFun, fill = NA, align = "right")) %>%
as.data.frame()
df_pctlDays$Pctile <- NULL
df_pctlDays$`T-steps above thresh` <- NULL
df_pctlDays$`T-steps below thresh` <- NULL
df_pctlDays$pctlDaysDiff <- df_pctlDays$`Pctile days below` - df_pctlDays$`Pctile days above`
# Long-format views of each oscillator, with a common "Value" column name.
df_pctlDaysAbove <- df_pctlDays[, c("date", "Item", "Pctile days above")]
df_pctlDaysBelow <- df_pctlDays[, c("date", "Item", "Pctile days below")]
df_pctlDaysDif <- df_pctlDays[, c("date", "Item", "pctlDaysDiff")]
colnames(df_pctlDaysAbove)[3] <- "Value"
colnames(df_pctlDaysBelow)[3] <- "Value"
colnames(df_pctlDaysDif)[3] <- "Value"
#=============================================================================
# PCA on the percentile-days-difference oscillator across items.
# NOTE(review): `df_pctlDaysDiff` (double "f...ff") is not defined in this
# section — the object created above is `df_pctlDaysDif`. Likely a typo;
# confirm whether an earlier section defines the longer name.
df_wide <- df_pctlDaysDiff %>% spread(Item, Value)
rows_rm <- 1:(rollWind - 1)
mat_X_in <- na.approx(df_wide[-rows_rm, -c(1)])
ind_rm <- which(is.na(mat_X_in[, 2]))
mat_X_in <- mat_X_in[-ind_rm, ]
# o <- apply(mat_X_in, 2, function(x) sum(is.na(x)))
# table(o)
# get_S_and_corrXS() is a project helper; element 2 holds the loadings.
list_out <- get_S_and_corrXS(mat_X_in)
mat_L <- list_out[[2]]
n_sigs <- 4
mat_L <- mat_L[, 1:n_sigs]
# Group items into FX pairs, US stocks/bonds, and commodities for plotting.
fx_vec <- item_vec[grep("/", unique(df_p$Item))]
us_vec <- c("ES=F", "ZB=F", "ZN=F", "ZF=F",
"XLF", "XLI", "XLK", "IYR")
commod_vec <- c("GC=F", "NG=F", "CL=F", "CT=F", "KC=F", "CC=F", "SB=F")
list_groups <- list(fx_vec, us_vec, commod_vec)
group_names <- c("FX pairs", "US stocks\n& bonds", "Commodities")
n_groups <- length(list_groups)
bag_of_colors <- randomcoloR::distinctColorPalette(k = 5 * n_groups)
group_colors <- sample(bag_of_colors, n_groups)
group_info <- list(list_groups, group_names, group_colors)
plot_corrXS_barchart(mat_L, group_info = group_info)
# Varimax rotated loadings: strip the "loadings" class so downstream code
# sees plain numeric matrices.
mat_Lrot <- varimax(mat_L)[[1]]
mat_Lrot <- matrix(as.numeric(mat_Lrot),
attributes(mat_Lrot)$dim,
dimnames = attributes(mat_Lrot)$dimnames)
mat_R <- varimax(mat_L)[[2]]
mat_R <- matrix(as.numeric(mat_R),
attributes(mat_R)$dim,
dimnames = attributes(mat_R)$dimnames)
xAxis_title <- "Varimax Rotated Correlation"
plot_corrXS_barchart(mat_Lrot, group_info = group_info, xAxis_title, sigNames = NULL)
#=============================================================================
#=============================================================================
# Train ML model
#=============================================================================
#=============================================================================
# Build a Buy/Sell/Hold response from consolidated trend start/stop dates,
# join it to the oscillator feature tables, fit a multinomial logistic
# regression on a chronological 70/30 split, and inspect the held-out
# confusion matrix.
# Uptrend start = Buy, uptrend stop = Sell; downtrend start = Sell,
# downtrend stop = Buy.
df_upStart <- data.frame(date = df_up$`Start date`, y = "Buy")
df_upStop <- data.frame(date = df_up$`Stop date`, y = "Sell")
df_yUp <- as.data.frame(rbind(df_upStart, df_upStop))
df_yUp <- df_yUp[order(df_yUp$date), ]
df_yUp$y <- as.character(df_yUp$y)
df_dnStart <- data.frame(date = df_down$`Start date`, yDn = "Sell")
df_dnStop <- data.frame(date = df_down$`Stop date`, yDn = "Buy")
df_yDn <- as.data.frame(rbind(df_dnStart, df_dnStop))
df_yDn$yDn <- as.character(df_yDn$yDn)
df_yDn <- df_yDn[order(df_yDn$date), ]
#---
# Wide price feature table; columns renamed "<Item> p".
df_modP <- df_p %>% spread(Item, Value)
df_modP[, -1] <- na.approx(df_modP[, -1])
apply(df_modP[, -1], 2, function(x) sum(is.na(x)))
ind_rm <- which(is.na(df_modP[, which(colnames(df_modP) == "CC=F")]))
df_modP <- df_modP[-ind_rm, ]
colnames(df_modP)[-1] <- paste(colnames(df_modP)[-1], "p")
#-----------------------------------------------------------------------------
# Check to make sure the dependent var lines up with up and down trends
df_check <- plyr::join_all(list(df_modP, df_yUp, df_yDn), by = "date")
#df_check[which(!is.na(df_check$y) & !is.na(df_check$yDn)), ]
ind_dn <- which(!is.na(df_check$yDn))
df_check$y[ind_dn] <- df_check$yDn[ind_dn]
df_check$yDn <- NULL
# NOTE(review): df_modP columns were renamed to "<Item> p" above, so
# matching on "usd/eur" here yields no column — verify column names.
df_plot <- subset(df_check[, c(1, which(colnames(df_check) == "usd/eur"))])
df_plot$date <- as.character(df_plot$date)
colnames(df_plot)[2] <- "p"
my_breaks <- df_plot$date[seq.int(1, length(df_plot$date), length.out = 20)]
gg <- ggplot()
gg <- gg + geom_rect(data = df_up, aes(xmin = `Start date_chr`, xmax = `Stop date_chr`,
ymin = -Inf, ymax = Inf, fill = `Pct. Change`), alpha = 0.7)
gg <- gg + geom_rect(data = df_down, aes(xmin = `Start date_chr`, xmax = `Stop date_chr`,
ymin = -Inf, ymax = Inf, fill = `Pct. Change`), alpha = 0.7)
gg <- gg + scale_fill_gradient2(low = "darkmagenta", mid = "khaki2", high = "green")
gg <- gg + geom_line(data = df_plot, aes(x = date, y = p, group = 1))
gg <- gg + scale_x_discrete(breaks = my_breaks)
gg <- gg + theme(legend.position = "none",
axis.title.x = element_blank(),
axis.text.x = element_text(angle = 60, hjust = 1),
legend.title = element_blank())
gg <- gg + guides(fill = guide_colorbar(title.position="top",
title.hjust = 0.5),
color = "none")
#---
# NOTE(review): run-order issue — df_mod is only created further below
# (plyr::join_all on list_df), so this overlay requires the later feature
# section to have been run first.
df_plot <- subset(df_mod[, c(1, which(colnames(df_mod) %in% c("usd/eur", "y")))])
df_plot$date <- as.character(df_plot$date)
colnames(df_plot)[2] <- "p"
df_plot$colr <- NA
df_plot$colr[which(df_plot$y == "Buy")] <- "green"
df_plot$colr[which(df_plot$y == "Sell")] <- "red"
ind <- which(!is.na(df_plot$y))
df_plot$y[ind] <- df_plot$p[ind]
df_plot$y <- as.numeric(df_plot$y)
gg <- gg + geom_point(data = df_plot, aes(x = date, y = y, color = colr), size = 2)
gg <- gg + scale_color_manual(values = c("green", "red"))
gg
#-----------------------------------------------------------------------------
# Get feature datasets set up for input into model.
# Volume: per-item rolling percentile, columns renamed "<Item> vol".
df_pctlVol <- df_vol
df_pctlVol$Element <- NULL
df_pctlVol <- df_pctlVol %>% group_by(Item) %>%
mutate(Value = rollapply(Value, rollWind, pctileFun, fill = NA, align = "right")) %>%
as.data.frame()
df_modVol <- df_pctlVol %>% spread(Item, Value)
ind_rm <- 1:(rollWind - 1)
df_modVol <- df_modVol[-ind_rm, ]
df_modVol[, -1] <- as.data.frame(na.approx(df_modVol[, -1], na.rm = F))
#df_modVol[, -1] <- log(df_modVol[, -1])
apply(df_modVol[, -1], 2, function(x) sum(is.na(x)))
ind_rm <- which(is.na(df_modVol[, which(colnames(df_modVol) == "CC=F")]))
df_modVol <- df_modVol[-ind_rm, ]
colnames(df_modVol)[-1] <- paste(colnames(df_modVol)[-1], "vol")
#---
# High-low difference: per-item rolling percentile, columns "<Item> dif".
df_pctlDif <- df_dif
df_pctlDif$Element <- NULL
df_pctlDif <- df_pctlDif %>% group_by(Item) %>%
mutate(Value = rollapply(Value, rollWind, pctileFun, fill = NA, align = "right")) %>%
as.data.frame()
df_modDif <- df_pctlDif %>% spread(Item, Value)
ind_rm <- 1:(rollWind - 1)
df_modDif <- df_modDif[-ind_rm, ]
df_modDif[, -1] <- na.approx(df_modDif[, -1])
apply(df_modDif[, -1], 2, function(x) sum(is.na(x)))
ind_rm <- which(is.na(df_modDif[, which(colnames(df_modDif) == "CC=F")]))
df_modDif <- df_modDif[-ind_rm, ]
colnames(df_modDif)[-1] <- paste(colnames(df_modDif)[-1], "dif")
#---
# Price percentile oscillator: columns "<Item> pctl".
df_modPctl <- df_pctl %>% spread(Item, Value)
ind_rm <- 1:(rollWind - 1)
df_modPctl <- df_modPctl[-ind_rm, ]
df_modPctl[, -1] <- na.approx(df_modPctl[, -1])
apply(df_modPctl[, -1], 2, function(x) sum(is.na(x)))
ind_rm <- which(is.na(df_modPctl[, which(colnames(df_modPctl) == "CC=F")]))
df_modPctl <- df_modPctl[-ind_rm, ]
colnames(df_modPctl)[-1] <- paste(colnames(df_modPctl)[-1], "pctl")
#---
# Percentile-days-difference oscillator: columns "<Item> pctlDaysDif".
df_modPctlDaysDif <- df_pctlDaysDif %>% spread(Item, Value)
ind_rm <- 1:(rollWind - 1)
df_modPctlDaysDif <- df_modPctlDaysDif[-ind_rm, ]
df_modPctlDaysDif[, -1] <- na.approx(df_modPctlDaysDif[, -1])
apply(df_modPctlDaysDif[, -1], 2, function(x) sum(is.na(x)))
ind_rm <- which(is.na(df_modPctlDaysDif[, which(colnames(df_modPctlDaysDif) == "CC=F")]))
df_modPctlDaysDif <- df_modPctlDaysDif[-ind_rm, ]
colnames(df_modPctlDaysDif)[-1] <- paste(colnames(df_modPctlDaysDif)[-1], "pctlDaysDif")
#---
df_modP[, -1] <- log(df_modP[, -1])
#---
#list_df <- list(df_modDif, df_modPctl, df_modPctlDaysDif, df_yUp, df_yDn)
#-----------------------------------------------------------------------------
# Which features to include
#list_df <- list(df_modP, df_modDif, df_modPctl, df_yUp, df_yDn)
list_df <- list(df_modPctl, df_modVol, df_modDif, df_modPctlDaysDif, df_yUp, df_yDn)
#-----------------------------------------------------------------------------
# Join features and labels; drop the rolling-window warm-up only when raw
# (non-percentile) features are mixed with percentile features.
df_mod <- plyr::join_all(list_df, by = "date")
this_condition <- (length(grep(" p| vol| dif", colnames(df_mod))) != 0 &
length(grep(" pctl| pctlDaysDif| pctlDaysAbove| pctlDaysBelow", colnames(df_mod))))
if(this_condition){
ind_rm <- 1:(rollWind - 1)
df_mod <- df_mod[-ind_rm, ]
}
#df_mod[which(!is.na(df_mod$y) & !is.na(df_mod$yDn)), ]
# Merge the down-trend labels into y; unlabeled rows become "Hold".
ind_dn <- which(!is.na(df_mod$yDn))
df_mod$y[ind_dn] <- df_mod$yDn[ind_dn]
df_mod$yDn <- NULL
df_mod$y[which(is.na(df_mod$y))] <- "Hold"
#---
numNA_vec <- apply(df_mod[, -1], 2, function(x) sum(is.na(x)))
table(numNA_vec)
#df_mod$date[which(is.na(df_mod$`CL=F p`))]
df_mod[, -c(1, ncol(df_mod))] <- na.approx(df_mod[, -c(1, ncol(df_mod))])
#---
# How correlated are the features?
df_cor <- df_mod %>% select(-date, -y) %>% cor() %>% as.data.frame()
#corrplot::corrplot(method = 'circle',tl.pos='n')
#---
# "Hold" is the reference level for the multinomial model.
df_mod$y <- as.factor(df_mod$y)
df_mod$y <- relevel(df_mod$y, ref = "Hold")
# NOTE(review): run-order issue — df_train is defined a few lines below;
# this contrasts() call fails if the script is run top-to-bottom.
contrasts(df_train$y)
# Chronological 70/30 train/test split (no shuffling).
trainDat_pctot <- .7
indtrain_beg <- 1
indtrain_end <- round(nrow(df_mod) * trainDat_pctot)
indtest_beg <- indtrain_end + 1
indtest_end <- nrow(df_mod)
indtrain <- indtrain_beg:indtrain_end
indtest <- indtest_beg:indtest_end
df_train <- df_mod[indtrain, ]
df_test <- df_mod[indtest, ]
# nrow(df_mod)
# nrow(df_train)
# nrow(df_test)
# Multinomial logistic regression using nnet package
#https://medium.com/@PAdhokshaja/using-anova-and-multinomial-logistic-regression-to-predict-human-activity-cd2101a5e8bf
df_train$date <- NULL
model <- nnet::multinom(y ~ ., data = df_train, maxit = 400)
#summary(model)
#library(AER)
#coeftest(model)
library(car)
Anova(model)
# df_broom <- as.data.frame(broom::tidy(model))
# df_broom[which(df_broom$p.value < 0.05), ]
#Prediction
mat_pred <- predict(model, df_test, type = "class")#type = "probs")
# ind_na <- which(is.na(mat_pred))
# ind_na
# mat_pred[ind_na] <- "Hold"
unique(mat_pred)
df_pred <- as.data.frame(mat_pred)
colnames(df_pred)[1] <- "yPred"
df_pred$yObs <- df_test$y
#misClasificError <- mean(df_pred$yPred != df_pred$yObs)
library(caret)
postResample(df_pred$yObs, df_pred$yPred)
# Confusion matrix, column-normalized so each Reference column sums to 1.
x <- confusionMatrix(df_pred$yPred, df_pred$yObs)
confmat <- x$table
confmat <- round(confmat %*% solve(diag(colSums(confmat))), 3)
confmat <- as.table(confmat)
colnames(confmat) <- rownames(confmat)
names(dimnames(confmat))[2] <- "Reference"
print(confmat)
print(x$table)
#class(confmat)
# Heatmap of the normalized confusion matrix.
df_plot <- as.data.frame(confmat)
gg <- ggplot(df_plot) + geom_tile(aes(x = Prediction, y = Reference, fill = Freq))
gg <- gg + scale_fill_gradient(low = "orange", high = "cyan")
gg
#-----------------------------------------------------------------------------
# If binary model
# model <- glm(y ~., family = binomial(link = 'logit'), data = df_train)
# summary(model)
# anova(model, test="Chisq")
# fitted.prob <- predict(model, newdata = df_test, type = 'response')
# fitted.binry <- ifelse(fitted.prob > 0.6, 1, 0)
# df_compare <- data.frame(predicted_prob = fitted.prob, predicted_binry = fitted.binry, observed = df_test$y)
# df_compare$observed <- ifelse(df_test$y == "Start", 1, 0)
# misClasificError <- mean(df_compare$predicted_binry != df_compare$observed)
# print(paste('Accuracy', round(1 - misClasificError, 2)))
# misClasificError <- mean(abs(df_compare$observed - df_compare$predicted_prob))
# print(paste('Accuracy (prob)', round(misClasificError, 2)))
#-----------------------------------------------------------------------------
# NOTE(review): the backtest plotting below references df_compare, df_ML_date,
# xts_cp_mat, this_ts_name and df_upTrends, none of which are defined in the
# active code above (df_compare only exists in the commented-out binary-model
# block). This section appears to be leftover from an earlier version —
# confirm before running.
df_compare$Date <- df_ML_date[indtest]
df_plot <- fortify(xts_cp_mat[, this_ts_name])
colnames(df_plot) <- c("Date", "cp")
df_plot <- left_join(df_plot, df_compare)
indtest_cp <- c(which(df_plot$Date == df_compare$Date[1]):nrow(df_plot))
df_plot <- df_plot[indtest_cp,]
df_plot_up_true <- subset(df_upTrends, `False uptrend` == 0)
df_plot_up_false <- subset(df_upTrends, `False uptrend` == 1)
# Per-interval shading by predicted probability (NA -> neutral 0.5).
n_bins <- length(indtest_cp)
df_probGradient <- data.frame(xmin = df_plot$Date[-n_bins], xmax = df_plot$Date[-1])
df_probGradient$predProbs <- df_plot$predicted_prob[-1]
u <- df_probGradient$predProbs
df_probGradient$predProbs[which(is.na(u))] <- 0.5
gg <- ggplot()
gg <- gg + geom_rect(data = df_probGradient, aes(xmin = xmin, xmax = xmax,
ymin = -Inf, ymax = Inf, fill = predProbs), alpha = 0.7)
gg <- gg + scale_fill_gradient2(low = "darkmagenta", mid = "white", high = "green", midpoint = 0.5)
gg <- gg + geom_line(data = df_plot, aes(x = Date, y = cp))
gg <- gg + theme_bw()
gg <- gg + ggtitle("Backtest: Predicted start in green, false start in red. Shade = confidence.")
gg <- gg + theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none",
plot.title = element_text(hjust = 0.5))
gg
gg_predicted <- gg
# Observed uptrends over the same test window, for visual comparison.
indtest_trends <- which(df_upTrends$UpStartDate %in% df_plot$Date)
df_upTrends_test <- df_upTrends[indtest_trends,]
gg <- ggplot()
gg <- gg + geom_rect(data = df_upTrends_test, aes(xmin = UpStartDate, xmax = UpStopDate,
ymin = -Inf, ymax = Inf, fill = `Pct. Change/Time`), alpha = 0.7)
gg <- gg + scale_fill_gradient2(low = "darkmagenta", mid = "khaki2", high = "green")
gg <- gg + geom_line(data = df_plot, aes(x = Date, y = cp))
gg <- gg + scale_x_date(date_breaks = "1 month", date_labels = "%b-%Y")
gg <- gg + geom_vline(data = df_plot_up_false, aes(xintercept = UpStartDate), color = "darkmagenta", alpha = 0.4)
gg <- gg + geom_vline(data = df_plot_up_false, aes(xintercept = UpStopDate), color = "darkmagenta", alpha = 0.4)
#gg <- gg + geom_vline(data = df_compare, aes(xintercept = Date), color = "green", size = 0.5, linetype = "dotted")
# gg <- gg + geom_hline(data = df_plot_mean_line, aes(yintercept = mu_line), color = "blue")
# gg <- gg + geom_hline(data = df_plot_sd_lines1, aes(yintercept = sd_line), color = "orange")
# gg <- gg + geom_hline(data = df_plot_sd_lines2, aes(yintercept = sd_line), color = "orange")
gg <- gg + theme_bw()
gg <- gg + ggtitle("Uptrends marked in green, false trends in red. Shade = return intenstity")
gg <- gg + theme(axis.title.x = element_blank(),
#                 axis.text.x = element_blank(),
#                 axis.ticks.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none",
plot.title = element_text(hjust = 0.5))
gg
gg_observed <- gg
#library(gridExtra)
grid.arrange(gg_predicted, gg_observed)
|
991adc12b62ca84995ccd2e1515e5e32de6ac19b
|
01f73fa4fd26e494ee7bf64a1f0fb310e4159491
|
/map_plot.R
|
53ba51eb293c848ecb1d37adcc265368de39770a
|
[] |
no_license
|
xz2476/EDAV_Project_2
|
dd170b0504e1e27f58358d2c7f7c8fe1bd606f8d
|
892a944323007043fa547bea9f489169d472d208
|
refs/heads/master
| 2020-04-09T19:48:48.232574
| 2016-03-09T03:30:12
| 2016-03-09T03:30:12
| 52,036,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,907
|
r
|
map_plot.R
|
# Arrange multiple ggplot objects on one page using grid viewports.
# (Standard "Cookbook for R" multiplot helper.)
#
# Args:
#   ...: ggplot objects supplied individually.
#   plotlist: optional list of ggplot objects, appended to those in `...`.
#   file: unused; retained so existing calls that pass it keep working.
#   cols: number of columns in the layout (used only when `layout` is NULL).
#   layout: optional matrix of plot indices; e.g.
#     matrix(c(1, 2, 3, 3), nrow = 2, byrow = TRUE) puts plot 3 across the
#     whole bottom row. Overrides `cols`.
#
# Side effects: draws the plots on the current graphics device.
multiplot <- function(..., plotlist = NULL, file, cols = 1, layout = NULL) {
  library(grid)
  # Combine the individually supplied plots with any list of plots.
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  # If no explicit layout is given, build a cols-wide grid, filling columns
  # top-to-bottom.
  if (is.null(layout)) {
    layout <- matrix(seq(1, cols * ceiling(numPlots / cols)),
                     ncol = cols, nrow = ceiling(numPlots / cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page: one viewport cell per layout cell.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Draw each plot in the cell(s) whose layout entry matches its index,
    # so a repeated index spans multiple cells.
    for (i in seq_len(numPlots)) {
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
#library(ggmap)
#library(mapproj)
#map <- get_map(zoom = 3)
#ggmap(map)
#library(rworldmap)
# Plot global flood events on a world map, sized by four metrics and then by
# magnitude per meteorological season.
library(ggplot2)
flood_data = read.csv("GlobalFloodsRecord.csv")
#newmap <- getMap(resolution = "hi")
#plot(newmap)
#points(as.numeric(as.character(flood_data$Centroid.X)), as.numeric(as.character(flood_data$Centroid.Y)), col = "blue",cex = .6)
# Keep the first 3419 rows and coerce factor columns to numeric.
flood_data = flood_data[1:3419,]
flood_data$Centroid.X = as.numeric(as.character(flood_data$Centroid.X))
flood_data$Centroid.Y = as.numeric(as.character(flood_data$Centroid.Y))
flood_data$Magnitude..M... = as.numeric(as.character(flood_data$Magnitude..M...))
flood_data$Duration.in.Days = as.numeric(as.character(flood_data$Duration.in.Days))
flood_data$Severity..= as.numeric(as.character(flood_data$Severity..))
flood_data$Affected.sq.km = as.numeric(as.character(flood_data$Affected.sq.km))
# Shorter column names for plotting.
colnames(flood_data)[19] = "Magnitude"
colnames(flood_data)[12] = "Duration"
colnames(flood_data)[17] = "Severity"
#mp <- NULL
#mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
#mp <- ggplot() + mapWorld
##Now Layer the cities on top
#mp <- mp + geom_point(aes(flood_data$Centroid.X, flood_data$Centroid.Y) ,color="blue", size=1)
#mp
# NOTE(review): attach() is discouraged — columns are then referenced as bare
# names below; prefer explicit flood_data$... references.
attach(flood_data)
# Four metrics: Magnitude, Duration, Severity, Affected area.
mp <- NULL
mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp <- ggplot() + mapWorld
mp <- mp+ geom_point(aes(Centroid.X, Centroid.Y,size = Magnitude),shape=21,colour = "black",fill="cornsilk",alpha = 0.5)
mp <- mp + scale_size_area(breaks= c(2,3,4,5,6,7),trans = 'exp',max_size = 15)
mp2 <- NULL
mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp2 <- ggplot() + mapWorld
mp2 <- mp2+ geom_point(aes(Centroid.X, Centroid.Y,size = Duration),shape=21,colour = "black",fill="cornsilk",alpha = 0.5)
mp2 <- mp2 + scale_size_area(max_size=15)
mp3 <- NULL
mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp3 <- ggplot() + mapWorld
mp3 <- mp3+ geom_point(aes(Centroid.X, Centroid.Y,size = Severity),shape=21,colour = "black",fill="cornsilk",alpha = 0.5)
mp3 <- mp3 + scale_size_area(breaks= c(1,1.5,2),trans = 'exp',max_size = 8)
mp4 <- NULL
mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp4 <- ggplot() + mapWorld
mp4 <- mp4+ geom_point(aes(Centroid.X, Centroid.Y,size = Affected.sq.km),shape=21,colour = "black",fill="cornsilk",alpha = 0.5)
mp4 <- mp4 + scale_size_area(max_size=15)
multiplot(mp, mp2, mp3, mp4, cols=1)
# Magnitude by meteorological season (1995-2015 (21 years)):
# Spring: March - May
# Summer: June - Aug
# Autumn: Sep - Nov (variable below is spelled `Autume` — kept to match code)
# Winter: Dec - Feb
Began = as.Date(Began,"%d-%b-%y")
Month = as.numeric(format(Began, "%m"))
detach(flood_data)
# NOTE(review): the logical index is truncated to 3393 elements while
# flood_data has 3419 rows — confirm this offset is intentional.
# Columns 19:21 are Magnitude plus the two centroid coordinates.
Spring = flood_data[(Month %in% c(3,4,5))[1:3393],19:21]
Summer = flood_data[(Month %in% c(6,7,8))[1:3393],19:21]
Autume = flood_data[(Month %in% c(9,10,11))[1:3393],19:21]
Winter = flood_data[(Month %in% c(12,1,2))[1:3393],19:21]
mp <- NULL
mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp <- ggplot() + mapWorld
mp <- mp+ geom_point(aes(Spring$Centroid.X, Spring$Centroid.Y,size = Spring$Magnitude),shape=21,colour = "black",fill="cornsilk",alpha = 0.5)
mp <- mp + scale_size_area(breaks= c(2,3,4,5,6,7),trans = 'exp',max_size = 15)
mp2 <- NULL
mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp2 <- ggplot() + mapWorld
mp2 <- mp2+ geom_point(aes(Summer$Centroid.X, Summer$Centroid.Y,size = Summer$Magnitude),shape=21,colour = "black",fill="cornsilk",alpha = 0.5)
mp2 <- mp2 + scale_size_area(breaks= c(2,3,4,5,6,7),trans = 'exp',max_size = 15)
mp3 <- NULL
mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp3 <- ggplot() + mapWorld
mp3 <- mp3+ geom_point(aes(Autume$Centroid.X, Autume$Centroid.Y,size = Autume$Magnitude),shape=21,colour = "black",fill="cornsilk",alpha = 0.5)
mp3 <- mp3 + scale_size_area(breaks= c(2,3,4,5,6,7),trans = 'exp',max_size = 15)
mp4 <- NULL
mapWorld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp4 <- ggplot() + mapWorld
mp4 <- mp4+ geom_point(aes(Winter$Centroid.X, Winter$Centroid.Y,size = Winter$Magnitude),shape=21,colour = "black",fill="cornsilk",alpha = 0.5)
mp4 <- mp4 + scale_size_area(breaks= c(2,3,4,5,6,7),trans = 'exp',max_size = 15)
multiplot(mp, mp2, mp3, mp4, cols=1)
# reasons
|
e579290096781f012aeda218a7c1e714e3702957
|
a45bdb32cd9b137bc8d6744186d997a80315deb4
|
/code/prepare_data/aggregate_VWC_data.R
|
8cffb9f16446bbf0e51a9d5e321a32b4a3d77c75
|
[] |
no_license
|
akleinhesselink/forecast-plants
|
afff9bd51bd19dd37aeaed020eb84d857195874f
|
272774addd8be03492f8fc095bd36a6089eaf522
|
refs/heads/master
| 2023-08-22T23:59:24.015321
| 2023-08-14T16:11:03
| 2023-08-14T16:11:03
| 181,106,169
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,453
|
r
|
aggregate_VWC_data.R
|
#######################################################################################
#
# Setup seasonal SoilWAT variables for demographic rate models
#
# Aggregates daily soil volumetric water content (VWC) by treatment to
# annual, seasonal, and monthly means, tags each year with a monitoring
# Period, and writes the three summary tables to data/temp/.
#
#######################################################################################
# NOTE(review): rm(list = ls()) wipes the whole workspace — acceptable only
# because this is a standalone script; never move this into shared code.
rm(list = ls())
library(tidyverse)
library(zoo)
# input ---------------------------------------------------- #
load('code/figure_scripts/my_plotting_theme.Rdata')
df <- read_csv('data/temp/daily_swVWC_treatments.csv')
# comes from the soilMoistureTreatmentEffects script
# output ---------------------------------------------------- #
seasonal_outfile <- 'data/temp/seasonal_VWC.csv'
annual_outfile <- 'data/temp/annual_VWC.csv'
monthly_outfile <- 'data/temp/monthly_avg.csv'
# make time periods --------------------------------------------------------------------
# Tag each calendar year as Historical (1925-1957), not monitored
# (1958-2006), or Modern (2007-2016).
p1 <- data.frame( Period = 'Modern', year = 2007:2016)
p2 <- data.frame( Period = 'not monitored', year = 1958:2006)
p3 <- data.frame( Period = 'Historical', year = 1925:1957)
periods <- data.frame( rbind( p1, p2, p3 ))
# set-up aggregate seasonal variables for model ----------------------------------------#
# NOTE(review): `lag_year`, `season`, `season_label` and `precip_seasons` are
# assumed to be columns in the input CSV — confirm against the upstream script.
df <-
df %>%
ungroup() %>%
mutate( year = year(date)) %>%
mutate( water_year = year + lag_year ) %>%
dplyr::select(year, month, season, season_label, precip_seasons, water_year, Treatment, date, VWC)
# ---------- annual soil moisture -------------------------------------------------#
annual_VWC <-
df %>%
group_by( year, Treatment ) %>%
summarise (avg_VWC = mean(VWC, na.rm = TRUE)) %>%
left_join(periods, by = 'year')
# ---------- seasonal soil moisture -----------------------------------------------#
seasonal_VWC <-
df %>%
mutate(year = ifelse(month == 12 , year + 1, year )) %>% # December of year x is in winter of year x + 1
group_by(year, season, Treatment) %>%
summarise( avg = mean(VWC, na.rm = TRUE) ) %>%
left_join(periods, by = 'year')
# ---------- monthly soil moisture (Control treatment only) -----------------------#
monthly_avg <-
df %>%
filter( Treatment == 'Control') %>%
group_by( month, year ) %>%
summarise (avg_VWC = mean(VWC, na.rm = TRUE) ) %>%
left_join(periods, by = 'year')
# -------- output -----------------------------------------------------------------------------#
write_csv( seasonal_VWC, file = seasonal_outfile)
write_csv( annual_VWC, file = annual_outfile)
write_csv( monthly_avg, file = monthly_outfile)
|
ef32d1e7433ba3079285d6fedad6cef6f0ad8d86
|
f87d3c4960a5a6b835ab16b41827c804bc6bd9db
|
/man/simCounts.Rd
|
338d1482499b60c08c51aa58a0dfb36962107cbd
|
[] |
no_license
|
XimenezJP/CeTF
|
82442535eb2d76340084feb169f6e173ef17fda4
|
ea5f0273726856e3841ed93b3c196cd222de1e70
|
refs/heads/master
| 2020-12-23T22:52:42.218493
| 2020-01-27T11:31:13
| 2020-01-27T11:31:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 443
|
rd
|
simCounts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simCounts-data.R
\docType{data}
\name{simCounts}
\alias{simCounts}
\title{Simulated counts data}
\format{A data frame.}
\usage{
data(simCounts)
}
\description{
Simulated counts data created using PROPER package.
This data contains 21,000 genes, 1,000 transcript factors
and 20 samples (divided in two conditions).
}
\examples{
data(simCounts)
}
\keyword{datasets}
|
7889f3d05f3c895e259a973d957b8a46a7f145ae
|
a4e972b81b458f5cccb3952484c13611d0236b0e
|
/findVerbs.R
|
db919f9651e9d540e05cdd53a7c8b8a9c4a6d01a
|
[] |
no_license
|
jeroenclaes/JCmisc
|
8576d7bfb70536c053b2527af54f893dd9a1b973
|
d6f1e5a8d261e44edf3258b93fff275b828808e7
|
refs/heads/master
| 2021-01-12T14:26:12.666371
| 2016-10-07T13:57:06
| 2016-10-07T13:57:06
| 70,060,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,621
|
r
|
findVerbs.R
|
#READ ME CAREFULLY!
#This script is released "as is" under the terms of the Creative Commons #Attribution-Non Commercial 2.0 Generic (CC BY-NC 2.0) license, without any #warranty. You may use them at your own risk, provided you respect the terms #of the license.
#https://creativecommons.org/licenses/by-nc/2.0/
#This script will read text files, parse them with the Stanford POS tagger, and extract/annotate all conjugated verbs.
#It was designed for use in an investigation on variable absence/presence of Spanish subject personal pronouns
#The script divides sentence units into clauses (conjugated verbs) and annotates these clause units for the following features: verb lemma, person and number, tense, mode, absence/presence of negation, clause number, number of words between verb and pronoun site, words between verb and pronoun site, and tense aspect.
#***Needless to say, the output requires careful manual post-editing.****
#To use this script:
#1. Download the **FULL** Stanford tagger from the Stanford NLP Group website (http://nlp.stanford.edu/software/stanford-postagger-full-2015-04-20.zip). You may also want to update the JAVA installation on your computer
#2. Unzip the Stanford tagger
#3. Remember where you unzipped the tagger :).
#. Run findVerbs(file.choose(), "path to your installation of the parser", "path to the installation of the parserModel")
#4.Find file.csv. This script uses ";" as field separator, if you're in a 'comma' csv region, do a search/replace to change 'write.csv2' to 'write.csv'
findVerbs<-function(file, parserPath, parserModel)
{
# Purpose: POS-tag a plain-text file with the Stanford tagger, split each
# sentence into clauses around its conjugated verbs, and annotate every
# clause for a study of Spanish subject-pronoun expression.
# Args:
#   file        - path to the plain-text input file
#   parserPath  - classpath of the Stanford tagger jar (passed to java -cp)
#   parserModel - path to the Spanish tagger model file
# Side effects: writes "file.csv" (semicolon-separated, via write.csv2) to
# the working directory and creates/overwrites the globals corpus.df
# (one row per annotated clause) and discarded.df (skipped sentences).
require("XML")
require("stringr")
require("foreach")
# z counts sentence units without content words; verbs numbers the clauses
z=1
verbs=1
start.time<-proc.time()
corpus.df<<-data.frame(sentence.id=numeric(), verb.nr=numeric(), who=character(),before=character(), token=character(),after=character(),verb.lemma=character(), verb.person=character(), type=character(), tense=character(), number.words.before.verb=numeric(), words.before.verb=character(), negation=character(), mode=character(), aspect=character(), stringsAsFactors=FALSE)
discarded.df<<-data.frame(sentence=character(), sentence.id=numeric(), stringsAsFactors = FALSE)
# Shell out to the Stanford tagger, parse its XML output, then remove the file
system(paste0("java -cp ", parserPath, " edu.stanford.nlp.tagger.maxent.MaxentTagger -model ", parserModel," -textFile ", file," -outputFormat xml > ",getwd(),"/document.xml", sep="", collapse=""))
xmltop<-xmlRoot(xmlParse("document.xml"))
file.remove("document.xml")
total<-xmlSize(xmltop)
#loop through sentences
foreach (i=1:total) %do%
{
tryCatch({
vector.v<-c()
#loop through words in sentences
# Rebuild each sentence as "word/POS" pairs, but only for the tag classes
# of interest (verbs, determiners, nouns, pronouns); other words are kept
# as bare tokens.
foreach (y= 1:xmlSize(xmltop[[i]])) %do%
{
if(grepl("vs|vmi|va|vms|dp|nc|da|pp|np|p0", xmlAttrs(xmltop[[i]][[y]])["pos"]))
{
v<-paste0(c(xmlValue(xmltop[[i]][[y]]), xmlAttrs(xmltop[[i]][[y]])["pos"]), collapse="/")
}
else
{
v<-xmlValue(xmltop[[i]][[y]])
}
append(vector.v, v)->vector.v
}
sentence<-trimws(paste(vector.v, collapse=" "))
# Heuristic speaker attribution: bracketed turns (-LRB-/-RRB-) are taken to
# be the interviewer unless they contain the backchannel "Aja."
if(grepl("RRB|LRB", sentence))
{
if(grepl("Aja.", sentence))
{
who<-"speaker"
}
else
{
who<-"interviewer"
}
}
else
{
who<-"speaker"
}
# Strip non-finite verb tags (infinitives, participles) and tagger
# artifacts: bracket tokens, laughter/backchannel markers, and discourse
# markers ("o sea", "es decir") that were mis-tagged as verbs.
sentence<-gsub("-RRB-/vmis000", ")", sentence)
sentence<-gsub("/vmn0000", "", sentence)
sentence<-gsub("/vmp0000", "", sentence)
sentence<-gsub("/vap0000", "", sentence)
sentence<-gsub("o sea/vssp000", "o sea", sentence)
sentence<-gsub("O sea/vssp000", "O sea", sentence)
sentence<-gsub("/vsn0000", "", sentence)
sentence<-gsub("/vsp0000", "", sentence)
sentence<-gsub("es/vsip000 decir", "es decir", sentence)
sentence<-gsub("Es/vsip000 decir", "es decir", sentence)
sentence<-gsub(" -LRB-/np00000", "(", sentence)
sentence<-gsub("-RRB-/np00000", ")", sentence)
sentence<-gsub("-LRB-", "(", sentence)
sentence<-gsub("-RRB-", ")", sentence)
sentence<-gsub("( Risas/np00000 )/np00000","", sentence)
sentence<-gsub("( Aja/np00000 . )", "", sentence)
sentence<-gsub("Aja/np00000 .", "", sentence)
sentence<-gsub("()", "", sentence)
sentence<-gsub("acá/vmn0000", "", sentence)
sentence<-gsub("Acá/vmn0000", "", sentence)
# Count the remaining finite-verb tags ("word/v...0") in the sentence
count.v<-as.integer(str_count(sentence, "([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\w")[[1]])
# ----- Case 1: several conjugated verbs -> segment into clauses ------------
if (count.v > 1)
{
instances<- str_locate_all(sentence, "\\b([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b")
total<-as.integer(str_count(sentence, "\\b([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b")[[1]])
foreach (x=1:total) %do%
{
start<-instances[[1]][x, "start"]
stop<-instances[[1]][x, "end"]
token<-trimws(substring(sentence,start, stop))
verbs+1->verbs
# The clause runs from the previous verb match to the next one (or the
# sentence boundary for the first/last verb)
if(x==1)
{
clause<-trimws(substring(sentence, 0, instances[[1]][x+1, "start"]))
}
else if(x==total)
{
clause<-trimws(substring(sentence, instances[[1]][x-1, "end"]+1))
}
else
{
clause<-trimws(substring(sentence, instances[[1]][x-1, "end"], instances[[1]][x+1, "start"]))
}
# If the finite verb is followed by a participle (-ado/-ido), treat the
# two words as one compound-tense token
if (grepl("\\b([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b[\\s]\\b([a-z]+(ado|ido))\\b", clause, perl=T))
{
token<-trimws(str_extract(clause, "\\b([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b[\\s]\\b([a-z]+(ado|ido))\\b"))
}
else
{
token<-trimws(str_extract(clause, "\\b([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b"))
}
# Classify the clause: subject pronoun before the verb (allowing
# negation and up to two intervening tagged words), after the verb,
# or absent ("zero")
if (grepl("((yo|tú|él|ella|usted|nosotros|vosotros|ustedes|ellos|ellas|uno)[/][a-z]+[0]+)\\b(([\\s](\\bno\\b|\\bnunca\\b)[\\s]\\b([a-z]+[/][a-z]+[0]+)\\b[\\s]\\b([a-z]+[/][a-z]+[0]+)\\b[\\s])|([\\s]\\b([a-z]+[/][a-z]+[0]+)\\b[\\s]\\b([a-z]+[/][a-z]+[0]+)\\b[\\s])|([\\s]\\b(no|nunca)\\b[\\s]([a-z]+[/][a-z]+[0]+)\\b[\\s])|([\\s]\\b(no|nunca)\\b[\\s])|([\\s]\\b[a-z]+[/][a-z]+[0]+)\\b[\\s]|[\\s])([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b", tolower(clause), perl=T))
{
type<-"preposed.pronoun"
}
else if (grepl("([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b[\\s](yo|tú|él|ella|usted|nosotros|vosotros|ustedes|ellos|ellas|uno)\\b", tolower(clause), perl=T))
{
type<-"postposed.pronoun"
}
else
{
type<-"zero"
}
tense<-trimws(gsub("[0]+","", str_extract(token, "([/][v][a-z]+[0]+)\\w"), perl=T))
tense<-gsub("[/]", "", tense, perl=T)
#human-readable tense labels & code for aspect
tense<-gsub("vmip", "present", tense)
tense<-gsub("vmii", "imperfect", tense)
tense<-gsub("vsip", "present", tense)
tense<-gsub("vsii", "imperfect", tense)
tense<-gsub("vmsp", "present.subjunctive", tense)
tense<-gsub("vssp", "present.subjunctive", tense)
tense<-gsub("vmic", "conditional", tense)
tense<-gsub("vsic", "conditional", tense)
tense<-gsub("vmis", "preterit", tense)
tense<-gsub("vsis", "preterit", tense)
tense<-gsub("vmsi", "past.subjunctive", tense)
tense<-gsub("vssi", "past.subjunctive", tense)
tense<-gsub("vsif", "morphological.future", tense)
tense<-gsub("vmif", "morphological.future", tense)
# Multi-word tokens headed by auxiliary "haber" (va* tags) are compound
# tenses; a bare auxiliary may be existential "haber"
if ((str_count(token, " ") > 0) && (tense=="vaip"))
{
tense<-gsub("vaip", "perfect", tense)
}
else if ((str_count(token, " ") > 0) && (tense=="vaii"))
{
tense<-gsub("vaip", "pluperfect", tense)
}
else if ((str_count(token, " ") > 0) && (tense=="vaif"))
{
tense<-gsub("vaip", "future.perfect", tense)
aspect<-"perfective"
}
else if((any(grep("\\b([a-z]+[/][va][a-z]+[0]+)\\b[\\s]\\b[a-z]+", sentence, perl=T))) && (any(grep("va", tense))))
{
tense<-"existential.haber?"
aspect<-"NA"
}
if (any(grep("morphological.future|past.subjunctive|conditional|present.subjunctive|imperfect|present", tense)))
{
aspect<-"imperfective"
}
else if (any(grep("preterit|perfect", tense)))
{
aspect<-"perfective"
}
else
{
aspect<-"NA"
}
#extract verb domain, negation/pronouns before verb
verb.domain<- trimws(str_extract(gsub("((yo|tú|él|ella|usted|nosotros|vosotros|ustedes|ellos|ellas|uno)[/][a-u]+[0]+)\\w", "", tolower(clause), perl=T), "(((\\bno\\b|\\bnunca\\b)[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b)|(\\b([a-z]+[/][a-u]+[0]+)\\b[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b)|((\\bno\\b|\\bnunca\\b)[\\s]([a-z]+[/][a-u]+[0]+)\\b)|((\\bno\\b|\\bnunca\\b))|(\\b[a-z]+[/][a-u]+[0]+))\\b[\\s]([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b"))
verb.domain.no.verb<-trimws(str_extract(tolower(verb.domain), "(((\\bno\\b|\\bnunca\\b)[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b)|(\\b([a-z]+[/][a-u]+[0]+)\\b[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b)|((\\bno\\b|\\bnunca\\b)[\\s]([a-z]+[/][a-u]+[0]+)\\b)|((\\bno\\b|\\bnunca\\b))|(\\b[a-z]+[/][a-u]+[0]+))\\b"))
words<-str_count(trimws(tolower(verb.domain.no.verb)), "\\b[a-z]+\\b")
#coding for negation
negation<-grepl("(\\bno\\b|\\bnunca\\b)", tolower(verb.domain), perl=T)
if(isTRUE(negation))
{
negation<-"present"
}
else
{
negation<-"absent"
}
# Mode: negated or non-indicative tenses are coded irrealis
if((negation=="present") || (tense=="present.subjunctive")||(tense=="morphological.future")||(tense=="past.subjunctive")||(tense=="conditional"))
{
mode<-"irrealis"
}
else
{
mode<-"realis"
}
verb.domain.no.verb<-trimws(gsub("([/][a-z]+[0]+)", "", verb.domain.no.verb, perl=T))
token<-trimws(gsub("([/][a-z]+[0]+)", "", token, perl=T))
token.original<-token
token<-tolower(token)
#clean up text for readability
clause <-trimws(gsub("([/][a-z]+[0][a-z]+[0]+)", "",clause, perl=T))
clause <-trimws(gsub("([/][a-z]+[0]+)", "", clause, perl=T))
#split up clause
split<-strsplit(clause, token)
before<-trimws(split[[1]][1])
after<-trimws(split[[1]][2])
before<-gsub("()", "", before)
after<-gsub("()", "", after)
#lookup verb and annotate
verb.info<-lookup.verb(token)
verb.lemma<-verb.info[[1]]
verb.person<-verb.info[[2]]
#writing to dataframe
newrow.corpus<-data.frame(sentence.id=xmlAttrs(xmltop[[i]]),verb.nr=verbs,who=who, before=before, token=token, after=after, verb.lemma=verb.lemma, verb.person=verb.person, type=type, tense=tense, number.words.before.verb=words, words.before.verb=verb.domain.no.verb,negation=negation, mode=mode, aspect=aspect, stringsAsFactors=FALSE)
rbind(globalenv()$corpus.df, newrow.corpus)->>corpus.df
}
}
# ----- Case 2: exactly one conjugated verb ---------------------------------
# Mirrors the multi-verb branch, but works on the whole sentence instead of
# a clause segment
else if(count.v==1)
{
verbs+1->verbs
if (grepl("([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\w[\\s]([a-z]+(ado|ido))", tolower(sentence), perl=T))
{
token<-str_extract(tolower(sentence), "([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\w[\\s]([a-z]+(ado|ido))")
}
else
{
token<-trimws(str_extract(sentence, "([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\w"))
}
if (grepl("((yo|tú|él|ella|usted|nosotros|vosotros|ustedes|ellos|ellas|uno)[/][a-z]+[0]+)\\b(([\\s](\\bno\\b|\\bnunca\\b)[\\s]\\b([a-z]+[/][a-z]+[0]+)\\b[\\s]\\b([a-z]+[/][a-z]+[0]+)\\b[\\s])|([\\s]\\b([a-z]+[/][a-z]+[0]+)\\b[\\s]\\b([a-z]+[/][a-z]+[0]+)\\b[\\s])|([\\s](\\bno\\b|\\bnunca\\b)[\\s]([a-z]+[/][a-z]+[0]+)\\b[\\s])|([\\s](\\bno\\b|\\bnunca\\b)[\\s])|([\\s]\\b[a-z]+[/][a-z]+[0]+)\\b[\\s]|[\\s])([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b", tolower(sentence), perl=T))
{
type<-"preposed.pronoun"
}
else if (grepl("([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\w[\\s](yo|tú|él|ella|usted|nosotros|vosotros|ustedes|ellos|ellas|uno)\\w", tolower(sentence), perl=T))
{
type<-"postposed.pronoun"
}
else
{
type<-"zero"
}
#human-readable tense labels and code for aspect
tense<-trimws(gsub("[0]+","", str_extract(token, "([/][v][a-z]+[0]+)\\w"), perl=T))
tense<-gsub("[/]", "", tense, perl=T)
tense<-gsub("vmip", "present", tense)
tense<-gsub("vmii", "imperfect", tense)
tense<-gsub("vsip", "present", tense)
tense<-gsub("vsii", "imperfect", tense)
tense<-gsub("vmsp", "present.subjunctive", tense)
tense<-gsub("vssp", "present.subjunctive", tense)
tense<-gsub("vmic", "conditional", tense)
tense<-gsub("vsic", "conditional", tense)
tense<-gsub("vmis", "preterit", tense)
tense<-gsub("vsis", "preterit", tense)
tense<-gsub("vmsi", "past.subjunctive", tense)
tense<-gsub("vssi", "past.subjunctive", tense)
tense<-gsub("vsif", "morphological.future", tense)
tense<-gsub("vmif", "morphological.future", tense)
if ((str_count(token, " ") > 0) && (tense=="vaip"))
{
tense<-gsub("vaip", "perfect", tense)
}
else if ((str_count(token, " ") > 0) && (tense=="vaii"))
{
tense<-gsub("vaip", "pluperfect", tense)
}
else if ((str_count(token, " ") > 0) && (tense=="vaif"))
{
tense<-gsub("vaip", "future.perfect", tense)
aspect<-"perfective"
}
else if((any(grep("\\b([a-z]+[/][va][a-z]+[0]+)\\b[\\s]\\b[a-z]+", sentence, perl=T))) && (any(grep("va", tense))))
{
tense<-"existential.haber?"
aspect<-"NA"
}
if (any(grep("morphological.future|past.subjunctive|conditional|present.subjunctive|imperfect|present", tense)))
{
aspect<-"imperfective"
}
else if (any(grep("preterit|perfect", tense)))
{
aspect<-"perfective"
}
else
{
aspect<-"NA"
}
verb.domain<- trimws(str_extract(gsub("((yo|tú|él|ella|usted|nosotros|vosotros|ustedes|ellos|ellas|uno)[/][a-u]+[0]+)\\w", "", tolower(sentence), perl=T), "(((\\bno\\b|\\bnunca\\b)[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b)|(\\b([a-z]+[/][a-u]+[0]+)\\b[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b)|((\\bno\\b|\\bnunca\\b)[\\s]([a-z]+[/][a-u]+[0]+)\\b)|((\\bno\\b|\\bnunca\\b))|(\\b[a-z]+[/][a-u]+[0]+))\\b[\\s]([a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+[/][v][a-z]+[0]+)\\b"))
verb.domain.no.verb<-trimws(str_extract(tolower(verb.domain), "(((\\bno\\b|\\bnunca\\b)[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b)|(\\b([a-z]+[/][a-u]+[0]+)\\b[\\s]\\b([a-z]+[/][a-u]+[0]+)\\b)|((\\bno\\b|\\bnunca\\b)[\\s]([a-z]+[/][a-u]+[0]+)\\b)|((\\bno\\b|\\bnunca\\b))|(\\b[a-z]+[/][a-u]+[0]+))\\b"))
words<-str_count(trimws(tolower(verb.domain.no.verb)), "\\b[a-z]+\\b")
# NOTE(review): the next line is immediately overwritten by the
# word-boundary version below; it has no effect
negation<-grepl("(no|nunca)", tolower(verb.domain), perl=T)
negation<-grepl("(\\bno\\b|\\bnunca\\b)", tolower(verb.domain), perl=T)
if(isTRUE(negation))
{
negation<-"present"
}
else
{
negation<-"absent"
}
if((negation=="present") || (tense=="present.subjunctive")||(tense=="morphological.future")||(tense=="past.subjunctive")||(tense=="conditional"))
{
mode<-"irrealis"
}
else
{
mode<-"realis"
}
#clean up text for readability
sentence <-trimws(gsub("([/][a-z]+[0][a-z]+[0]+)", "",sentence, perl=T))
sentence <-trimws(gsub("([/][a-z]+[0]+)", "", sentence, perl=T))
verb.domain.no.verb<-trimws(gsub("([/][a-z]+[0]+)", "", verb.domain.no.verb, perl=T))
token<-trimws(gsub("([/][a-z]+[0]+)", "", token, perl=T))
token.original<-token
token<-tolower(token)
#split up clause in before - after token
split<-strsplit(sentence, token.original)
before<-trimws(split[[1]][1])
after<-trimws(split[[1]][2])
before<-gsub("()", "", before)
after<-gsub("()", "", after)
#lookup verb person/number and infinitive and annotate
verb.info<-lookup.verb(token)
verb.lemma<-verb.info[[1]]
verb.person<-verb.info[[2]]
newrow.corpus<-data.frame(sentence.id=xmlAttrs(xmltop[[i]]),verb.nr=verbs,who=who, before=before, token=token, after=after, verb.lemma=verb.lemma, verb.person=verb.person, type=type, tense=tense, number.words.before.verb=words, words.before.verb=verb.domain.no.verb,negation=negation, mode=mode, aspect=aspect, stringsAsFactors=FALSE)
rbind(globalenv()$corpus.df, newrow.corpus)->>corpus.df
}
# ----- Case 3: no conjugated verb ------------------------------------------
# Verbless sentences with nominal material are kept as "NPs-nouns-dps" rows;
# anything else is recorded in discarded.df
else
{
if(grepl("dp|nc|da|pp|np", sentence))
{
#clean up text for readability
token<-"NA"
tense<-"NA"
type<-"NPs-nouns-dps"
negation<-"NA"
words<-0
verb.domain.no.verb<-"NA"
mode<-"NA"
verb.lemma<-"NA"
verb.person<-"NA"
sentence <-gsub("([/][a-u]+[0][a-z]+[0]+)", "", sentence, perl=T)
sentence <-gsub("([/][a-u]+[0]+)", "", sentence, perl=T)
before<-sentence
after<-"NA"
aspect<-"NA"
newrow.corpus<-data.frame(sentence.id=xmlAttrs(xmltop[[i]]),verb.nr=verbs,who=who, before=before, token=token, after=after, verb.lemma=verb.lemma,verb.person=verb.person, type=type, tense=tense, number.words.before.verb=words, words.before.verb=verb.domain.no.verb,negation=negation, mode=mode, aspect=aspect, stringsAsFactors=FALSE)
rbind(globalenv()$corpus.df, newrow.corpus)->>corpus.df
}
else
{
newrow.discarded<-data.frame(sentence=sentence, sentence.id=xmlAttrs(xmltop[[i]]))
rbind(globalenv()$discarded.df, newrow.discarded)->>discarded.df
}
z+1->z
}
},
warning=function(w){print(as.character(w))},
error=function(e){print(as.character(e))} )
}
# Report timing and skip count, then write the semicolon-separated output
elapsed<-proc.time()-start.time
cat ("\n Searching the data took ", elapsed[[3]], " seconds \n")
cat("\n Skipped", z, "sentence units which did not contain content words\n")
cat("\n Call the variable 'discarded.df' to inspect them.\n")
write.csv2(corpus.df, file="file.csv", fileEncoding="iso-8859-1")
}
lookup.verb<-function(token)
{
# Purpose: return the lemma and person/number of a conjugated Spanish verb
# form. The token is looked up in a local CSV cache first; on a miss the
# Grampal web tagger (cartago.lllf.uam.es) is queried, its HTML response is
# scraped, and the new entry is appended to the cache.
# Args: token - the verb form (possibly multi-word for compound tenses)
# Returns: list(lemma=..., conjugation=...); both "NA" when the form is
# unknown, non-finite, mis-categorised, or the lookup raises a condition.
# NOTE(review): the cache path below is a hard-coded absolute path on the
# author's machine - it must be adapted before reuse.
# The global assignment makes the token visible to the condition handlers
token<<-token
database<-read.csv2("/Users/jeroenclaes/Dropbox/Public/Google Drive/PostDoc/Methods/verbInfo.csv", header=T, row.names=1, fileEncoding="iso-8859-1", stringsAsFactors=F)
lemma.result<-"NA"
conjugation<-"NA"
#let's see if we have the token already
tryCatch({
if(any(which(database[,"token"]==as.character(trimws(token)))))
{
# Cache hit: take the first matching row
lemma.result<-as.character(database[which(database[,"token"]==as.character(trimws(token)))[1], "verb.lemma"])
conjugation<-as.character(database[which(database[,"token"]==as.character(trimws(token)))[1], "verb.person"])
}
else
{
# Cache miss: query the Grampal tagger over HTTP and scrape the HTML
require("RCurl")
require("stringr")
url<- paste(
"http://cartago.lllf.uam.es/grampal/grampal.cgi?m=etiqueta&e=",
gsub(" ", "%20", trimws(token)), sep = "" )
page<- getURL(url = url,header = F, .encoding="UTF-8")
conjugation<-str_extract_all(page, "(singular|plural) [1-6]", T)
lemma<-str_extract_all(page, "lema <b>[a-zA-Z\\xE1\\xE9\\xED\\xF3\\xFA\\xC1\\xC9\\xCD\\xD3\\xDA\\xF1\\xD1]+</b>", T)
category<-str_extract(page, "categoría <b>[A-Z]+</b>")
category<-sub("categoría <b>", "", category)
category<-sub("</b>", "", trimws(category))
# Reject unknown forms and non-finite forms (gerunds, infinitives)
if(any(grep("(lema <b>UNKN</b>)|(rasgos <b> gerundio </b>)|(rasgos <b> infinitivo </b>)", page, perl=T)))
{
conjugation<-"NA"
lemma.result<-"NA"
}
else if ((any(grep("[A-U]+", category, perl=T))&&(!any(grep("AUX", category, perl=T)))))
{
conjugation<-"NA"
lemma.result<-"NA"
}
else
{
# Normalise whitespace in the scraped person/number string,
# e.g. "singular 1" -> "singular.1"
conjugation<-gsub(" ", " ", conjugation[1,1])
conjugation<-trimws(sub(" ", " ", conjugation))
conjugation<-gsub(" ", ".", conjugation)
# Compound tokens yield two lemma matches; take the main verb's lemma
if(ncol(lemma)>1)
{
lemma.result<-gsub("lema <b>", "", lemma[1,2])
lemma.result<-sub("</b>", "", lemma.result)
lemma.result<-trimws(tolower(lemma.result))
}
else
{
lemma.result<-gsub("lema <b>", "", lemma[1,1])
lemma.result<-sub("</b>", "", lemma.result)
lemma.result<-trimws(tolower(lemma.result))
}
# Append the freshly scraped entry to the on-disk cache
update<-data.frame(token=as.character(token), verb.lemma=as.character(lemma.result), verb.person=as.character(conjugation), stringsAsFactors = F)
rbind(database,update)->database
write.csv2(database, "/Users/jeroenclaes/Dropbox/Public/Google Drive/PostDoc/Methods/verbInfo.csv",fileEncoding ="iso-8859-1")
}
}
result<-list(lemma=lemma.result, conjugation=conjugation)
return(result)
},warning=function(w) {
cat("Warning: ", str_trim(as.character(w)), "for token ", globalenv()$token)
cat ("\n <NA> returned\n.")
result<-list(lemma="NA", conjugation="NA")
return(result)
},
error= function(e) {
cat("Error: ", str_trim(as.character(e)), "for token ", globalenv()$token);
cat ("\n <NA> returned\n.")
result<-list(lemma="NA", conjugation="NA")
return(result)
})
}
|
48b7016a6fa0f565a082dbd255d955af76ef01bb
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.machine.learning/man/sagemaker_list_pipeline_parameters_for_execution.Rd
|
2da41f7e82f0ba419f65d4e263cd6f1f8102b2d7
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,088
|
rd
|
sagemaker_list_pipeline_parameters_for_execution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_list_pipeline_parameters_for_execution}
\alias{sagemaker_list_pipeline_parameters_for_execution}
\title{Gets a list of parameters for a pipeline execution}
\usage{
sagemaker_list_pipeline_parameters_for_execution(
PipelineExecutionArn,
NextToken = NULL,
MaxResults = NULL
)
}
\arguments{
\item{PipelineExecutionArn}{[required] The Amazon Resource Name (ARN) of the pipeline execution.}
\item{NextToken}{If the result of the previous
\code{\link[=sagemaker_list_pipeline_parameters_for_execution]{list_pipeline_parameters_for_execution}}
request was truncated, the response includes a \code{NextToken}. To retrieve
the next set of parameters, use the token in the next request.}
\item{MaxResults}{The maximum number of parameters to return in the response.}
}
\description{
Gets a list of parameters for a pipeline execution.
See \url{https://www.paws-r-sdk.com/docs/sagemaker_list_pipeline_parameters_for_execution/} for full documentation.
}
\keyword{internal}
|
e686d353a2a3ecf1203116f5c2978e9aa3ceff8f
|
660e39446906d71751b6d31f3d7597e7397a7ce8
|
/Marie-Pierre Etienne/Stats Bayesienne/Algo.R
|
648222cec6aa9c7ae36bd06e8f3114cc89c1e011
|
[] |
no_license
|
AgroSTAT2122/Elias
|
bde7bad87828a8972280e64c6d34b23cb5eb396e
|
4937a80dae6f806422fd3c1529f4a69c03c44797
|
refs/heads/main
| 2023-09-04T07:52:05.294544
| 2021-11-10T15:19:59
| 2021-11-10T15:19:59
| 422,130,624
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,170
|
r
|
Algo.R
|
# Importance-sampling demo: draw from a N(0,1) proposal and reweight with a
# Beta-like kernel z^4 (1-z)^6 on [0, 1], then resample (comments translated
# from French; code unchanged).
n <- 100
mu <- 10
var <- 4
data <- rnorm(n, mean = mu, sd = sqrt(var))
mean(data) # sample mean
mean(data^2)- mean(data)^2 # (biased) sample variance
library(ggplot2)
library(tidyverse)
data %>% as_tibble() %>% ggplot() + geom_histogram(aes(x=value, y = ..density..))
sort(data)[c(3,97)] # rough 95% interval from the 3rd and 97th order statistics
#############
# Proposal distribution
Zi <- rnorm(n)
# Compute the unnormalised importance weights:
# the weight for draw i is
# 0 if zi < 0 or zi > 1,
# otherwise numerator i = zi^4 (1-zi)^6
poids_numerateur <- Zi^4 * (1-Zi)^6 * ifelse(Zi<0 | Zi > 1, 0, 1)
poids_denom <- dnorm(Zi, mean=0,sd=1)
poids_non_norm <- poids_numerateur/poids_denom
sum(poids_non_norm) # not equal to 1, since the weights are unnormalised
poids_norm <- poids_non_norm/sum(poids_non_norm)
sum(poids_norm) # equals 1 because the weights are now normalised
hist(poids_norm)
sum(poids_norm==0) # not an effective sample of size 100: many draws have zero weight
weighted.mean(Zi,w=poids_norm)
# Resample with the weights as probabilities, to over-represent the draws
# that fall in the support of the Beta-like target
Xi <- sample(Zi, size= length(Zi), replace = TRUE, prob = poids_norm)
hist(Xi)
|
cd74837e6a6377b715280f6e50c2f09d02bc0595
|
0611363f7a2fa5bf07dd6185c9f04619f234e87f
|
/R assignment 546x.R
|
f5ed0b25708b9903b86e7f5c3780ae159a8c4dd0
|
[] |
no_license
|
mighster/Basic_Stats_Scripts
|
0b32cfae1ee892ad5af8571004e245f0cf0c25e7
|
211cb6d47b7308c23e2f84d0dcc42a4a414a4086
|
refs/heads/master
| 2020-04-04T18:41:43.469404
| 2018-11-05T07:15:22
| 2018-11-05T07:15:22
| 156,174,423
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,747
|
r
|
R assignment 546x.R
|
# Load the Fang et al. genotype calls and the SNP position table, then print
# quick structure summaries of each (interactive inspection).
# NOTE(review): setwd() with an absolute, machine-specific path breaks
# portability - prefer project-relative paths or here::here().
setwd("/Users/jmshook/Desktop/Class/EEOB546/BCB546X-Fall2017/UNIX_Assignment")
genotypes <- read.table("fang_et_al_genotypes.txt", header = T, stringsAsFactors = F)
head(genotypes)
dim(genotypes)
str(genotypes)
nrow(genotypes)
ncol(genotypes)
colnames(genotypes)
row.names(genotypes)
snps <- read.delim("snp_position.txt", header = T, stringsAsFactors = F)
head(snps)
dim(snps)
str(snps)
nrow(snps)
ncol(snps)
colnames(snps)
row.names(snps)
############
#extract only maize rows
# Maize samples are the groups ZMMIL, ZMMLR and ZMMMR. The genotypes are
# transposed so SNPs become rows, merged with the position table, and the
# unneeded metadata columns (2 and 5-15) are dropped. Ten files are then
# written per sort order: positions increasing with "?/?" for missing data,
# and positions decreasing with "-/-" for missing data.
maize <- genotypes[genotypes$Group == "ZMMIL" | genotypes$Group == "ZMMLR" | genotypes$Group == "ZMMMR",]
nrow(maize)
transposed_maize <- t(maize)
merged_maize <- merge(snps, transposed_maize, by.x = "SNP_ID", by.y = "row.names")
nrow(merged_maize)
maize_cut <- merged_maize[,-c(2,5:15)]
maize_cut_increasing_snps <- maize_cut[order(as.numeric(as.character(maize_cut$Position))),]
for (i in 1:10) {
maize_loop <- maize_cut_increasing_snps[maize_cut_increasing_snps$Chromosome == i,]
write.csv(maize_loop, sprintf("maize_chromosome_%d_increasing_snps", i), row.names = F)
}
maize_cut_decreasing_snps <- maize_cut[order(-as.numeric(as.character(maize_cut$Position))),]
maize_dashes <- maize_cut_decreasing_snps
maize_dashes[maize_dashes == "?/?"] <- "-/-"
for (i in 1:10) {
maize_loop <- maize_dashes[maize_dashes$Chromosome == i,]
write.csv(maize_loop, sprintf("maize_chromosome_%d_decreasing_snps", i), row.names = F)
}
###############
#extract only teosinte rows
# BUG FIX: the original filtered on the maize groups (ZMMIL/ZMMLR/ZMMMR),
# evidently copy-pasted from the maize section above, so "teosinte" was just
# the maize data again. Teosinte samples in fang_et_al_genotypes.txt belong
# to the groups ZMPBA, ZMPJA and ZMPIL.
teosinte <- genotypes[genotypes$Group == "ZMPBA" | genotypes$Group == "ZMPJA" | genotypes$Group == "ZMPIL",]
nrow(teosinte)
# Transpose so SNPs become rows, attach positions, drop metadata columns
transposed_teosinte <- t(teosinte)
merged_teosinte <- merge(snps, transposed_teosinte, by.x = "SNP_ID", by.y = "row.names")
nrow(merged_teosinte)
teosinte_cut <- merged_teosinte[,-c(2,5:15)]
# One file per chromosome, positions increasing, missing data as "?/?"
teosinte_cut_increasing_snps <- teosinte_cut[order(as.numeric(as.character(teosinte_cut$Position))),]
for (i in 1:10) {
  teosinte_loop <- teosinte_cut_increasing_snps[teosinte_cut_increasing_snps$Chromosome == i,]
  write.csv(teosinte_loop, sprintf("teosinte_chromosome_%d_increasing_snps", i), row.names = F)
}
# One file per chromosome, positions decreasing, missing data as "-/-"
teosinte_cut_decreasing_snps <- teosinte_cut[order(-as.numeric(as.character(teosinte_cut$Position))),]
teosinte_dashes <- teosinte_cut_decreasing_snps
teosinte_dashes[teosinte_dashes == "?/?"] <- "-/-"
for (i in 1:10) {
  teosinte_loop <- teosinte_dashes[teosinte_dashes$Chromosome == i,]
  write.csv(teosinte_loop, sprintf("teosinte_chromosome_%d_decreasing_snps", i), row.names = F)
}
#############
# Count, per SNP, how many samples have a called (non-missing) genotype,
# then plot the total number of calls for each chromosome 1-10.
transposed_genotypes <- t(genotypes)
snp_genotype_full <- merge(snps, transposed_genotypes, by.x = "SNP_ID", by.y = "row.names")
snp_genotype <- snp_genotype_full[,-c(2,5:15)]
log_snp_genotype <- snp_genotype[,-c(1:3)]
# Recode every observed genotype code as 1 and missing ("?/?") as 0.
# (Same 16 codes the original listed one gsub-style replacement at a time.)
called_codes <- c("A/C", "A/G", "A/T", "C/G", "C/T", "C/A", "G/A", "G/C",
                  "T/A", "G/T", "T/C", "T/G", "A/A", "C/C", "G/G", "T/T")
for (code in called_codes) {
  log_snp_genotype[log_snp_genotype == code] <- 1
}
log_snp_genotype[log_snp_genotype == "?/?"] <- 0
write.csv(log_snp_genotype, file = "log_snp_genotype", row.names = F)
# FIX: the original exported this table, summed each row by hand in Excel,
# and read the result back in (column 2783 of log_snp_genotype.csv).
# rowSums() does the same computation without leaving R. The stray no-op
# line `stringsAsFactors = F` was also dropped.
snp_counts <- rowSums(log_snp_genotype == "1")
snp_genotype_cut <- snp_genotype[, 1:3]
snp_genotype_counts <- cbind(snp_genotype_cut, snp_counts)
snp_genotype_counts_ordered <- snp_genotype_counts[order(as.numeric(as.character(snp_genotype_counts$Chromosome))), ]
# Total calls per chromosome 1-10; SNPs on "unknown"/"multiple" positions
# are excluded, exactly as in the original per-chromosome subsets.
snp_counts_vector <- vapply(1:10, function(chr) {
  sum(snp_genotype_counts_ordered$snp_counts[snp_genotype_counts_ordered$Chromosome == chr])
}, numeric(1))
# Keep the same one-row data frame the original built, with columns "1".."10"
snp_counts <- data.frame(t(snp_counts_vector))
colnames(snp_counts) <- 1:10
#Plot of snps per chromosome
plot(snp_counts_vector, xlab = "Chromosome", ylab = "Number of Snps", main = "Number of Snps per Chromosome")
############
# Count called (non-missing) genotypes per sample Group and plot them.
genotypes_log <- genotypes
# Recode every observed genotype code as 1 and missing ("?/?") as 0
called_codes <- c("A/C", "A/G", "A/T", "C/G", "C/T", "C/A", "G/A", "G/C",
                  "T/A", "G/T", "T/C", "T/G", "A/A", "C/C", "G/G", "T/T")
for (code in called_codes) {
  genotypes_log[genotypes_log == code] <- 1
}
genotypes_log[genotypes_log == "?/?"] <- 0
# FIX: the original hand-built 16 near-identical transposed data frames
# (genotypes_TRIPS, genotypes_ZDIPL, ...) and summed each one separately;
# a single loop over the group labels removes the duplication. Rows 4:986
# of the transposed subset are the SNP columns (rows 1-3 hold Sample_ID,
# JG_OTU and Group) - assumes the 986-column fang_et_al layout, as the
# original's hard-coded 4:986 did.
group_labels <- c("TRIPS", "ZDIPL", "ZPERR", "ZMHUE", "ZMPBA", "ZMPJA",
                  "ZMXCH", "ZMXCP", "ZMXNO", "ZMXNT", "ZMPIL", "ZMXIL",
                  "ZMMLR", "ZMMMR", "ZMMIL", "ZLUXR")
group_snp_counts <- vapply(group_labels, function(grp) {
  sum(as.numeric(t(genotypes_log[genotypes_log$Group == grp, ])[4:986, ]))
}, numeric(1))
#Plot of snps per group
plot(group_snp_counts, xaxt = "n", xlab = "Group", ylab = "Number of Snps", main = "Number of Snps per Group")
axis(1, at=1:16, labels = names(group_snp_counts))
###############
####homo/hetero
# Recode raw genotype calls: heterozygous pairs (two different alleles) -> 1,
# homozygous pairs -> 0, missing ("?/?") -> "N/A".
# Note: assigning 1/0 into a character data frame stores them as "1"/"0".
genotypes_homo_hetero <- genotypes
#make heterozygotes = 1, homozygotes = 0, other = N/A
# All 12 ordered heterozygous allele pairs are enumerated below.
genotypes_homo_hetero[genotypes_homo_hetero == "A/C"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "A/G"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "A/T"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "C/G"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "C/T"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "C/A"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "G/A"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "G/C"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "T/A"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "G/T"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "T/C"] <- 1
genotypes_homo_hetero[genotypes_homo_hetero == "T/G"] <- 1
# The 4 homozygous pairs.
genotypes_homo_hetero[genotypes_homo_hetero == "A/A"] <- 0
genotypes_homo_hetero[genotypes_homo_hetero == "C/C"] <- 0
genotypes_homo_hetero[genotypes_homo_hetero == "G/G"] <- 0
genotypes_homo_hetero[genotypes_homo_hetero == "T/T"] <- 0
genotypes_homo_hetero[genotypes_homo_hetero == "?/?"] <- "N/A"
# Split the recoded table by group (transposed: samples become columns).
homo_hetero_TRIPS <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "TRIPS",])
homo_hetero_ZDIPL <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZDIPL",])
homo_hetero_ZPERR <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZPERR",])
homo_hetero_ZLUXR <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZLUXR",])
homo_hetero_ZMHUE <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMHUE",])
homo_hetero_ZMPBA <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMPBA",])
homo_hetero_ZMPJA <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMPJA",])
homo_hetero_ZMXCH <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMXCH",])
homo_hetero_ZMXCP <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMXCP",])
homo_hetero_ZMXNO <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMXNO",])
homo_hetero_ZMXNT <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMXNT",])
homo_hetero_ZMPIL <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMPIL",])
homo_hetero_ZMXIL <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMXIL",])
homo_hetero_ZMMLR <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMMLR",])
homo_hetero_ZMMMR <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMMMR",])
homo_hetero_ZMMIL <- t(genotypes_homo_hetero[genotypes_homo_hetero$Group == "ZMMIL",])
# Frequency tables of recoded values ("0", "1", "N/A", plus any other strings)
# for each group -- inspected interactively to fill snp_proportion below.
TRIPS <- table(unlist(homo_hetero_TRIPS))
ZDIPL <- table(unlist(homo_hetero_ZDIPL))
ZPERR <- table(unlist(homo_hetero_ZPERR))
ZLUXR<- table(unlist(homo_hetero_ZLUXR))
ZMHUE<- table(unlist(homo_hetero_ZMHUE))
ZMPBA<- table(unlist(homo_hetero_ZMPBA))
ZMPJA<- table(unlist(homo_hetero_ZMPJA))
ZMXCH<- table(unlist(homo_hetero_ZMXCH))
ZMXCP<- table(unlist(homo_hetero_ZMXCP))
ZMXNO<- table(unlist(homo_hetero_ZMXNO))
ZMXNT<- table(unlist(homo_hetero_ZMXNT))
ZMPIL<- table(unlist(homo_hetero_ZMPIL))
ZMXIL<- table(unlist(homo_hetero_ZMXIL))
ZMMLR<- table(unlist(homo_hetero_ZMMLR))
ZMMMR<- table(unlist(homo_hetero_ZMMMR))
ZMMIL<- table(unlist(homo_hetero_ZMMIL))
# NOTE(review): these counts are hard-coded transcriptions of the table()
# outputs above -- fragile; they will silently go stale if the input changes.
# Presumably each column is c(homozygous, heterozygous, missing) -- confirm.
snp_proportion <- data.frame(c(14220, 678, 6728),
c(12717, 501, 1527),
c(7354,875,618),
c(14507, 472, 1732),
c(8515, 772, 543),
c(648658, 195961, 40081),
c(27393, 5394, 635),
c(56925, 13637, 3163),
c(51887, 13188, 2752),
c(5664, 973, 244),
c(3269, 146, 517),
c(37841, 731, 1731),
c(5521, 141, 236),
c(981471, 206037, 47140),
c(22854, 759, 2928),
c(259176, 1017, 24877)
)
colnames(snp_proportion) = c("TRIPS", "ZDIPL", "ZPERR", "ZLUXR",
"ZMHUE", "ZMPBA", "ZMPJA", "ZMXCH", "ZMXCP", "ZMXNO",
"ZMXNT", "ZMPIL", "ZMXIL", "ZMMLR", "ZMMMR", "ZMMIL")
#end
#end
|
db17156c73e3a98b3306488ff4cb63f0b544303c
|
609b3d66630101d7e6d9e4c0711dd170d1124cbc
|
/Week 3/3_2.R
|
deb707520f3ed8c64b15a20134d0f253e8efedcf
|
[] |
no_license
|
ankitkokkeri/Practical-Machine-Learning
|
08919cbb3ae5a6bfa93c2b45925ac33046acf51b
|
491c25ae0f38150c344012233654a2b595b083ec
|
refs/heads/master
| 2020-09-06T20:16:28.537267
| 2016-07-22T08:23:20
| 2016-07-22T08:23:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,118
|
r
|
3_2.R
|
# Week 3: bagging
# Reading ozone data; demonstrates bagged loess and caret's bag() wrapper.
library(ElemStatLearn); data(ozone, package="ElemStatLearn")
library(caret)  # required: provides bag(), bagControl() and ctreeBag used below
ozone <- ozone[order(ozone$ozone), ]
head(ozone)
# Bagged loess: fit loess on 10 bootstrap resamples and average predictions.
ll <- matrix(NA, nrow=10, ncol=155)
for(i in 1:10){
  ss <- sample(1:dim(ozone)[1], replace=TRUE)
  # Fix: the resample must be ordered by its OWN ozone values; the original
  # ordered by ozone$ozone (the unresampled data), scrambling the rows.
  ozone0 <- ozone[ss, ]; ozone0 <- ozone0[order(ozone0$ozone), ]
  loess0 <- loess(temperature ~ ozone, data=ozone0, span=0.2)
  ll[i, ] <- predict(loess0, newdata=data.frame(ozone=1:155))
}
# Grey lines: individual bootstrap fits; red line: the bagged (averaged) fit.
plot(ozone$ozone, ozone$temperature, pch=19, cex=0.5)
for(i in 1:10) {lines(1:155, ll[i, ], col="gray", lwd=2)}
lines(1:155, apply(ll, 2, mean), col="red", lwd=2)
# Train function with bagEarth, treebag, bagFDA
predictors = data.frame(ozone=ozone$ozone)
temperature = ozone$temperature
# Bag 10 conditional-inference trees via caret's generic bag() interface.
treebag <- bag(predictors, temperature, B=10,
               bagControl = bagControl(fit = ctreeBag$fit,
                                       predict = ctreeBag$pred,
                                       aggregate = ctreeBag$aggregate))
# Red: a single tree's predictions; blue: the aggregated (bagged) predictions.
plot(ozone$ozone, temperature, col='lightgrey', pch=19)
points(ozone$ozone, predict(treebag$fits[[1]]$fit, predictors), pch=19, col="red")
points(ozone$ozone, predict(treebag, predictors), pch=19, col="blue")
|
80c2a37cd7bd472a40a41ab556becfa88fb95c04
|
2f675e00427ac14f6def9ed96276df0632a2f61e
|
/man/lp.perf.Rd
|
4568f6f0797c1a12ea42c2fabcca3dfbbec15d2b
|
[] |
no_license
|
josephguillaume/indexCurveUncert
|
9b9c09b01e42c8f58661fa8084a6c8a73670d7d8
|
51d623272835d53f96dad92824470c5c6afaf714
|
refs/heads/master
| 2021-01-01T15:31:35.370253
| 2014-06-24T12:15:36
| 2014-06-24T12:15:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,001
|
rd
|
lp.perf.Rd
|
\name{lp.perf}
\alias{lp.perf}
\title{
Extreme index curves by constrained linear programming
}
\description{
Calculate minimum or maximum difference in suitability between two
scenarios by varying ordinates of breakpoints of a piece-wise linear index
curve. Uses linear programming within constraints expressing knowledge
about behaviour of the index curve.
}
\usage{
lp.perf(xs, ev, bounds = NULL, dir = "min", constr = list(), dur = NULL)
}
\arguments{
\item{xs}{
Abscissa of break-points defining the piece-wise linear index curve
}
\item{ev}{
List of two numeric vectors with data observations for each
scenario. \code{NULL} if one of the scenarios has no observations.
}
\item{bounds}{
List with numeric vectors named \code{lower} and \code{upper}, of
same length as \code{xs}, defining upper and lower limits of the
break-point abcissa. Usually produced by a \code{getPrefConstraints}
function, e.g. \code{\link{getPrefConstraintsLists}}
}
\item{dir}{
"min" to produce minimum difference in suitability, "max" to produce
maximum difference in suitability
}
\item{constr}{
\code{data.frame} with four columns \code{a},\code{b},\code{status} ("<="
or ">="), and
optionally \code{min.gap} (otherwise it defaults to zero). Each
row is interpreted as: breakpoints number \code{a} and \code{b}
satisfy the constraint specified by \code{status}: \code{a <= b -
min.gap} or \code{a >= b + min.gap}. Usually produced by a \code{getPrefConstraints}
function, e.g. \code{\link{getPrefConstraintsLists}}
}
\item{dur}{
List of two numeric vectors of same length as \code{ev} with weights
for each observation of each scenario. A common use is to specify
duration of events.
}
}
\details{
The code constructs a linear program of the form:
\code{A0+A1*y1+A2*y2+...+Ai*yi}, with linear constraints on \code{yi} defined
by \code{bounds} and \code{constr}.
\code{yi} are the ordinates of the breakpoints of the piece-wise
linear index curve of abscissa \code{xs}.
\code{Ai} are calculated from the values of
\code{xs}, \code{ev} and \code{dur}.
The linear program is then solved using the \code{lpSolveAPI} package.
}
\value{
List with elements
\item{obj}{Difference in suitability of second scenario - first
scenario}
\item{ys}{Values of the ordinates of the breakpoints of abscissa
\code{xs}}
\item{duals}{Dual values of the \code{ys}, as reported by \code{lpSolveAPI}}
}
\author{
Joseph Guillaume
}
\seealso{
\code{\link{envindex.diff.qp}} will generally be used instead as a
wrapper around this function.
}
\examples{
## Index curve with breakpoints at 0,40,100,300
## Two scenarios each with three observations
## The output (suitability of the second scenario - suitability of the
## first scenario) will be maximised
## Bounds are specified
## Single constraint specifies that the ordinate of the 4th breakpoint
## (x=300) must be less than or equal to the ordinate of the 3rd breakpoint (100).
## The suitabilities of every observation will be multiplied by 100.
lp.perf(xs=c(0, 40, 100, 300),
ev=list(c(5,60,170), c(4,70,180)),
bounds=list(
lower = c(0.8, 0.8, 0.8, 0),
upper = c(1, 1, 1,0.2)),
dir="max",
constr=data.frame(a = 4, b = 3, status = "<=", min.gap = 0),
dur=list(c(100,100,100), c(100,100,100))
)
## One observation of data for one scenario, none for the other
## The output will be minimised. In this case, as the
## first scenario is NULL, this amounts to maximising the
## suitability of the second scenario.
## The suitability of the observation in the second scenario is multiplied by 100
lp.perf(xs=c(0, 40, 100, 300),
ev=list(NULL, 100),
bounds=list(
lower = c(0.8, 0.8, 0.8, 0),
upper = c(1, 1, 1,0.2)),
dir="min",
constr=data.frame(a = 4, b = 3, status = "<=", min.gap = 0),
dur=list(numeric(0), 100)
)
}
|
50af6fc0843a599c64f2bd55086f052b43583c1a
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/statespacer/man/SimSmoother.Rd
|
b33eec3dd1e4b7f2abe87034e1451c7976ef73f9
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,584
|
rd
|
SimSmoother.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimSmoother.R
\name{SimSmoother}
\alias{SimSmoother}
\title{Generating Random Samples using the Simulation Smoother}
\usage{
SimSmoother(object, nsim = 1, components = TRUE)
}
\arguments{
\item{object}{A statespacer object as returned by \code{\link{statespacer}}.}
\item{nsim}{Number of random samples to draw. Defaults to \code{1}.}
\item{components}{Boolean indicating whether the components of
the model should be extracted in each of the random samples.}
}
\value{
A list containing the simulated state parameters and disturbances.
In addition, it returns the components as specified by the State Space model
if \code{components = TRUE}. Each of the objects are arrays, where the first
dimension equals the number of time points, the second dimension the number
of state parameters, disturbances, or dependent variables, and the third
dimension equals the number of random samples \code{nsim}.
}
\description{
Draws random samples of the specified model conditional
on the observed data.
}
\examples{
# Fits a local level model for the Nile data
library(datasets)
y <- matrix(Nile)
fit <- statespacer(initial = 10, y = y, local_level_ind = TRUE)
# Obtain random sample using the fitted model
sim <- SimSmoother(fit, nsim = 1, components = TRUE)
# Plot the simulated level against the smoothed level of the original data
plot(sim$level[, 1, 1], type = 'p')
lines(fit$smoothed$level, type = 'l')
}
\author{
Dylan Beijers, \email{dylanbeijers@gmail.com}
}
|
d99d199c1a93ef9bb5eb058c41915b1d73ce179f
|
e0b764d823e9926777e74d9b944108ad44d140bc
|
/COVID19/ui.R
|
69699768e2d0fb2b594a0b9c1ba7bc0cd4c5cac6
|
[] |
no_license
|
Tianyu-Wang/Developing-Data-Products-Course-Project
|
f1ecc964aff060f968f5981ef635173b20eff572
|
e40863f7490b0384307ad8a37cd9eabf4c69ea66
|
refs/heads/main
| 2023-01-21T23:02:49.862323
| 2020-11-28T11:22:07
| 2020-11-28T11:22:07
| 313,687,039
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,104
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(leaflet)
# Metric choices for the dropdown: display label -> column name in the data.
vars <- c(
  "Confirmed Cases" = "Confirmed",
  "Fatalities" = "Deaths",
  "Incident Rate" = "Incident_Rate"
)
# Define UI for application that draws a map with COVID-19 data
shinyUI(fluidPage(
  # Application title
  titlePanel("COVID-19 in the US Data (15.11.2020)"),
  # Sidebar with a slider input for the value range of confirmed cases
  sidebarLayout(
    sidebarPanel(
      h4("Description"),
      p("This Shiny app visualizes COVID-19 data in the US on an interactive map. It shows one of the following three metrics per state:"),
      p("- Confirmed Cases"),
      p("- Fatalities"),
      p("- Incident Rate (per 100k Persons)"),
      p("Circles are sized and colored by value of the chosen metric of interest. The app also allows you to filter for only the top N states with the highest values of the chosen metric (default: top 20 states with highest confirmed case count)."),
      p("(Data from November 15th 2020, source: ", a("Johns Hopkins University)", href="https://github.com/CSSEGISandData/COVID-19")),
      br(),
      h4("How to Use"),
      p("Use the ", strong("dropdown menu"), "to choose the metric of interest to be shown on the map:"),
      # input$metric: data column to display (see `vars` above).
      selectInput("metric", NULL, vars, selected = "Confirmed"),
      p("Use the ", strong("slider"), "to display only the top N states by chosen metric:"),
      # input$top_n: show only the top N states by the chosen metric.
      sliderInput("top_n",
                  NULL,
                  min = 0,
                  max = 60,
                  step = 1,
                  value = 20)
    ),
    # Show a plot of the generated map
    mainPanel(
      # Rendered server-side as output$covidMap (leaflet map).
      leafletOutput("covidMap")
    )
  )
))
|
c5b2f21696a4b7ba7a7fdd169cffc764e7227931
|
5e592a02894f5ef1dfa484f1335b7e9967b0a94d
|
/assignment05/codes/lcr_tests.R
|
64314250fd391b7f851da2907455d361105794ed
|
[] |
no_license
|
oscaralejandro1907/probability-in-R
|
4293c68bf55357801b7a93ab97d6e1b0b8332cd3
|
cdd174e2866e2a9b69c869f432901b121e550d98
|
refs/heads/master
| 2023-05-10T13:56:47.203301
| 2021-06-21T17:41:03
| 2021-06-21T17:41:03
| 292,079,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,406
|
r
|
lcr_tests.R
|
# Title     : Linear congruential generator tests
# Objective : generate pseudo-random uniforms, transform them to normals
#             (Box-Muller) and test uniformity/normality
# Created by: oscarhernandezlopez
# Created on: 05/10/20
require(swfscMisc)
require(ggplot2)
# Simulation parameters: sample size, target normal distribution, LCG seed.
n <- 1000
mu <- 10
std_dev <- 2
seed <- 27
#' Linear congruential pseudo-random number generator.
#'
#' @param n    Number of values to generate.
#' @param seed Starting state of the generator.
#' @param a,c,m Multiplier, increment and modulus; the defaults reproduce the
#'   generator that was previously hard-coded, so existing calls are unchanged.
#' @return Numeric vector of n values in [0, 1], each state scaled by 1/(m-1).
linearCongruentialGen <- function(n, seed, a = 2505, c = 5005, m = 15145) {
  gen_data <- numeric(n)  # preallocate instead of growing with c() in a loop
  x <- seed
  for (i in seq_len(n)) {
    x <- (a * x + c) %% m
    gen_data[i] <- x
  }
  gen_data / (m - 1)  # scale states into [0, 1]
}
# Generate n pseudo-random uniforms from the LCG, then append a uniformity
# test of their histogram (swfscMisc::uniform.test) to unif_test.txt.
lcr <- linearCongruentialGen(n,seed)
sink('unif_test.txt',append = TRUE)
uniform.test(hist(lcr))
sink(file = NULL)  # restore normal console output
#' Box-Muller transform of one pair of uniform deviates into two normals.
#'
#' @param mu,sigma Target mean and standard deviation.
#' @param list Numeric vector of uniform(0,1) deviates.
#' @param idx Index of the first deviate of the pair; defaults to `i`, which
#'   (by lexical scoping) resolves to the global loop index -- this preserves
#'   the original behavior, where the body read the global `i` directly.
#' @return Two normal deviates c(z0, z1) scaled to N(mu, sigma^2).
gaussian_lcr <- function (mu, sigma, list, idx = i) {
  radius <- sqrt(-2 * log(list[idx]))      # shared Box-Muller radius term
  z0 <- radius * cos(2 * pi * list[idx + 1])
  z1 <- radius * sin(2 * pi * list[idx + 1])
  sigma * c(z0, z1) + mu
}
dat_z0_lcr <- numeric()
#dat_z1_lcr <- numeric()
# Accumulate Box-Muller normal deviates; each call consumes the pair
# (lcr[i], lcr[i+1]), so the index must run from 1 to n-1.
# Fix: the original `for (i in 1:n-1)` is (1:n)-1 = 0:(n-1) by operator
# precedence, so the first iteration indexed position 0 (an empty subset).
for (i in seq_len(n - 1)){
  dat_z0_lcr <- c(dat_z0_lcr,gaussian_lcr(mu,std_dev,lcr))
  #dat_z1_lcr <- c(dat_z1_lcr,gaussian_lcr(mu,std_dev,lcr))
}
df_lcr <- as.data.frame(dat_z0_lcr)
# Histogram with density plot (Freedman-Diaconis bin width), saved to PNG.
png('hist2.png',width = 1366, height = 768,res = 200)
breaks <- pretty(range(dat_z0_lcr), n = nclass.FD(dat_z0_lcr), min.n = 1)
ggplot(df_lcr, aes(x=dat_z0_lcr)) +
  geom_histogram(aes(y=..density..), colour="black", fill="white",binwidth = breaks[2]-breaks[1])+
  geom_density(alpha=.2, fill="#FF6666")+ theme_bw()
dev.off()
# Normality check of the generated deviates.
shapiro.test(dat_z0_lcr)
|
e5c11b8f7c1450e818631f27ffefc02cce83d0ed
|
080619940b88c49bf276def7e056a153bd8b4128
|
/plot3.R
|
9977574f85e8805c0dac8248468651fcca0d4e15
|
[] |
no_license
|
zbi441/ExData_Plotting1
|
98d24804c364bca5f12173bfe5ffba0774eeda2e
|
fd70056d26f9b86fe296a44ed9ebe997de067340
|
refs/heads/master
| 2020-05-29T08:47:09.863828
| 2016-09-25T04:09:27
| 2016-09-25T04:09:27
| 69,142,732
| 0
| 0
| null | 2016-09-25T03:59:56
| 2016-09-25T03:59:56
| null |
UTF-8
|
R
| false
| false
| 649
|
r
|
plot3.R
|
##### Plot 3: energy sub-metering for 1-2 Feb 2007
# Read the full dataset; "?" marks missing values in this file.
a=read.csv("household_power_consumption.csv",header=TRUE,sep=";",na.strings="?")
b=a[a$Date%in%c("1/2/2007","2/2/2007"),]
##file reading
# Fix: paste with sep=" " -- the strptime format "%d/%m/%Y %H:%M:%S" expects a
# space between date and time; the original sep="" produced all-NA timestamps.
x=paste(b$Date,b$Time,sep=" ")
d=strptime(x,"%d/%m/%Y %H:%M:%S")
# Convert via as.character first: if the columns were read as factors,
# as.numeric() alone would return factor level codes, not the measured values.
sub1=as.numeric(as.character(b$Sub_metering_1))
sub2=as.numeric(as.character(b$Sub_metering_2))
sub3=as.numeric(as.character(b$Sub_metering_3))
## generate x and y values
png("plot3.png", width=480, height=480)
plot(d,sub1, type="l", xlab="", ylab="Energy Submetering")
lines(d,sub2,type="l",col="red")
lines(d,sub3,type="l", col="blue")
# Fix: legend labels previously read "Sub_meeting_*" instead of "Sub_metering_*".
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=1, lwd=2.5, col=c("black","red","blue"))
dev.off()
## generate plot
|
9370a7e8d4b56d0d6231e280b2b301ed0c95e5c1
|
1963ea1341f60c40055035cb7609fb1e140bfef2
|
/flowtype_flowcap_pipeline-master/201704/_funcAlice.R
|
33b3b59ff348b9cd791b9740e7884bad8e94ddf1
|
[] |
no_license
|
aya49/flowGraph_experiments
|
86359a457c0049790b892b91d7713ff7e65b27b3
|
2ef4e0b53f425a090b2ee6c1010d91e675a893de
|
refs/heads/master
| 2023-04-27T04:46:53.195705
| 2023-04-15T22:31:32
| 2023-04-15T22:31:32
| 178,804,954
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54,320
|
r
|
_funcAlice.R
|
#Written 20151026 by Alice Yue
#Last modified 20151026
#Note: All parameters are case sensative!
# Prints out the time since start_time. Used for optimizing code.
# Print the elapsed time since `start_time` as HH:MM:SS (used to time code).
TimeOutput <- function(start_time) {
  elapsed_secs <- difftime(Sys.time(), as.POSIXct(start_time), units="secs")
  # Reinterpret the elapsed seconds as a GMT timestamp so that formatting
  # with "%H:%M:%S" shows only hours/minutes/seconds, with no date or
  # time-zone artifacts.
  cat(format(.POSIXct(elapsed_secs, tz="GMT"), "%H:%M:%S"))
}
# TRUE where x lies strictly between rng[1] and rng[2], in either order.
`%between%` <- function(x, rng) {
  lo <- pmin(rng[1], rng[2])
  hi <- pmax(rng[1], rng[2])
  x > lo & x < hi
}
# Indices of the columns of m whose entries are all <= thres.
colIndBelow <- function(m, thres) {
  no_exceedance <- colSums(m > thres) == 0
  which(no_exceedance)
}
# Build an nrow-by-ncol matrix of Exp(rate = 0.1) random draws.
randomMatrix <- function(nrow, ncol) {
  draws <- rexp(ncol*nrow, rate=.1)
  matrix(draws, ncol=ncol)
}
# Input : package name (character)
# Output: TRUE if the package is installed, FALSE otherwise
is.installed <- function(pkg){
  pkg %in% installed.packages()[, 1]
}
# Count occurrences of the literal string `char` inside `str`.
# Fix: gsub() previously interpreted `char` as a regular expression, so
# needles like "." matched everything and "+" raised an error; fixed = TRUE
# matches literally. Dividing by nchar(char) also generalizes the count to
# multi-character needles (for single characters the result is unchanged).
charOccurences <- function(char, str) {
  str2 <- gsub(char, "", str, fixed = TRUE)
  (nchar(str) - nchar(str2)) / nchar(char)
}
#Input: file paths of matrices with column names you want to compare
#Output: list of unique column names; last vector in list corresponds to which filepath the unique column names belong to
colNamesSame <- function(pathList) {
  count <- 1
  colNameSame <- NULL
  # Seed with the column names of the first file.
  colNameSame[[1]] <- colnames(read.csv(pathList[1]))
  colNameSameIndex <- c(1)
  for (i in 2:length(pathList)) {
    s <- colnames(read.csv(pathList[i]))
    # Record this file's column-name vector only if it has not been seen yet
    # (list membership check: any existing element identical to s?).
    if(!TRUE %in% (colNameSame%in%list(s)) ) {
      count <- count+1
      colNameSameIndex[count] <- i
      colNameSame[[count]] <- s
    }
  }
  # Last element: which file each unique column-name vector came from.
  colNameSame[[count+1]] <- colNameSameIndex
  return(colNameSame)
}
#Input: vector of file paths and a file extension (without the dot)
#Output: vector of base file names with directories and the extension removed
# Fix: the original pattern paste(".", ext) was an unanchored regex in which
# "." matched any character, so e.g. "Xfcs" anywhere in a name was removed.
# The pattern is now a literal dot plus the extension, anchored to the end,
# still matched case-insensitively.
fileNames <- function(pathList, ext="fcs") {
  base <- basename(pathList)
  sub(paste0("\\.", ext, "$"), "", base, ignore.case=TRUE)
}
# Input : vector of file paths
# Output: immediate parent-folder name of each path (the Genotype folder)
folderNames <- function(pathList) {
  vapply(pathList, function(p) {
    parts <- strsplit(p, split="/")[[1]]
    # Second-to-last path component is the enclosing folder.
    parts[length(parts)-1]
  }, character(1), USE.NAMES = FALSE)
}
# Load each .Rdata file in pathList into an environment, keyed by its
# position ("1", "2", ...) in pathList. Progress is printed as it loads.
# NOTE: get(load(...)) returns the first object stored in each file.
loadFrames <- function(pathList) {
  myFrames <- new.env()
  cat("loading", length(pathList), "files:")
  for (i in 1:length(pathList)) {
    myFrames[[as.character(i)]] <- get(load(pathList[i]))
    cat(" ", i, sep="")
  }
  return(myFrames)
}
# input : a phenotype string with all markers, e.g. "CD4+CD8-"
# output: character vector of the marker names, e.g. c("CD4", "CD8")
getMarkers <- function(phenotype) {
  pieces <- strsplit(as.character(phenotype), "[-+]")
  unlist(pieces)
}
# Given the number of markers m, return the sizes of the full phenotype
# hierarchy: n = total nodes, npl = nodes per level, e = total edges,
# epl = edges per level. Level i has choose(m,i)*2^i nodes, each with i edges.
getGraphInfo <- function(m) {
  lvl <- seq_len(m)
  npl <- choose(m, lvl) * (2^lvl)
  epl <- npl * lvl
  n <- 3^m
  e <- n * 2 * m / 3
  list(n = n, npl = npl, e = e, epl = epl)
}
## Input: PhenoMeta (data frame with $phenolevel and $phenocode columns)
## Output: list(leaves, finalLevel) -- row indices of leaf phenotypes (no
##   children present in phenoMeta) and of deepest-level phenotypes.
getleaves <- function (phenoMeta, no_cores) {
  require(foreach)
  require(doMC)
  registerDoMC(no_cores)  # parallel backend for %dopar%
  finalLevel = which(phenoMeta$phenolevel==max(phenoMeta$phenolevel))
  notFinalLevel = setdiff(1:nrow(phenoMeta), finalLevel)
  # NOTE(review): i runs over 1:length(notFinalLevel) but phenocode[i] indexes
  # phenoMeta rows directly; this is only consistent if all non-final-level
  # rows come first in phenoMeta -- confirm the row ordering.
  nflleaves = foreach (i=1:length(notFinalLevel), .combine="c") %dopar% {
    pheni = unlist(strsplit(phenoMeta$phenocode[i],""))
    phenind = which(pheni!="0")
    zeroind = setdiff(1:length(pheni),phenind) #must have zeros because notFinalLevel
    # Enumerate every child phenocode: each unspecified marker ("0") set to
    # "1" (negative) and "2" (positive).
    childphenocode = as.vector(sapply(zeroind, function(x) { pi1=pi2=pheni; pi1[x]="1"; pi2[x] = "2"; return(c(paste(pi1,collapse=""),paste(pi2,collapse=""))) } ))
    childrenind = match(childphenocode, phenoMeta$phenocode)
    # Leaf: none of the enumerated children exist in phenoMeta.
    if (all(is.na(childrenind))) return(i)
    return(NA)
  }
  leaves = notFinalLevel[nflleaves[!is.na(nflleaves)]]
  return(list(leaves=leaves, finalLevel=finalLevel))
}
## Input: phenoMeta ($phenolevel, $phenotype, $phenocode)
## Output: list with, for each non-leaf phenotype, its children's row indices
##   split into negative/positive lists (phenoChild) and the subset of
##   children that have a +/- counterpart pair (phenoChildpn), plus the
##   corresponding row indices (_ind).
getphenoChild <- function (phenoMeta, no_cores) {
  require(foreach)
  require(doMC)
  registerDoMC(no_cores)  # parallel backend for %dopar%
  notFinalLevel = which(phenoMeta$phenolevel!=max(phenoMeta$phenolevel))
  ppcc = foreach (i=1:nrow(phenoMeta)) %dopar% {
    # Deepest-level phenotypes cannot have children.
    # NOTE(review): this list names the element "pcpn" while the rest of the
    # code reads "$pcnp"; harmless only because both are NULL here.
    if (!sum(notFinalLevel%in%i)>0) return(list(pc=NULL,pcpn=NULL))
    pheni = unlist(strsplit(phenoMeta$phenocode[i],""))
    phenind = which(pheni!="0")
    zeroind = setdiff(1:length(pheni),phenind)
    # Enumerate candidate children: each unspecified marker set to "1"/"2".
    childphenocode = as.vector(sapply(zeroind, function(x) { pi1=pi2=pheni; pi1[x]="1"; pi2[x] = "2"; return(c(paste(pi1,collapse=""),paste(pi2,collapse=""))) } ))
    childrenind = match(childphenocode, phenoMeta$phenocode)
    childrenind = childrenind[!is.na(childrenind)]
    if (length(childrenind)>0) { #if non-leaf node; this condition is only used when full hierarchy not available; otherwise, can take this out
      childsum = sapply(phenoMeta$phenocode[childrenind], function(x) sum(as.numeric(unlist(strsplit(x,"")))) ) #sum children phenocodes up, larger ones have +, smaller ones have - in new marker, split them up
      childplus = which(childsum==max(childsum))
      #split between positive and negative
      # phenoChild[[i]] = list()
      # phenoChild[[i]][[1]] = childrenind[-childplus]
      # phenoChild[[i]][[2]] = childrenind[childplus]
      pc = list()
      pc[[1]] = childrenind[-childplus]  # children adding a "-" marker
      pc[[2]] = childrenind[childplus]   # children adding a "+" marker
      ## if there are missing counterparts
      # Strip +/- suffixes so counterpart children can be matched by marker set.
      nopm1 = gsub("[+-]","",phenoMeta$phenotype[pc[[1]]])
      nopm2 = gsub("[+-]","",phenoMeta$phenotype[pc[[2]]])
      # unique1 = which(nopm1%in%setdiff(nopm1,nopm2))
      # unique2 = which(nopm2%in%setdiff(nopm2,nopm1))
      # if (length(unique1)>0) {
      # }
      # if (length(unique2)>0) {
      # }
      # Keep only children whose +/- counterpart also exists.
      negposintersect1 = which(nopm1%in%intersect(nopm1,nopm2))
      negposintersect2 = which(nopm2%in%intersect(nopm2,nopm1))
      if (length(negposintersect1)>0) {
        # phenoChildpn[[i]] = list()
        # phenoChildpn[[i]][[1]] = phenoChild[[i]][[1]][negposintersect1]
        # phenoChildpn[[i]][[2]] = phenoChild[[i]][[2]][negposintersect2]
        pcnp = list()
        pcnp[[1]] = pc[[1]][negposintersect1]
        pcnp[[2]] = pc[[2]][negposintersect2]
        return(list(pc=pc, pcnp=pcnp))
      } else {
        return(list(pc=pc, pcnp=NULL))
      }
    } else {
      return(list(pc=NULL, pcnp=NULL))
    }
  }
  # Collect the per-row results into sparse lists indexed by phenoMeta row.
  phenoChild = list()
  phenoChildpn = list()
  for (i in 1:length(ppcc)) {
    if (!is.null(ppcc[[i]]$pc)) phenoChild[[i]] = ppcc[[i]]$pc
    # NOTE(review): this condition checks $pc, not $pcnp -- possibly intended
    # to be $pcnp; as written, rows with pc but NULL pcnp get a NULL slot.
    if (!is.null(ppcc[[i]]$pc)) phenoChildpn[[i]] = ppcc[[i]]$pcnp
  }
  phenoChild_ind = which(vapply(phenoChild, Negate(is.null), NA))
  phenoChildpn_ind = which(vapply(phenoChildpn, Negate(is.null), NA))
  phenoChild = phenoChild[phenoChild_ind]
  phenoChildpn = phenoChildpn[phenoChildpn_ind]
  names(phenoChild) = phenoMeta$phenotype[phenoChild_ind]
  names(phenoChildpn) = phenoMeta$phenotype[phenoChildpn_ind]
  return(list(phenoChild=phenoChild, phenoChild_ind=phenoChild_ind, phenoChildpn=phenoChildpn, phenoChildpn_ind=phenoChildpn_ind))
}
## Invert child maps into parent maps.
## Input : phenoChild / phenoChildpn (children per parent, as produced by
##   getphenoChild) and their index vectors; no_cores for the parallel backend.
## Output: list(phenoParent, phenoParent_ind, phenoParentpn, phenoParentpn_ind)
##   -- for each child row index, the row indices of its parents.
getphenoParent_phenoChild <- function(phenoChild,phenoChild_ind, phenoChildpn=NULL, phenoChildpn_ind=NULL, no_cores=1) {
  require(foreach)
  require(doMC)
  registerDoMC(no_cores)
  phenoParent = NULL
  phenoParentpn = NULL
  phenoParent_ind = NULL
  phenoParentpn_ind = NULL
  for(i in 1:length(phenoChild)) {
    parind = phenoChild_ind[i]
    children = unlist(phenoChild[[i]])
    childrennp = NULL
    childrenboth = NULL
    childrenonly = NULL
    # Children that also appear in the +/- counterpart map for this parent.
    if (sum(phenoChildpn_ind==phenoChild_ind[i])>0) childrennp = unlist(phenoChildpn[[which(phenoChildpn_ind==phenoChild_ind[i])]])
    if (length(childrennp)>0) {
      childrenboth = intersect(children, childrennp)  # go into both maps
      childrenonly = setdiff(children, childrennp)    # go into phenoParent only
      if (length(childrenboth)>0) {
        for (ind in childrenboth) {
          if (!(sum(phenoParent_ind==ind)>0)) {
            phenoParent_ind = append(phenoParent_ind, ind)
            phenoParent[[ind]] = c(parind)
          } else {
            a = append(phenoParent[[ind]], parind)
            phenoParent[[ind]] = a
          }
          if (!(sum(phenoParentpn_ind==ind)>0)) {
            # Fix: this previously appended to phenoParent_ind
            # (phenoParentpn_ind = append(phenoParent_ind, ind)), discarding
            # all previously accumulated phenoParentpn indices.
            phenoParentpn_ind = append(phenoParentpn_ind, ind)
            phenoParentpn[[ind]] = c(parind)
          } else {
            phenoParentpn[[ind]] = append(phenoParentpn[[ind]], parind)
          }
        }
      }
      if (length(childrenonly)>0) {
        for (ind in childrenonly) {
          if (!(sum(phenoParent_ind==ind)>0)) {
            phenoParent_ind = append(phenoParent_ind, ind)
            phenoParent[[ind]] = c(parind)
          } else {
            phenoParent[[ind]] = append(phenoParent[[ind]], parind)
          }
        }
      }
    } else {
      # No counterpart information: record parentage in phenoParent only.
      for (ind in children) {
        if (!(sum(phenoParent_ind==ind)>0)) {
          phenoParent_ind = append(phenoParent_ind, ind)
          phenoParent[[ind]] = c(parind)
        } else {
          phenoParent[[ind]] = append(phenoParent[[ind]], parind)
        }
      }
    }
  }
  # NOTE(review): the subsetting below filters by non-NULL entries of
  # phenoChild, not of the parent lists themselves -- kept as in the original;
  # confirm this is intended before changing.
  return(list(phenoParent=phenoParent[which(vapply(phenoChild, Negate(is.null), NA))], phenoParent_ind=sort(phenoParent_ind), phenoParentpn=phenoParentpn[which(vapply(phenoChild, Negate(is.null), NA))], phenoParentpn_ind=sort(phenoParentpn_ind)))
}
## Build parent maps directly from phenoMeta.
## Input : phenoMeta ($phenocode, $phenotype), phenoChildpn and its index
##   vector (note: the parameter name `phenohChildpn_ind` carries a historical
##   typo but is kept for call compatibility), no_cores for the backend.
## Output: list(phenoParent, phenoParent_ind, phenoParentpn, phenoParentpn_ind)
getphenoParent <- function(phenoMeta, phenoChildpn, phenohChildpn_ind, no_cores) {
  require(foreach)
  require(doMC)
  registerDoMC(no_cores)
  phenoParent = foreach (i=(1:nrow(phenoMeta))) %dopar% {
    pheni = unlist(strsplit(phenoMeta$phenocode[i],""))
    phenind = which(pheni!="0")
    #enumerate all possible parents (each specified marker reset to "0")
    parentphenocode = sapply(phenind, function(x) { pi=pheni; pi[x]="0"; return(paste(pi,collapse="")) } )
    parentind = match(parentphenocode, phenoMeta$phenocode)
    parentind = parentind[!is.na(parentind)]
    if (length(parentind)>0) return(parentind)
    return(NULL)
  }
  phenoParent_ind = which(vapply(phenoParent, Negate(is.null), NA))
  phenoParent = phenoParent[phenoParent_ind]
  names(phenoParent) = phenoMeta$phenotype[phenoParent_ind]
  #delete parents from child whom doesn't have a twin under that parent.
  phenoParentpn = phenoParent
  phenoParentpn_ind = phenoParent_ind
  delind = c()
  for (i in 1:length(phenoParent)) {
    child = phenoParent_ind[i]
    parents = phenoParent[[i]]
    delparents = c()
    for (j in 1:length(phenoParent[[i]])) {
      parent = phenoParent[[i]][j]
      # Fix: the body previously referenced `phenoChildpn_ind`, a name that is
      # not a parameter of this function (the parameter is misspelled
      # `phenohChildpn_ind`), so it silently resolved to a global variable
      # if one existed, or errored otherwise. Use the parameter.
      ind = (phenohChildpn_ind==parent)
      if (sum(ind)>0) {
        if (!sum(unlist(phenoChildpn[[which(ind)]])==child)>0) delparents = append(delparents, j)
      }
    }
    if (length(delparents)>0) parents = parents[-delparents]
    if (!length(parents)>0) {
      delind = append(delind, i)  # child has no twinned parent left
    } else {
      phenoParentpn[[i]] = parents
    }
  }
  if (length(delind)>0) {
    phenoParentpn[delind] = NULL
    phenoParentpn_ind = phenoParentpn_ind[-delind]
  }
  return(list(phenoParent=phenoParent, phenoParent_ind=phenoParent_ind, phenoParentpn=phenoParentpn, phenoParentpn_ind=phenoParentpn_ind))
}
## Input: phen -- vector of phenotype strings (e.g. "CD4+CD8-") OR phenocodes
## Output: list with phenotype (if requested), phenoCode and phenoLevel
##   (number of markers mentioned per phenotype)
getPhen <- function(phen, markers=NULL, phenotype=TRUE) {
  require(flowType)
  require(stringr)
  # Phenotype strings carry "+"/"-" marker suffixes; phenocodes do not.
  # Fixes: "+" alone is not a valid regex (grep("+", ...) errors), and
  # all-negative phenotypes (only "-") were previously misclassified as codes.
  if (any(grepl("[+-]", phen))) { # phen holds phenotypes; build phenocodes
    # Derive markers from the longest (most fully specified) phenotype.
    if (is.null(markers)) markers = unlist(strsplit(phen[which.max(nchar(phen))],"[+-]"))
    phenoCode = unlist(lapply(phen, function(x){return( encodePhenotype(x, markers) )}))
  } else { # phen holds phenocodes; decode back to phenotype strings
    # (was `else if (phenotype)`: with phenotype=FALSE nothing was assigned
    # and the return below failed on the missing phenoCode)
    if (is.null(markers)) {
      cat("input markers")
      return(NULL)
    }
    phenoCode = phen
    # Fix: original iterated over the undefined name `pheno`;
    # the codes to decode are in phenoCode.
    phen = unlist(lapply(phenoCode, function(x){return( decodePhenotype(x, markers, rep(2,length(markers))) )}))
  }
  # Phenolevel = number of markers mentioned in the phenotype string.
  phenolevel = str_count(phen, "[+-]")
  if (phenotype) return(list(phenotype=phen,phenoCode=phenoCode,phenoLevel=phenolevel))
  return(list(phenoCode=phenoCode,phenoLevel=phenolevel))
}
## Jensen-Shannon divergence (half of the symmetrised Kullback-Leibler
## divergence). p,q: two distributions (list=FALSE), or two lists of
## distributions whose divergences are averaged (list=TRUE, the default).
# http://stackoverflow.com/questions/11226627/jensen-shannon-divergence-in-r
jsd <- function(p,q, list=TRUE) { #p,q are two distributions, two lists of distributions if to avg over
  # Fix: JS was only initialised inside the list branch, so list=FALSE
  # raised "object 'JS' not found".
  JS = 0
  if (list) {
    for (i in 1:length(p)) {
      m = (p[[i]] + q[[i]])/2
      JS = ((sum(p[[i]]*log(p[[i]] / m)) + sum(q[[i]]*log(q[[i]] / m))) /2) + JS
    }
    JS = JS/length(p)
  } else {
    m = (p + q)/2
    JS = ((sum(p * log(p / m)) + sum(q * log(q / m))) /2) + JS
  }
  return(JS)
}
#Output: vector of 3iTcell gating strategy phenotypes
# Hard-coded lookup of the manual gating strategy: each slot is a phenotype
# string; the trailing comment is the gate's position in the gating tree.
# Slots 1-2 are NA and slot 3 is "" (placeholders); kept as-is because
# downstream code may index by position -- do not renumber.
GatingStrategyPhenotypes <- function(){
  GatingStrategyPop <- c(NA, NA, "")
  GatingStrategyPop[4] <- "TCRb-CD8a-CD161+TCRd-" #2.1
  GatingStrategyPop[5] <- "CD44+CD62L-TCRb-CD8a-CD161+TCRd-" #2.11
  GatingStrategyPop[6] <- "CD44+CD62L+TCRb-CD8a-CD161+TCRd-" #2.12
  GatingStrategyPop[7] <- "CD44+TCRb-KLRG1+CD8a-CD161+TCRd-" #2.13
  GatingStrategyPop[8] <- "TCRb-TCRd+" #1
  GatingStrategyPop[9] <- "TCRb-KLRG1+GITR-TCRd+" #1.2
  GatingStrategyPop[10] <- "CD44+CD62L-TCRb-TCRd+" #1.3
  GatingStrategyPop[11] <- "CD44+CD62L+TCRb-TCRd+" #1.4
  GatingStrategyPop[12] <- "TCRb+CD161+CD4-TCRd-" #3.1
  GatingStrategyPop[14] <- "CD44+CD62L-TCRb+CD161+CD4-TCRd-" #3.11
  GatingStrategyPop[15] <- "CD44+CD62L+TCRb+CD161+CD4-TCRd-" #3.12
  GatingStrategyPop[16] <- "TCRb+KLRG1+CD161+CD4-TCRd-" #3.13
  GatingStrategyPop[13] <- "TCRb+CD161+CD4+TCRd-" #3.2
  GatingStrategyPop[17] <- "CD44+CD62L-TCRb+CD161+CD4+TCRd-" #3.21
  GatingStrategyPop[18] <- "CD44+CD62L+TCRb+CD161+CD4+TCRd-" #3.22
  GatingStrategyPop[19] <- "TCRb+KLRG1+CD161+CD4+TCRd-" #3.23
  GatingStrategyPop[24] <- "TCRb+CD8a-CD161-CD4+TCRd-" #3.31
  GatingStrategyPop[20] <- "CD25+TCRb+CD8a-CD161-CD4+GITR+TCRd-" #3.311
  GatingStrategyPop[21] <- "CD44+CD62L-CD25+TCRb+CD8a-CD161-CD4+GITR+TCRd-" #3.3112
  GatingStrategyPop[22] <- "CD44+CD62L+CD25+TCRb+CD8a-CD161-CD4+GITR+TCRd-" #3.3111
  GatingStrategyPop[23] <- "CD44+CD25+TCRb+KLRG1+CD8a-CD161-CD4+GITR+TCRd-" #3.3113
  GatingStrategyPop[26] <- "CD25-TCRb+CD8a-CD161-CD4+TCRd-" #3.312
  GatingStrategyPop[27] <- "CD44+CD62L-CD25-TCRb+CD8a-CD161-CD4+TCRd-" #3.3121
  GatingStrategyPop[28] <- "CD62L+CD25-TCRb+CD8a-CD161-CD4+TCRd-" #3.3122
  GatingStrategyPop[29] <- "CD44+CD25-TCRb+KLRG1+CD8a-CD161-CD4+TCRd-" #3.3123
  GatingStrategyPop[25] <- "TCRb+CD8a+CD161-CD4-TCRd-" #3.32
  GatingStrategyPop[30] <- "CD44+CD62L-TCRb+CD8a+CD161-CD4-TCRd-" #3.322
  GatingStrategyPop[31] <- "CD44+CD62L+TCRb+CD8a+CD161-CD4-TCRd-" #3.321
  GatingStrategyPop[32] <- "CD44+TCRb+KLRG1+CD8a+CD161-CD4-TCRd-" #3.324
  GatingStrategyPop[33] <- "TCRb+TCRd-" #3
  GatingStrategyPop[34] <- "TCRb+CD161+TCRd-" #3.1/2 CD4?
  GatingStrategyPop[35] <- "TCRb-KLRG1-GITR+TCRd+" #1.1
  GatingStrategyPop[36] <- "CD44-CD62L+TCRb-TCRd+" #1.5
  GatingStrategyPop[37] <- "TCRb-TCRd-" #2
  GatingStrategyPop[38] <- "CD44-CD62L+TCRb+CD8a+CD161-CD4-TCRd-" #3.323
  GatingStrategyPop[39] <- "TCRb+CD161-TCRd-" #3.3
  return(GatingStrategyPop)
}
#001_Clean: Create Global Frame-------------------------------------------------
#Input: FCS, start & end index of marker columns, path of filePrefixWithDir
# Runs flowCut/flowClean-style clean() on the marker columns; clean() returns
# a character string on failure, in which case the original frame is returned
# unchanged. On success, rows with column (MarkerEndCol+2) >= 10000 are also
# dropped -- NOTE(review): presumably a time/QA column; confirm which.
clean3iTcell <- function(f, MarkerStartCol, MarkerEndCol, path) {
  fi <- clean(f, vectMarkers=c(MarkerStartCol:MarkerEndCol), filePrefixWithDir=path, ext="fcs", diagnostic=TRUE)
  if (!is.character(fi)) {
    fi@exprs <- fi@exprs[which(fi@exprs[,MarkerEndCol+2] < 10000),]
    f <- fi
  }
  remove(fi)
  return(f)
}
#Note: flowDensity (used to get gating thresholds after clean) requires FSC & SSC therefore don't use this function...
#Input: FCS, start & end index of marker columns
#Output: parameters@data of FCS's marker columns only
# Subsets the flowFrame's parameter metadata to the marker columns and
# renumbers the row names to $P1..$Pk so they match the reduced column set.
extractextractMarkerColPar <- function(f, MarkerStartCol, MarkerEndCol) {
  f@parameters <- f@parameters[MarkerStartCol:MarkerEndCol]
  row.names(f@parameters@data) <- paste("$P", c(1:(1+MarkerEndCol-MarkerStartCol)), sep="")
  parData <- NULL
  parData <- f@parameters@data
  return(parData)
}
#Note: flowDensity (used to get gating thresholds after clean) requires FSC & SSC therefore don't use this function...
#Input: FCS, start & end index of marker columns, parameter data
#Output: FCS with exprs & parameters of only marker columns
#Note: flowDensity (used to get gating thresholds after clean) requires FSC & SSC therefore don't use this function...
# Restrict a flowFrame to its marker columns only.
# f              - flowFrame
# MarkerStartCol - first marker column index
# MarkerEndCol   - last marker column index
# parData        - optional pre-computed parameters@data for the marker columns;
#                  when NULL it is derived via extractextractMarkerColPar()
# Returns the flowFrame with exprs and parameters@data reduced to the markers.
extractMarkerCol <- function(f, MarkerStartCol, MarkerEndCol, parData=NULL) {
  f@exprs <- f@exprs[, c(MarkerStartCol:MarkerEndCol)]
  # FIX: original tested is.null("parData") — a string literal, never NULL —
  # so the fallback never ran and a missing parData was assigned as NULL.
  if (is.null(parData)) {
    parData <- extractextractMarkerColPar(f, MarkerStartCol, MarkerEndCol)
  }
  f@parameters@data <- parData
  return(f)
}
#Input: list of FCS
#Output: global frame FCS with (smpl) 1000 rows from each FCS
globalFrame <- function(fcleanFrames=NULL, fCEFilePath=NULL, smpl=1000) {
  # Build a "global" flowFrame by sampling `smpl` events from every input
  # frame and stacking them into one expression matrix.
  # fcleanFrames - list of in-memory flowFrames, indexed by "1", "2", ... ; or
  # fCEFilePath  - paths of saved frames (loaded one at a time to bound memory)
  # Returns the first frame with its exprs replaced by the pooled sample.
  if (!is.null(fcleanFrames)) {
    fetch <- function(idx) fcleanFrames[[as.character(idx)]]
    total <- length(fcleanFrames)
  } else if (!is.null(fCEFilePath)) {
    # load() returns the loaded object's name; get() fetches the object itself
    fetch <- function(idx) get(load(fCEFilePath[idx]))
    total <- length(fCEFilePath)
  }
  g <- fetch(1)
  g@exprs <- g@exprs[sample(1:nrow(g), smpl), ]
  cat("Sampling", smpl, "rows from", total, "fcs files: 1")
  for (idx in 2:total) {
    nxt <- fetch(idx)
    g@exprs <- rbind(g@exprs, nxt@exprs[sample(1:nrow(nxt), smpl), ])
    cat(", ", idx, sep="")
  }
  return(g)
}
#002_Gate----------------------------------
#Input: a vector of numbers
#Output: the input vector with outliers replaced by the average of non-outlier values
# Replace outlying values in a numeric vector by the mean of the non-outliers.
# vec - numeric vector of per-sample gating thresholds
# Returns vec with detected outliers overwritten by the non-outlier mean.
# (An earlier arrayMvout/ArrayOutliers-based implementation was commented out
# here; see version control history if it is needed again.)
replaceOutliers <- function(vec) {
  # getOutliers (extremevalues package) assumes a normal distribution;
  # method "I" flags observations beyond the limits where fewer than rho
  # observations are expected given length(vec).
  outlier <- getOutliers(vec, method="I", distribution="normal")
  # outlier <- getOutliers(vec, method="II") #Method 2: flags observations whose fit residuals fall outside the alpha confidence limits
  outlierNo <- c(outlier$iRight, outlier$iLeft)
  if (length(outlierNo) != 0) {
    # FIX: the mean is now computed only when outliers exist; previously
    # vec[-integer(0)] selected zero elements and mean() produced NaN (plus a
    # warning) on every outlier-free call.
    avg <- mean(vec[-outlierNo])
    cat(paste("\nChanging FCS#", outlierNo, " from ", vec[outlierNo], " to ", round(avg, 4), sep=""))
    vec[outlierNo] <- avg
  }
  return(vec)
}
#Input: list of cleaned FCS
#Output: gthres matrix of thresholds for each FCS and its columns/markers
# Compute per-sample gating thresholds for the 3i T-cell panel.
# fcleanFrames   - list of cleaned flowFrames indexed by "1", "2", ... ; or
# fcleanFilePath - paths of saved cleaned frames (loaded one at a time)
# Returns a samples x 16 matrix of thresholds; columns are FCS channels
# (7=CD44, 8=CD62L, 9=CD25, 10=TCRb, 11=KLRG1, 12=CD8a, 13=CD161, 14=CD4,
# 15=GITR, 16=TCRd per the cat() labels below). Thresholds are estimated with
# flowDensity's deGate()/flowDensity() and cross-sample outliers are replaced
# by replaceOutliers(). NOTE(review): channel-number/marker pairing is taken
# from the progress messages — confirm against the panel definition.
gthres3iTcell <- function(fcleanFrames=NULL, fcleanFilePath=NULL) {
  # Threshold matrix: one row per sample, one column per channel.
  if (!is.null(fcleanFrames)) { gthres <- matrix(0, nrow=length(fcleanFrames), ncol=ncol(fcleanFrames[[as.character(1)]]@exprs) ) } #matrix of thresholds
  else if (!is.null(fcleanFilePath)) { gthres <- matrix(0, nrow=length(fcleanFilePath), ncol=16 ) }
  # if ( Genotype[i] != "+_+" && Genotype[i] != "+_Y" ){ # skip wildtypes
  # -- Pass 1: TCRb (10) and TCRd (16), gated on the whole frame ------------
  cat("\nColumn 10 (TCRb), 16 (TCRd): ")
  for(i in 1:max(length(fcleanFrames), length(fcleanFilePath))){
    cat(i, " ", sep="")
    if (!is.null(fcleanFrames)) { f <- fcleanFrames[[as.character(i)]] }
    else if (!is.null(fcleanFilePath)) { f <- get(load(fcleanFilePath[i])) }
    gthres[i,10] <- deGate(f,channel=10)
    gthres[i,16] <- deGate(f,channel=16, upper=T)
  }
  #Find and replace outliers using avg; not channel 16 because the outliers of TCRd are actually fine for the 186XX files
  gthres[,10] <- replaceOutliers(gthres[,10])
  # -- Pass 2: CD8a (12), CD161 (13), CD4 (14), gated on TCR subsets --------
  gthres13 <- NULL
  cat("\nColumn 12 (CD8a), 13 (CD161), 14 (CD4): ")
  for(i in max(length(fcleanFrames), length(fcleanFilePath)):1){
    cat(i, " ", sep="")
    if (!is.null(fcleanFrames)) { f <- fcleanFrames[[as.character(i)]] }
    else if (!is.null(fcleanFilePath)) { f <- get(load(fcleanFilePath[i])) }
    F2 <- getflowFrame(flowDensity(f, channels=c(10,16), position=c(F,F), gates=c(gthres[i,10],gthres[i,16]))) #TCR-
    F3 <- getflowFrame(flowDensity(f, channels=c(10,16), position=c(T,F), gates=c(gthres[i,10],gthres[i,16]))) #ab T-cells
    gthres[i,12] <- deGate(F2,channel=12)
    gthres[i,14] <- deGate(F3,channel=14)
    # gthres[i,13] <- deGate(F3,channel=13)
    # Two candidate CD161 cuts; keep whichever lies closest to 2.4.
    ClosestTo2p4 <- c(
      deGate(F3,channel=13, tinypeak.removal=0.001, all.cut=T),
      deGate(F3,channel=13, upper=T, tinypeak.removal=0.9))
    gthres13[i] <- which(rank(abs(ClosestTo2p4 - 2.4)) == 1)
    gthres[i,13] <- ClosestTo2p4[gthres13[i]]
  }
  #Find and replace outliers using avg
  gthres[,12] <- replaceOutliers(gthres[,12])
  gthres[,13] <- replaceOutliers(gthres[,13])
  gthres[,14] <- replaceOutliers(gthres[,14])
  # -- Pass 3: CD44 (7), CD62L (8), KLRG1 (11), gated on NK-cell subset -----
  cat("\nColumn 7 (CD44), 8 (CD62L), 11 (KLRG1): ")
  for(i in 1:max(length(fcleanFrames), length(fcleanFilePath))){
    cat(i, " ", sep="")
    if (!is.null(fcleanFrames)) { f <- fcleanFrames[[as.character(i)]] }
    else if (!is.null(fcleanFilePath)) { f <- get(load(fcleanFilePath[i])) }
    F2 <- getflowFrame(flowDensity(f, channels=c(10,16), position=c(F,F), gates=c(gthres[i,10],gthres[i,16]))) #TCR-
    F3 <- getflowFrame(flowDensity(f, channels=c(10,16), position=c(T,F), gates=c(gthres[i,10],gthres[i,16]))) #ab T-cells
    F2.1 <- getflowFrame(flowDensity(F2, channels=c(13,12), position=c(T,F), gates=c(gthres[i,13],gthres[i,12]))) #TCR-; NK-cells
    F3.3 <- getflowFrame(flowDensity(F3, channels=c(13,14), position=c(F,NA), gates=c(gthres[i,13],gthres[i,14]))) #ab; P4
    gthres[i,7] <- deGate(F2.1,channel=7, use.percentile=T, percentile=0.001)
    # gthres[i,8] <- deGate(F2.1,channel=8)
    # CD62L threshold is fixed at 2 (deGate call above was abandoned).
    gthres[i,8] <- 2
    gthres[i,11] <- deGate(F2.1,channel=11)
  }
  #Find and replace outliers using avg; do channel 2 seperately,
  gthres[,7] <- replaceOutliers(gthres[,7])
  gthres[,11] <- replaceOutliers(gthres[,11])
  # -- Pass 4: CD25 (9), GITR (15), gated on CD4+ ab T-cells ----------------
  # gthres15a/b count how often each GITR candidate cut wins (diagnostics).
  gthres15a <- 0
  gthres15b <- 0
  cat("\nColumn 9 (CD25), 15 (GITR): ")
  for(i in max(length(fcleanFrames), length(fcleanFilePath)):1){
    cat(i, " ", sep="")
    if (!is.null(fcleanFrames)) { f <- fcleanFrames[[as.character(i)]] }
    else if (!is.null(fcleanFilePath)) { f <- get(load(fcleanFilePath[i])) }
    F3 <- getflowFrame(flowDensity(f, channels=c(10,16), position=c(T,F), gates=c(gthres[i,10],gthres[i,16]))) #ab T-cells
    F3.3 <- getflowFrame(flowDensity(F3, channels=c(13,14), position=c(F,NA), gates=c(gthres[i,13],gthres[i,14]))) #ab; P4
    F3.31 <- getflowFrame(flowDensity(F3.3, channels=c(12,14), position=c(F,T), gates=c(gthres[i,12],gthres[i,14]))) #ab; P4; CD4+ T-cells
    gthres[i,9] <- deGate(F3.31,channel=9)
    # gthres[i,15] <- deGate(F3.31,channel=c(15))
    # Two candidate GITR cuts; prefer the one on the far side of the density
    # peak (min of the two when both sit above the peak, max otherwise).
    gthres15temp1a <- deGate(F3.31, channel=15, tinypeak.removal=0.01)
    gthres15temp1b <- deGate(F3.31, channel=15, upper=T)
    tempD <- density(F3.31@exprs[,15])
    peakValue <- tempD$x[which.max(tempD$y)]
    if(gthres15temp1a > peakValue && gthres15temp1b > peakValue) {
      gthres15temp1 <- min(gthres15temp1a, gthres15temp1b)
    } else {
      gthres15temp1 <- max(gthres15temp1a, gthres15temp1b)
    }
    if(gthres15temp1==gthres15temp1a) {
      gthres15a <- gthres15a+1
    } else {
      gthres15b <- gthres15b+1
    }
    gthres[i,15] <- gthres15temp1
  }
  #Find and replace outliers using avg
  gthres[,9] <- replaceOutliers(gthres[,9])
  gthres[,15] <- replaceOutliers(gthres[,15])
  colnames(gthres) <- c(1:ncol(gthres))
  return(gthres)
}
#003_ScatterPlots -----------------------------------------------
#By Justin Meskas; modified 20151221 by Alice Yue
#3iTCell project specific (see gating strategy)
#Input: FCS, gthres[i,], path where png scatter plot should be saved
#Output: saves a png scatter lot into path (does so by first extracting required cell populations)
# Render the full gating-strategy scatter-plot panel for one sample and save it
# as a png at `path`.
# f       - (cleaned) flowFrame for one sample
# gthresi - gating thresholds for this sample, one value per channel
# path    - output png file path
# Only the cell populations needed for plotting are isolated; flowDensity()
# slices the frame at the supplied thresholds and getflowFrame() recovers each
# subset (population labels per the 3i T-cell gating strategy).
ScatterPlots3iTCell <- function(f, gthresi, path) {
  #Create cell populations, one for each plot; Only cell populations used for plotting are isolated here
  F1 <- getflowFrame(flowDensity(f, channels=c(10,16), position=c(F,T), gates=c(gthresi[10],gthresi[16]))) #gd T-cells
  F2 <- getflowFrame(flowDensity(f, channels=c(10,16), position=c(F,F), gates=c(gthresi[10],gthresi[16]))) #TCR-
  F3 <- getflowFrame(flowDensity(f, channels=c(10,16), position=c(T,F), gates=c(gthresi[10],gthresi[16]))) #ab T-cells
  F2.1 <- getflowFrame(flowDensity(F2, channels=c(13,12), position=c(T,F), gates=c(gthresi[13],gthresi[12]))) #TCR-; NK-cells
  F3.1 <- getflowFrame(flowDensity(F3, channels=c(13,14), position=c(T,F), gates=c(gthresi[13],gthresi[14]))) #ab; NKT-cells
  F3.2 <- getflowFrame(flowDensity(F3, channels=c(13,14), position=c(T,T), gates=c(gthresi[13],gthresi[14]))) #ab; iNKT-cells
  F3.3 <- getflowFrame(flowDensity(F3, channels=c(13,14), position=c(F,NA), gates=c(gthresi[13],gthresi[14]))) #ab; P4
  F3.31 <- getflowFrame(flowDensity(F3.3, channels=c(12,14), position=c(F,T), gates=c(gthresi[12],gthresi[14]))) #ab; P4; CD4+ T-cells
  F3.32 <- getflowFrame(flowDensity(F3.3, channels=c(12,14), position=c(T,F), gates=c(gthresi[12],gthresi[14]))) #ab; P4; CD8+ T-cells
  F3.311 <- getflowFrame(flowDensity(F3.31, channels=c(9,15), position=c(T,T), gates=c(gthresi[9],gthresi[15]))) #ab; P4; CD4+ T-cells; KLRG1+
  F3.312 <- getflowFrame(flowDensity(F3.31, channels=c(9,15), position=c(F,NA), gates=c(gthresi[9],gthresi[15]))) #ab; P4; CD4+ T-cells;
  # Plot. The device is opened inside tryCatch and closed in `finally`, so a
  # failing plot call no longer leaks an open png device (FIX: the original
  # error handler returned without calling dev.off()).
  opened <- FALSE
  tryCatch({
    png(file=path) #, width=1800, height=1200)
    opened <- TRUE
    layout(rbind(c(1:6),c(7:11,0),c(12,13,0,0,0,0)))
    plotDens(f, c(10,16), main=paste("0_TCRb_TCRd"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[10]); abline(h=gthresi[16]);
    plotDens(F1, c(11,15), main=paste("1_KLRG1_GITTR"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[11]); abline(h=gthresi[15]);
    plotDens(F1, c(8,7), main=paste("1_CD62L_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[8]); abline(h=gthresi[7]);
    plotDens(F2, c(13,12), main=paste("2_CD161_CD8a"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[13]); abline(h=gthresi[12]);
    plotDens(F2.1, c(8,7), main=paste("2.1_CD62L_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[8]); abline(h=gthresi[7]);
    plotDens(F2.1, c(7,11), main=paste("2.1_CD44_KLRG1"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[7]); abline(h=gthresi[11]);
    plotDens(F3, c(13,14), main=paste("3_CD161_CD4"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[13]); abline(h=gthresi[14]);
    plotDens(F3.1, c(8,7), main=paste("3.1_CD62L_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[8]); abline(h=gthresi[7]);
    plotDens(F3.1, c(11,15), main=paste("3.1_KLRG1_GITR"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[11]); abline(h=gthresi[15]);
    plotDens(F3.2, c(8,7), main=paste("3.2_CD62L_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[8]); abline(h=gthresi[7]);
    plotDens(F3.2, c(11,15), main=paste("3.2_KLRG1_GITR"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[11]); abline(h=gthresi[15]);
    plotDens(F3.3, c(12,14), main=paste("3.3_CD8a_CD4"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[12]); abline(h=gthresi[14]);
    plotDens(F3.31, c(9,15), main=paste("3.31_CD25_GITR"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[9]); abline(h=gthresi[15]);
    plotDens(F3.311, c(8,7), main=paste("3.311_CD62L_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[8]); abline(h=gthresi[7]);
    plotDens(F3.311, c(11,7), main=paste("3.311_KLRG1_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[11]); abline(h=gthresi[7]);
    plotDens(F3.312, c(8,7), main=paste("3.312_CD62L_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[8]); abline(h=gthresi[7]);
    plotDens(F3.312, c(11,7), main=paste("3.312_KLRG1_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[11]); abline(h=gthresi[7]);
    plotDens(F3.32, c(8,7), main=paste("3.32_CD62L_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[8]); abline(h=gthresi[7]);
    plotDens(F3.32, c(11,7), main=paste("3.32_KLRG1_CD44"), cex.lab = 2, cex.axis = 2, cex.main=2, devn=FALSE); abline(v=gthresi[11]); abline(h=gthresi[7]);
  }, error = function(err) {
    cat(" Error in plotting\n")
  }, finally = {
    if (opened) dev.off()
  })
}
# STUB — not implemented. The body contains only design notes, so calling this
# function does nothing and returns NULL. The notes below are a cleaned-up
# transcription of the original comments (which contained mojibake "??"
# characters where symbols such as alpha/union/beta once were); they sketch the
# FP-growth frequent-pattern-mining algorithm presumably intended for deriving
# cell-population patterns from flowType output — TODO confirm intent.
flowtypeDF <- function(f, PropMarkers=7:16, Thresholds) {
  # FP-growth (Han, Pei, Yin):
  #   Input : database DB represented by an FP-tree; output: all frequent patterns.
  #   FP-growth(Tree, a):
  #     if Tree contains a single prefix path:
  #       let P be that prefix path and Q the multipath part (top branching node
  #       replaced by a null root); for each combination B of nodes in P,
  #       generate pattern B ∪ a with support = min support of nodes in B.
  #     else let Q be Tree.
  #     for each item ai in Q:
  #       generate pattern B = ai ∪ a with support = ai.support;
  #       build B's conditional pattern base and conditional FP-tree TreeB;
  #       if TreeB is non-empty, recurse FP-growth(TreeB, B).
  #     return freq(P) ∪ freq(Q) ∪ {p ∪ q : p in freq(P), q in freq(Q)}.
  #
  # FP-tree construction:
  #   Scan DB once to collect the frequent items F and their supports; sort F
  #   by descending support into FList. Create a root labelled "null"; for each
  #   transaction, keep its frequent items sorted by FList order and insert the
  #   list [p | P] into the tree: if the current node has a child named p,
  #   increment its count, else create a new child (count 1) linked into the
  #   node-link chain for that item; recurse on P.
  #
  # FP-tree structure:
  #   - root "null" with item-prefix subtrees as children plus a header table;
  #   - each subtree node: item-name, count (transactions through this path),
  #     node-link (next node with the same item-name, or null);
  #   - each header-table entry: item-name, head-of-node-link (and optionally
  #     the item's support count).
}
#004_CellPopMatrix -- Phenotypes not working ----------------------------------------------------
#By Justin Meskas; modified 20151221 by Alice Yue
#3iTcell specific (MarkerNo & markers 7:16)
#Input: Number of Markers/col, path list of flowtype-ed ft, list of phenotypes, vector of genotypes corresponding to ftFrames/ftFilePath
#Output: Matrix with all cell population proporitions row(phenotypes) col(samples; col names=genotypes)
# Build a cell-population count matrix from flowType results.
# MarkersNo  - number of markers (rows = calcNumPops(rep(2, MarkersNo), MarkersNo)
#              possible phenotypes)
# ftFrames   - list of in-memory flowType results indexed by "1", "2", ... ; or
# ftFilePath - paths of saved flowType results (loaded one at a time)
# markers    - marker names used to decode phenotype codes into row labels
# Genotype   - genotype per sample, used as column names.
#              NOTE(review): the default `Genotype=Genotype` is self-referential
#              and errors if the argument is missing — presumably a global of
#              the same name was intended; confirm and pass explicitly.
# Returns phenotypes x samples matrix of cell counts.
matrixCellCountF <- function(MarkersNo, ftFrames=NULL, ftFilePath=NULL, markers, Genotype=Genotype) {
  # FIX: the sample count must come from whichever input was supplied. The
  # original used length(ftFrames) for ncol and the loop bound, which is 0 when
  # only ftFilePath is given (and `1:0` would then iterate i = 1, 0).
  nSamples <- max(length(ftFrames), length(ftFilePath))
  matrixP <- matrix(0, nrow=calcNumPops(rep(2, MarkersNo), MarkersNo), ncol=nSamples)
  #Fill Matrix by col(FCS file)
  cat("reading & adding ", nSamples, " flowtypes/samples: ", sep="")
  ft <- NULL
  for (i in seq_len(nSamples)) { cat(" ", i)
    if (!is.null(ftFrames)) { ft <- ftFrames[[as.character(i)]] }
    # FIX: load() returns the loaded object's NAME; get() fetches the object,
    # instead of relying on the saved object happening to be called `ft`.
    else if (!is.null(ftFilePath)) { ft <- get(load(ftFilePath[i])) }
    matrixP[,i] <- ft@CellFreqs
  }
  # Row labels: decode each phenotype code of the last-read flowType result
  # (codes/partitions are assumed identical across samples — TODO confirm).
  Phenotypes <- unlist(lapply(ft@PhenoCodes, function(x){return( decodePhenotype(x, markers, ft@PartitionsPerMarker) )}))
  rownames(matrixP) <- Phenotypes
  colnames(matrixP) <- Genotype
  # (Previously commented-out blocks for merging flowType results and dropping
  # bad L00001865/L00001866 files were removed; see version control history.)
  return(matrixP)
}
#005_Barcode --------------------------------------------------------------------
#Input: Matrix of cell counts (Phenotypes (row) vs. Samples/FCS files (colNames=Genotype)),
# STUB (work in progress, as named): intended to reduce a cell-count matrix to
# a matrix of significance values for chosen column comparisons. Currently the
# only implemented branch sets matrixP to the scalar 1 when method == "pVal";
# for any other method, matrixP is never defined and the final return() raises
# "object 'matrixP' not found". Parameters other than `method` are accepted but
# unused so far.
matrixSigINPROGRESS <- function(matrixCellCount, colCombos, compareColCombos, method, #comparedCols=list of columns to compare
                                test="wilcox", pAdj="BH", # pValThres=.05, cellCountThres=300, method="pVal"; test=c(), pAdj=c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none")
                                colLabel) {
  if (method=="pVal") {
    #Produce Matrix of p values (Phenotypes (row) vs. Genotype (col))
    matrixP <- 1
  }
  return (matrixP)
}
#Make sure to name your columns and rows if you are going to specify cellCount and pVal Thresholds! Function will cut matrix down
# Compute a phenotypes x genotypes matrix of p-values by comparing two column
# groups of a cell-count matrix per genotype.
# matrixCellCount - phenotypes (rows) x samples (cols) cell counts
# colCombos       - per genotype, a list of two column-index vectors to compare
# colLabel        - one label per genotype (output column names)
# cellCountThres  - if BOTH group medians fall below this, p is forced to 1
# pValThres       - if non-NULL, rows/cols with only insignificant p are trimmed
#                   via matrixPValTrim() (NOTE(review): defined elsewhere —
#                   possibly the matrixTrim() below; confirm)
# test            - "wilcox" or "ttest"
# adjust          - NULL, "BH", or "bon"; when non-NULL a list
#                   (raw matrix, adjusted matrix) is returned instead.
matrixPValBC <- function(matrixCellCount, colCombos, colLabel,
                         cellCountThres=0, pValThres=NULL, #don't set these if you want the full matrix, these trim things down, a bit faster
                         test="wilcox", adjust=NULL) {
  #ensure all parameters exist
  if ( !(test %in% c("wilcox", "ttest")) ) {
    cat("specify whether you would like to do the wilcox test or the ttest")
    return()
  }
  matrixPVal <- matrix(1, nrow=nrow(matrixCellCount), ncol=length(colLabel), dimnames=list(rownames(matrixCellCount), colLabel))
  cat(paste("getting pValues of ", nrow(matrixCellCount), " possible cell populations for ", ncol(matrixPVal), " genotypes: ", sep=""))
  # FIX: `ncol:1` / `nrow:1` iterate c(1, 0) on empty input; rev(seq_len())
  # keeps the original (descending) order but is empty-safe.
  for (j in rev(seq_len(ncol(matrixPVal)))) { cat(paste(" ", j, sep=""))
    for (i in rev(seq_len(nrow(matrixPVal)))) {
      compare1 <- as.numeric(matrixCellCount[i, colCombos[[j]][[1]] ])
      compare2 <- as.numeric(matrixCellCount[i, colCombos[[j]][[2]] ])
      # Medians (robust to outliers) below threshold in BOTH groups => not
      # significant. FIX: scalar condition now uses && instead of vector &.
      if (median(compare1) < cellCountThres && median(compare2) < cellCountThres) {
        matrixPVal[i,j] <- 1
      } else {
        if (test=="wilcox") matrixPVal[i,j] <- wilcox.test(compare1, compare2)$p.value
        else if (test=="ttest") try( matrixPVal[i,j] <- t.test(compare1, compare2)$p.value )
      }
    }
  }
  # Constant groups can yield NaN; treat those as "no difference".
  matrixPVal[is.nan(matrixPVal)] <- 1
  if (!is.null(pValThres)) { #TRIM: remove rows and columns with only insignificant p values
    matrixPVal <- matrixPValTrim(matrixPVal, pValThres)
  }
  if (!is.null(adjust)) {
    # Multiple-testing correction per genotype column (duplicated loops merged).
    method <- if (adjust == "BH") "BH" else if (adjust == "bon") "bonferroni" else NA
    matrixPValAdj <- NULL
    if (!is.na(method)) {
      for (i in seq_len(ncol(matrixPVal))) {
        matrixPValAdj <- cbind(matrixPValAdj, p.adjust(matrixPVal[,i], method=method))
      }
    }
    return(list(matrixPVal, matrixPValAdj))
  }
  return(matrixPVal)
}
#Remove rows and columns with only insignificant p values
# Drop "uninteresting" rows and columns from a p-value-like matrix: a row
# (column) is removed when every entry outside the first column (row) is on the
# insignificant side of `thres` (>= thres when trimGreater, <= thres otherwise).
# Both drop sets are decided on the ORIGINAL matrix, then applied in turn.
matrixTrim <- function(m, thres=.05, trimGreater=T) {
  allPast <- if (trimGreater) {
    function(v) all(v >= thres)
  } else {
    function(v) all(v <= thres)
  }
  dropRows <- which(apply(m[, -1], 1, allPast))
  dropCols <- which(apply(m[-1, ], 2, allPast))
  if (length(dropRows) > 0) m <- m[-dropRows, ]
  if (length(dropCols) > 0) m <- m[, -dropCols]
  return(m)
}
#given matrix, outputs matrix that means/median columns with same column name
# Aggregate columns of a cell-count matrix that share the same column name
# (e.g. replicate samples of one genotype) into a single column.
# matrixType - "mean": row means; "med": row medians; "density": index of the
#              density peak per row (NOTE(review): which.max returns the grid
#              INDEX, not the x value at the mode — confirm that is intended).
# densn      - number of grid points passed to density() for "density" mode.
# Returns phenotypes x unique-column-names matrix.
matrixM <- function(matrixCellCount, matrixType="mean", densn=1e5) {
  uniqueCol <- unique(colnames(matrixCellCount))
  matrixM <- matrix(0, nrow=nrow(matrixCellCount), ncol=length(uniqueCol), dimnames=list(rownames(matrixCellCount), uniqueCol))
  # FIX: drop=FALSE keeps a single-column selection as a matrix; previously a
  # name matching exactly one column collapsed to a vector and rowMeans()/
  # apply() errored. (The unused `require(Matrix)` was removed: rowMeans is base.)
  for (j in seq_along(uniqueCol)) { cat(" ", j)
    cols <- matrixCellCount[, which(colnames(matrixCellCount) == uniqueCol[j]), drop=FALSE]
    if (matrixType == "mean") {
      matrixM[, j] <- rowMeans(cols)
    } else if (matrixType == "med") {
      matrixM[, j] <- apply(cols, 1, median)
    } else if (matrixType == "density") {
      matrixM[, j] <- apply(cols, 1, function(x) which.max(density(x, n=densn)$y))
    }
  }
  return(matrixM)
}
# Difference of every column against the first (reference, e.g. wildtype)
# column; the reference column itself is dropped from the result.
# FIX: the original set colnames via colnames(matrixM[2,ncol(matrixM)]) — the
# colnames of a scalar, i.e. NULL — leaving the result unlabeled. The
# split/lapply/cbind construction is also replaced by a direct vectorized
# subtraction (a matrix minus a length-nrow vector recycles down each column),
# which additionally preserves rownames.
matrixMDiff <- function(matrixM) {
  M <- matrixM[, -1, drop = FALSE] - matrixM[, 1]
  colnames(M) <- colnames(matrixM)[-1]
  return(M)
}
#nodeInfo variable must be a global variable! k=root phenocode
# Recursively build a (child, parent) edge list for the phenotype lattice
# rooted at phenocode `k`. Relies on the GLOBALS `nodeInfo` (columns phenoCode,
# phenoLevel) and `phenoMeta` (column phenoCode) — they must be in scope.
# Children of k are the phenocodes matching k with every '0' digit wildcarded
# (NOTE(review): that glob also matches k itself — confirm self-edges are
# intended). Edges already produced by a direct child are removed from the
# current node's list before merging, so each edge appears once.
brokenLatticeAdjList <- function(k) {
  kIndex <- which(nodeInfo$phenoCode==k)
  childIndex <- grep(glob2rx(gsub("0", "?", k)), nodeInfo$phenoCode)
  if (length(childIndex)==0) { edgeList <- c() } #stop condition: leaf
  else {
    child <- nodeInfo$phenoCode[childIndex]
    edgeList <- data.frame(child)
    edgeList$parent <- rep(k, nrow(edgeList))
    if (nodeInfo$phenoLevel[kIndex] < (max(nodeInfo$phenoLevel)-1)) {
      # Recurse into each direct child (one phenotype level deeper).
      for (l in intersect(edgeList[,1], phenoMeta$phenoCode[which(nodeInfo$phenoLevel==nodeInfo$phenoLevel[kIndex]+1)])) {
        edgeListChild <- brokenLatticeAdjList(l)
        if (length(edgeListChild) > 0) {
          commonChild <- intersect(edgeList[,1], edgeListChild[,1])
          dupRows <- which(edgeList[,1] %in% commonChild)
          # FIX: the de-duplicating subset was computed but its result was
          # discarded (`edgeList[-...,]` without assignment), so duplicate
          # edges were kept; the length check was also malformed
          # (`length(commonChild>0)`).
          if (length(dupRows) > 0) { edgeList <- edgeList[-dupRows, ] }
          edgeList <- rbind(edgeList, edgeListChild)
        }
      }
    }
  }
  return(edgeList)
}
#Input: Reference phenocode & its markers; phenocode and markers that need to be changed to reference
#Output: phenoMatch order (p2[phenoMatch]) & markerMatch (m2[markerMatch])
# Align a second panel's phenocodes/markers to a reference panel.
# p1, m1 - reference phenocodes and marker names
# p2, m2 - phenocodes and marker names to be reordered to match the reference
# Returns list(phenoMatch, markerMatch) so that p2[phenoMatch] / m2[markerMatch]
# line up with p1 / m1.
phenoCC <- function(p1,m1,p2,m2) {
  # Position of each reference marker inside m2 (NA where m2 lacks it).
  markerMatch <- match(m1, m2)
  m2new <- m2[markerMatch] #CD44&CD45 different (10), will put in same position here # match(markeri, markers) #index of marker[[i]] in markers e.g. c(1,2,5,7)
  # Indices of m2 markers absent from the reference panel.
  m2NA <- which((m2%in%m1)==F)
  m2newAll <- append(m2new, m2[m2NA])  # NOTE(review): computed but never used — confirm intent
  # In p2: NA-out phenocodes that gate on markers absent from m1, and reorder
  # the remaining codes into the reference marker order.
  if (length(m2NA)>0) {
    p2excludedi <- NULL
    # A code is excluded if any absent-marker digit is non-'0' (i.e. it gates on that marker).
    for (i in 1:length(m2NA)) { p2excludedi <- append(p2excludedi, which( substr( p2, start=m2NA[i], stop=m2NA[i] ) !="0")) }
    p2excludedi <- unique(p2excludedi)
    p2temp <- p2
    p2temp[p2excludedi] <- NA
    # NOTE(review): if p2excludedi ends up empty (no code gates on the absent
    # markers), c(1:length(p2))[-NULL] selects NOTHING, not everything — TODO confirm.
    p2includedi <- c(1:length(p2))[-p2excludedi]
  } else {
    p2temp <- p2
    p2includedi <- c(1:length(p2))
  }
  # Reorder each kept code's digits into reference marker order; digits for
  # markers missing from m2 become '0' (ungated).
  for (i in p2includedi) {
    pc2new <- strsplit(p2[i],"")[[1]][markerMatch]
    pc2new[which(is.na(pc2new))] <- "0"
    p2temp[i] <- paste(pc2new, collapse="")
  }
  # NOTE(review): the final match is against the ORIGINAL p2, so the reordered
  # p2temp computed above is never used — looks like match(p1, p2temp) was
  # intended; confirm before changing.
  phenoMatch <- match(p1, p2) #p1 <- p2[phenoMatch]
  return(list(phenoMatch, markerMatch))
}
#006_RchyOptimyx -- Change for more tests ---------------------------------------
#Input: Matrix with actual cell count (row(phenotypes/cell pops) vs col(samples/FCS)), Genotypes vector, unique Genotypes vector that should be tested (optional)
#Output: Matrix with p values of row(phenotypes), col(genotypes - combo samples)
# Compute a phenotypes x genotypes p-value matrix from a cell-count matrix.
# matrixPR   - phenotypes (rows; row 1 = total cell count) x samples matrix
# Genotype   - genotype per sample; retained for interface compatibility but no
#              longer used (it only fed the broken ttest branch, see FIX below)
# compareCol - per genotype, a list of two column-index vectors to compare
# test       - "wilcox" or "ttest"
# NOTE(review): depends on the GLOBAL `uniqueKOGT` for column names and the
# progress message — it must be in scope (the inline TODO in the original
# already flagged this).
matrixPValFull <- function(matrixPR, Genotype, compareCol, test="wilcox") {
  #ensure all parameters exist (FIX: scalar condition now uses && instead of &)
  if ((test != "wilcox") && (test != "ttest")) {
    cat("please specify test = as either wilcox or ttest")
    return()
  }
  Phenotypes <- rownames(matrixPR)
  matrixPVal <- matrix(NA, nrow=nrow(matrixPR), ncol=length(compareCol), dimnames=list(Phenotypes, uniqueKOGT)) #get rid of uniqueKOGT
  matrixPVal[1,] <- rep(1, length(compareCol))  # row 1 = total count; no test
  cat(paste("getting pValues of ", nrow(matrixPR), " possible cell populations for ", length(uniqueKOGT), " genotypes which contain 3+ samples: ", sep="")) #3iTCell specific
  for (i in seq_along(compareCol)) { #for every KO genotype
    cat(paste(" ", i, sep=""))
    # seq_len(n)[-1] is empty when n == 1 (FIX: `2:nrow` would iterate 2,1).
    for (j in seq_len(nrow(matrixPR))[-1]) { #high p value = likely no difference
      grp1 <- as.numeric(matrixPR[j, compareCol[[i]][[1]] ])
      grp2 <- as.numeric(matrixPR[j, compareCol[[i]][[2]] ])
      if (test == "wilcox") {
        matrixPVal[j,i] <- wilcox.test(grp1, grp2)$p.value
      } else {
        # FIX: the ttest branch referenced stale globals (WTIndex and the
        # UNDEFINED iKOIndex); it now tests the same column groups as wilcox.
        try( matrixPVal[j,i] <- t.test(grp1, grp2)$p.value )
      }
    }
  }
  # Constant groups can yield NaN; treat those as "no difference".
  matrixPVal[is.nan(matrixPVal)] <- 1
  return(matrixPVal)
}
#Output: Unique values of vec that appears >=threshold times
# Keep the elements of vec that occur at least `threshold` times.
# Each pass drops one occurrence of every value (the first), so after
# threshold-1 passes only values with count >= threshold survive (with
# count - (threshold-1) copies each — the result is NOT unique()'d, matching
# the original behavior despite its old "Unique values" comment).
# display - when TRUE, print how many unique values survive each pass.
frequentValues <- function(vec, threshold, display=FALSE) {
  # FIX: `1:(threshold-1)` iterated c(1, 0) when threshold == 1, filtering
  # twice instead of not at all; seq_len() is empty-safe.
  for (i in seq_len(threshold - 1)) {
    vec <- vec[duplicated(vec)]
    if (display==TRUE) { cat(length(unique(vec)), "phenotypes significant in ", i+1, " KO genotypes\n", sep="") }
  }
  return(vec)
}
#Plots RchyOptimyx; 3iTcell specific Marker Names
# Run RchyOptimyx for one or more start phenotypes, merge the resulting
# hierarchies, and plot them to a PDF at `path`. Marker names default to the
# 3i T-cell panel. When maxScore == "repeat", the y-axis ceiling is recomputed
# from the p-values of the nodes actually present in the merged hierarchy.
plotRchyPDF <- function(path, markersName=NULL, ylab='-log10(Pvalue)', maxScore=max(-log10(pVal)), pVal=NULL, pheno.codes, phenotypeScores=-log10(pVal), phenotypeScoresPlot=phenotypeScores, startPhenotype, pathCount=1, trimPaths=FALSE, trim.level=length(markersName)) {
  # FIX: the original tested/assigned `markerNames` (note the letter order),
  # an undefined variable, so the default never applied and the call errored.
  if (is.null(markersName)) { markersName <- c("CD44", "CD62L", "CD25", "TCRb", "KLRG1", "CD8a", "CD161", "CD4", "GITR", "TCRd") }
  res <- RchyOptimyx(pheno.codes, phenotypeScores, startPhenotype[1], pathCount, trimPaths, trim.level)
  if (length(startPhenotype) > 1) {
    for (j in 2:length(startPhenotype)) {
      res <- merge(res, RchyOptimyx(pheno.codes, phenotypeScores, startPhenotype[j], pathCount, trimPaths, trim.level))
    }
  }
  # identical() keeps this safe when maxScore is a numeric vector
  # (numeric == "repeat" would produce a multi-element logical).
  if (identical(maxScore, "repeat")) {
    maxScoreIndices <- NULL
    for (j in seq_along(res@nodes[1,])) {
      if (length(which(pheno.codes == res@nodes[1,j])) == 0) {
        # Node absent from pheno.codes: reuse the previous node's index
        # (assumes the first node is always present — TODO confirm).
        maxScoreIndices[j] <- maxScoreIndices[j-1]
      } else {
        maxScoreIndices[j] <- which(pheno.codes == res@nodes[1,j])
      }
    }
    # FIX: the original indexed `pVals`, an undefined variable; the parameter
    # is named pVal.
    maxScore <- c(0, max(-log10(pVal[maxScoreIndices])))
  }
  pdf(file=path, height=18, width=28)
  plot(res, phenotypeScoresPlot, phenotypeCodes=pheno.codes, markersName, ylab, maxScore)
  dev.off()
}
#007_FPM ----------------------------------------------------
#Input: 2 rows to compare, distance measure
#Output: Ranking of farthes to nearest features
# Per-feature (column) distance between the two rows of m, under vegan's
# vegdist measure `dis`. Returns the farthest-to-nearest ranking inputs: one
# distance per column. Where exactly one of the two rows is zero in a column,
# that zero is bumped to 1 first (presumably to avoid degenerate zero/nonzero
# comparisons under ratio-based measures — TODO confirm rationale).
featDist <- function(m, dis) {
  require(vegan)
  m[1, which(m[1,]==0 & m[2,]!=0)] <- 1
  m[2, which(m[1,]!=0 & m[2,]==0)] <- 1
  perColumn <- lapply(seq_len(ncol(m)), function(j) {
    vegdist(as.matrix(m[, j]), method=dis)
  })
  unlist(perColumn)
}
# Pairwise per-feature distances between rows.
# With m2 == NULL: all unordered row pairs (i < j) within m1.
# With m2 given : every row of m1 against every row of m2.
# Returns a nested list d[[i]][[j]] of featDist() results.
featDistList <- function(m1, m2=NULL, dis) {
  d <- NULL
  if (is.null(m2)) {
    for (i in seq_len(nrow(m1))) {
      # j runs over (i+1)..nrow(m1) and is EMPTY when i == nrow(m1).
      # FIX: the original `(i+1):nrow(m1)` counted DOWN at the last row,
      # producing the invalid pair (nrow, nrow+1).
      for (j in seq_len(nrow(m1) - i) + i) {
        d[[i]][[j]] <- featDist(m1[c(i,j),], dis)
      }
    }
  } else {
    for (i in seq_len(nrow(m1))) {
      for (j in seq_len(nrow(m2))) {
        d[[i]][[j]] <- featDist(rbind(m1[i,], m2[j,]), dis)
      }
    }
  }
  return(d)
}
# Convert phenotype codes (strings of digits 0/1/2 per marker: 0 = ungated,
# 1 = negative, 2 = positive) into a binary "transaction" matrix for frequent-
# pattern mining: one indicator column per "marker-" item followed by one per
# "marker+" item.
# FIX: removed the bogus `sep=""` argument to paste0 (paste0 has no sep
# parameter, so it was silently pasted as an extra empty string), and replaced
# `1:length(phenoCode)` with seq_along so empty input yields a 0-row matrix
# instead of iterating i = 1, 0.
createTransactionMatrix <- function(phenoCode, markers) {
  m1 <- matrix(0, nrow=length(phenoCode), ncol=length(markers))  # "marker-" indicators (digit == 1)
  m2 <- matrix(0, nrow=length(phenoCode), ncol=length(markers))  # "marker+" indicators (digit == 2)
  for (i in seq_along(phenoCode)) {
    pc <- as.numeric(strsplit(as.character(phenoCode[i]), "")[[1]])
    m1[i, ] <- as.numeric(pc == 1)
    m2[i, ] <- as.numeric(pc == 2)
  }
  colnames(m1) <- paste0(markers, "-")
  colnames(m2) <- paste0(markers, "+")
  return(cbind(m1, m2))
}
# Index control vs experimental (e.g. wildtype vs knockout) samples.
# attlist          - attribute per sample used to FIND controls (grep pattern
#                    match when `control` is a single value, exact %in% otherwise)
# control          - control pattern (length 1) or control values (length > 1)
# sampleCountThres - minimum samples for a genotype to count as usable
# explist          - attribute per sample used to GROUP experimental samples;
#                    defaults to attlist
# Returns a list: attlist, control, controlIndex, exp (unique experimental
# genotypes), expIndex (sample indices per genotype), goodexpIndex (positions
# in exp/expIndex with >= sampleCountThres samples).
getGTindex <- function(attlist, control, sampleCountThres, explist=NULL) {
  if (is.null(explist)) explist <- attlist
  if (length(control) == 1) {
    controlIndex <- grep(control, attlist)
  } else {
    controlIndex <- which(attlist %in% control)
  }
  # FIX: when the control pattern matched nothing, explist[-integer(0)]
  # selected ZERO elements (not all of them), so exp came back empty.
  if (length(controlIndex) > 0) {
    exp <- unique(explist[-controlIndex])
  } else {
    exp <- unique(explist)
  }
  # FIX: `length(exp):1` iterated c(0, 1) on empty exp; preallocate + seq_along.
  expIndex <- vector("list", length(exp))
  for (i in seq_along(exp)) { expIndex[[i]] <- which(explist == exp[i]) }
  goodexpIndex <- which(vapply(expIndex, length, integer(1)) >= sampleCountThres)
  return(list(attlist=attlist, control=control, controlIndex=controlIndex, exp=exp, expIndex=expIndex, goodexpIndex=goodexpIndex))
}
# from https://aurelienmadouasse.wordpress.com/2012/01/13/legend-for-a-continuous-color-scale-in-r/
# Draw a continuous colour-scale legend to the right of the current plot.
# col - vector of colours (one box per entry, stacked bottom to top)
# lev - numeric levels; min/max set the legend's axis range
# Adapted from:
# https://aurelienmadouasse.wordpress.com/2012/01/13/legend-for-a-continuous-color-scale-in-r/
# FIX: the original "saved" graphics state with `opar <- par` (capturing the
# par FUNCTION, not the settings) and "restored" it with `par <- opar`, a
# local assignment with no effect — so xpd/new/usr leaked to the caller.
# State is now saved properly and restored on exit.
legend.col <- function(col, lev){
  opar <- par(no.readonly = TRUE)
  on.exit(par(opar), add = TRUE)
  n <- length(col)
  bx <- par("usr")
  # Legend box just outside the right edge of the plot region.
  box.cx <- c(bx[2] + (bx[2] - bx[1]) / 1000, bx[2] + (bx[2] - bx[1]) / 1000 + (bx[2] - bx[1]) / 50)
  box.cy <- c(bx[3], bx[3])
  box.sy <- (bx[4] - bx[3]) / n
  xx <- rep(box.cx, each = 2)
  par(xpd = TRUE)  # allow drawing in the margin
  for(i in seq_len(n)){
    yy <- c(box.cy[1] + (box.sy * (i - 1)),
            box.cy[1] + (box.sy * (i)),
            box.cy[1] + (box.sy * (i)),
            box.cy[1] + (box.sy * (i - 1)))
    polygon(xx, yy, col = col[i], border = col[i])
  }
  # Overlay an invisible plot scaled to lev so axis() labels the legend.
  par(new = TRUE)
  plot(0, 0, type = "n",
       ylim = c(min(lev), max(lev)),
       yaxt = "n", ylab = "",
       xaxt = "n", xlab = "",
       frame.plot = FALSE)
  axis(side = 4, las = 2, tick = FALSE, line = .25)
}
# http://stackoverflow.com/questions/4787332/how-to-remove-outliers-from-a-dataset
# Replace values outside Tukey's 1.5*IQR fences with NA.
# From: http://stackoverflow.com/questions/4787332/how-to-remove-outliers-from-a-dataset
# x     - numeric vector
# na.rm - forwarded to quantile()/IQR()
# ...   - extra arguments forwarded to quantile()
# Returns x with out-of-fence values set to NA (same length as input).
remove_outliers <- function(x, na.rm = TRUE, ...) {
  fences <- quantile(x, probs = c(.25, .75), na.rm = na.rm, ...)
  reach <- 1.5 * IQR(x, na.rm = na.rm)
  cleaned <- x
  cleaned[x < (fences[1] - reach)] <- NA
  cleaned[x > (fences[2] + reach)] <- NA
  cleaned
}
# Gating Strategy populations --------------------------------------------
#SangerTCellSPLEEN
getGSsanger <- function() {
# Returns the gating-strategy population labels for the Sanger T-cell
# SPLEEN panel. Labels are written into fixed slots of a vector
# (presumably node indices in the gating hierarchy -- confirm against the
# panel definition); unused slots stay NA and are dropped at the end, so
# the result is ordered by slot index regardless of assignment order below.
GatingStrategyPop <- NULL
# Slot 3 is intentionally an empty label.
GatingStrategyPop[3] <- (("") )
GatingStrategyPop[4] <- (("TCRb-CD8a-CD161+TCRd-") )
GatingStrategyPop[5] <- (("CD44+CD62L-TCRb-CD8a-CD161+TCRd-") )
GatingStrategyPop[6] <- (("CD44+CD62L+TCRb-CD8a-CD161+TCRd-") )
GatingStrategyPop[7] <- (("CD44+TCRb-KLRG1+CD8a-CD161+TCRd-") )
GatingStrategyPop[9] <- (("TCRb-KLRG1+GITR-TCRd+") )
GatingStrategyPop[10] <- (("CD44+CD62L-TCRb-TCRd+") )
GatingStrategyPop[11] <- (("CD44+CD62L+TCRb-TCRd+") )
GatingStrategyPop[12] <- (("TCRb+CD161+CD4-TCRd-") )
GatingStrategyPop[14] <- (("CD44+CD62L-TCRb+CD161+CD4-TCRd-") )
GatingStrategyPop[15] <- (("CD44+CD62L+TCRb+CD161+CD4-TCRd-") )
GatingStrategyPop[16] <- (("TCRb+KLRG1+CD161+CD4-TCRd-") )
# Note: slots are not assigned in ascending order (e.g. 13 after 16);
# this is harmless because placement is by index, not by statement order.
GatingStrategyPop[13] <- (("TCRb+CD161+CD4+TCRd-") )
GatingStrategyPop[17] <- (("CD44+CD62L-TCRb+CD161+CD4+TCRd-") )
GatingStrategyPop[18] <- (("CD44+CD62L+TCRb+CD161+CD4+TCRd-") )
GatingStrategyPop[19] <- (("TCRb+KLRG1+CD161+CD4+TCRd-") )
GatingStrategyPop[24] <- (("TCRb+CD8a-CD161-CD4+TCRd-") )
GatingStrategyPop[20] <- (("CD25+TCRb+CD8a-CD161-CD4+GITR+TCRd-") )
GatingStrategyPop[21] <- (("CD44+CD62L-CD25+TCRb+CD8a-CD161-CD4+GITR+TCRd-") )
GatingStrategyPop[22] <- (("CD44+CD62L+CD25+TCRb+CD8a-CD161-CD4+GITR+TCRd-") )
GatingStrategyPop[23] <- (("CD44+CD25+TCRb+KLRG1+CD8a-CD161-CD4+GITR+TCRd-") )
GatingStrategyPop[26] <- (("CD25-TCRb+CD8a-CD161-CD4+TCRd-") )
GatingStrategyPop[27] <- (("CD44+CD62L-CD25-TCRb+CD8a-CD161-CD4+TCRd-") )
GatingStrategyPop[28] <- (("CD62L+CD25-TCRb+CD8a-CD161-CD4+TCRd-") )
GatingStrategyPop[29] <- (("CD44+CD25-TCRb+KLRG1+CD8a-CD161-CD4+TCRd-") )
GatingStrategyPop[25] <- (("TCRb+CD8a+CD161-CD4-TCRd-") )
GatingStrategyPop[30] <- (("CD44+CD62L-TCRb+CD8a+CD161-CD4-TCRd-") )
GatingStrategyPop[31] <- (("CD44+CD62L+TCRb+CD8a+CD161-CD4-TCRd-") )
GatingStrategyPop[32] <- (("CD44+TCRb+KLRG1+CD8a+CD161-CD4-TCRd-") )
GatingStrategyPop[34] <- (("TCRb+CD161+TCRd-") )
GatingStrategyPop[35] <- (("TCRb-KLRG1-GITR+TCRd+") )
GatingStrategyPop[36] <- (("CD44-CD62L+TCRb-TCRd+") )
GatingStrategyPop[38] <- (("CD44-CD62L+TCRb+CD8a+CD161-CD4-TCRd-") )
GatingStrategyPop[39] <- (("TCRb+CD161-TCRd-") )
# Drop the unused (NA) slots, then normalize marker names: any TCRb+/-
# term is removed entirely, CD8a becomes CD8, and KLRG1 becomes KLRG --
# presumably to match the naming used elsewhere in this file (confirm
# against the marker list consumed by getPhenIndex()).
GatingStrategyPop <- GatingStrategyPop[-which(is.na(GatingStrategyPop))]
GatingStrategyPop <- gsub("TCRb[-+]","",GatingStrategyPop)
GatingStrategyPop <- gsub("CD8a","CD8",GatingStrategyPop)
GatingStrategyPop <- gsub("KLRG1","KLRG",GatingStrategyPop)
return (GatingStrategyPop)
}
getPhenIndex <- function(phenName, phenoMeta, markers) {
  # Find the row index of a phenotype in phenoMeta$phenotype.
  # If the literal name is not present, rebuild it with its markers sorted
  # into the order given by `markers` (phenoMeta stores names in canonical
  # marker order) and look it up again.
  hit <- which(phenoMeta$phenotype == phenName)
  if (length(hit) > 0) return(hit)
  # Split e.g. "CD8-CD4+" into marker labels ("CD8", "CD4") and their
  # trailing signs ("-", "+"): labels are what remains after removing the
  # sign characters; signs are what remains after removing alphanumerics.
  labels <- strsplit(phenName, "[-+]")[[1]]
  signs <- strsplit(phenName, "[0-9A-z]")[[1]]
  signs <- signs[signs != ""]
  # Position of each label within the canonical marker list.
  pos <- NULL
  for (j in seq_along(labels)) {
    pos[j] <- which(markers == labels[j])
  }
  ord <- order(pos)
  # Reassemble label+sign pairs in canonical order and retry the lookup.
  canonical <- paste(labels[ord], signs[ord], sep = "", collapse = "")
  which(phenoMeta$phenotype == canonical)
}
sortByDate <- function(phenData) {
  # Order sample rows chronologically by their `date` column, breaking ties
  # with the numeric part of the "L<number>" barcode embedded in fileName.
  # Returns the reordered data frame.
  require(lubridate)
  require(stringr)  # bug fix: str_extract() was used without stringr loaded
  phenData$date <- ymd(phenData$date)
  # "L123..." -> 123; two-step extraction keeps only the digits.
  barcodes <- str_extract(phenData$fileName, "L[0-9]+")
  barcodes <- as.numeric(str_extract(barcodes, "[0-9]+"))
  timeOrder <- order(phenData$date, barcodes)
  # (removed: a discarded `all(order(barcodes) == 1:length(barcodes))`
  # expression whose result was never used)
  phenData[timeOrder, ]
}
#008_Dist_plot --------------------------------------------------
plotTsne <- function(x, continuous = FALSE, main, colBarWidth = .08,
                     colBarTitle = "", leg = TRUE, text = FALSE) {
  # Scatter plot of a 2-column embedding (e.g. t-SNE coordinates) whose
  # rownames encode the grouping used for coloring.
  #
  # continuous: color points on a continuous heat scale (with legend.col)
  #   instead of discrete rainbow colors per group.
  # leg: draw a legend (discrete mode only).
  # text: label points with their rownames instead of drawing symbols.
  # NOTE(review): colBarWidth and colBarTitle are accepted but never used.
  #
  # Idiom fix: defaults previously used T/F (reassignable) -- now
  # TRUE/FALSE; `plot(x, t='n')` relied on partial argument matching.
  require(lubridate)
  x1 <- rownames(x)
  # NOTE(review): rownames() always returns character, so is.Date() /
  # is.timepoint() are always FALSE here -- this branch appears to expect
  # date-classed labels; confirm the intended input.
  if (is.Date(x1) | is.timepoint(x1)) {
    x1 <- as.numeric(x1 - min(x1))
  }
  x <- x[order(x1), ]
  x1 <- sort(x1)
  if (continuous) {
    ts <- as.numeric(factor(x1))
    colour <- heat.colors(length(unique(ts)))
    plot(x, col = colour[ts], pch = 20, main = main)
    legend.col(col = colour, lev = ts)
  } else {
    # Discrete mode: cycle through up to c1 colors crossed with 25 symbols.
    c1 <- 7
    if (length(unique(x1)) / 7 > 25) {
      c1 <- ceiling(length(unique(x1)) / 25)
    }
    colour <- rainbow(c1)
    cp <- expand.grid(c(1:c1), c(20, 1:19, 21:25))
    plot(x, type = "n", main = main)
    if (text) {
      text(x, rownames(x))
    } else {
      for (g in 1:length(unique(x1))) {
        points(x[which(x1 %in% unique(x1)[g]), ],
               col = colour[cp[g %% nrow(cp), 1]], pch = cp[g %% nrow(cp), 2])
      }
    }
    # NOTE(review): `g` is the loop index from the branch above; with
    # text = TRUE it is undefined, and g %% nrow(cp) hits row 0 when
    # g == nrow(cp) -- confirm before relying on the legend here.
    if (leg) legend("topleft", legend = unique(x1),
                    fill = colour[rep(c(1:c1), g)],
                    pch = rep(c(20, 1:19, 21:25), each = c1)[c(1:g)])
  }
}
|
973d803a2bd017eccef9f7abf36b7b9e013997b9
|
0e91d04155de7807539d287d1cdbde3f304bb470
|
/plot4.R
|
7031ffd0a138208b8927a2bdcf9b9719be7775f3
|
[] |
no_license
|
Fall-From-Grace/ExData_Plotting1
|
a5ac2b51a24256d26d666e2ef6cd755a70d12bc9
|
f3a12980883d1c8ece229f2c99d75f5556cdb5d2
|
refs/heads/master
| 2020-12-28T20:19:28.637092
| 2014-05-10T16:23:28
| 2014-05-10T16:23:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,594
|
r
|
plot4.R
|
# Author: Fall-From-Grace
# Date: 10 May 2014
# Exploratory Data Analysis
# Read the full dataset, then keep only the two days of interest (1-2 Feb 2007).
powerData <- read.csv("household_power_consumption.txt", sep = ";",
                      stringsAsFactors = FALSE)
powerDataFeb <- subset(powerData,
                       powerData$Date == "1/2/2007" | powerData$Date == "2/2/2007")
# Parse date+time once instead of repeating the identical strptime(paste(...))
# call in every panel below.
datetime <- strptime(paste(powerDataFeb$Date, powerDataFeb$Time),
                     "%d/%m/%Y %H:%M:%S")
# Make the 2x2 panel of graphs
png(filename = "plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2))
# Top left: global active power over time
plot(datetime, as.numeric(powerDataFeb$Global_active_power), type = "l",
     ylab = "Global Active Power", xlab = "")
# Top right: voltage over time
plot(datetime, as.numeric(powerDataFeb$Voltage), type = "l",
     ylab = "Voltage", xlab = "datetime")
# Bottom left: the three sub-metering series on one panel
plot(datetime, as.numeric(powerDataFeb$Sub_metering_1), type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(datetime, as.numeric(powerDataFeb$Sub_metering_2), type = "l", col = "red")
lines(datetime, as.numeric(powerDataFeb$Sub_metering_3), type = "l", col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), lwd = c(2.5, 2.5, 2.5),
       col = c("black", "red", "blue"), bty = "n")
# Bottom right: global reactive power over time
plot(datetime, as.numeric(powerDataFeb$Global_reactive_power), type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
|
40f3c2677360f9abcf5d0aa23a706d64fcc1c822
|
98dc55859093633e1ad5aaed6a16d8513ce0aa80
|
/fixing_Biolsums_RS.R
|
47d8860af5c17e02b9aed4b32e84f1454f189b78
|
[] |
no_license
|
reidsteele2/Fixed-Stations-BioChem-Reload
|
7ca1fbf637f92802a22f9c4c5ba4134635291d5f
|
912931734402745c8fa4c09136162b27cefcb65b
|
refs/heads/master
| 2022-12-03T16:31:51.005131
| 2020-08-19T13:09:49
| 2020-08-19T13:09:49
| 288,531,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,444
|
r
|
fixing_Biolsums_RS.R
|
# fixing the biolsums
# created by Claudette Landry for the Biochem reboot project
# July 2019
# Edited by Reid Steele, August 11 2020
# Contains code to write to the shared drive or a local directory
#
# For each original BiolSum workbook: read the BIOLSUMS_FOR_RELOAD sheet,
# normalize headers and Excel dates, drop depth-only rows, fill down
# repeated metadata columns, and write a "_edited" CSV.
# set the working directory
wd="C:/Users/steeler/Documents/Reid/BCD Creation/Fixed Station Biochem Reload/convertingtoBCDBCS/fixing biolsums"
setwd(wd)
# required packages and functions
require(zoo)
require(openxlsx)
require(tools)
require(janitor)
source('format_time_CL.R')
# set the file path for the original Biolsums
fp="../original Biolsums"
files=list.files(fp)
# Load in paths
paths=read.csv("../FS BCD BCS/fixed_station_files_RS.csv", stringsAsFactors=FALSE)
# Fix paths to work with shared drive
# NOTE(review): "R:/" plus the remaining leading "/" yields paths like
# "R://..."; Windows usually tolerates the double slash, but confirm, or
# use "R:" as the replacement string.
paths$path = gsub('//ent.dfo-mpo.ca/ATLShares', 'R:/', paths$path)
# read in and edit the biolsum
for (k in 1:length(files)) {
BiolSum=file.path(fp,files[k])
Biol <- read.xlsx(BiolSum,sheet = 'BIOLSUMS_FOR_RELOAD')
# this checks if the headers are correct or sometimes there are lines of metadata present
# and the headers are not in the first row of the file. This ensures the correct headers
# are used by re-reading from the row where "sdate" actually appears.
if('sdate' %in% names(Biol)==F){
start <- which(Biol[1]=='sdate')+1
Biol <- read.xlsx(BiolSum,sheet = 'BIOLSUMS_FOR_RELOAD',startRow = start)
}
# Excel serial number -> Date (janitor::excel_numeric_to_date)
Biol$sdate<-as.numeric(Biol$sdate)
Biol$sdate <- excel_numeric_to_date(Biol$sdate)
# removes all rows without bottle id which gets rid of the repeating depth rows
rem <- which(is.na(Biol$id))
if(length(rem)>0){
Biol <- Biol[-rem,]
}
# fills out the date, time and vessel columns using the most recent date or time in the column
# (zoo::na.locf = "last observation carried forward")
Biol$sdate <- na.locf(Biol$sdate)
if(is.null(Biol$stime)==F){
Biol$stime <- na.locf(Biol$stime)
if((class(Biol$stime)=='numeric')==F){
Biol$stime <- as.numeric(Biol$stime)
}
Biol$stime <- format_time(Biol$stime)
}
if(is.null(Biol$vessel)==F){
Biol$vessel <- na.locf(Biol$vessel)
}
Biol$sdate <- format(Biol$sdate,format='%d-%b-%Y')
print(head(Biol[c(1,2,3,4)]))
g <- paste0('../corrected biolsums/',substr(files[k],start = 1,stop = nchar(files[k])-5),'_edited.csv')
# Local Write
# write.csv(Biol,file=g,row.names = F)
# Shared write
# NOTE(review): assumes files[k] and paths$path[k] line up row-for-row;
# confirm both listings are in the same order.
write.csv(Biol,file=paste0(paths$path[k], '/', substr(files[k],start = 1,stop = nchar(files[k])-5), '_edited.csv'),row.names = F)
}
# NOTE(review): rm(list = ls()) wipes the entire global environment of
# whoever sources this script -- consider removing.
rm(list=ls())
|
d4b6c7dc4655db2309ed1fd490c133799bc14873
|
ed640b2eab34ddbde1435b83aa29d49d2c01422d
|
/man/spearmanRho.Rd
|
4a51843938a18e0189b6c455b4de2d072e6844e2
|
[] |
no_license
|
cran/rcompanion
|
4cf285cf6d43197e55df85de86d23904f9418c37
|
dea4b790b5d78fe350ff303e5c04603c7e672ae1
|
refs/heads/master
| 2023-05-12T14:48:28.937161
| 2023-05-05T07:20:05
| 2023-05-05T07:20:05
| 67,362,460
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,841
|
rd
|
spearmanRho.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spearmanRho.r
\name{spearmanRho}
\alias{spearmanRho}
\title{Spearman's rho, Kendall's tau, Pearson's r}
\usage{
spearmanRho(
formula = NULL,
data = NULL,
x = NULL,
y = NULL,
method = "spearman",
ci = FALSE,
conf = 0.95,
type = "perc",
R = 1000,
histogram = FALSE,
digits = 3,
reportIncomplete = FALSE,
...
)
}
\arguments{
\item{formula}{A formula indicating the two paired variables,
e.g. \code{~ x + y}. The variables should be
vectors of the same length.}
\item{data}{The data frame to use.}
\item{x}{If no formula is given, the values for one variable.}
\item{y}{The values for the other variable.}
\item{method}{One of "spearman", "kendall", or "pearson".
Passed to \code{cor}.}
\item{ci}{If \code{TRUE}, returns confidence intervals by bootstrap.
May be slow.}
\item{conf}{The level for the confidence interval.}
\item{type}{The type of confidence interval to use.
Can be any of "\code{norm}", "\code{basic}",
"\code{perc}", or "\code{bca}".
Passed to \code{boot.ci}.}
\item{R}{The number of replications to use for bootstrap.}
\item{histogram}{If \code{TRUE}, produces a histogram of bootstrapped values.}
\item{digits}{The number of significant digits in the output.}
\item{reportIncomplete}{If \code{FALSE} (the default),
\code{NA} will be reported in cases where there
are instances of the calculation of the statistic
failing during the bootstrap procedure.}
\item{...}{Additional arguments passed to the \code{cor} function.}
}
\value{
A single statistic, rho, tau, or r.
Or a small data frame consisting of rho, tau, or r,
and the lower and upper confidence limits.
}
\description{
Calculates Spearman's rho, Kendall's tau, or Pearson's r
with confidence intervals by bootstrap
}
\details{
This function is a wrapper for \code{stats::cor}
with the addition of confidence intervals.
The input should include either \code{formula} and \code{data};
or \code{x}, and \code{y}.
Currently, the function makes no provisions for \code{NA}
values in the data. It is recommended that \code{NA}s be removed
beforehand.
When the returned statistic is close to -1 or close to 1,
or with small sample size,
the confidence intervals
determined by this
method may not be reliable, or the procedure may fail.
}
\examples{
data(Catbus)
spearmanRho( ~ Steps + Rating, data=Catbus)
}
\references{
\url{http://rcompanion.org/handbook/I_10.html}
}
\author{
Salvatore Mangiafico, \email{mangiafico@njaes.rutgers.edu}
}
\concept{Kendall's tau}
\concept{Pearson's r}
\concept{Spearman's rho}
\concept{confidence interval}
\concept{correlation}
\concept{effect size}
|
9e82e390bcd30ba2386e8f70e3ff87f279eaab27
|
3e862d725839f36810542aa1996a81fa8b64e789
|
/analysis/utils/test/fu_vax_detect_issue.R
|
71908b3144fbec367bc768d8deda6d14c4bf6474
|
[
"MIT"
] |
permissive
|
opensafely/covid_mortality_over_time
|
7ca3bd46d1a55521958f75796d50b13ed729b326
|
775d322511d818ad881e82b31ad50981f7acb783
|
refs/heads/main
| 2023-08-23T10:42:16.774354
| 2023-05-16T19:19:59
| 2023-05-16T19:19:59
| 454,708,317
| 2
| 0
|
MIT
| 2023-05-11T14:25:31
| 2022-02-02T09:16:07
|
R
|
UTF-8
|
R
| false
| false
| 1,480
|
r
|
fu_vax_detect_issue.R
|
# Detect rows with negative vaccination follow-up time and write a small
# random sample of them out for inspection.
# Load libraries & custom functions ---
library(here)
library(dplyr)
library(readr)
library(purrr)
library(stringr)
utils_dir <- here("analysis", "utils")
source(paste0(utils_dir, "/extract_data.R")) # function extract_data()
source(paste0(utils_dir, "/add_kidney_vars_to_data.R")) # function add_kidney_vars_to_data()
source(paste0(utils_dir, "/process_data.R")) # function process_data()
# Load json config for dates of waves
# NOTE(review): fromJSON() is from jsonlite, which is not loaded above --
# presumably attached by one of the sourced helpers; confirm.
config <- fromJSON(here("analysis", "config.json"))
# Import data extracts of waves ---
# First CLI argument selects the wave; defaults to wave1 when run
# interactively with no arguments.
args <- commandArgs(trailingOnly=TRUE)
if(length(args)==0){
# use for interactive testing
wave <- "wave1"
} else {
wave <- args[[1]]
}
# Load data ---
## Search input files by globbing
input_files <-
Sys.glob(here("output", "processed", "input_wave*.rds"))
# vector with waves
input_file_wave <- input_files[str_detect(input_files, wave)]
# select people with negative fu
data <-
read_rds(input_file_wave)
data <-
data %>%
filter(fu_vax_0 < 0 |
fu_vax_1 < 0 |
fu_vax_2 < 0 |
fu_vax_3 < 0 |
fu_vax_4 < 0 |
fu_vax_6 < 0) %>%
select(starts_with("fu_vax"),
starts_with("start_vax_dose"),
start_date_wave,
fu)
data %>% nrow() %>% print()
fs::dir_create(here("output", "data_properties", "detect_issues"))
# Write at most 5 randomly sampled problem rows.
# NOTE(review): with 1-5 problem rows, size evaluates to 0 and an *empty*
# sample is written -- confirm that is intended rather than writing all rows.
# write_csv() returns its input invisibly, so `data` ends up holding the
# sampled rows.
data <-
data[sample(1:nrow(data), size = ifelse(nrow(data) > 5, 5, 0)), ] %>%
write_csv(here("output", "data_properties", "detect_issues", "fu_neg.csv"))
|
c14ad4e41faa58d5dc79cc73253308a50568adb0
|
31fd055d1d7ed6bd73e85a828aa219cce9fd2f13
|
/man/DissRef.Rd
|
916cf81fbf44c5bb07f257c56e6ff5c9c2f350ce
|
[] |
no_license
|
RenaudJau/Renaudpack2
|
c86fa17b146ae32e5f0024de6b914d9f587235ef
|
cd2046e287e0aa8bc0fa4f5a01aeddb24fd14f8f
|
refs/heads/master
| 2021-01-25T12:19:34.854279
| 2020-12-07T10:19:37
| 2020-12-07T10:19:37
| 83,016,087
| 0
| 0
| null | 2017-02-24T08:02:08
| 2017-02-24T07:52:46
| null |
UTF-8
|
R
| false
| true
| 1,022
|
rd
|
DissRef.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ScriptFonctionsSourcesVpack.r
\name{DissRef}
\alias{DissRef}
\title{Dissimilarite a une reference}
\usage{
DissRef(RELEVES, ETENDUEREL, ETENDUEREF, method, binary = T)
}
\arguments{
\item{RELEVES}{tableau de releves especes en colonnes, releves en lignes}
\item{ETENDUEREL}{etendues des releves sur lesquels le calcul sera fait (exemple 1:9 OU 3:11)}
\item{ETENDUEREF}{etendues des releves de reference (exemple 1:3)}
\item{method}{methode a utiliser cf l'aide de \code{\link{vegdist}}}
\item{binary}{T ou F selon que l'on travaille en presence absence ou non (T par defaut)}
}
\value{
Renvoie une liste de valeurs, une par releves chaque valeur correspond a la moyenne des indices qui ont ete calcules entre ce releves et tous les releves de references
}
\description{
Calcul de moyennes d'indices de similarite/dissimilarite entre des releves et plusieurs releves de references
}
\seealso{
\code{\link{vegdist}} \code{\link{raup.calc}}
}
|
6a04d698459d38b5323f5878589bffd8dbeb37b7
|
f08d0da8ea26baf502fdc7f61e53d8c8397bf29b
|
/R/hdi.R
|
cf9d2a4d8fa8d034929f29bbbc78a4418005577d
|
[] |
no_license
|
CalvinData/CalvinBayes
|
0866251b082f90a5fbbc67a9f3e020ececde4c93
|
dbae67a3c2032871fd6b65c538480ad5fee7ab7b
|
refs/heads/master
| 2023-04-09T02:12:08.502233
| 2021-04-08T21:37:31
| 2021-04-08T21:37:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,481
|
r
|
hdi.R
|
#' Compute highest density intervals
#'
#' Compute highest density intervals
#'
#' Generic function; dispatches on the class of \code{object}. See the
#' default and data.frame methods below.
#'
#' @rdname hdi
#' @export
hdi <- function(object, prob = 0.95, pars = NULL, regex_pars, ...) {
  UseMethod("hdi")
}
#' @param object an object describing a posterior distribution
#' @param prob the desired mass of the HDI region.
#' @param pars a vector of parameter names
#' @param regex_pars a regular expression for selecting parameter names
#' @param ... additional arguments (ignored for now)
#' @return a data frame with 1 row per paramter and variables
#' * `lo` lower end of hdi
#' * `hi` higher end of hdi
#' * `prob` is the total probability
#' @examples
#' # Determining HDI of a beta(30,12) distribution
#' # Create a grid
#' Grid <-
#' expand.grid(x = seq(0, 1, length.out = 201)) %>%
#' mutate(posterior = dbeta(x, 30, 12))
#' hdi_from_grid(Grid, "x", prob = 0.9)
#' Grid %>% dplyr::slice_sample(n = 1E4, weight_by = posterior, replace = TRUE) %>%
#' pull(x) %>%
#' hdi(prob = 0.9)
#' x <- rnorm(25, 3, 2) # some random data
#' Grid <-
#' expand.grid(
#' mean = seq(0, 10, length.out = 101),
#' sd = seq(0, 10, length.out = 101)
#' ) %>%
#' mutate(
#' prior = 1,
#' loglik =
#' purrr::map2_dbl(mean, sd, ~ sum(dnorm(x, .x, .y, log = TRUE)), x = x),
#' likelihood = exp(loglik),
#' posterior = prior * likelihood
#' )
#' hdi_from_grid(Grid, pars = c("mean", "sd"))
#'
#' @rdname hdi
#' @importFrom coda as.mcmc HPDinterval
#' @export
hdi.default <-
  function(object, prob = 0.95, pars = NULL, regex_pars = NULL, ...) {
    # HPD interval(s) via coda. A very narrow (0.5%) HPD interval is also
    # computed; its midpoint serves as an approximation of the posterior
    # mode (see convert_to_df()).
    res <- coda::HPDinterval(coda::as.mcmc(object), prob = prob)
    map <- coda::HPDinterval(coda::as.mcmc(object), prob = 0.005)
    if (is.list(res)) {
      # Multi-chain input: HPDinterval() returns one matrix per chain, and
      # `map` is a parallel list.
      for (i in 1:length(res)) {
        res[[i]] <-
          convert_to_df(res[[i]], pars = pars, regex_pars = regex_pars,
                        # bug fix: prob was not forwarded, so the reported
                        # prob column was always the 0.95 default
                        prob = prob,
                        # bug fix: pass this chain's matrix, not the whole
                        # list (a list fails convert_to_df's is.matrix check,
                        # silently dropping the mode column)
                        map = map[[i]]) %>%
          mutate(chain = i)
      }
      bind_rows(res) %>%
        arrange(par, chain)
    } else {
      convert_to_df(res, pars = pars, regex_pars = regex_pars, prob = prob,
                    map = map)
    }
  }
#' @export
hdi.data.frame <-
  function(object, prob = 0.95, pars = NULL, regex_pars = NULL, ...) {
    # `pars` may be a formula: `~ x` adds a column named "x" computed from
    # the data; `y ~ x` names the computed column "y". That new column then
    # becomes the (single) parameter of interest.
    if (inherits(pars, "formula")) {
      l <- length(pars)
      name <- if (is.character(pars[[2]])) {
        pars[[2]]
      } else {
        deparse(pars[[2]])
      }
      object[[name]] <- eval(pars[[l]], object, parent.frame())
      pars <- tail(names(object), 1)   # the column just added
    }
    # Keep only numeric columns. vapply() (not sapply()) guarantees a
    # logical vector even for a zero-column data frame.
    object <- object[vapply(object, is.numeric, logical(1))]
    hdi.default(object, prob = prob, pars = pars, regex_pars = regex_pars, ...)
  }
convert_to_df <- function(object, prob = 0.95, pars = NULL, regex_pars = NULL,
                          map = NA, ...) {
  # Turn an HPDinterval() matrix into a tidy data frame (par / lo / hi /
  # [mode] / prob), optionally adding an approximate mode as the midpoint
  # of a very narrow HPD interval supplied in `map`, then restrict rows to
  # the requested parameters.
  if (is.matrix(map)) {
    res <-
      data.frame(
        par = row.names(object),
        lo = object[, 1],
        hi = object[, 2],
        # midpoint of the narrow interval approximates the mode
        mode = (map[, 1] + map[, 2]) / 2,
        prob = prob
      )
  } else {
    res <-
      data.frame(
        par = row.names(object),
        lo = object[, 1],
        hi = object[, 2],
        prob = prob
      )
  }
  # Drop the mode column entirely if it carries no information.
  if ("mode" %in% names(res) && all(is.na(res$mode))) {
    res[["mode"]] <- NULL
  }
  row.names(res) <- NULL
  # Row selection: both selectors combine with OR; either alone filters;
  # neither returns everything.
  have_pars <- !is.null(pars)
  have_regex <- !is.null(regex_pars)
  if (have_pars && have_regex) {
    res <- res %>% filter(par %in% pars | grepl(regex_pars, par))
  } else if (have_pars) {
    res <- res %>% filter(par %in% pars)
  } else if (have_regex) {
    res <- res %>% filter(grepl(regex_pars, par))
  }
  res
}
#' @rdname hdi
#' @export
hdi_from_grid <-
  function(object, pars = NULL,
           prob = 0.95, posterior = "posterior") {
    # Compute an HDI for each parameter in `pars` from a grid
    # approximation: `object` has one row per grid point and a column
    # (named by `posterior`) of unnormalized posterior mass.
    if (is.null(pars)) {
      pars <- names(object)[1]
      warning("No parameters specified. Using ", pars)
    }
    if (! posterior %in% names(object)) {
      stop(paste0("No column named ", posterior, " in object"))
    }
    object$posterior <- object[[posterior]]
    dplyr::bind_rows(
      lapply(
        pars,
        function(p) {
          FOO <-
            object %>%
            # collapse to parameter of interest
            # (marginalizes over the other grid dimensions)
            dplyr::group_by_at(vars(p)) %>%
            dplyr::summarise(posterior = sum(posterior)) %>%
            # standardize names
            setNames(c("theta", "posterior")) %>%
            # compute resolution (difference in parameter values in grid)
            # for use in converting form probability to density scale at the end
            mutate(resolution = mean(diff(sort(theta)))) %>%
            mutate(posterior = posterior / sum(posterior)) %>%
            arrange(posterior) %>% # sort by posterior
            mutate(
              cum_posterior = cumsum(posterior) # cumulative posterior
            ) %>%
            # keep the highest-density points whose total mass is >= prob
            filter(
              cum_posterior >= 1 - prob, # keep highest cum_posterior
            )
          FOO %>%
            summarise( # summarise what's left
              param = p,
              # NOTE: lo/hi assume the retained high-density region is
              # contiguous; a multimodal posterior can make [lo, hi] wider
              # than the true HDI.
              lo = min(theta),
              hi = max(theta),
              prob = sum(posterior),
              height = min(posterior) / first(resolution), # density scale
              mode_height = last(posterior) / first(resolution), # density scale
              mode = last(theta),  # rows are sorted ascending by posterior,
                                   # so last() is the highest-density point
            )
        }
      )
    )
  }
|
9e9467d0a87bf2151bc21024799c51e5a906ef68
|
07e03448e11d85e71aaf90b226cf7741c1679b89
|
/Settin up 3 day weather windows.R
|
1a356c3bab5e96ae02949c8f136605d4db6d7ff0
|
[] |
no_license
|
11arc4/Weather-related-mortality-and-growth
|
53faa3f6db828971b9bb177fc26e685acf17b685
|
e13744ba9ec27cdf38695fc72a37893b09e87679
|
refs/heads/master
| 2020-03-08T10:16:37.580421
| 2018-10-30T00:25:20
| 2018-10-30T00:25:20
| 128,068,909
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,612
|
r
|
Settin up 3 day weather windows.R
|
#Set up weather data for a 3 day window prior to when we measured the nestlings
#We have picked a 3 day window based on Winkler et al.
#Load in the data
weather <- read.csv("C:/Users/11arc/Dropbox/Kennedy Everitt Honors Thesis/Daily weather data 2017.csv")
#Want only weather for days between 145 and 198
weather <- weather[weather$JulianDate>144 & weather$JulianDate<195,]
names(weather)[5:11] <- c("MaxTemp", "MinTemp", "MeanTemp", "HeatDegDays", "CoolDegDays", "TotalRain", "TotalPrecip")
#weatherVar2 <- weather[,c(5:7, 10, 12:14)]
#It's better to drop min and max windspeed so that the loadings for total rain
#is better (before it's not on the top 2 PC because there were more wind and temp variables than rain)
weatherVar2 <- weather[,c(5:7, 10, 12)]
# Add a binary rain indicator (requires dplyr/ggplot2 to already be
# attached -- neither is loaded in this script).
weatherVar2 <- weatherVar2 %>% mutate(Rain= ifelse(TotalRain>0, 1, 0))
ggplot(weatherVar2, aes(x=as.factor(Rain), y=meanwindspeed))+
geom_boxplot()+
geom_point()
# PCA on the 6 retained weather variables (scaled and centered).
weather.pca <- prcomp(weatherVar2,
center=T,
scale=T)
plot(weather.pca, type="lines")
summary(weather.pca)
cor(weatherVar2)
#By using weather PCs 1 and 2 we can capture 72% of the weather variation. We will use those 2.
saveRDS(weather.pca, file="~/Masters Thesis Project/Weather determined growth and mortality paper/Weather Analysis/Weather-related-mortality-and-growth/WeatherPCA.rds")
# NOTE(review): weather.pca was fit on weatherVar2 (incl. the derived
# "Rain" column), but predict() below is fed weather[,c(5:7, 10, 12:14)],
# which has no "Rain" column -- predict.prcomp selects columns by the
# rotation's variable names, so this looks like it should fail or be
# mismatched. Confirm the intended column set.
weather$PC1 <- predict(weather.pca, weather[,c(5:7, 10, 12:14)] )[,1]
weather$PC2 <- predict(weather.pca, weather[,c(5:7, 10, 12:14)] )[,2]
#Add in 3 day window averages for temperature and windspeed, and total rainfall
dat <- read.csv("file:///C:/Users/11arc/Documents/Masters Thesis Project/Weather determined growth and mortality paper/Weather Analysis/Nestling and Weather Data.csv", as.is=T)[,-c(20:29)]
dat <- cbind(dat, matrix(nrow=nrow(dat), ncol=6, NA))
names(dat)[20:25] <- c("MaxTemp3day", "MeanTemp3day", "MeanWindspeed3day", "TotalRainFall3day", "PC13day", "PC23day")
# For each nestling measurement, summarize the 3 days preceding the
# measurement date (window is (Date-3, Date], exclusive of Date itself).
for(i in 1:nrow(dat)){
r <- which(dat$Date[i]> weather$JulianDate & dat$Date[i]-3<=weather$JulianDate)
dat$MaxTemp3day[i] <- mean(weather$MaxTemp[r])
dat$MeanTemp3day[i] <- mean(weather$MeanTemp[r])
dat$MeanWindspeed3day[i] <- mean(weather$meanwindspeed[r])
dat$TotalRainFall3day[i] <- sum(weather$TotalRain[r])
dat$PC13day[i] <- mean(weather$PC1[r])
dat$PC23day[i] <- mean(weather$PC2[r])
}
survdat <- read.csv("C:/Users/11arc/Dropbox/Kennedy Everitt Honors Thesis/Statistical Models/Survival Models/Nestling Survival Data.csv", as.is=T)[,-c(11:20)] #already excluding AG
survdat <- cbind(survdat, matrix(nrow=nrow(survdat), ncol=6, NA))
names(survdat)[12:17] <- c("MaxTemp3day", "MeanTemp3day", "MeanWindspeed3day", "TotalRainFall3day", "PC13day", "PC23day")
# Same 3-day-window summaries, keyed on the survival interval end (Time2).
for(i in 1:nrow(survdat)){
r <- which(survdat$Time2[i]> weather$JulianDate & survdat$Time2[i]-3<=weather$JulianDate)
survdat$MaxTemp3day[i] <- mean(weather$MaxTemp[r])
survdat$MeanTemp3day[i] <- mean(weather$MeanTemp[r])
survdat$MeanWindspeed3day[i] <- mean(weather$meanwindspeed[r])
survdat$TotalRainFall3day[i] <- sum(weather$TotalRain[r])
survdat$PC13day[i] <- mean(weather$PC1[r])
survdat$PC23day[i] <- mean(weather$PC2[r])
}
write.csv(dat,"file:///C:/Users/11arc/Documents/Masters Thesis Project/Weather determined growth and mortality paper/Weather Analysis/Nestling mass and Weather Data 3day.csv" , row.names = F, na="")
write.csv(survdat,"file:///C:/Users/11arc/Documents/Masters Thesis Project/Weather determined growth and mortality paper/Weather Analysis/Nestling survival and Weather Data 3day.csv" , row.names = F, na="")
|
1ea1deaad68bde14fcdf6bbe8d730f037328594e
|
ad899a8ef877d7d812156171f2b304c187d1e586
|
/vizGrimoireJS/mediawiki-analysis.R
|
aa7175f5e28881de290642e9ef49383c6077e0cf
|
[] |
no_license
|
rodrigoprimo/VizGrimoireR
|
e6c6707e209468bf0382de89be1ec22a3a531133
|
67badc39e37f8abf0e03de59329acfdcf40b9e31
|
refs/heads/master
| 2021-01-21T19:01:46.409055
| 2015-07-23T14:23:47
| 2015-07-23T14:23:47
| 39,571,080
| 0
| 0
| null | 2015-07-23T14:12:16
| 2015-07-23T14:12:15
| null |
UTF-8
|
R
| false
| false
| 4,500
|
r
|
mediawiki-analysis.R
|
## Copyright (C) 2012, 2013 Bitergia
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##
## This file is a part of the vizGrimoire.R package
##
## Authors:
## Alvaro del Castillo <acs@bitergia.com>
##
##
## Usage:
## R --vanilla --args -d dbname < mediawiki-analysis.R
# Generate the JSON data files (static, evolutionary, top authors, and
# per-person) for the MediaWiki panel of a vizGrimoire dashboard.
library("vizgrimoire")
library("ISOweek")
options(stringsAsFactors = FALSE) # avoid merge factors for toJSON
# Read CLI options and open the MySQL channel used by all Get* helpers.
conf <- ConfFromOptParse()
SetDBChannel (database = conf$database, user = conf$dbuser, password = conf$dbpassword)
# Map the requested granularity onto a period label and its approximate
# length in days (nperiod is used by the Diff* trend helpers).
if (conf$granularity == 'years') {
period = 'year'
nperiod = 365
} else if (conf$granularity == 'months') {
period = 'month'
nperiod = 31
} else if (conf$granularity == 'weeks') {
period = 'week'
nperiod = 7
} else if (conf$granularity == 'days'){
period = 'day'
nperiod = 1
} else {stop(paste("Incorrect period:",conf$granularity))}
# destination directory
destdir <- conf$destination
# multireport
reports=strsplit(conf$reports,",",fixed=TRUE)[[1]]
# BOTS filtered
bots = c('wikibugs','gerrit-wm','wikibugs_','wm-bot','')
#############
# STATIC DATA
#############
# Tendencies
# Trend deltas over the last year / month / week, merged into one record.
diffsent.365 = GetMediaWikiDiffReviewsDays(period, conf$enddate, 365)
diffsenders.365 = GetMediaWikiDiffAuthorsDays(period, conf$enddate, conf$identities_db, 365)
diffsent.30 = GetMediaWikiDiffReviewsDays(period, conf$enddate, 30)
diffsenders.30 = GetMediaWikiDiffAuthorsDays(period, conf$enddate, conf$identities_db, 30)
diffsent.7 = GetMediaWikiDiffReviewsDays(period, conf$enddate, 7)
diffsenders.7 = GetMediaWikiDiffAuthorsDays(period, conf$enddate, conf$identities_db, 7)
static.data = GetStaticDataMediaWiki(period, conf$startdate, conf$enddate, conf$identities_db)
static.data = merge(static.data, diffsent.365)
static.data = merge(static.data, diffsent.30)
static.data = merge(static.data, diffsent.7)
static.data = merge(static.data, diffsenders.365)
static.data = merge(static.data, diffsenders.30)
static.data = merge(static.data, diffsenders.7)
createJSON (static.data, paste(destdir,"/mediawiki-static.json", sep=''))
###################
# EVOLUTIONARY DATA
###################
# Time series at the chosen granularity, padded to a complete period grid.
evol_data = GetEvolDataMediaWiki(period, conf$startdate, conf$enddate, conf$identities_db)
evol_data <- completePeriodIds(evol_data, conf$granularity, conf)
createJSON (evol_data, paste(destdir,"/mediawiki-evolutionary.json", sep=''))
#######
# TOPS
#######
# Top authors overall, last year, and last month (bots excluded).
top_authors <- list()
top_authors[['authors.']] <- GetTopAuthorsMediaWiki(0, conf$startdate, conf$enddate, conf$identities_db, bots, conf$npeople)
top_authors[['authors.last year']]<- GetTopAuthorsMediaWiki(365, conf$startdate, conf$enddate, conf$identities_db, bots, conf$npeople)
top_authors[['authors.last month']]<- GetTopAuthorsMediaWiki(31, conf$startdate, conf$enddate, conf$identities_db, bots, conf$npeople)
createJSON (top_authors, paste(destdir,"/mediawiki-top.json", sep=''))
###################
# PEOPLE
###################
# Per-person JSON files, only when the "people" report was requested:
# one evolutionary and one static file per unique top author.
if ('people' %in% reports){
all.top.authors <- top_authors[['authors.']]$id
all.top.authors <- append(all.top.authors, top_authors[['authors.last year']]$id)
all.top.authors <- append(all.top.authors, top_authors[['authors.last month']]$id)
all.top.authors <- unique(all.top.authors)
createJSON(all.top.authors, paste(destdir,"/mediawiki-people.json",sep=''))
for (upeople_id in all.top.authors){
evol = GetEvolPeopleMediaWiki(upeople_id, period, conf$startdate, conf$enddate)
evol <- completePeriodIds(evol, conf$granularity, conf)
evol[is.na(evol)] <- 0
createJSON(evol, paste(destdir,"/people-",upeople_id,"-mediawiki-evolutionary.json", sep=''))
static <- GetStaticPeopleMediaWiki(upeople_id, conf$startdate, conf$enddate)
createJSON(static, paste(destdir,"/people-",upeople_id,"-mediawiki-static.json", sep=''))
}
}
|
934914cbdddc0afc9b8332d9e5628cd8cb75391e
|
45186934a5fd29818ead3bb5a5a54c14332394fd
|
/patents project.R
|
21f5ddbf7951ca436ce9fa24435b9f579874608d
|
[] |
no_license
|
eceson/Collab_project
|
bb8f36ee810d6081a0664dac7280cdb9ca090aa5
|
1289348c85eed12c8a41101091278e61321467a0
|
refs/heads/main
| 2023-08-24T18:55:12.136570
| 2021-10-24T20:43:12
| 2021-10-24T20:43:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,135
|
r
|
patents project.R
|
# Exploratory analysis of patent grants: counts by country, patent types,
# and approval times for the US and India.
#Load data
# NOTE(review): setwd() in a script hurts portability; kept to preserve the
# original workflow.
setwd("C:/Users/Seizal Pathania/Downloads/Rfiles")
patents <- read.csv("patents_10.csv", header = TRUE)
head(patents)
View(patents)
#EDA
sum(table(patents$patnum))
sum(table(patents$patnum_kpss))
# Strip the design-patent "D" prefix so patent numbers are comparable.
patents$patnum_kpss <- gsub("D", "", as.character(patents$patnum))
View(patents)
sum(table(patents$ptype))
sum(table(patents$applnum))
sum(table(patents$ee_number))
sum(table(patents$ee_name))
sum(table(patents$ee_role))
sum(table(patents$ee_role_desc))
sum(table(patents$ee_state))
cor(patents$grantyear, patents$X)
anov <- aov(patents$grantyear ~ patents$ee_ind_fname, data = patents)
summary(anov)
anov <- aov(patents$grantyear ~ patents$ee_ind_lname, data = patents)
summary(anov)
# NOTE(review): `patent` is created but never used below.
patent <- patents[-c(1, 3, 10, 11)]
View(patents)
#count of patents granted by each country
library(dplyr)
patents_by_country <- patents %>% group_by(ee_country) %>% summarize(count = n())
head(patents_by_country)
View(patents_by_country)
#Visualising the Patents by countries data
pie(patents_by_country$count, labels = patents_by_country$ee_country,
    main = "Patents By Country",
    col = rainbow(length(patents_by_country$count)))
legend("topright", patents_by_country$ee_country, cex = 0.5,
       fill = rainbow(length(patents_by_country$count)))
d1 <- density(patents$backward_cites)
plot(d1, main = "Desity of Backward Cities")
d2 <- density(patents$forward_cites)
plot(d2, main = "Desity of Forward Cities")
#patents in the US count of type of patents granted by US cities
patents_us <- subset(patents, ee_country == "US")
nrow(patents_us)
View(patents_us)
Us_p <- patents_us %>% group_by(ee_city, ptype) %>% summarize(count = n())
View(Us_p)
Us_p_type <- Us_p %>% group_by(ptype) %>% summarize(count = n())
head(Us_p_type)
#Visualising the data
hist(Us_p_type$count, main = "Types of Patents in US",
     xlab = "Patent Types Count", col = "darkmagenta")
#patents in the India and count of type of patents granted by IN cities
patents_in <- subset(patents, ee_country == "IN")
nrow(patents_in)
View(patents_in)
In_p <- patents_in %>% group_by(ee_city, ptype) %>% summarize(count = n())
View(In_p)
In_p_type <- In_p %>% group_by(ptype) %>% summarize(count = n())
head(In_p_type)
#Visualising the data
hist(In_p_type$count, main = "Types of Patents in In",
     xlab = "Patent Types Count", col = "darkgreen")
#how long does it take to get granted in the US
patents_us$time_to_approval <- patents_us$grantyear - patents_us$applyear
sort(table(patents_us$time_to_approval))
# Bug fix: the aggregate results were printed but never assigned, so the
# plots below referenced undefined objects `a` and `b`.
a <- aggregate(time_to_approval ~ ee_state, data = patents_us, FUN = mean)
#Visualising the time taken to approve
hist(a$time_to_approval, main = "Time taken to get granted in Us",
     xlab = "Time", col = "red")
pie(a$time_to_approval, main = "Time taken to get granted in Us")
#how long does it take to get granted in IN
patents_in$time_to_approval <- patents_in$grantyear - patents_in$applyear
table(patents_in$time_to_approval)
b <- aggregate(time_to_approval ~ ee_city, data = patents_in, FUN = mean)
#Visualising the time taken to approve
hist(b$time_to_approval, main = "Time taken to get granted in In",
     xlab = "Time", col = "blue")
pie(b$time_to_approval, main = "Time taken to get granted in In")
#Counting how many patents are granted per type of patents in countries
table(patents$ptype)
patents_types <- patents %>% group_by(ee_country, ptype) %>% summarize(count = n())
head(patents_types)
View(patents_types)
# Count of patents by type per city in US
patents_us_count <- patents_us %>% group_by(ee_city, ptype) %>% summarize(count = n())
head(patents_us_count)
# Top 10 cities with patents in US
patents_us_10 <- patents_us_count[with(patents_us_count, order(-count)), ]
top10_patents <- patents_us_10[1:10, ]
# Of the top 10 cities in the US with patents, 5 of them are in the Bay Area
# Patent types across countries
table(patents$ptype)
patents_types <- patents %>% group_by(ee_country, ptype) %>% summarize(count = n())
head(patents_types)
patent_types_5 <- patents_types[with(patents_types, order(-count)), ]
top5_types <- patent_types_5[1:5, ]
# Top 5 countries are utility patents of US, Japan, Korea, Germany, and the design patents of US
|
9a4bbac204a1975cf6d723d2b9721410bdc7fb8e
|
36628243c050cc012243cce16d55e6d24c95b1cf
|
/tests/testthat/test-client_telegram.R
|
e7d6b60b14cd436869e4f0521b86300ef02f88fe
|
[
"MIT"
] |
permissive
|
TymekDev/sendeR
|
e5bf9ca406dd130b8003f54c00050de16fedae7a
|
32142f3ee24ad0c1b674102848e41c461a5107d0
|
refs/heads/master
| 2022-11-07T07:07:13.054088
| 2020-06-26T16:48:17
| 2020-06-26T16:48:17
| 213,371,734
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
test-client_telegram.R
|
# client_telegram() must reject any argument that is not a single
# character string, always with the same error message.
test_that("creates only with correct argument type", {
  expected_msg <- "argument has to be a character vector of length 1."
  expect_error(client_telegram(NA), expected_msg)
  expect_error(client_telegram(NULL), expected_msg)
  expect_error(client_telegram(12), expected_msg)
  expect_error(client_telegram(c("a", "b")), expected_msg)
})
|
28c000718714cc52565738f8cf394491574ca6b1
|
60ed8fb3dbd7199f2efd95857d0544f255024c51
|
/zillow_sold.R
|
f53eb88f831fd287d2bd1ea13aa2373148d7a041
|
[] |
no_license
|
hack-r/yang
|
33e990e592a79160bf73375959802570bf071a81
|
c9e9bab0c0d9b8de766a1d304e1ba2a64c46ac2a
|
refs/heads/master
| 2021-01-10T11:04:52.090264
| 2016-04-20T18:41:26
| 2016-04-20T18:41:26
| 54,159,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,259
|
r
|
zillow_sold.R
|
# Options -----------------------------------------------------------------
setwd("C://users//jmiller//Desktop//yang")
# Functions and Libraries -------------------------------------------------
pacman::p_load(data.table, rvest, stringi, sqldf, XML)
# Returns string w/o leading or trailing whitespace.
trim <- function(x) gsub("^\\s+|\\s+$", "", x)
# Extract -----------------------------------------------------------------
# From Extractor. NOTE: read.csv() converts spaces in the original headers
# to dots ("Sold price" -> "Sold.price"), so all references below use the
# dotted names. (The original code deleted `Sold date`, `Price sqft` and
# `Sold price` — nonexistent space-named columns — which were silent
# no-ops masked only by the explicit column selection further down.)
zs1 <- read.csv("zillow sold 155 clean.csv")
zs1 <- zs1[!is.na(zs1$Price.sqft), ]
# Standardize the column names to snake_case.
zs1$sold_price <- zs1$Sold.price
zs1$price_per_sqft <- zs1$Price.sqft
zs1$street_address <- zs1$Address
zs1$sold_date <- zs1$Sold.date
# Drop the original columns now that they have been copied.
zs1$Sold.date <- NULL
zs1$Address <- NULL
zs1$Price.sqft <- NULL
zs1$Sold.price <- NULL
# Constant fields not available in this extract.
zs1$city <- "see address"
zs1$state <- "CA"
zs1$facts <- "None"
# Keep only the final schema, in the crawler's column order.
zs1 <- zs1[c("url", "beds", "baths", "sqft", "sold_price", "sold_date",
             "price_per_sqft", "facts", "street_address", "city", "state", "zip")]
# From Crawler
zs2 <- fread("zillow_sold_single_page_clean.csv")
zs2$Widget <- NULL
# Merge
zs <- rbind(zs1, zs2)
# Save --------------------------------------------------------------------
write.csv(zs, "zillow_sold_combined_clean.csv", row.names = F)
saveRDS(zs, "zs.RDS")
|
d0a3bab2eaef886a53c2846ada833591adfc550f
|
ca0fb42b56f0a01b05e70b8cc8efc9fb3753bd31
|
/genotypes/qtl2/MAGICSimulations/MAGICSim_121718/rsquare.R
|
dad700390b222105a03de779c233eba5f1db5668
|
[] |
no_license
|
sarahodell/biogemma
|
873910b91d27dd539b3a8b1992de66673660c804
|
88d49a4f4974eacf01464547e5cf443191fb2f70
|
refs/heads/master
| 2022-09-22T00:48:43.637061
| 2022-08-16T23:52:21
| 2022-08-16T23:52:21
| 244,755,602
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,746
|
r
|
rsquare.R
|
#!/usr/bin/env Rscript
# Assess imputation accuracy of founder-genotype probabilities for 400
# simulated MAGIC maize lines on chromosome 10, using the R/qtl2 workflow:
# collapse founder probabilities to SNP-allele probabilities, call the
# most probable allele at each marker, and compare against the simulated
# truth (confusion counts + per-individual R-squared).
library('data.table')
library('qtl2')
library('ggplot2')
# The 16 MAGIC founder lines, in the column order expected for genoprobs.
founders=c("A632_usa","B73_inra","CO255_inra","FV252_inra","OH43_inra","A654_inra","FV2_inra","C103_inra","EP1_inra","D105_inra","W117_inra","B96","DK63","F492","ND245","VA85")
# Genotype probabilities and cross object from the simulation run.
pr=readRDS('MAGICSim_121718_chr10_genoprobs.rds')
bg=read_cross2('MAGICSim_121718_c10.json')
pmap=fread('Physical_map_c10.csv',data.table=F)
# Label the founder dimension of the probability array.
dimnames(pr[[1]])[[2]]=founders
# Strain distribution patterns (SDPs) of the founder genotypes.
fgeno=t(bg$founder_geno[[1]])
sdp=calc_sdp(fgeno)
pos=pmap[pmap$marker %in% names(sdp),]$pos
snpinfo=data.frame(chr=c("10"),pos=pos,sdp=sdp,snp=names(sdp),stringsAsFactors=F)
snpinfo=index_snps(bg$pmap,snpinfo)
# Collapse founder probabilities to SNP-allele probabilities and save.
snp_prbs=genoprob_to_snpprob(pr,snpinfo)
saveRDS(snp_prbs,'bg10_fulldata_SNPprobs_010819.rds')
# Squared Pearson correlation between two vectors.
rsq<-function(x,y) cor(x,y,method=c("pearson")) ^2
markers=dimnames(snp_prbs[[1]])[[3]]
# Observed genotype calls restricted to markers with SNP probabilities.
included=bg$geno[[1]][,dimnames(bg$geno[[1]])[[2]] %in% markers]
# Confusion-matrix counters: r = right call, w = wrong call; codes 1 and 3
# are the qtl2 reference / alternate genotype codes.
r1=0
w1=0
r3=0
w3=0
r_squared=c()
# For each of the 400 simulated individuals, call the most probable allele
# at every marker and tally agreement with the stored truth.
for(i in 1:400){
# Allele-probability matrix for individual i (alleles x markers).
# NOTE(review): unlist()/unname() on this atomic array keeps its dim
# attribute, which the dim(p_ind)[2] loop bound below relies on — confirm.
p_ind=unlist(unname(snp_prbs[[1]][i,,]))
# Call code 1 if the first allele row carries the maximum probability,
# otherwise code 3.
p_snps=sapply(seq(1,dim(p_ind)[2]),function(x) ifelse(max(p_ind[,x])==p_ind[1,x],1,3))
a_snps=unlist(unname(included[i,]))
for(x in 1:dim(p_ind)[2]){
if(p_snps[x]==1 & a_snps[x]==1){
r1=r1+1
}
else if(p_snps[x]==1 & a_snps[x]==3){
w3=w3+1
}
else if(p_snps[x]==3 & a_snps[x]==1){
w1=w1+1
}
else{
r3=r3+1
}
}
# Per-individual squared correlation between truth and calls.
r_squared=c(r_squared,rsq(a_snps,p_snps))
}
# Assemble the 2x2 confusion table (actual vs predicted) and save it.
size=c(r1,w1,w3,r3)
result=data.frame(actual=as.factor(c(1,1,3,3)),predicted=as.factor(c(1,3,1,3)),size=size)
fwrite(result,'imputation_accuracy_010819.txt',row.names=F,quote=F,sep='\t')
r2=mean(r_squared)
# Bubble plot of the confusion table, annotated with mean R-squared and
# the overall fraction of correct calls.
png('MAGICSim_Accuracy_plot_010819.png',width=460,height=480)
print(ggplot(result,aes(x=actual,y=predicted,size=size)) + geom_point() + scale_x_discrete("Actual", labels=c(sprintf('Reference, n=%.0f',r1+w3),sprintf('Alternate, n=%.0f',r3+w1))) + scale_y_discrete("Predicted",labels=c('Reference','Alternate')) + labs(title="Imputation Accuracy of 400 Simulated MAGIC lines",subtitle=sprintf("Mean R-Squared: %.3f , n=%.0f , %.3f %% Correct",r2,sum(size),(r1+r3)/sum(size))) + theme_bw() + guides(size=F) + theme(text=element_text(family="Georgia")))
dev.off()
# Dead code kept from an earlier per-marker accuracy approach.
#actual_mat=c()
#for(i in seq(1,dim(pr[[1]][3]))){
#	p=array(pr[[1]][,,i],dim=c(400,16))
#	marker=dimnames(pr[[1]])[[3]][i]
#	pos=pmap[pmap$marker==marker,]$pos
#	block=actual[(actual$start<pos) & (actual$end > pos),]$donor1
#	a=sapply(seq(1,10),function(x) founders==block[x]
#	r_squared=abind(r_squared,rsq(t(p),a))
#	a=array(t(a),dim=c(10,16,1))
#	actual_mat=abind(actual_mat,a)
#}
#fwrite(actual_mat,'Actual_MAGICSim_121718_geno.csv',row.names=F,quote=F,sep=',')
#saveRDS(r_squared,'MAGICSim_121718_rsquared.rds')
|
802792b1724180fa0ab4fbe410417c2ebe742f6b
|
a81338876c6bddd6d02e564134ff240558d28df2
|
/R/glmnetUtils.r
|
7dc8769106ea4e4dcfb1de32c6fc48d9cf5d138e
|
[] |
no_license
|
dschneiderch/glmnetUtils
|
1b4c53c56ea9c7d9df77ce0d52114724cae2b4d4
|
c0064941cd9b41f4b11e6c28c3766f995861db6c
|
refs/heads/master
| 2021-01-19T12:04:18.280788
| 2016-09-20T07:47:18
| 2016-09-20T07:47:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
r
|
glmnetUtils.r
|
#' @title Utilities for glmnet
#' @description
#' Some quality-of-life functions to streamline the process of fitting elastic net models with the `glmnet` package, specifically:
#'
#' \itemize{
#' \item `glmnet.formula` provides a formula/data frame interface to `glmnet`.
#' \item `cv.glmnet.formula` does a similar thing for `cv.glmnet`.
#' \item Methods for `predict` and `coef` for both the above.
#' \item A function `cvAlpha.glmnet` to choose both the alpha and lambda parameters via cross-validation, following the approach described in the help page for `cv.glmnet`. Optionally does the cross-validation in parallel.
#' \item Methods for `plot`, `predict` and `coef` for the above.
#' }
#'
#' @docType package
#' @name glmnetUtils
#' @aliases glmnetUtils-package
NULL
# Remove the intercept (first) column from a model matrix.
# Non-matrix input is coerced with as.matrix() first; the result always
# keeps matrix dimensions (drop = FALSE), even with a single remaining
# column.
dropIntercept <- function(matr) {
    m <- if (is.matrix(matr)) matr else as.matrix(matr)
    m[, -1, drop = FALSE]
}
|
3930d243e55292c540c2166d1215a0453f507efa
|
42c091c23673af1b70ce3f9feafed8659afaee73
|
/man/bootstrapcisstat.Rd
|
097f4b5a1d44fa206a54efe2306a18c75a0bfcb8
|
[] |
no_license
|
ClementCalenge/scsl
|
4b20f9b85e77ada0095f6cd81c0f25928f7da999
|
33a16fb1caeae265e871033181308a463fdaf526
|
refs/heads/master
| 2021-09-05T07:56:04.601665
| 2018-01-25T12:00:31
| 2018-01-25T12:00:31
| 118,907,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,437
|
rd
|
bootstrapcisstat.Rd
|
\name{bootstrapcisstat}
\alias{bootstrapcisstat}
\alias{print.bootstrapCisstat}
\alias{sdaij}
\alias{sdejk}
\alias{sdpik}
\title{
Calculate the standard error of abundance estimates with a bootstrap approach.
}
\description{
The function \code{bootstrapcisstat} calculates the standard error of
abundance estimates with a bootstrap approach. The functions
\code{sdaij}, \code{sdejk}, and \code{sdpik} allow one to extract the
standard errors of the relative densities, the relative effort, and the
parameters pik from the resulting object.
}
\usage{
bootstrapcisstat(Sp, SU, Sta, dfVj, nu, eta = NULL, proximities = NULL,
control = NULL, trace = TRUE, B = 100)
\method{print}{bootstrapCisstat}(x, \dots)
sdaij(x, type=c("link", "response"))
sdejk(x, type=c("link", "response"))
sdpik(x)
}
\arguments{
\item{Sp}{
a factor containing the species label for each detected animal.
}
\item{SU}{
a factor containing the spatial unit label for each detected animal.
}
\item{Sta}{
a factor containing the status label for each detected animal.
}
\item{dfVj}{
a data.frame with three columns and J rows (where J is the number of
spatial units) containing: (i) a factor containing the spatial unit
label (with the same levels as \code{SU}, (ii) a numeric vector
containing the sampling effort of the dead in these units, and (iii)
a numeric vector containing the area of the SAR.
}
\item{nu}{
value of the penalty parameter used in the regularization.
}
\item{eta}{
numeric vector of length I (where I is the number of species)
allowing to introduce differing penalties for the different
species. Thus, the difference of relative density between spatial
units j and m for the species i with:
\code{nu*eta[i]*proximities[j,l]} (this parameter was introduced for
exploratory purposes, but is not actually used in the paper)
}
\item{proximities}{
J x J matrix giving the proximity between the J spatial units.
}
\item{control}{
a list with named elements controlling the fit (see
\code{?penalizedmodel} for a description of the available options.
}
\item{trace}{
Whether information about the progression of the crossvalidation
should be printed.
}
\item{B}{
The number of bootstrap samples
}
\item{x}{
An object of class \code{"bootstrapCisstat"}
}
\item{type}{
Whether the resulting standard errors should be expressed on the
log-scale (\code{type="link"}) or on the response scale
(\code{type="response"}).
}
\item{\dots}{
Arguments passed to and from other methods.
}
}
\value{
A list of class \code{"bootstrapCisstat"} containing one fitted model
(class \code{"cisstats"}, see \code{penalizedmodel}) per bootstrap
sample.
}
\author{
Clement Calenge (Office national de la chasse et de la faune sauvage)\cr
Joel Chadoeuf (INRA Avignon, France)\cr
Christophe Giraud (Universite Orsay, France)\cr
Sylvie Huet (INRA, Jouy-en-Josas, France)\cr
Romain Julliard (Museum national d'histoire naturelle)\cr
Pascal Monestiez (INRA Avignon, France)\cr
Jeremy Piffady (IRSTEA, France)\cr
David Pinaud (CNRS Chize, France)\cr
Sandrine Ruette (Office national de la chasse et de la faune sauvage)
}
\seealso{
\code{\link{crossvalidation}}, \code{\link{penalizedmodel}}
}
\examples{
## See the appendix of the paper
## for examples
}
\keyword{models}
|
4ecb59d5e3f15d3d9447a7d4fed3045c5423f616
|
1b676b2d613bf67d8bec3079b3e9c0c4abb2213b
|
/man/profkern.Rd
|
8ddab0e562290cfab3c3bd2ddc642d3877dbdb02
|
[] |
no_license
|
cran/denpro
|
995a97a3eb39a8a75d75b1fc5b17ab8d497675a0
|
503a536c5b2963f0615a9eacf65aa5a84765d6c6
|
refs/heads/master
| 2016-09-06T13:58:43.857283
| 2015-04-24T00:00:00
| 2015-04-24T00:00:00
| 17,695,458
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,422
|
rd
|
profkern.Rd
|
\name{profkern}
\alias{profkern}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{
Calculates a level set tree of a kernel estimate
}
\description{
Given a data matrix, returns a level set tree of a kernel estimate,
when the dimension of the estimate is less or equal to 4.
}
\usage{
profkern(dendat, h, N, Q, cvol=TRUE, ccen=TRUE, cfre=FALSE,
kernel="epane", compoinfo=FALSE, trunc=3, threshold=0.0000001,
sorsa="crc",hw=NULL)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{dendat}{n*d data matrix; d<=4}
\item{h}{positive real number or vector of positive real numbers;
smoothing parameters from the largest to the smallest}
\item{N}{d-vector of dyadic integers >=4;
number of knots for every direction: kernel
estimate will be evaluated on these knots}
\item{Q}{positive integer; number of quantization levels}
\item{cvol}{TRUE or FALSE; TRUE if one wants that the volumes of
the separated parts of the level sets are returned,
note that one needs the volumes for drawing a volume plot}
  \item{ccen}{TRUE or FALSE; TRUE if one wants the barycenters of the
     separated parts of the level sets to be returned,
     note that one needs the barycenters for drawing a barycenter plot}
  \item{cfre}{TRUE or FALSE; TRUE if one wants the frequencies for
    separated parts of the level sets to be returned (not implemented)}
\item{kernel}{either "epane" or "gauss";
the kernel of the kernel estimate}
\item{compoinfo}{TRUE or FALSE; whether definitions of separated
regions of level sets will be returned}
\item{trunc}{positive real number; truncation of the Gaussian kernel
to the interval [-trunc,trunc]}
\item{threshold}{positive real number; kernel will be truncated to
0 if the value is smaller than the threshold (not implemented)}
\item{sorsa}{if not "crc" uses slower R code}
\item{hw}{lower level parameter}
}
%\details{}
\value{
Returns a list of level set trees: for each value of h
the level set tree of the corresponding kernel estimate.
If h is not a vector but scalar, then only one
level set tree will be returned.
The level set tree is a list of vectors.
The elements of the vectors supply information for each node of the tree.
Below we denote with "nodenum" the number of nodes of the tree.
\item{parent}{"nodenum"-vector of integers in the range 0,..., nodenum-1;
links to the parent of each node. Root nodes are marked with 0.}
\item{level}{"nodenum"-vector of positive real numbers;
level of the level set from which the set corresponding to the node
is a part of.}
\item{volume}{"nodenum"-vector of positive real numbers;
volumes of sets corresponding to each node}
\item{center}{d*nodenum-matrix; barycenters of sets
corresponding to each node}
\item{invalue}{"nodenum"-vector of positive integers;
level of the level set in terms of original frequencies
(these values are not normalized so that the
estimate would integrate to one}
\item{component}{Will be returned if "compoinfo"=TRUE; for each
node of the level set a tree pointer to "AtomlistAtom"}
\item{AtomlistAtom}{Will be returned if "compoinfo"=TRUE; pointers
to "index"}
\item{AtomlistNext}{Will be returned if "compoinfo"=TRUE; pointers
which define the list of atoms for each separated component (for each node
of the level set tree)}
\item{index}{Will be returned if "compoinfo"=TRUE; matrix with d
columns: determines a knot of the multivariate grid where estimate
was evaluated}
\item{nodefrek}{"nodenum"-vector of positive integers;
number of observations in the set corresponding to the node.
This is useful in cluster analysis applications. (Not implemented.)}
}
%\references{http://www.rni.helsinki.fi/~jsk/denpro}
\author{ Jussi Klemela }
\note{Applies the DynaDecompose algorithm, described in
the article
"Algorithms for manipulation of level sets of nonparametric density
estimates",
by Jussi Klemela }
%~Make other sections like WARNING with \section{WARNING }{....} ~
\seealso{
\code{\link{plotvolu}},
\code{\link{plotbary}},
\code{\link{plottree}},
}
\examples{
set.seed(1)
dendat<-matrix(rnorm(20),10) # 10*2 data-matrix
pk<-profkern(dendat,h=1,N=c(8,8),Q=4)
plotvolu(pk)
dendat<-sim.data(n=200,type="mulmod")
pk<-profkern(dendat,h=1,N=c(64,64),Q=30)
plotvolu(pk)
}
\keyword{smooth}% at least one, from doc/KEYWORDS
|
51bed2d4bf10fd2ef56f1db21afe7cf8893a4fb0
|
365c1c41296c191ded892f0b085f3d8d0cad5078
|
/man/set_meta_directory_api.Rd
|
ecd738b1455720a99d0a3f192afaa5d525dd021b
|
[
"MIT"
] |
permissive
|
Jrakru/rCANSIM
|
917ca02e720f4a6e876e6bfcff90a8a352621ff2
|
553f58753db54e870f53980eb4d5723dff9042c6
|
refs/heads/master
| 2020-04-17T01:19:21.715506
| 2019-01-16T19:11:14
| 2019-01-16T19:11:14
| 166,087,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 568
|
rd
|
set_meta_directory_api.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.r
\name{set_meta_directory_api}
\alias{set_meta_directory_api}
\title{Initializes the meta querying system using Statistics Canada API}
\usage{
set_meta_directory_api(PID_List)
}
\arguments{
\item{PID_List}{List of strings representing that tables PIDs}
}
\value{
NONE (side effect function)
}
\description{
Reads all the necessary metadata present in the provided path for the querying system.
}
\keyword{cansim}
\keyword{directory}
\keyword{internal}
\keyword{metadata}
\keyword{set}
|
9bdd483c1c0e94550db0e82b7b89766f15265b6a
|
b6d80916052926fff06f988a6840335dec6cc997
|
/r_package/AvantGardeDIA/man/MPRA_y_SpectLib.Report.Rd
|
ff5b84d5103d22fcbd45b446e986fde9680d0593
|
[
"BSD-3-Clause"
] |
permissive
|
SebVaca/Avant_garde
|
043df8165272b0becf823bd4d13338fc25a55652
|
2c2fc25789b2bef8524a867d97158d043244297c
|
refs/heads/master
| 2021-06-07T18:51:36.910796
| 2021-05-07T18:07:59
| 2021-05-07T18:07:59
| 167,582,571
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 402
|
rd
|
MPRA_y_SpectLib.Report.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in
% R/20190118_Avant-gardeDIA_v28_CleaningNonUsefulFunctions.R
\name{MPRA_y_SpectLib.Report}
\alias{MPRA_y_SpectLib.Report}
\title{MPRA_y_SpectLib.Report}
\usage{
MPRA_y_SpectLib.Report(Transition.Area, y)
}
\description{
This function is MPRA_y_SpectLib.Report
}
\examples{
MPRA_y_SpectLib.Report()
}
\keyword{AvantGardeDIA}
|
a95ea101bc07a670bdfd527c1623b85428b539c7
|
c860a6f93c778a8840e6ec16e7077b64316be2a2
|
/script/exploratory/comparaison_REPHY_SRN.r
|
37041a4612dc3b4ff1d9b220b41745284d4d8f3f
|
[] |
no_license
|
CoraliePicoche/REPHY-littoral
|
35e73fbcbef73808fb19d54b5e1f089eb6258678
|
c46c12a590f67c5316f6cc03b07a1889985857df
|
refs/heads/master
| 2023-02-01T04:19:43.551503
| 2020-12-20T23:02:44
| 2020-12-20T23:02:44
| 98,662,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,653
|
r
|
comparaison_REPHY_SRN.r
|
# Compare phytoplankton abundance records between the REPHY and SRN
# monitoring networks at the sampling sites the two networks share.
graphics.off()
rm(list=ls())
filename=c("data/raw/Q2_170418_site_Tania_UTF8_only_good.csv","data/raw/Q2_170511_SRN_Chnord_UTF8_only_good.csv")
tab_REPHY=read.table(filename[1],sep=";",header=TRUE)
tab_SRN=read.table(filename[2],sep=";",header=TRUE)
# Sites sampled by both networks.
id=intersect(unique(tab_REPHY$Lieu_id),unique(tab_SRN$Lieu_id))
# Keep only total-flora counts (FLORTOT) at the shared sites.
tab_REPHY=subset(tab_REPHY,((Lieu_id %in% id)&(Res_code=="FLORTOT")),select=c("Lieu_id","Lieu_libel","Date","Taxon","Val"))
tab_SRN=subset(tab_SRN,((Lieu_id %in% id)&(Res_code=="FLORTOT")),select=c("Lieu_id","Lieu_libel","Date","Taxon","Val"))
tab_REPHY$Date=as.Date(tab_REPHY$Date,'%d/%m/%Y')
tab_SRN$Date=as.Date(tab_SRN$Date,'%d/%m/%Y')
# Visual comparison: total abundance per sampling date for each shared
# site, REPHY in red vs SRN in black, written to a multi-panel PDF.
pdf("graphe/comparaison_REPHY_SRN.pdf",width=15,height=15)
par(mfrow=c(2,2))
for (i in 1:length(id)){
t1=tapply(tab_REPHY$Val[tab_REPHY$Lieu_id==id[i]],tab_REPHY$Date[tab_REPHY$Lieu_id==id[i]],sum,na.rm=TRUE)
t2=tapply(tab_SRN$Val[tab_SRN$Lieu_id==id[i]],tab_SRN$Date[tab_SRN$Lieu_id==id[i]],sum,na.rm=TRUE)
# Common x-axis range across both series.
d1=min(as.Date(c(rownames(t1),rownames(t2))))
d2=max(as.Date(c(rownames(t1),rownames(t2))))
plot(as.Date(rownames(t1)),t1,pch=16,cex=2,col="red",main=tab_REPHY$Lieu_libel[tab_REPHY$Lieu_id==id[i]][1],xlab="",ylab="Abundance totale",xlim=c(d1,d2))
points(as.Date(rownames(t2)),t2,pch=16,cex=1,col="black")
legend("topleft",c("REPHY","SRN"),col=c("red","black"),pch=16)
}
dev.off()
# Date comparison: per site, count the sampling dates shared by the two
# networks, then on those shared dates find taxa recorded by only one
# network (results_left_out) and taxa whose reported values disagree
# (results_common).
results_common=vector("list",length(id))
results_left_out=vector("list",length(id))
for (i in 1:length(id)){
d1=unique(as.Date(tab_REPHY$Date[tab_REPHY$Lieu_id==id[i]]))
d2=unique(as.Date(tab_SRN$Date[tab_SRN$Lieu_id==id[i]]))
d3=as.Date(intersect(d1,d2),origin="1970-01-01")
print(paste("Pour",tab_REPHY$Lieu_libel[tab_REPHY$Lieu_id==id[i]][1],'il y a',length(d1),'dans REPHY',length(d2),'dans SRN',length(d3),'dates communes',sep=" "))
t1=subset(tab_REPHY,((Lieu_id==id[i])&(Date %in% d3)),select=c("Date","Taxon","Val"))
t2=subset(tab_SRN,((Lieu_id==id[i])&(Date %in% d3)),select=c("Date","Taxon","Val"))
# Align both tables by date then taxon so rows can be compared directly.
or=order(as.Date(t1$Date),t1$Taxon)
t1=t1[or,]
or=order(as.Date(t2$Date),t2$Taxon)
t2=t2[or,]
diff1=c()
diff2=c()
# If the row counts differ, some taxa appear in only one network on a
# given date; collect those rows and drop them before comparing values.
if(dim(t1)[1]!=dim(t2)[1]){
for(d in 1:length(d3)){
taxa1=t1$Taxon[t1$Date==d3[d]]
taxa2=t2$Taxon[t2$Date==d3[d]]
diff1=c(diff1,which(t1$Date==d3[d]&t1$Taxon %in% setdiff(taxa1,taxa2)))
diff2=c(diff2,which(t2$Date==d3[d]&t2$Taxon %in% setdiff(taxa2,taxa1)))
}
results_left_out[[i]]=list("REPHY_spec"=t1[diff1,],"SRN_spec"=t2[diff2,])
t1=t1[-diff1,]
t2=t2[-diff2,]
}else{
results_left_out[[i]]=NULL
}
# Rows present in both networks whose reported values disagree
# (non-zero REPHY - SRN difference).
t3=t1
t3$Val=t1$Val-t2$Val
t3=t3[t3$Val!=0,]
results_common[[i]]=t3
}
|
d96c92f5d768c50935f06a339f33bc28bf8ec05b
|
c0465b63b853d062fda02bbcce5415e73a58219e
|
/bin/RNAseq/tximport.R
|
5281916a691682207da1ac8666d8b8d80798eacd
|
[
"MIT"
] |
permissive
|
hustlc/PipeOne
|
14c0163a90a7cffc810d8ac7a3d2d8a617976cb8
|
4a91a16303818b60e88c3a7defee2155ec329800
|
refs/heads/master
| 2023-03-31T02:37:28.922321
| 2021-03-26T02:11:41
| 2021-03-26T02:11:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
tximport.R
|
#!/usr/bin/env Rscript
# Import kallisto quantifications with tximport and write both gene-level
# and transcript-level estimated-count and TPM tables as TSV files.
library("tximport")
library(readr)
# Directory containing one kallisto output folder per sample.
wd = 'samples'
# Transcript-ID -> gene-ID lookup used to aggregate to gene level.
tx2gene = read_tsv("protein_coding_and_all_lncRNA.txID_geneID.tsv")
# One sample per subdirectory of `wd`.
sample_id <- dir(file.path(wd))
sample_id
# Path to each sample's kallisto HDF5 abundance file, named by sample so
# the columns of the output matrices carry the sample IDs.
kal_files = file.path(wd, sample_id, "abundance.h5")
names(kal_files) = sample_id
# Gene-level import: counts and TPM aggregated via tx2gene.
txi <- tximport(kal_files , type = "kallisto", tx2gene = tx2gene)
write.table(txi$counts, 'kallisto_gene_est_counts.tsv',sep = '\t',quote = F)
write.table(txi$abundance, 'kallisto_gene_tpm.tsv', sep="\t",quote = F )
# Transcript-level import: txOut = TRUE skips the gene aggregation step.
txi_tx <- tximport(kal_files, type = "kallisto", txOut = TRUE)
write.table(txi_tx$counts, 'kallisto_tx_est_counts.tsv',sep = '\t',quote = F )
write.table(txi_tx$abundance, 'kallisto_tx_tpm.tsv', sep="\t",quote = F )
|
749c1b7de82b396b1f23329b7ab3941c489dba75
|
0ddd4e8817b145d428a279f357ab875eedcb8fae
|
/readData_euronext.R
|
9d647e3e074163108ccc9fb75106e08cdbe8356d
|
[] |
no_license
|
clembl76/prediction-bourse
|
78a7df17a033201a1f47190f5fc4eb92ae777392
|
f15b7fccef38a2ac333d716eb0a680bf9078c702
|
refs/heads/master
| 2020-03-31T12:23:26.880999
| 2015-05-25T09:54:07
| 2015-05-25T09:54:07
| 34,904,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 397
|
r
|
readData_euronext.R
|
# Load the Euronext trading-day calendar into `tradingDays` and coerce its
# columns to Date/logical types. The caller's working directory is saved
# and restored around the read.
saveDir<-getwd()
dataDir<-paste(getwd(),"/Data/",sep="")
dir<-paste(dataDir,"/euronext/",sep="")
setwd(dir)
# Load and preprocess the calendar data.
fileName<-"BDM-Euronext-CAL-20100615.csv"
tradingDays <- read.csv(fileName,sep=";",header=TRUE)
# Calendar.date as Date; Trading.day.indicator as TRUE/FALSE.
tradingDays$Calendar.date<-as.Date(tradingDays$Calendar.date)
tradingDays$Trading.day.indicator<-as.logical(tradingDays$Trading.day.indicator)
setwd(saveDir)
|
18a971f7137f4560de8d677cda770a769ef6f3fa
|
1c066932b18dd710830f9208294e1ec59968f761
|
/cachematrix.R
|
2fc9af34cc221b5c2025b2af517b7a3c6ffb711b
|
[] |
no_license
|
nunezreinaldo/ProgrammingAssignment2
|
3201652befe9e7d7ba9164a9d5c55f818d513b46
|
5b25a3d6fca2b383a3116670fa9a1979c558b2b6
|
refs/heads/master
| 2021-01-14T13:16:57.656029
| 2015-04-25T18:56:13
| 2015-04-25T18:56:13
| 34,578,074
| 0
| 0
| null | 2015-04-25T17:21:00
| 2015-04-25T17:21:00
| null |
UTF-8
|
R
| false
| false
| 2,100
|
r
|
cachematrix.R
|
# R. Nunez April 25, 2015
# This code is submited for free as it without guarante and in accordance with
# the Honor Code in Courser.com - R Programming course.
#
# Matrix inversion is costly, so these two functions cache the inverse of
# a matrix rather than recompute it on every request:
#   makeCacheMatrix(x) - wraps matrix `x` in a list of accessor closures
#                        that also hold a cached inverse.
#   cacheSolve(cm)     - returns the inverse of the wrapped matrix,
#                        computing it with solve() only on the first call.

# Create a special "matrix" object that can cache its inverse.
# Returns a list of four functions closing over `x` (the matrix) and
# `m` (the cached inverse, NULL until computed):
#   setmatrix(y)  - replace the stored matrix and invalidate the cache
#   getmatrix()   - return the stored matrix
#   setinverse(i) - store a computed inverse in the cache
#   getinverse()  - return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
    # Cached inverse; NULL means "not computed yet".
    m <- NULL
    setmatrix <- function(y) {
        x <<- y
        m <<- NULL  # a new matrix invalidates any cached inverse
    }
    getmatrix <- function() x
    setinverse <- function(solve) m <<- solve
    getinverse <- function() m
    # Expose the closures to the caller.
    list(setmatrix = setmatrix, getmatrix = getmatrix,
         getinverse = getinverse, setinverse = setinverse)
}

# Compute (or retrieve from cache) the inverse of the matrix wrapped by
# the makeCacheMatrix object `x`. Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
    # Serve the cached inverse when available.
    m <- x$getinverse()
    if(!is.null(m)) {
        message("getting cached data")
        return(m)
    }
    # Cache miss: fetch the matrix and invert it.
    # BUG FIX: the original also called x$setmatrix(data) here, which
    # needlessly re-stored the same matrix and cleared the cache.
    data <- x$getmatrix()
    m <- solve(data, ...)
    # Store the inverse so later calls hit the cache.
    x$setinverse(m)
    m
}
|
c5c389d2e8d4b1fda880052377283457f3798ad3
|
91181357498a8d243659119deb95563cb29ca3b4
|
/program/02_Base_Dirichilet.R
|
1a41dbda0ef2098cc00d47ea9ea6be58a7a3d4ff
|
[] |
no_license
|
lamfo-unb/modelos_esporte
|
936ee751c68119eaf2b252e5effc14dfc3513378
|
e5004948efc659555134576d97d28a7728dd2876
|
refs/heads/master
| 2021-01-21T12:12:00.862926
| 2018-02-02T12:49:29
| 2018-02-02T12:49:29
| 102,026,582
| 0
| 2
| null | 2017-09-14T15:18:56
| 2017-08-31T17:27:44
|
HTML
|
UTF-8
|
R
| false
| false
| 19,426
|
r
|
02_Base_Dirichilet.R
|
library(data.table)
library(dplyr)
library(tidyverse)
library(readxl)
library(gtools)
library(stringr)
rm(list = ls())
gc()
## dados para transformação
sigma <- 1
gama <- 1
### IMPORTANDO INFORMAÇÕES DOS JOGADORES ----
fileinput <- "data/static"
jogadores <- readRDS("data/static/dados_jogadores_ordenado.rds")
vars <- c(setdiff(names(jogadores),c(grep("time|Time",names(jogadores),value = T),
"nome_jogador","Posições",
"Data de nascimento")),"time_match3","temporada")
jogadores <- jogadores %>%
select(one_of(vars))
jogadores[,Altura:=gsub("\\scm","",Altura,perl=T)]
jogadores[,Peso:=gsub("\\scm","",Altura,perl=T)]
jogadores[,`Perna boa`:=ifelse(`Perna boa`=="Dir.",1,0)*10]
jogadores[,`Perna ruim`:=as.numeric(`Perna ruim`)*10]
jogadores <- gather(jogadores %>%
select(one_of(vars)),
key = variavel, value = valor,-time_match3,-id_jogador,-temporada)
jogadores <- jogadores %>%
mutate(season=as.numeric(gsub("fifa(..).*","\\1",temporada,perl=T))+2000)
## média por time
jogadores_time <- jogadores %>%
dplyr::group_by(season,time_match3,variavel) %>%
dplyr::summarise(valor= mean(as.numeric(valor)))
saveRDS(jogadores_time,"data/static/dados_media_time.rds")
### Descritiva ----
times_analise <- c("Arsenal","Chelsea","Liverpool","Man Utd","Man City")
variaveis_analise <- unique(jogadores_time$variavel)
jogadores_time_analise <- jogadores_time %>%
filter(variavel %in% variaveis_analise &
time_match3 %in% times_analise)
i <- variaveis_analise[1]
library(stringi)
stri_trans_general(i, "Latin-ASCII")
for(i in variaveis_analise){
var_name <- gsub(" ","_",tolower(stri_trans_general(i, "Latin-ASCII") ))
nome_arquivo <- paste0("report/images/",var_name,"_descri.pdf")
pdf(file = nome_arquivo)
g <- ggplot(data=jogadores_time_analise %>% filter(variavel == i & season!=2017),
aes(x=season, y=valor, group = time_match3 ,
colour=time_match3)) +
geom_line(size = 2 )+
geom_point( size=2, shape=22, fill="white") + #http://sape.inf.usi.ch/quick-reference/ggplot2/shape
scale_colour_manual(name = "Team",
values=c("darkred", "blue","red","lightblue","gold")) +
scale_x_discrete(name ="Season",
limits=c(2008:2016)) +
scale_y_continuous(name = i) +
scale_shape_discrete(name ="Team")
print(g)#https://stackoverflow.com/questions/21321975/loop-does-not-finish-saving-pdf
dev.off()
}
## resultado jogos ----
fileinput <- "data/games"
arquivos <- list.files(path = fileinput,pattern = paste0('resultado_'),
full.names = T)
base <- data.table()
for(i in 1:length(arquivos)){
base_temp <- readRDS(arquivos[i])
base <- rbind(base,base_temp)
}
#
# saas <- grep("2009",arquivos,value=T)
# table(gsub("data/games/S_resultado_.*_J(.*).rds","\\1",saas))
## pegando posição ----
fileinput <- "data/class"
arquivos <- list.files(path = fileinput,pattern = paste0('S'),
full.names = T)
base_class <- data.table()
for(i in 1:length(arquivos)){
base_temp <- readRDS(arquivos[i])
base_class <- rbind(base_class,base_temp)
}
rm(base_temp)
### Normalizing team names: map every alternative spelling to the canonical
### short name used throughout the rest of the pipeline.
base_class <- base_class %>%
  select(match, season, Date, Time, pos_home, pos_visit, `Home team`, `Visiting team`) %>%
  mutate(dia_rodada = gsub("...\\s\\t*(.*)", "\\1", Date))
# Lookup table replacing the original chain of ~50 ifelse() calls (one pair
# per spelling per column). The two chained Blackburn rules are flattened:
# "Blackburn Rvrs" -> "Blackburn Rovers" -> "Blackburn".
team_name_map <- c(
  "Blackburn Rvrs"          = "Blackburn",
  "Blackburn Rovers"        = "Blackburn",
  "Birm. City"              = "Birmingham",
  "Birmingham City"         = "Birmingham",
  "Manchester City"         = "Man City",
  "Manchester United"       = "Man Utd",
  "Manchester Utd."         = "Man Utd",
  "Manchester Utd"          = "Man Utd",
  "Newcastle United"        = "Newcastle",
  "Newcastle Utd"           = "Newcastle",
  "Tottenham Hotspur"       = "Tottenham",
  "Spurs"                   = "Tottenham",
  "West Bromwich"           = "West Brom",
  "West Ham United"         = "West Ham",
  "Wolverhampton"           = "Wolverhampton Wanderers",
  "Wigan Athletic"          = "Wigan",
  "Swansea City"            = "Swansea",
  "Norwich City"            = "Norwich",
  "Leicester City"          = "Leicester",
  "Cardiff City"            = "Cardiff",
  "Bolton Wanderers"        = "Bolton",
  "QPR"                     = "Queens Park Rangers",
  "Arsenal FC"              = "Arsenal",
  "Chelsea FC"              = "Chelsea",
  "Reading FC"              = "Reading",
  "Portsmouth FC"           = "Portsmouth",
  "Charlton Athletic"       = "Charlton"
)
# Replace a name when it appears in the map, otherwise keep it unchanged
# (NA inputs stay NA, matching the behavior of the original ifelse() chain).
fix_team_name <- function(x) {
  mapped <- team_name_map[x]
  ifelse(is.na(mapped), x, unname(mapped))
}
# The original applied the identical rule set to both columns.
base_class$`Home team` <- fix_team_name(base_class$`Home team`)
base_class$`Visiting team` <- fix_team_name(base_class$`Visiting team`)
## Assemble one row per match: home/away team ids plus numeric scores.
base <- base %>%
  select(tipo, nome_time, id_time, id_jogo, rodada, dia_rodada, hora_jogo, score, season)
# Wide table of scores: one column per side (Casa/Fora), coerced to numeric.
placares <- base %>%
  select(tipo, season, score, id_jogo) %>%
  spread(tipo, score) %>%
  mutate(score_casa = as.numeric(Casa),
         score_fora = as.numeric(Fora)) %>%
  select(season, id_jogo, score_casa, score_fora)
# Wide table of team ids: Casa/Fora columns identifying the two teams.
confrontos <- base %>%
  select(tipo, season, id_time, id_jogo, dia_rodada) %>%
  spread(tipo, id_time)
# One row per match with both team ids and both scores.
base_resultados <- left_join(confrontos, placares,
                             by = c("season", "id_jogo"))
rm(placares, confrontos)
## Joining the FIFA team names with the match-data team ids ----
times_fifa <- unique(jogadores_time %>% ungroup() %>% select(time_match3))
tabela_times <- unique(base %>% select(id_time, nome_time))
# seq_len() is safe for zero rows, unlike 1:nrow() which yields c(1, 0).
tabela_times$id_match <- seq_len(nrow(tabela_times))
# Match each FIFA name against the match-data names by substring search.
# Names with zero or multiple hits are left NA and fixed manually below.
# vapply() with a preallocated integer result replaces the original pattern
# of growing a vector with c() inside a 1:nrow() loop.
times_fifa$id_match <- vapply(as.character(times_fifa$time_match3), function(nm) {
  hits <- grep(nm, tabela_times$nome_time)
  if (length(hits) == 1) hits else NA_integer_
}, integer(1), USE.NAMES = FALSE)
## Manual fixes for the ambiguous Manchester names
times_fifa$id_match <- ifelse(times_fifa$time_match3 == "Man Utd",
                              grep("Manchester United", tabela_times$nome_time),
                              times_fifa$id_match)
times_fifa$id_match <- ifelse(times_fifa$time_match3 == "Man City",
                              grep("Manchester City", tabela_times$nome_time),
                              times_fifa$id_match)
tabela_times <- left_join(tabela_times, times_fifa,
                          by = "id_match")
## Attaching the matched id_time to the player tables ----
jogadores_time <- left_join(jogadores_time,
tabela_times %>%
select("time_match3","id_time"),
by = "time_match3")
jogadores <- left_join(jogadores,
tabela_times %>%
select("time_match3","id_time"),
by = "time_match3")
### Attaching league-table positions: translate the raw team names in the
### classification table into numeric team ids (Casa = home, Fora = away).
base_class <- base_class %>%
left_join(tabela_times %>%
select(id_time,time_match3),by = c("Home team"="time_match3"))
setnames(base_class,"id_time","Casa")
base_class <- base_class %>%
left_join(tabela_times %>%
select(id_time,time_match3),by = c("Visiting team"="time_match3"))
setnames(base_class,"id_time","Fora")
# Bring the home/away league position of each match into the results table.
base_resultados <- base_resultados %>%
left_join(base_class %>%
select(season,dia_rodada,pos_home,Casa),by = c("season","dia_rodada","Casa"))
base_resultados <-base_resultados %>%
left_join(base_class %>%
select(season,dia_rodada,pos_visit,Fora),by = c("season","dia_rodada","Fora"))
# Use each team's position from its PREVIOUS match (lag) so the predictor is
# known before kick-off.
# NOTE(review): lag() runs without group_by(season, Casa) / (season, Fora),
# so the first match of each team/season inherits the previous group's last
# position; only the very first row of the whole table becomes NA. Confirm
# this is intended.
base_resultados <- base_resultados %>%
arrange(season,Casa,id_jogo) %>%
mutate(pos_home_ant = lag(pos_home))
base_resultados <- base_resultados %>%
arrange(season,Fora,id_jogo) %>%
mutate(pos_visit_ant = lag(pos_visit))
# Missing previous positions are treated as 0.
base_resultados <- base_resultados %>%
mutate(pos_home_ant = ifelse(is.na(pos_home_ant),0,pos_home_ant) ,
pos_visit_ant = ifelse(is.na(pos_visit_ant),0,pos_visit_ant))
# pos: difference between the two teams' previous league positions
# (home minus away); this is the final feature kept for the model.
base_resultados <- base_resultados %>%
mutate(pos = as.numeric(pos_home_ant)-as.numeric(pos_visit_ant)) %>%
select(season,id_jogo,Casa,Fora,score_casa,score_fora,pos)
## Fill holes in the data: build every season x team x variable combination ----
## (expand.grid-style cross join) so teams absent from some seasons still get
## a projected skill value.
jogadores_completo <- merge(unique(jogadores_time %>% ungroup() %>% select(season)),
unique(tabela_times %>% filter(!is.na(time_match3)) %>% ungroup() %>% select(time_match3)),by=NULL) %>%
merge(unique(jogadores_time %>% ungroup() %>% select(variavel)),by=NULL) %>%
left_join(tabela_times %>% ungroup() %>% select(time_match3,id_time),by = c("time_match3")) %>%
left_join(jogadores_time %>% ungroup() %>%
select(season,id_time,variavel,valor,variavel),
by = c("season","id_time","variavel"))
## Forward fill: repeatedly copy the previous season's value into NA slots.
jogadores_completo <- data.table(jogadores_completo)
jogadores_completo <- jogadores_completo[order(time_match3,variavel,season)]
jogadores_completo[, lag.value:=ifelse(is.na(valor),shift(valor,type="lag"),valor),
, by = .(time_match3,variavel) ]
# A single shift() only bridges a one-season gap; iterate enough times to
# cover the longest possible run of missing seasons (2009-2017).
for(i in 2009:2017){
jogadores_completo[, lag.value:=ifelse(is.na(lag.value),shift(lag.value,type="lag"),lag.value),
, by = .(time_match3,variavel) ]
}
## Backward fill for teams with no earlier observation.
# NOTE(review): order(rev(x), ...) is NOT the same as sorting in decreasing
# order (it orders by the values at mirrored row positions), so this backward
# fill may not traverse seasons in strict reverse order. order(-season) or
# setorder(..., -season) is likely what was intended - verify.
jogadores_completo <- jogadores_completo[order(rev(time_match3),rev(variavel),rev(season))]
jogadores_completo[, lag.value2:=ifelse(is.na(lag.value),shift(lag.value,type="lag"),lag.value),
, by = .(time_match3,variavel) ]
for(i in 2009:2017){
jogadores_completo[, lag.value2:=ifelse(is.na(lag.value2),shift(lag.value2,type="lag"),lag.value2),
, by = .(time_match3,variavel) ]
}
# Sanity check: NA counts remaining after forward/backward projection
# (lag.value2 should have the fewest).
sum(is.na(jogadores_completo$valor))
sum(is.na(jogadores_completo$lag.value))
sum(is.na(jogadores_completo$lag.value2))
## Adding home-team skill information to the results table ----
# NOTE(review): this join deliberately omits "variavel" from the keys, so
# each match row fans out into one row per skill variable - confirm.
base_result_jogadores <- base_resultados %>% left_join(jogadores_completo,
by = c("season"="season","Casa"="id_time"))
base_result_jogadores<- base_result_jogadores %>%
mutate(valor_casa = lag.value2,
time_casa = time_match3) %>%
select(-lag.value,-valor,-lag.value2,-time_match3)
# NOTE(review): base_resultados is converted to data.table here but does not
# appear to be used again below - possibly dead code.
base_resultados <- data.table(base_resultados)
## Adding away-team skill information (now keyed by variavel as well, since
## the fan-out already happened above)
base_result_jogadores <- base_result_jogadores %>% left_join(jogadores_completo,
by = c("season"="season","Fora"="id_time","variavel"="variavel"))
base_result_jogadores<- base_result_jogadores %>%
mutate(valor_fora = lag.value2,
time_fora = time_match3) %>%
select(-lag.value,-valor,-lag.value2,-time_match3)
## Skill difference: home value minus away value, per variable.
base_result_jogadores<- base_result_jogadores %>%
mutate(valor = valor_casa- valor_fora)
# Wide model matrix: one column per skill variable, one row per match.
base_result_jogadores_modelo <- spread(base_result_jogadores %>%
select(season,Casa,Fora,score_casa,score_fora,pos,variavel,valor),key = variavel,value = valor)
## Model ----
saveRDS(base_result_jogadores_modelo, "data/result/base_modelo_dirichilet_score.rds")
base_result_jogadores_forecast <- base_result_jogadores %>%
  select(season, Casa, time_casa, variavel, valor_casa) %>%
  unique()
# ## Forecast ----
# saveRDS(base_result_jogadores_forecast,"data/result/base_modelo_bayes01-forecast.rds")
## Label each match with its result: home win (VH), draw (TIE), away win (VV).
base_result_jogadores_modelo <- base_result_jogadores_modelo %>%
  mutate(resultado = ifelse(score_casa > score_fora, "VH",
                            ifelse(score_casa == score_fora, "TIE", "VV")))
aa_analise <- gather(base_result_jogadores_modelo, variable, valor, -season, -Casa,
                     -Fora, -score_casa, -score_fora, -resultado)
variaveis_analise <- unique(aa_analise$variable)
## Mean skill difference per variable and result over the training seasons
## (season 2017 is held out).
base_select_variaveis <- aa_analise %>% filter(season != 2017) %>%
  group_by(variable, resultado) %>% summarise(valor = mean(valor))
base_select_variaveis <- spread(base_select_variaveis, resultado, valor) %>%
  select(variable, VH, TIE, VV)
## Per-season ordering checks: how often the season means are ordered in the
## expected ("positive", N*P) or reversed ("negative", N*N) direction.
base_select_variaveis2 <- aa_analise %>% filter(season != 2017) %>%
  group_by(variable, resultado, season) %>% summarise(valor = mean(valor)) %>%
  spread(resultado, valor) %>%
  group_by(variable) %>%
  summarise(NVHP = mean(VH > TIE & VH > VV),
            NVVP = mean(VV < TIE & VV < VH),
            NTIEP = mean(VH > TIE & TIE > VV),
            # BUG FIX: mean(NVHP, NVVP, NTIEP) passed NVVP/NTIEP into mean()'s
            # trim/na.rm arguments and returned NVHP unchanged; the three
            # proportions must be combined with c() before averaging.
            NP = mean(c(NVHP, NVVP, NTIEP)),
            NVHN = mean(VH < TIE & VH < VV),
            NVVN = mean(VV > TIE & VV > VH),
            NTIEN = mean(VH < TIE & TIE < VV),
            NN = mean(c(NVHN, NVVN, NTIEN)))
base_select_variaveis <- base_select_variaveis %>%
  left_join(base_select_variaveis2, by = "variable")
## Keep variables whose season means order consistently (either direction)
## in at least 80% of the checks.
level_select <- 0.8
vars_selecionadas <- (base_select_variaveis %>%
  filter(NP >= level_select | NN >= level_select))$variable
# Express the proportion columns as percentages for the LaTeX table.
base_select_variaveis[, -(1:4)] <- base_select_variaveis[, -(1:4)] * 100
# Alternate LaTeX row colors.
# NOTE(review): rep(..., 15) assumes exactly 30 variables; verify against
# length(variaveis_analise).
base_select_variaveis$variable <- paste0(rep(c("\rowcolor{gray!30!}", "\rowcolor{gray!10!}"), 15), " ",
                                         base_select_variaveis$variable)
library(xtable)
print(xtable(base_select_variaveis,
             digits = c(0, 0, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1)), include.rownames = FALSE)
# (Removed two dead assignments to var_name/nome_arquivo here: both were
# immediately overwritten inside the plotting loop below.)
vars_deletadas <- setdiff(variaveis_analise, vars_selecionadas)
# For every skill variable, plot the per-season mean skill difference by
# match result; deselected variables additionally get two grey diagonal
# reference bands. One PDF per variable is written to report/images/.
for(i in variaveis_analise){
# File-safe variable name: strip accents, lower-case, replace spaces/dots.
var_name <- gsub(" ","",tolower(stri_trans_general(i, "Latin-ASCII") ))
var_name <- gsub("\\.","_",var_name )
nome_arquivo <- paste0("report/images/",var_name,"_result.pdf")
pdf(file = nome_arquivo)
# NOTE(review): stat_summary(fun.y=) is deprecated since ggplot2 3.3.0 in
# favour of fun=; this still works but warns on newer ggplot2 versions.
g <-ggplot(data =aa_analise %>% filter(variable == i & season!=2017),
aes(x=as.factor(season), y=valor)) +
# geom_boxplot(aes(fill=resultado,x=as.factor(season), y=valor)) +
# scale_colour_manual(values = c("green", "blue","red")) +
stat_summary(fun.y=mean, geom="line",
aes( group = resultado ,colour = resultado),size=2) +
scale_x_discrete(name ="Season") +
scale_fill_manual(name = "Result",
values=c("green", "blue","red")) +
scale_colour_discrete("Result") +
scale_y_continuous(name = i)
# Deselected variables: overlay two translucent diagonals spanning the
# min/max of the season means, visually marking "no consistent ordering".
if(i %in% vars_deletadas){
baselimite <- aa_analise %>% filter(variable == i & season!=2017) %>%
group_by(resultado,season) %>% summarise(valor = mean(valor))
minv <- min(baselimite$valor)
maxv <- max(baselimite$valor)
# diagonal 1 (falling): from (season 1, max) to (season 9, min)
ablines <- data.frame(rbind(c(1,maxv),
c(9,minv)))
names(ablines) <- c("ano","valor")
coefs <- coef(lm(valor ~ ano, data = ablines))
g <- g + geom_abline(intercept = coefs[1], slope = coefs[2],size = 5,alpha = .25)
# diagonal 2 (rising): from (season 1, min) to (season 9, max)
ablines <- data.frame(rbind(c(1,minv),
c(9,maxv)))
names(ablines) <- c("ano","valor")
coefs <- coef(lm(valor ~ ano, data = ablines))
g <- g + geom_abline(intercept = coefs[1], slope = coefs[2],size = 5,alpha = .25)
}
# Explicit print() is required for ggplot objects inside a for loop.
print(g)#https://stackoverflow.com/questions/21321975/loop-does-not-finish-saving-pdf
dev.off()
}
# Persist the list of selected variables for the downstream model scripts.
saveRDS(data.frame(vars_selecionadas = unique(vars_selecionadas)),"data/result/variaveis_modelo_select_dissimilaridade.rds")
|
accb2c2c68e67b642b62d89ff771f7bf696f204c
|
0417fda8262e45239762879d827281b4a8be9cb8
|
/TXT data for upload/EDA-plot2-v1-final.txt
|
6ce9a150cbc332e40bec12521de2ec0c67163be2
|
[] |
no_license
|
andcos/Exdata_plotting2
|
948416ca52488a4e0843cfb32f1f64a6948adfd3
|
afe1648f5b75ecfa2e21bde4cd28bbfee29fa138
|
refs/heads/master
| 2021-01-06T20:43:16.629388
| 2015-02-25T22:19:54
| 2015-02-25T22:19:54
| 30,858,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,607
|
txt
|
EDA-plot2-v1-final.txt
|
##=====================================
## Coursera EDA
## Date 16.02.2015
## Project 2 programming assigment
## PART 2
##=====================================
## Load libraries.
## plyr is attached BEFORE dplyr so dplyr's verbs (filter, mutate,
## summarise, ...) win the masking conflict; attaching plyr after dplyr is a
## well-known source of silent bugs.
library(plyr)
library(dplyr)
library(data.table)
library(stats)  # was require(): library() fails loudly if loading fails
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
##===
##q2
##===
##Have total emissions from PM2.5 decreased in the Baltimore
##City, Maryland (fips == "24510") from 1999 to 2008? Use the
##base plotting system to make a plot answering this question.
## Plot the mean PM2.5 emission for each measured year in Baltimore City
## (fips "24510") with a least-squares trend line.
## Side effect: writes "EDA_plot2.png" to the working directory.
## Depends on the global data frame NEI loaded above (columns used:
## fips, year, Emissions).
explore_q2_plot<-function(){
  years <- c("1999", "2002", "2005", "2008")
  # Filter once instead of five times (the original also computed an
  # unused full-city subset, f_maryland).
  baltimore <- filter(NEI, fips == "24510")
  # Mean emissions per year; vapply guarantees a plain numeric vector.
  trending_mean <- vapply(
    years,
    function(y) mean(baltimore$Emissions[baltimore$year == y]),
    numeric(1),
    USE.NAMES = FALSE
  )
  png("EDA_plot2.png", width=480,height=480)
  ## create plot
  ## ==========================
  ## 1 Mean values Emissions
  ##===========================
  plot(trending_mean,
       main=" Mean values of the NSI Emissions in Baltimore City",
       ylab=" mean Emmissions",
       xlab="years",
       axes=FALSE)
  axis(1, at = seq_along(years), lab = years)
  axis(2, at=c(0,2,4,6))
  # Least-squares trend line over the four yearly means.
  abline(lsfit(seq_along(years), trending_mean))
  box()
  dev.off()
}
|
173014d2c37562e58af8705442960759ffe2ab44
|
2aee395e5ffe1e6fd33441c3b65a5adb42de541d
|
/plot1.R
|
9653935f89f10e7ebc7b7408651117436661a685
|
[] |
no_license
|
narni/ExData_Plotting1
|
d52cb9ee8b07569418ffde8dcecadac2251a23fa
|
af32e1c15869d65ebcae82d8910637c806f6164f
|
refs/heads/master
| 2021-01-24T23:24:11.630761
| 2014-05-10T04:34:56
| 2014-05-10T04:34:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 993
|
r
|
plot1.R
|
# Data downloading and reading
library(RCurl)
library(data.table)
temp <- tempfile()
download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip', temp, method='curl')
data <- read.table(unz(temp,'household_power_consumption.txt'), sep=';', header=TRUE)
# Subsetting and data cleansing: keep only the two target days (1-2 Feb 2007).
alfa <- as.Date(c('01/02/2007','02/02/2007'), format="%d/%m/%Y")
# %in% replaces the original pair of == comparisons summed together as a
# makeshift OR: %in% never yields NA, whereas == against an unparseable
# date does, and an NA logical index silently injects all-NA rows.
datos <- data[as.Date(as.character(data$Date), format="%d/%m/%Y") %in% alfa, ]
# Combine date and time into a single POSIXlt timestamp column.
datos$DateTime <- paste(datos$Date, datos$Time, sep=' ')
datos$DateTime <- strptime(datos$DateTime, "%d/%m/%Y %H:%M:%S")
# The raw column is read as character (contains "?" for missing values).
datos$Global_active_power <- as.numeric(as.character(datos$Global_active_power))
# Plot generation
png(filename = "plot1.png", width = 480, height = 480, bg = "transparent")
hist(datos$Global_active_power, col = 'red', main='Global Active Power', xlab = 'Global Active Power (kilowatts)')
dev.off()
|
9f96f72fc1718cdfd7e1dfaa2c8a1428c3ba9159
|
47672a3453dc21b3a4469878dd2801a19337f638
|
/plot_model_fits.R
|
44956f744e9b2257edde7beba5b891e9c591172b
|
[
"MIT"
] |
permissive
|
kgostic/2018_seasonal_flu_manuscript
|
8d9a28e3f2be62d8452725aae03f1fa40c53ee37
|
0ddcfdb90606627ed1d38b22f1f7eecb50cc5e47
|
refs/heads/master
| 2020-04-10T07:33:36.920411
| 2019-09-15T21:15:21
| 2019-09-15T21:15:21
| 160,883,450
| 0
| 0
|
MIT
| 2019-07-06T00:13:34
| 2018-12-07T22:52:50
|
HTML
|
UTF-8
|
R
| false
| false
| 10,366
|
r
|
plot_model_fits.R
|
######################### Plot model fits ################################
## Run from seasonal_flu home directory
## Start with results from AZ data
# NOTE(review): rm(list = ls()) plus hard-coded setwd() paths make this
# script destructive when sourced into an existing session and non-portable;
# consider a project-relative working directory instead.
rm(list = ls())
## setwd('~/Dropbox/R/2018_seasonal_flu') ## Run from the seasonal_flu home folder
## Load libraries
library(viridis)
library(scales)
## Temporarily switch to the 2017_AZ subdirectory and load AZ model fits, CIs and model inputs
## We will load the INSIGHT inputs below, when we start that plot.
setwd('~/Dropbox/R/2018_seasonal_flu/2017_AZ/')
load('processed-data/AZ_model_fits.RData')
load('processed-data/AZ_CIs.RData')
source('00-Inputs_multinomial.R')
source('0func_tns_color.R')
# NOTE(review): downstream code relies on these loads/sources providing
# lk.AS/lk.AG/lk.A/lk.AN, del.AIC, the *.master matrices, the *.CIs matrices,
# the age-bin vectors (a0.4 ... a81.90plus), demog, and tns() - this cannot
# be verified from this file alone.
setwd('../')# Switch back into the home directory
## OUTPUTS
plot3 = 'figures/AZ_H1N1_fit.tiff'
plot4 = 'figures/AZ_H3N2_fit.tiff'
plot5 = 'figures/AZ_age_fit.tiff'
plot6 = 'figures/AZ_imp_fit.tiff'
colH2N2 = tns('dodgerblue', .9)
colH1N1 = tns('steelblue2', .75)
colH3N2 = tns('red')
## Set up color palette
# NOTE(review): cols is fully overwritten a few lines below; this viridis
# palette block (and show_col) appears to be dead code.
cols = rev(viridis_pal(alpha = .7, begin = 1, end = .1)(5))
show_col(cols[1])
cols[2] = 'purple'
cols[3] = 'dodgerblue'
cols[4] = 'limegreen'
cols[5] = 'goldenrod'
cols = c(NA, 'goldenrod', 'limegreen', 'dodgerblue', 'purple')
## Define a function analogous to the likelihood above, which outputs model predictions
#### INPUTS
## pars - named vector of parameter values (should be best estimates from fits above)
## pro.H1 - probabilities of H1N1 protection
## pro.H3 - probabilities of H3N2 protection
## i.type - character, type of imprinting protection (can be 'HA subtype', 'HA group', or 'NA subtype'). Used in plot titles.
# NOTE(review): the five assignments below look like leftover interactive
# debugging - they create globals that shadow plotmod1()'s arguments and
# are not needed for the script to run.
pars=lk.AS$par
pro.H1 = proH1.master
pro.H3 = proH3.master
i.type = 'HA Sub'
CIs = AS.CIs
#### plotmod1: draw three diagnostic panels (age effects, imprinting effects,
#### ML estimates with CIs) on the current device and return the model's
#### predicted birth-year distributions of H1N1 and H3N2 cases.
#### Depends on globals created by the load()/source() calls above: the
#### age-bin vectors (a0.4 ... a81.90plus), demog, and H1.master/H3.master.
#### NOTE(review): assumes those objects are matrices with rows = seasons and
#### columns = birth years (row 13 is plotted as "the last observed season")
#### - confirm against 00-Inputs_multinomial.R.
#### Returns a list: age (unnormalized age relative risk), iH1/iH3 (birth-
#### year imprinting relative risk), fits (2 x birth-year matrix of predicted
#### H1N1/H3N2 case counts, reversed to match the barplot ordering).
plotmod1 = function(pars, CIs, pro.H1 = 1, pro.H3 = 1, i.type = NULL){
## Parse parameter inputs ##
# When the model has no imprinting parameter, default the relative risk to 1
# (no protection).
rPro.H1 = ifelse(is.na(pars['rPro.H1']), 1, pars['rPro.H1'])# Relative risk given imprinting protection
rPro.H3 = ifelse(is.na(pars['rPro.H3']), 1, pars['rPro.H3'])# Relative risk given imprinting protection
b = 1 # Fix this as a free parameter. Then estimate all others as relative risk. Most should be lower, bounded at 0.
r5.10 = pars['r5.10'] # Expected risk for 5 to 10 year olds
r11.17 = pars['r11.17'] # etc.
r18.24 = pars['r18.24']
r25.31 = pars['r25.31']
r32.38 = pars['r32.38']
r39.45 = pars['r39.45']
r46.52 = pars['r46.52']
r53.59 = pars['r53.59']
r60.66 = pars['r60.66']
r67.73 = pars['r67.73']
r74.80 = pars['r74.80']
r81.90p = pars['r81.90p']
## Calculate predicted case distributions, as in the likelihood ##
# Weighted sum of the age-bin indicator vectors: relative risk by age.
age.baseline = (b*a0.4 +b*r5.10*a5.10 +b*r11.17*a11.17+b*r18.24*a18.24+b*r25.31*a25.31+ b*r32.38*a32.38+ b*r39.45*a39.45+ b*r46.52*a46.52+ b*r53.59*a53.59+ b*r60.66*a60.66+ b*r67.73*a67.73+ b*r74.80*a74.80+ b*r81.90p*a81.90plus)
age.baseline.rr = age.baseline/max(age.baseline) # Save unnormalized version
age.baseline = age.baseline/rowSums(age.baseline)
# Combine the age distribution with demography, renormalizing per row.
age.demog = (age.baseline*demog)/rowSums(age.baseline*demog)
# Mixture: protected fraction at reduced risk rPro, rest at risk 1.
imprinting.H1 = (pro.H1*rPro.H1+(1-pro.H1))
imprinting.H3 = (pro.H3*rPro.H3+(1-pro.H3))
## Age-specific risk prediction
## Plot row 15, which represents the last observed season, and aligns 0-year-olds with the first column (2015 birth year)
## No need to take the mean, because age-specific predictions are identical across countries and years
# NOTE(review): the comment above says "row 15" but row 13 is plotted - verify.
par(mar = c(3.5, 4, 3.5, 5))
plot(0:97, age.baseline.rr[13,], main = 'Age effects', ylab = 'relative risk', xlab = 'age', bty = 'n', ylim = c(0,1.1))
mtext('A', side = 3, line = 1.5, at = -5, font = 2)
#abline(h = 1, lty = 2)
## Predicted effects from imprinting protection
## Plot the colmeans (mean predicted protection across birth years)
## This is necessary because imprinting reconstructions differ slightly from year to year, as children (with recent birth years) get older.
if(is.na(pars['rPro.H1'])){ # If no imprinting protection par, this factor was not relevant to the model fit, and we plot an empty window
plot(1, 1, xlim = c(0,1), ylim = c(0,1.1), col = 'white', xaxt = 'n', yaxt = 'n', xlab = '', ylab = '', main = paste('Imprinting protection\n', i.type, sep = ''), bty = 'n')
text(.5, .5, 'NA', cex = 2)
}else{ # Else, if imprinting protection was included in the model, plot mean birth year-specific relative risk, after adjusting for imprinting protection.
## Plot just one row here, because predicted protection changes with birth year
plot(-2015:-1918, imprinting.H1[13,], col = colH1N1, cex = .7, ylab = 'relative risk', xlab = 'birth year', main = 'Imprinting effects', ylim = c(0,1.1), bty = 'n', xaxt = 'n')
axis(side = 1, at = seq(-2015, -1918, by = 10), labels = seq(2015, 1918, by = -10), line = 0)
points(-2015:-1918, imprinting.H3[13,], col = colH3N2, cex = .7)
}
## H1N1 risk in blue, H3N2 risk in red
mtext('B', side = 3, line = 1.5, at = -5, font = 2)
#abline(h = 1, lty = 2)
## Plot relative risk point estimates for each free parameter.
par(mar = par('mar')+c(.5,3,.5,.5))
# Empty labels/NA slots create visual spacing between the age rows and the
# two imprinting rows in the dot plot.
parnames = c("ages 0-4", "ages5-10", "ages 11-17", "ages 18-24", "ages 25-31", "ages 32-38", "ages 39-45", "ages 46-52", "ages 53-59", "ages 60-66", "ages 67-73", "ages 74-80", "ages 81+", "", "", "", paste('Impr.', i.type, sep = ', '))
xvals = c(NA, pars[grep(pattern = "r\\d\\d?.\\d\\d", x = names(pars))], NA, NA, NA, pars['rPro.H1'], pars['rPro.H3'])
## and CIS
xlows = CIs[1,]
xlows = c(NA, xlows[grep(pattern = "r\\d\\d?.\\d\\d", x = colnames(CIs))], NA, NA, NA, xlows['rPro.H1'], xlows['rPro.H3'])
xhis = CIs[2,]
xhis = c(NA, xhis[grep(pattern = "r\\d\\d?.\\d\\d", x = colnames(CIs))], NA, NA, NA, xhis['rPro.H1'], xhis['rPro.H3'])
yvals = c(1:15, 16, 16.9, 17.1)
#print(rbind(yvals, xvals, xlows, xhis, c(rep('black', 12), 'dodgerblue', 'firebrick1', 'dodgerblue', 'firebrick1', 'dodgerblue', 'firebrick1', 'dodgerblue', 'firebrick1', 'black')))
#par(mar = c(4, 7, 2, 1))
plot(xvals, yvals, xlim = c(0, 1.6), xaxt = 'n', yaxt = 'n', xlab = 'relative risk', ylab = '', col = c(rep('black', 16), 'dodgerblue', 'firebrick1'), pch = 9, main = "Maximum likelihood estimates")
segments(x0 = xlows, y0 = yvals, x1 = xhis, col = c(rep('black', 16), 'dodgerblue', 'firebrick1'), lwd = 3)
axis(side = 1, at = seq(0, 1.6, by = .25))
axis(side = 2, at = 1:17, labels = parnames, las = 2)
abline(v = 1, lty = 2)
mtext('C', side = 3, line = 1.5, at = -.35, font = 2, xpd = NA)
# 2. calculate predicted distribution, pp, as a function of the parameters:
# This step gives the model prediction
pp.H1 = age.demog * imprinting.H1
pp.H3 = age.demog * imprinting.H3
## Return the predicted age distributions of infection. Plot these below against the observed data
return(list(age = age.baseline.rr, iH1 = imprinting.H1, iH3 = imprinting.H3,
fits = rbind(rev(colSums(pp.H1/rowSums(pp.H1)*rowSums(H1.master))), rev(colSums(pp.H3/rowSums(pp.H3)*rowSums(H3.master))))))
}
# Run each candidate model through plotmod1 (each call also draws its three
# diagnostic panels on the default device) and keep the predicted birth-year
# case distributions for the comparison plots below.
# Get prediction for model AS, but don't save to tiff
AS = plotmod1(lk.AS$par, pro.H1 = proH1.master, pro.H3 = proH3.master, i.type = 'HA Sub', CIs = AS.CIs)
spred = AS$fits# Get predicted age distributions using the best fit subtype-specific imprinting model.
AG = plotmod1(lk.AG$par, pro.H1 = prog1.master, pro.H3 = prog2.master, i.type = "HA grp", CIs = AG.CIs)
gpred = AG$fits
AA = plotmod1(lk.A$par, pro.H1 = 1, pro.H3 = 1, i.type = NULL, CIs = A.CIs)
apred = AA$fits
AN = plotmod1(lk.AN$par, pro.H1 = proN1.master, pro.H3 = proN2.master, i.type = 'NA Sub', CIs = AN.CIs)
npred = AN$fits
######### Plot AZ age fits
tiff(file = plot5, width = 3, height = 1.5, units = 'in', res = 400)
par(mar = c(3,3,1,1)+.5, mgp = c(2,1,0))
plot(0:97, AN$age[13,], main = '', ylab = 'relative risk', xlab = 'age', bty = 'n', ylim = c(0,1.1))
#abline(h = 1, lty = 2)
dev.off()
######### Plot AZ imprinting fits
tiff(file = plot6, width = 3, height = 1.5, units = 'in', res = 400)
par(mar = c(3,3,1,1)+.5, mgp = c(2,1,0))
plot(-2015:-1918, AN$iH1[13,], col = 'dodgerblue', cex = .7, ylab = 'relative risk', xlab = 'birth year', main = '', ylim = c(0,1.1), bty = 'n', xaxt = 'n')
axis(side = 1, at = seq(-2015, -1918, by = 10), labels = seq(2015, 1918, by = -10), line = 0)
points(-2015:-1918, AN$iH3[13,], col = 'firebrick1', cex = .7)
dev.off()
######### Plot AZ H1N1 fits
# NOTE(review): dal (rounded delta-AIC values) is computed here but never
# used - the legends below hard-code 0.00/34.54/249.06/385.42, which must be
# kept in sync by hand whenever the fits change. Consider building the legend
# expressions from dal (e.g. via bquote()).
dal = round(del.AIC, 2); names(dal) = gsub(pattern = 'lk.(\\w+)', replacement = '\\1', x = names(del.AIC))
tiff(file = plot3, width = 4, height = 3.5, units = 'in', res = 400)
par(mar = c(3,3,2,2)+.5, mgp = c(2,1,0))
# Observed H1N1 cases by birth year (grey bars) with each model's predicted
# distribution overlaid as a line.
xx = barplot(colSums(H1.master), col = 'gray', border = 'gray', ylim = c(0, 115), xlab = 'birth year', ylab = 'cases')
axis(side = 1, at = xx[seq(1, length(xx), by = 10)], labels = NA, line = 0)
lines(xx, rev(apred[1,]), col = cols[2], lwd = 1.7)
lines(xx, rev(gpred[1,]), col = cols[3], lwd = 1.7)
lines(xx, rev(spred[1,]), col = cols[4], lwd = 1.7)
lines(xx, rev(npred[1,]), col = cols[5], lwd = 1.7)
legend('topright', legend = c('observed', expression(paste('AN fit, ', Delta, 'AIC=', 0.00)),
expression(paste('AS fit, ', Delta, 'AIC=', 34.54)),
expression(paste('AG fit, ', Delta, 'AIC=', 249.06)),
expression(paste('A fit, ', Delta, 'AIC=', 385.42))), pch = c(15, NA, NA, NA, NA), col = c('gray', cols[5:2]), lwd = c(NA,2,2,1,1), bty = 'n')
dev.off()
######### Plot AZ H3N2 fits
# Same layout as the H1N1 panel, using row 2 of each prediction matrix.
tiff(file = plot4, width = 4, height = 3.5, units = 'in', res = 400)
par(mar = c(3,3,2,2)+.5, mgp = c(2,1,0))
xx = barplot(colSums(H3.master), col = 'gray', border = 'gray', xlab = 'birth year', ylab = 'cases')
axis(side = 1, at = xx[seq(1, length(xx), by = 10)], labels = NA, line = 0)
lines(xx, rev(apred[2,]), col = cols[2], lwd = 1.7)
lines(xx, rev(gpred[2,]), col = cols[3], lwd = 1.7)
lines(xx, rev(spred[2,]), col = cols[4], lwd = 1.7)
lines(xx, rev(npred[2,]), col = cols[5], lwd = 1.7)
legend('topright', legend = c('observed', expression(paste('AN fit, ', Delta, 'AIC=', 0.00)),
expression(paste('AS fit, ', Delta, 'AIC=', 34.54)),
expression(paste('AG fit, ', Delta, 'AIC=', 249.06)),
expression(paste('A fit, ', Delta, 'AIC=', 385.42))), pch = c(15, NA, NA, NA, NA), col = c('gray', cols[5:2]), lwd = c(NA,2,2,1,1), bty = 'n')
dev.off()
|
4d718a6095eee41dff681658ae352baf0f4f962a
|
3aef5a679c390d1f2c7ecba35eca09864164c5a5
|
/man/PsmCurves.Rd
|
96dde9d2d0f321516a73d1ba491ab6a5ecf56ed0
|
[] |
no_license
|
jeff-m-sullivan/hesim
|
576edfd8c943c62315890528039366fe20cf7844
|
fa14d0257f0d6d4fc7d344594b2c4bf73417aaf3
|
refs/heads/master
| 2022-11-14T07:35:15.780960
| 2022-09-02T03:13:49
| 2022-09-02T03:13:49
| 140,300,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 10,499
|
rd
|
PsmCurves.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psm.R
\name{PsmCurves}
\alias{PsmCurves}
\title{Partitioned survival curves}
\format{
An \link[R6:R6Class]{R6::R6Class} object.
}
\description{
Summarize N-1 survival curves for an N-state partitioned survival model.
}
\examples{
library("flexsurv")
N_SAMPLES <- 5 # Number of parameter samples for PSA
# Consider a 3-state model where there is a
# progression-free survival (PFS) and an
# overall survival (OS) endpoint
# (0) Model setup
hesim_dat <- hesim_data(
strategies = data.frame(
strategy_id = c(1, 2),
strategy_name = c("SOC", "New 1")
),
patients = data.frame(
patient_id = 1
)
)
# (1) Parameterize survival models
## (1.1) If patient-level data is available,
## we can fit survival models
### (1.1.1) Data for estimation (for simplicity, only use 2 strategies)
surv_est_data <- as_pfs_os(
onc3[strategy_name != "New 2"],
patient_vars = c("patient_id", "strategy_name")
)
surv_est_data$strategy_name <- droplevels(surv_est_data$strategy_name)
### (1.1.2) Fit models
fit_pfs <- flexsurvreg(Surv(pfs_time, pfs_status) ~ strategy_name,
data = surv_est_data, dist = "exp")
fit_os <- flexsurvreg(Surv(os_time, os_status) ~ strategy_name,
data = surv_est_data, dist = "exp")
fits <- flexsurvreg_list(pfs = fit_pfs, os = fit_os)
## (1.2) If patient-level data is NOT available,
## we can construct the parameter objects "manually"
### (1.2.1) Baseline hazard:
### Assume that we know the (log) rate parameters for both PFS and OS
### for SOC (i.e., the intercept) and their standard error
logint_pfs_est <- -1.7470900
logint_pfs_se <- 0.03866223
logint_os_est <- -2.7487675
logint_os_se <- 0.04845015
### (1.2.2) Relative treatment effect:
### Assume we know the log hazard ratios (and their standard errors)
### for comparing the new interventions to the SOC
loghr_pfs_est_new1 <- -0.1772028
loghr_pfs_se_new1 <- 0.05420119
loghr_os_est_new1 <- -0.1603632
loghr_os_se_new1 <- 0.06948962
### (1.2.3) Create "params_surv_list" object by combining the baseline hazard
### and relative treatment effects
params <- params_surv_list(
#### Model for PFS
pfs = params_surv(
coefs = list(
rate = data.frame( # coefficients predict log rate
intercept = rnorm(N_SAMPLES, logint_pfs_est, logint_pfs_se),
new1 = rnorm(N_SAMPLES, loghr_pfs_est_new1, loghr_pfs_se_new1)
)
),
dist = "exp"
),
#### Model for OS
os = params_surv(
coefs = list(
rate = data.frame(
intercept = rnorm(N_SAMPLES, logint_os_est, logint_os_se),
new1 = rnorm(N_SAMPLES, loghr_os_est_new1, loghr_os_se_new1)
)
),
dist = "exp"
)
)
#### The print (and summary) methods for the "params_surv_list" object will
#### summarize each of the model terms, which is a good way to check
#### if it's been setup correctly
params
# (2) Simulation
## (2.1) Construct the model
### (2.1.1) Case where patient-level data was available
### Use create_PsmCurves.params_flexsurvreg_list() method
surv_input_data <- expand(hesim_dat, by = c("strategies", "patients"))
psm_curves1 <- create_PsmCurves(fits, input_data = surv_input_data,
n = N_SAMPLES,
uncertainty = "normal",
est_data = surv_est_data)
### (2.1.2) Case where patient-level data was NOT available
### Use create_PsmCurves.params_surv_list() method
surv_input_data$intercept <- 1
surv_input_data$new1 <- ifelse(surv_input_data$strategy_name == "New 1",
1, 0)
psm_curves2 <- create_PsmCurves(params, input_data = surv_input_data)
## (2.2) Summarize survival models
## There are minor discrepancies between the case where models were fit
## with flexsurvreg() and the case where the "params_surv_list" object
## was constructed manually due to differences in the random draws
## of the parameter samples. These differences are decreasing in the size
## of N_SAMPLES
times <- seq(0, 10, 1/12) # Monthly times
### Quantiles
head(psm_curves1$quantile(p = c(.25, .5, .75)))
head(psm_curves2$quantile(p = c(.25, .5, .75)))
### Survival curves
head(psm_curves1$survival(t = times))
head(psm_curves2$survival(t = times))
### Restricted mean survival
head(psm_curves1$rmst(t = c(2, 5)))
head(psm_curves2$rmst(t = c(2, 5)))
}
\seealso{
\code{PsmCurves} are conveniently created from either fitted models or
parameter objects with \code{\link[=create_PsmCurves]{create_PsmCurves()}}. A complete economic model can be
implemented with the \code{\link{Psm}} class. A longer example is provided in
\code{vignette("psm")}.
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{params}}{An object of class \code{\link{params_surv_list}}.}
\item{\code{input_data}}{An object of class \code{\link{input_mats}}. Each row in \code{X} must
be a unique treatment strategy and patient.}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-PsmCurves-new}{\code{PsmCurves$new()}}
\item \href{#method-PsmCurves-hazard}{\code{PsmCurves$hazard()}}
\item \href{#method-PsmCurves-cumhazard}{\code{PsmCurves$cumhazard()}}
\item \href{#method-PsmCurves-survival}{\code{PsmCurves$survival()}}
\item \href{#method-PsmCurves-rmst}{\code{PsmCurves$rmst()}}
\item \href{#method-PsmCurves-quantile}{\code{PsmCurves$quantile()}}
\item \href{#method-PsmCurves-check}{\code{PsmCurves$check()}}
\item \href{#method-PsmCurves-clone}{\code{PsmCurves$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PsmCurves-new"></a>}}
\if{latex}{\out{\hypertarget{method-PsmCurves-new}{}}}
\subsection{Method \code{new()}}{
Create a new \code{PsmCurves} object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PsmCurves$new(params, input_data)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{params}}{The \code{params} field.}
\item{\code{input_data}}{The \code{input_data} field.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new \code{PsmCurves} object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PsmCurves-hazard"></a>}}
\if{latex}{\out{\hypertarget{method-PsmCurves-hazard}{}}}
\subsection{Method \code{hazard()}}{
Predict the hazard function for each survival curve as a function of time.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PsmCurves$hazard(t)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{t}}{A numeric vector of times.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A \code{data.table} with columns \code{sample}, \code{strategy_id},
\code{patient_id}, \code{grp_id}, \code{curve} (the curve number), \code{t}, and \code{hazard}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PsmCurves-cumhazard"></a>}}
\if{latex}{\out{\hypertarget{method-PsmCurves-cumhazard}{}}}
\subsection{Method \code{cumhazard()}}{
Predict the cumulative hazard function for each survival curve as a function of time.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PsmCurves$cumhazard(t)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{t}}{A numeric vector of times.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A \code{data.table} with columns \code{sample}, \code{strategy_id},
\code{patient_id}, \code{grp_id}, \code{curve}, \code{t}, and \code{cumhazard}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PsmCurves-survival"></a>}}
\if{latex}{\out{\hypertarget{method-PsmCurves-survival}{}}}
\subsection{Method \code{survival()}}{
Predict survival probabilities for each survival curve as a function of time.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PsmCurves$survival(t)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{t}}{A numeric vector of times.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
An object of class \code{\link{survival}}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PsmCurves-rmst"></a>}}
\if{latex}{\out{\hypertarget{method-PsmCurves-rmst}{}}}
\subsection{Method \code{rmst()}}{
Predict the restricted mean survival time up until time points \code{t}
for each survival curve.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PsmCurves$rmst(t, dr = 0)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{t}}{A numeric vector of times.}
\item{\code{dr}}{Discount rate.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A \code{data.table} with columns \code{sample}, \code{strategy_id},
\code{patient_id}, \code{grp_id}, \code{curve}, \code{t}, and \code{rmst}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PsmCurves-quantile"></a>}}
\if{latex}{\out{\hypertarget{method-PsmCurves-quantile}{}}}
\subsection{Method \code{quantile()}}{
Predict quantiles of the survival distribution for each survival curve.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PsmCurves$quantile(p)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{p}}{A numeric vector of probabilities for computing quantiles.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A \code{data.table} with columns \code{sample}, \code{strategy_id},
\code{patient_id}, \code{grp_id}, \code{curve}, \code{p} and \code{quantile}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PsmCurves-check"></a>}}
\if{latex}{\out{\hypertarget{method-PsmCurves-check}{}}}
\subsection{Method \code{check()}}{
Input validation for class. Checks that fields are the correct type.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PsmCurves$check()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PsmCurves-clone"></a>}}
\if{latex}{\out{\hypertarget{method-PsmCurves-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PsmCurves$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
03e65d211fed437a447b1ad50d389b5db91ec507
|
fe4bdb0f1ecd46518a6b388a1a7d2d085a9b98a1
|
/SVM.R
|
69bcb14760aedbac36471e1e51d2ece8e495c749
|
[] |
no_license
|
raghulmohankumar/Machine-Learning
|
17c0ceec25d78e8e58a053468efdbc50cb7b6563
|
f3dc4300020fafda690e605eac5923a02c97964d
|
refs/heads/master
| 2021-12-10T23:45:00.730546
| 2016-10-02T14:52:51
| 2016-10-02T14:52:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,364
|
r
|
SVM.R
|
# Handwritten-digit classification with SVMs on the USPS data (e1071).
# Fits a linear SVM and an RBF SVM, cross-validating cost (and gamma for the
# RBF kernel), then reports test-set misclassification rates.
library(e1071)
library(ggplot2)
library(rgl)

# import data: feature matrix and class labels
# NOTE(review): header = TRUE discards the first line of each file as a header
# row; confirm the USPS text files actually contain headers, otherwise one
# observation is silently lost.
data <- read.table('Documents/uspsdata/uspsdata.txt', header = TRUE)
y <- read.table("Documents/uspsdata/uspscl.txt", header = TRUE)
names(data)
length(data)
nrow(data)

set.seed(1)
# randomly split into 80% training / 20% test, keeping x and y rows aligned
s <- sort(sample(nrow(y), 0.8 * nrow(y)))
train.x <- as.matrix(data.frame(data[c(s), ], row.names = c(1:(0.8 * nrow(y)))))
train.y <- as.numeric(as.matrix(data.frame(y[c(s), ], row.names = c(1:(0.8 * nrow(y))))))
test.x <- as.matrix(data.frame(data[-c(s), ], row.names = c(1:(round(nrow(y) - 0.8 * nrow(y))))))
test.y <- as.numeric(as.matrix(data.frame(y[-c(s), ], row.names = c(1:(round(nrow(y) - 0.8 * nrow(y)))))))
length(c(s))  # sanity check: training-set size
traindata <- data.frame(x = train.x, y = as.factor(train.y))
testdata <- data.frame(x = test.x, y = as.factor(test.y))

# train a linear SVM
svmlinear <- svm(y ~ ., data = traindata, kernel = "linear", cost = 10, scale = TRUE)
svmlinear$index
summary(svmlinear)

# cross-validate the margin (cost) parameter
tune.out1 <- tune(svm, y ~ ., data = traindata, kernel = "linear",
                  ranges = list(cost = c(0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1)))
summary(tune.out1)
# the tune object stores its results in `performances` (plural); the original
# `$performance` only worked via R's partial `$` matching
tune.out1$performances$error

# misclassification rate as a function of the margin parameter (linear case)
error.l <- tune.out1$performances$error
cost.l <- tune.out1$performances$cost
qplot(cost.l, error.l, log = 'x', geom = "line", main = "Misclassification Rate of the linear SVM")

# test-set misclassification rate of the best linear SVM
bestmodel1 <- tune.out1$best.model
summary(bestmodel1)
predict(bestmodel1, testdata)
mean(test.y != predict(bestmodel1, testdata))

# train an SVM with soft margin and RBF kernel
svmradial <- svm(y ~ ., data = traindata, kernel = "radial", cost = 1)
summary(svmradial)

# cross-validate both the soft-margin parameter and the kernel bandwidth
tune.out2 <- tune(svm, y ~ ., data = traindata, kernel = "radial",
                  ranges = list(cost = c(0.01, 0.05, 0.1, 0.5, 1, 10, 100, 1000, 10000),
                                gamma = c(0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1, 0.5, 1)))
summary(tune.out2)

# error as a function of the margin parameter and kernel bandwidth (non-linear case)
cost.r <- tune.out2$performances$cost
gamma.r <- tune.out2$performances$gamma
error.r <- tune.out2$performances$error
dat <- data.frame(cost.r, gamma.r, error.r)  # data.frame() alone suffices; cbind() was redundant
dat[1:12, ]
ggplot(dat, aes(x = log(cost.r), y = error.r, group = gamma.r, col = gamma.r)) + geom_line()

# 3D view of the tuning surface
plot3d(cost.r, gamma.r, error.r, log = "xy")

# test-set misclassification rate of the best RBF SVM
bestmodel2 <- tune.out2$best.model
summary(bestmodel2)
mean(test.y != predict(bestmodel2, testdata))
|
2e52a442724d14c8761152c2650b9bb6e00bc12e
|
a46d9f582e0cbf0939b67b4937bcff4e2e0ce471
|
/Scripts inicials/simul_rel_vars_Ejemplo_7.gam.R
|
2af1fd809a072d7aeef68b8fae070e290631528a
|
[] |
no_license
|
oriolrovira/TFG
|
893605991f0d9d182d6b83b76aee8f47ab31835e
|
d7045fa776f91f3a58c48516dd9be6ff02e57bcc
|
refs/heads/master
| 2023-02-27T12:42:36.326644
| 2021-02-05T14:32:12
| 2021-02-05T14:32:12
| 262,000,272
| 0
| 1
| null | 2020-05-19T12:03:06
| 2020-05-07T08:57:59
|
R
|
ISO-8859-2
|
R
| false
| false
| 3,946
|
r
|
simul_rel_vars_Ejemplo_7.gam.R
|
# Variation of simul_rel_vars_Ejemplo_7.R
# - y is generated according to a gam model, and estimated with a gam
#
# Simulated example:
#   (X1, X2) independent from (X3, X4, X5),
#   X1 and X2 correlated,
#   X3, X4 and X5 correlated.
#
library(mgcv)
library(ggplot2)
library(grid)
library(maptools)# For pointLabel (NOTE(review): maptools is retired; confirm pointLabel is still required)
# Project-local helpers: relevance by ghost variables and by random permutation
source("relev.ghost.var.R")
source("relev.rand.perm.R")
printing <- TRUE #FALSE # set to FALSE to plot on screen instead of writing PDF files
set.seed(1234)
# Simulation sizes and model parameters
n1 <- 2000 # size of the training sample
n2 <- 1000 # size of the test sample
rho.12 <- .95 # correlation between x1 and x2
rho.345 <- .95 # correlation between x3, x4 and x5
sigma.1 <- 1 # sd for x1
sigma.2 <- 1 # sd for x2
sigma.3 <- 1 # sd for x3
sigma.4 <- 1 # sd for x4
sigma.5 <- 1 # sd for x5
sigma.eps <- 1/2 # residual sd for defining y
beta1 <- 1 # coef. of y=x_1+...+x_{p1}
beta2 <- 0.5#0#0.5 #.25 # between -1 and 1; controls how additive the (X2,X3) term is:
# the farther from 0, the more additive the term
# values explored: 0, .25, 1
# For beta2=0, do beta2.2=1
# For beta2==.5, beta2.2=1
beta2.2 <- 1
#if (beta2==0) beta2.2 <- 2/3
beta3 <- 1 # coef. of y=x_1+...+x_{p2}
beta4 <- 1 # coef. of y=x_1+...+x_{p2}
beta5 <- 1 # coef. of y=x_1+...+x_{p2}
# Generating variables x1 and x2:
# multiply iid N(0,1) draws by the symmetric square root of the 2x2
# equicorrelation matrix (built from its eigendecomposition), then scale
# each column by its sd.
Sigma.12 <- matrix(rho.12, nrow=2, ncol=2)
diag(Sigma.12) <- 1
eig.Sigma.12 <- eigen(Sigma.12)
sqrt.Sigma.12 <- eig.Sigma.12$vectors %*% diag(eig.Sigma.12$values^.5) %*% t(eig.Sigma.12$vectors)
X12 <- matrix(rnorm((n1+n2)*2),ncol=2) %*% sqrt.Sigma.12 %*%diag(c(sigma.1,sigma.2))
X1<-X12[,1]
X2<-X12[,2]
# Generating variables x3, x4 and x5 (same construction, 3x3 case)
Sigma.345 <- matrix(rho.345, nrow=3, ncol=3)
diag(Sigma.345) <- 1
eig.Sigma.345 <- eigen(Sigma.345)
sqrt.Sigma.345 <- eig.Sigma.345$vectors %*% diag(eig.Sigma.345$values^.5) %*% t(eig.Sigma.345$vectors)
X345 <- matrix(rnorm((n1+n2)*3),ncol=3) %*% sqrt.Sigma.345 %*%diag(c(sigma.3,sigma.4,sigma.5))
X3<-X345[,1]
X4<-X345[,2]
X5<-X345[,3]
# Defining the response variable.
# Earlier variants kept for reference (linear, fully additive in cos, and a
# sqrt-weighted interaction version):
#y <- beta1*X1 + beta2*X2 + beta3*X3 + beta4*X4 + beta5*X5 + rnorm(n1+n2,sd=sigma.eps)
#y <- beta1*cos(X1) + beta2*cos(X2) + beta3*cos(X3) + beta4*cos(X4) + beta5*cos(X5) + rnorm(n1+n2,sd=sigma.eps)
#y <- beta1*X1 + (beta2*(X2+X3) + sqrt(1-beta2^2)*X2*X3) + beta4*X4 + beta5*X5 + rnorm(n1+n2,sd=sigma.eps)
#y <- beta1*cos(X1) + beta2.2*(beta2*(cos(X2)+cos(X3)) + sqrt(1-beta2^2)*X2*X3) +
#  beta4*cos(X4) + beta5*cos(X5) + rnorm(n1+n2,sd=sigma.eps)
# Current version: additive cos terms plus an X2*X3 interaction whose weight
# (1-beta2) shrinks as the additive weight beta2 grows.
y <- beta1*cos(X1) + beta2.2*(beta2*(cos(X2)+cos(X3)) + (1-beta2)*X2*X3) +
  beta4*cos(X4) + beta5*cos(X5) + rnorm(n1+n2,sd=sigma.eps)
X <- cbind(X1,X2,X3,X4,X5)
colnames(X) <- paste0("x",1:5)
yX <- as.data.frame(cbind(y,X))
colnames(yX) <- c("y",paste0("x",1:5))
# Training sample: first n1 rows
tr.sample <- (1:n1)
# Test sample: -tr.sample (rows n1+1, ..., n1+n2)
# Fitting the additive model (gam); note the joint smooth s(x2,x3) replaces
# the fully additive s(x2)+s(x3) of the commented alternative below.
#gam.tr <- gam(y ~ s(x1)+s(x2)+s(x3)+s(x4)+s(x5), data=yX, subset = tr.sample)
gam.tr <- gam(y ~ s(x1)+s(x2,x3)+s(x4)+s(x5), data=yX, subset = tr.sample)
(sum.gam.tr <- summary(gam.tr))
if (printing) pdf(file="Ex7_gam_model.pdf", width=8,height=6)
plot(gam.tr,residuals=TRUE,pages=1)
if (printing) dev.off()
# Predicting in the test sample
y.hat.ts <- as.numeric( predict(gam.tr,newdata = yX[-tr.sample,]) )
# Variable relevance matrix
# by ghost variables (computed on the test sample)
relev.ghost.out <- relev.ghost.var(model=gam.tr, newdata = yX[-tr.sample,], func.model.ghost.var= lm)
if (printing) pdf("Ex7_gam_Relev_GH.pdf", width = 8, height = 6)
plot.relev.ghost.var(relev.ghost.out, resid.var=gam.tr$sig2, n1=n1)
if (printing) dev.off()
#######
# Variable relevance matrix
# by random permutation (compared against the ghost-variable relevances)
relev.rand.out <- relev.rand.perm(model=gam.tr, newdata = yX[-tr.sample,], func.model.ghost.var= lm)
if (printing) pdf("Ex7_gam_Relev_RP.pdf", width = 8, height = 6)
plot.relev.rand.perm(relev.rand.out, relev.ghost=relev.ghost.out$relev.ghost)
if (printing) dev.off()
|
0ce1380ef2f4f07e4210baf718c0dba165110b42
|
f2ca5431d921b1189a6ebaacd88aef3a9a1a1820
|
/tests/testthat/helper_mlr3.R
|
c054b036063633c41e48ab2d639de719b6f33dcd
|
[] |
no_license
|
mlr-org/mlr3cluster
|
44747d2b4fae9170b5ea20704cccfdad777f198f
|
161aee5e75aa299bea29617020339768a8d9a75c
|
refs/heads/main
| 2023-06-22T09:58:51.455583
| 2023-06-15T22:32:15
| 2023-06-15T22:32:15
| 157,852,274
| 15
| 7
| null | 2023-03-10T01:08:56
| 2018-11-16T10:32:38
|
R
|
UTF-8
|
R
| false
| false
| 745
|
r
|
helper_mlr3.R
|
# testthat helpers for mlr3cluster: load mlr3's shared helper scripts and
# register the cluster-specific hooks used by mlr3's generic learner autotests.
lapply(list.files(system.file("testthat", package = "mlr3"), pattern = "^helper.*\\.[rR]",
  full.names = TRUE), source)
# Build the "sanity" task used by the autotests: N points from two
# well-separated 2-d Gaussian clusters (mlbench), wrapped as a TaskClust.
# The fixed seed keeps the task identical across test runs.
generate_tasks.LearnerClust = function(learner, N = 20L) { # nolint
  set.seed(1)
  data = mlbench::mlbench.2dnormals(N, cl = 2, r = 2, sd = 0.1)
  task = TaskClust$new("sanity", mlr3::as_data_backend(as.data.frame(data$x)))
  list(task)
}
# Register in the calling frame so the S3 generic `generate_tasks()`
# (defined in mlr3's helpers) dispatches here for LearnerClust objects.
registerS3method("generate_tasks", "LearnerClust", generate_tasks.LearnerClust,
  envir = parent.frame()
)
# Sanity check for predictions: scoring with the silhouette measure must
# yield a value strictly greater than -1 (i.e. the prediction is scorable).
sanity_check.PredictionClust = function(prediction, task, ...) { # nolint
  prediction$score(measures = msr("clust.silhouette"), task = task) > -1L
}
registerS3method("sanity_check", "PredictionClust", sanity_check.PredictionClust,
  envir = parent.frame()
)
|
198d9d53174d5a2d1c6b9bb4ea833ee4512dea03
|
284cf90e8beb4b8405f1de30c415f609d55e4e7e
|
/Untitled1.R
|
e3f91eb2e27425a0e8ae928f0fd77d69662959b6
|
[] |
no_license
|
paumontero/clases_cursoR
|
731dea17b22e4c682e54d74b842d25896c976d21
|
e5e436f51d618d9d7c393b2d456acff262e579b5
|
refs/heads/master
| 2021-01-12T13:57:51.428455
| 2016-09-26T13:46:57
| 2016-09-26T13:46:57
| 69,254,284
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 477
|
r
|
Untitled1.R
|
# Course-session scratch notes: vectors and data frames.
# Assumes `gatos` (a data frame of cats) and `edad` exist in the workspace.

# Hyphens are not valid in R names ("mi-vector" parses as `mi - vector`);
# use an underscore instead.
mi_vector <- vector(length = 3)

# Inspect the data frame and individual columns.
# Columns are extracted with `$` (the original notes used `%` and `&` by mistake).
str(gatos)
str(gatos$pelaje)
str(gatos$peso)  # was `srt(gatos&peso)`: typos in both the function name and operator
gatos

# Add an age column, then append a new row.
gatos <- cbind(gatos, edad)
nuevaFila <- list("carey", 3.3, TRUE, 9)
gatos <- rbind(gatos, nuevaFila)

# A factor column only accepts existing levels; add the new level first.
levels(gatos$pelaje)
levels(gatos$pelaje) <- c(levels(gatos$pelaje), "carey")
gatos <- rbind(gatos, list("tortoiseshell", 3.3, TRUE, 9))  # original call was left unfinished

# Sequences.
seq(2, 120)
mi_secuencia <- 1:10
head(mi_secuencia)
tail(mi_secuencia)
tail(mi_secuencia, 2)
|
53f02dc20c1f370b1422d27d0514e79185953cfd
|
e5036c7f2d13b1cea8b010acaee53ce34074b918
|
/man/DesignEval.Rd
|
0fc4e56a31cc67d2bc447c0160de6d3c896dde7f
|
[] |
no_license
|
cran/UniDOE
|
28c90515ebf5139cef7e3559b75f0e183834971b
|
8e3ba53b85975865c17641e91b6d495018cad175
|
refs/heads/master
| 2021-09-12T16:19:30.056341
| 2018-04-18T12:22:01
| 2018-04-18T12:22:01
| 112,484,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 837
|
rd
|
DesignEval.Rd
|
\name{DesignEval}
\alias{DesignEval}
\title{Evaluate design in terms of criteria}
\usage{
DesignEval(X0,crit)
}
\description{
This function takes a design matrix X0 and a criterion name crit, and returns the corresponding criterion value.
}
\arguments{
\item{X0}{an integer matrix object}
\item{crit}{a character string, the criterion to use:
"MD2" -- MD2
"CD2" -- CD2
"WD2" -- WD2
"maximin" -- maximin
Default: "MD2"}
}
\value{
criterion value.
}
\author{
Aijun Zhang, Haoyu Li, Shijie Quan
}
\references{
Zhang, A. and Li, H. (2017). UniDOE: An R package for constructing uniform design of experiments via stochastic and adaptive threshold accepting algorithm. Technical Report.
}
\examples{
x0 = matrix(c(1,1,1,2,2,2,3,3,3),nrow=3,byrow=TRUE)
crit="MD2"
value = DesignEval(x0,crit)
}
|
5869a1a1e1c79f49f450f9efa8b2231fe3cf0c09
|
2cb802c7e9bb18670769604cb289b03192661d5a
|
/Perinatal paper scripts/6_Public Code/Analysis Code/calculating_controls_pre_matching.R
|
49095f88173b50b74c0f031eb2a3805460032e81
|
[] |
no_license
|
Public-Health-Scotland/COPS-public
|
a18d36d8a69479e34c1ddd31f23a15b5b7a6eba6
|
b4c4df18020712fbae08a979226d0a382d6aeda9
|
refs/heads/main
| 2023-07-29T17:41:26.677028
| 2023-07-11T12:40:32
| 2023-07-11T12:40:32
| 362,821,738
| 0
| 2
| null | 2021-12-07T12:55:46
| 2021-04-29T13:11:02
|
R
|
UTF-8
|
R
| false
| false
| 13,510
|
r
|
calculating_controls_pre_matching.R
|
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# RStudio Workbench is strictly for use by Public Health Scotland staff and
# authorised users only, and is governed by the Acceptable Usage Policy https://github.com/Public-Health-Scotland/R-Resources/blob/master/posit_workbench_acceptable_use_policy.md.
#
# This is a shared resource and is hosted on a pay-as-you-go cloud computing
# platform. Your usage will incur direct financial cost to Public Health
# Scotland. As such, please ensure
#
# 1. that this session is appropriately sized with the minimum number of CPUs
# and memory required for the size and scale of your analysis;
# 2. the code you write in this script is optimal and only writes out the
# data required, nothing more.
# 3. you close this session when not in use; idle sessions still cost PHS
# money!
#
# For further guidance, please see https://github.com/Public-Health-Scotland/R-Resources/blob/master/posit_workbench_best_practice_with_r.md.
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# used for reviewer comments and used to produce supplementary table 4
# import pacakges
library(janitor)
library(dplyr)
library(tidyr)
library(forcats)
library(renv)
library(here)
library(odbc)
library(magrittr)
library(lubridate)
library(readr)
# library(hablar)
library(labelled)
library(purrr)
library(stringr)
# Run script 00 (setup) and load cohort 1 first. These counts answer how many
# pregnancies went beyond 40+6 weeks (gestation at outcome of 42+ weeks),
# split by exposure status.
infected_over_40_weeks <- cohort1_infect_contemp_controls_singletons %>%
  filter(gestation_at_outcome > 41) %>%
  count(inf_or_uninf, name = "n")
vaccinated_over_40_weeks <- cohort1_vacc_contemp_controls_singletons %>%
  filter(gestation_at_outcome > 41) %>%
  count(vacc_or_unvacc, name = "n")
##### file paths #####
# Working-data folder holds the cohorts produced by earlier pipeline scripts;
# folder_results is not used in the visible portion of this script but is
# kept for consistency with the other analysis scripts.
folder_working_data <- "/data/HPS/COPS_non_confi/COPS_VaccineSafety_Perinatal/2_Working_Data/"
folder_results <- "/data/HPS/COPS_non_confi/COPS_VaccineSafety_Perinatal/4_Results/"
# import data
#66414
# NOTE(review): the number above is presumably an expected row count from a
# previous run -- confirm and label, or remove.
cohort1_uninfected <- readRDS(paste0(folder_working_data, "cohort1_uninfected.rds"))
cohort1_unvaccinated <- readRDS(paste0(folder_working_data, "cohort1_unvaccinated.rds"))
# we need to get the ids that we remove because they have the wrong date
cohort1_infect_contemp_controls_singletons <- readRDS(paste0(folder_working_data, "matched_perinatal_cohort1_infection_secondary_contemporary_singletons.rds"))
# Find all the ids that have a covid infection before the study start date and
# remove them (and their matched controls) from the dataset. If the woman has a
# later infection that falls inside the eligibility window, that infection is
# used as the new index date instead of dropping her.
#
# Eligibility window for a replacement index infection (same four conditions
# applied to infections 1-4 in order, keeping the first that qualifies):
#   - no earlier than 42 days before the estimated conception date
#     (difftime defaults to days for Date inputs),
#   - no later than the end of the perinatal vaccination timing period,
#   - no later than the pregnancy end date,
#   - on or after the study start date.
# NOTE(review): "2020-05-18" (study start) is hard-coded in two places here --
# consider a named constant.
wrong_date_ids <- cohort1_infect_contemp_controls_singletons %>%
  # case_when with no TRUE arm leaves non-matching rows NA, so the filter
  # keeps only rows whose original index date predates the study start
  mutate(wrong_date_flag = case_when(perinatal_covid_index_date < "2020-05-18" ~ 1)) %>%
  filter(wrong_date_flag == 1) %>%
  mutate(outcome_covid_index_date_new = case_when(difftime(index_date_covid_infection_1, est_conception_date) >= -42 &
                                                    index_date_covid_infection_1 <= perinatal_vaccination_timing_period_end &
                                                    index_date_covid_infection_1 <= pregnancy_end_date &
                                                    index_date_covid_infection_1 >= "2020-05-18" ~ index_date_covid_infection_1,
                                                  difftime(index_date_covid_infection_2, est_conception_date) >= -42 &
                                                    index_date_covid_infection_2 <= perinatal_vaccination_timing_period_end &
                                                    index_date_covid_infection_2 <= pregnancy_end_date &
                                                    index_date_covid_infection_2 >= "2020-05-18" ~ index_date_covid_infection_2,
                                                  difftime(index_date_covid_infection_3, est_conception_date) >= -42 &
                                                    index_date_covid_infection_3 <= perinatal_vaccination_timing_period_end &
                                                    index_date_covid_infection_3 <= pregnancy_end_date &
                                                    index_date_covid_infection_3 >= "2020-05-18" ~ index_date_covid_infection_3,
                                                  difftime(index_date_covid_infection_4, est_conception_date) >= -42 &
                                                    index_date_covid_infection_4 <= perinatal_vaccination_timing_period_end &
                                                    index_date_covid_infection_4 <= pregnancy_end_date &
                                                    index_date_covid_infection_4 >= "2020-05-18" ~ index_date_covid_infection_4))
# Women with no qualifying replacement infection are removed entirely
ids_removed <- wrong_date_ids %>%
  filter(is.na(outcome_covid_index_date_new))
# `index` is presumably the matched-set identifier, so this pulls both the
# exposed women and their matched controls -- TODO confirm
cohort1_removed_data <- cohort1_infect_contemp_controls_singletons %>%
  filter((index %in% ids_removed$index))
# Return the removed rows to the uninfected pool (full_join joins on all
# shared columns by default)
cohort1_uninfected <- cohort1_uninfected %>%
  full_join(cohort1_removed_data)
rm(cohort1_infect_contemp_controls_singletons, cohort1_removed_data, wrong_date_ids, ids_removed)
# Characteristics of the unexposed (never-infected) group.
# Each sub-pipeline tallies one characteristic and tags it "category:level";
# bind_rows() stacks them and percentages are computed within each category.
# (T/F replaced with TRUE/FALSE throughout: T is an ordinary, reassignable
# variable in R.)
uninfected_descriptives <- bind_rows(
  # n pregnancies: one row per pregnancy (multiple births share pregnancy_id_orig)
  cohort1_uninfected %>%
    group_by(pregnancy_id_orig) %>%
    slice(1) %>%
    ungroup() %>%
    tally() %>%
    mutate(rowname = "n_pregnancies"),
  # n births: one row per baby
  cohort1_uninfected %>%
    tally() %>%
    mutate(rowname = "n_births"),
  # maternal age at conception: median and range
  cohort1_uninfected %>%
    summarise(median_age = median(mother_age_at_conception),
              min_age = min(mother_age_at_conception),
              max_age = max(mother_age_at_conception)) %>%
    pivot_longer(everything(), names_to = "rowname", values_to = "n"),
  # deprivation (SIMD); harmonise the "1=most deprived" label to "1"
  cohort1_uninfected %>%
    mutate(simd = case_when(simd == "1=most deprived" ~ "1",
                            TRUE ~ simd)) %>%
    group_by(simd) %>%
    tally() %>%
    mutate(rowname = paste0("deprivation:", simd)),
  cohort1_uninfected %>%
    group_by(ethnicity_cat) %>%
    tally() %>%
    mutate(rowname = paste0("ethnicity:", ethnicity_cat)),
  cohort1_uninfected %>%
    group_by(UR6_categories) %>%
    tally() %>%
    mutate(rowname = paste0("UR6_categories:", UR6_categories)),
  cohort1_uninfected %>%
    group_by(cv_clinical_vulnerability_category) %>%
    tally() %>%
    mutate(rowname = paste0("clinical_vulnerability:", cv_clinical_vulnerability_category)),
  # diabetes: women with a linkage id, no QCovid diabetes flags and an
  # "unknown" status are re-classified as assumed non-diabetic
  cohort1_uninfected %>%
    mutate(q_diag_diabetes_1 = case_when(!is.na(mother_eave_linkno) ~ replace_na(q_diag_diabetes_1, 0)),
           q_diag_diabetes_2 = case_when(!is.na(mother_eave_linkno) ~ replace_na(q_diag_diabetes_2, 0))) %>%
    mutate(diabetes = case_when(diabetes == "unknown" & q_diag_diabetes_1 == 0 & q_diag_diabetes_2 == 0 ~ "assumed_no_diabetes",
                                TRUE ~ diabetes)) %>%
    mutate(diabetes_cat = as.character(diabetes_cat),
           diabetes_cat = case_when(diabetes_cat == "Unknown" & diabetes == "assumed_no_diabetes" ~ "No - assumed & confirmed",
                                    TRUE ~ diabetes_cat),
           diabetes_cat = factor(diabetes_cat, levels = c("No - assumed & confirmed",
                                                          "Pre-existing diabetes",
                                                          "Gestational Diabetes/onset unknown",
                                                          "Unknown"))) %>%
    group_by(diabetes_cat) %>%
    tally() %>%
    mutate(rowname = paste0("diabetes:", diabetes_cat)),
  # smoking status, with an explicit "missing" level ordered last
  cohort1_uninfected %>%
    mutate(smoking_status = case_when(is.na(x_overall_smoking_status) ~ "missing",
                                      TRUE ~ x_overall_smoking_status),
           smoking_status = factor(smoking_status, levels = c("non-smoker", "ex-smoker", "smoker", "missing"))) %>%
    group_by(smoking_status) %>%
    tally() %>%
    mutate(rowname = paste0("smoking_status:", smoking_status)),
  cohort1_uninfected %>%
    group_by(bmi_cat) %>%
    tally() %>%
    mutate(rowname = paste0("bmi:", bmi_cat)),
  cohort1_uninfected %>%
    group_by(gestation_ascertainment) %>%
    tally() %>%
    mutate(rowname = paste0("imputed_gestation:", gestation_ascertainment)),
  # parity = previous pregnancies minus previous abortions; the value 99 is
  # recoded to NA (99 presumably encodes "not known" -- confirm against the
  # SMR02 specification)
  cohort1_uninfected %>%
    mutate(smr02_total_previous_pregnancies = ifelse(smr02_total_previous_pregnancies == 99, NA, smr02_total_previous_pregnancies),
           smr02_previous_spontaneous_abortions = ifelse(smr02_previous_spontaneous_abortions == 99, NA, smr02_previous_spontaneous_abortions),
           smr02_previous_theraputic_abortions = ifelse(smr02_previous_theraputic_abortions == 99, NA, smr02_previous_theraputic_abortions),
           parity = smr02_total_previous_pregnancies - (smr02_previous_spontaneous_abortions + smr02_previous_theraputic_abortions),
           parity_cat = case_when(parity == 0 ~ "0",
                                  parity >= 1 ~ "1+",
                                  TRUE ~ "Unknown/missing")) %>%
    group_by(parity_cat) %>%
    tally() %>%
    mutate(rowname = paste0("parity:", parity_cat))) %>%
  select(rowname, n) %>%
  # rows without ":" (counts and ages) get NA in sub_category; fill = "right"
  # keeps that behaviour while suppressing separate()'s warning
  separate(rowname, sep = ":", into = c("category", "sub_category"), fill = "right") %>%
  group_by(category) %>%
  mutate(percentage = round(as.numeric(n)/sum(as.numeric(n), na.rm = TRUE)*100, 1)) %>%
  ungroup()  # drop the grouping so downstream code gets a plain tibble
# Characteristics of the unvaccinated group, built identically to the
# uninfected descriptives: each sub-pipeline tallies one characteristic
# tagged "category:level"; percentages are computed within each category.
# (T/F replaced with TRUE/FALSE throughout: T is an ordinary, reassignable
# variable in R.)
unvaccinated_descriptives <- bind_rows(
  # n pregnancies: one row per pregnancy (multiple births share pregnancy_id_orig)
  cohort1_unvaccinated %>%
    group_by(pregnancy_id_orig) %>%
    slice(1) %>%
    ungroup() %>%
    tally() %>%
    mutate(rowname = "n_pregnancies"),
  # n births: one row per baby
  cohort1_unvaccinated %>%
    tally() %>%
    mutate(rowname = "n_births"),
  # maternal age at conception: median and range
  cohort1_unvaccinated %>%
    summarise(median_age = median(mother_age_at_conception),
              min_age = min(mother_age_at_conception),
              max_age = max(mother_age_at_conception)) %>%
    pivot_longer(everything(), names_to = "rowname", values_to = "n"),
  # deprivation (SIMD); harmonise the "1=most deprived" label to "1"
  cohort1_unvaccinated %>%
    mutate(simd = case_when(simd == "1=most deprived" ~ "1",
                            TRUE ~ simd)) %>%
    group_by(simd) %>%
    tally() %>%
    mutate(rowname = paste0("deprivation:", simd)),
  cohort1_unvaccinated %>%
    group_by(ethnicity_cat) %>%
    tally() %>%
    mutate(rowname = paste0("ethnicity:", ethnicity_cat)),
  cohort1_unvaccinated %>%
    group_by(UR6_categories) %>%
    tally() %>%
    mutate(rowname = paste0("UR6_categories:", UR6_categories)),
  cohort1_unvaccinated %>%
    group_by(cv_clinical_vulnerability_category) %>%
    tally() %>%
    mutate(rowname = paste0("clinical_vulnerability:", cv_clinical_vulnerability_category)),
  # diabetes: women with a linkage id, no QCovid diabetes flags and an
  # "unknown" status are re-classified as assumed non-diabetic
  cohort1_unvaccinated %>%
    mutate(q_diag_diabetes_1 = case_when(!is.na(mother_eave_linkno) ~ replace_na(q_diag_diabetes_1, 0)),
           q_diag_diabetes_2 = case_when(!is.na(mother_eave_linkno) ~ replace_na(q_diag_diabetes_2, 0))) %>%
    mutate(diabetes = case_when(diabetes == "unknown" & q_diag_diabetes_1 == 0 & q_diag_diabetes_2 == 0 ~ "assumed_no_diabetes",
                                TRUE ~ diabetes)) %>%
    mutate(diabetes_cat = as.character(diabetes_cat),
           diabetes_cat = case_when(diabetes_cat == "Unknown" & diabetes == "assumed_no_diabetes" ~ "No - assumed & confirmed",
                                    TRUE ~ diabetes_cat),
           diabetes_cat = factor(diabetes_cat, levels = c("No - assumed & confirmed",
                                                          "Pre-existing diabetes",
                                                          "Gestational Diabetes/onset unknown",
                                                          "Unknown"))) %>%
    group_by(diabetes_cat) %>%
    tally() %>%
    mutate(rowname = paste0("diabetes:", diabetes_cat)),
  # smoking status, with an explicit "missing" level ordered last
  cohort1_unvaccinated %>%
    mutate(smoking_status = case_when(is.na(x_overall_smoking_status) ~ "missing",
                                      TRUE ~ x_overall_smoking_status),
           smoking_status = factor(smoking_status, levels = c("non-smoker", "ex-smoker", "smoker", "missing"))) %>%
    group_by(smoking_status) %>%
    tally() %>%
    mutate(rowname = paste0("smoking_status:", smoking_status)),
  cohort1_unvaccinated %>%
    group_by(bmi_cat) %>%
    tally() %>%
    mutate(rowname = paste0("bmi:", bmi_cat)),
  cohort1_unvaccinated %>%
    group_by(gestation_ascertainment) %>%
    tally() %>%
    mutate(rowname = paste0("imputed_gestation:", gestation_ascertainment)),
  # parity = previous pregnancies minus previous abortions; the value 99 is
  # recoded to NA (99 presumably encodes "not known" -- confirm against the
  # SMR02 specification)
  cohort1_unvaccinated %>%
    mutate(smr02_total_previous_pregnancies = ifelse(smr02_total_previous_pregnancies == 99, NA, smr02_total_previous_pregnancies),
           smr02_previous_spontaneous_abortions = ifelse(smr02_previous_spontaneous_abortions == 99, NA, smr02_previous_spontaneous_abortions),
           smr02_previous_theraputic_abortions = ifelse(smr02_previous_theraputic_abortions == 99, NA, smr02_previous_theraputic_abortions),
           parity = smr02_total_previous_pregnancies - (smr02_previous_spontaneous_abortions + smr02_previous_theraputic_abortions),
           parity_cat = case_when(parity == 0 ~ "0",
                                  parity >= 1 ~ "1+",
                                  TRUE ~ "Unknown/missing")) %>%
    group_by(parity_cat) %>%
    tally() %>%
    mutate(rowname = paste0("parity:", parity_cat))) %>%
  select(rowname, n) %>%
  # rows without ":" (counts and ages) get NA in sub_category; fill = "right"
  # keeps that behaviour while suppressing separate()'s warning
  separate(rowname, sep = ":", into = c("category", "sub_category"), fill = "right") %>%
  group_by(category) %>%
  mutate(percentage = round(as.numeric(n)/sum(as.numeric(n), na.rm = TRUE)*100, 1)) %>%
  ungroup()  # drop the grouping so downstream code gets a plain tibble
|
a1c1d7ca76c95fd1ce34374960265b86a1465e49
|
3caab84dfbc452191c3bdc7800e3653a4a364815
|
/sim/simMH.R
|
93759e1c472792148712a55f7b19a49c3025f9a4
|
[] |
no_license
|
kklotzke/MALNIRT
|
e428cf2cef5b8f59d0f5b65a39f2b70aa8cc66c7
|
e318c36937973ba67dc3b3096a0c6dd25eab765c
|
refs/heads/master
| 2023-02-08T16:42:09.937913
| 2017-04-14T19:14:34
| 2017-04-14T19:14:34
| 79,729,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,181
|
r
|
simMH.R
|
# Simulation study for the MALNIRT joint model of responses and response
# times: repeatedly simulate LNIRT data, fit the model, and collect the
# posterior means of the covariance parameters (delta, tau), the
# time-discrimination parameters (nu) and the item parameters (beta, lambda).
#
# NOTE(review): this script depends on simdataLNIRT() and MALNIRT() from the
# MALNIRT package being attached; it is not self-contained.
# be <- la <- matrix(NA, nrow = 100, ncol = sim.K)
# for(ii in 1:100)
# {
#   dat.tmp <- simdataLNIRT(N = sim.N, K = sim.K, delta = c(0.1,0), tau = c(0.15,0), nu = rep(-0.15,sim.K))
#   be[ii, ] <- dat.tmp$beta
#   la[ii, ] <- dat.tmp$lambda
# }
# Draw one fixed set of item parameters and store it on disk.
# NOTE(review): sim.N and sim.K are used here but only assigned a few lines
# below -- this line only works in a workspace where they already exist;
# confirm the intended execution order.
dat.tmp <- simdataLNIRT(N = sim.N, K = sim.K, delta = c(0.1,0), tau = c(0.15,0), nu = rep(-0.15,sim.K))
beta <- dat.tmp$beta
lambda <- dat.tmp$lambda
save(beta, lambda, file = "beta_lambda.Rdata")
# Simulation settings: persons, items, Gibbs iterations, replications.
sim.N <- 500
sim.K <- 10
sim.XG <- 1000
sim.rep <- 25
# Containers for the posterior means collected per replication.
sim.delta <- sim.tau <- numeric(sim.rep)
sim.nu <- matrix(NA, nrow = sim.rep, ncol = sim.K)
sim.beta <- sim.lambda <- matrix(NA, nrow = sim.rep, ncol = sim.K)
#dat.tmp <- simdataLNIRT(N = sim.N, K = sim.K, delta = c(0.1,0), tau = c(0.15,0), nu = rep(0.20,sim.K))
#be <- dat.tmp$beta
#la <- dat.tmp$lambda
# Main replication loop, wrapped in system.time() to report the runtime.
system.time({
for (ii in 1:sim.rep)
{
  print(ii)
  # Simulate a fresh data set; true values: delta = 0.1, tau = 0.15, nu = 0.20.
  dat <- simdataLNIRT(N = sim.N, K = sim.K, delta = c(0.1,0), tau = c(0.15,0), nu = rep(0.20,sim.K))#, beta = be, lambda = la)
  # Fit the joint model. NOTE(review): Y and RT are passed unquoted --
  # presumably resolved inside MALNIRT() against `data = dat`; confirm.
  out <- MALNIRT(Y = Y, RT = RT, data = dat, XG = sim.XG, est.person = FALSE)
  # Store posterior means of this replication.
  sim.delta[ii] <- out$post.means[[1]]$delta
  sim.tau[ii] <- out$post.means[[1]]$tau
  sim.nu[ii, ] <- out$post.means[[1]]$nu
  sim.beta[ii, ] <- out$post.means[[1]]$beta
  sim.lambda[ii, ] <- out$post.means[[1]]$lambda
}
})
# NOTE(review): data were simulated with nu = +0.20 above, but the "true"
# value recorded here is -0.20 -- confirm the intended sign.
nu <- rep(-0.20,sim.K)
# NOTE(review): setwd() in a script is an anti-pattern (machine-specific path).
setwd("~/Desktop/github_kklotzke/MALNIRT/sim")
# NOTE(review): `be` and `la` are only defined in the commented-out block at
# the top; this save() call errors unless they already exist in the workspace.
save(sim.tau, sim.delta, sim.nu, sim.beta, sim.lambda, nu = nu, be = be, la = la, file = "simMH210317_2.RData")
# Diagnostics: distributions and trends of the replicated estimates.
hist(sim.tau, breaks=10)
hist(sim.delta, breaks=10)
plot(sim.tau)
plot(sim.delta)
# Posterior histograms pooled over both chains (burn-in: first 200 draws).
hist(c(out$data.chain1$tau.1[200:2000], out$data.chain2$tau.1[200:2000]), breaks = 50, main = "Tau = .15", xlab = "")
hist(c(out$data.chain1$delta.1[200:2000], out$data.chain2$delta.1[200:2000]), breaks = 50, main = "Delta = .1", xlab = "")
hist(c(out$data.chain1$nu[200:2000], out$data.chain2$nu[200:2000]), breaks = 50, main = "Nu (Item 1) = -.25", xlab ="")
# Bias summaries across replications.
mean(sim.tau)
mean(sim.delta)
sd(sim.tau)
sd(sim.delta)
# NOTE(review): the reference value flips sign between the next two
# summaries (-0.20 vs 0.20) -- confirm which is the simulated truth.
plot(colMeans(sim.nu) - (-0.20))
plot(rowMeans(sim.nu) - (-0.20))
summary(colMeans(sim.nu)- (0.20))
apply(sim.nu, FUN = sd, MARGIN = 2)
# NOTE(review): `bad` (indices of failed replications?) is never defined in
# this script; this line fails unless it exists in the workspace.
plot(colMeans(sim.beta[-bad, ]) - be)
summary(colMeans(sim.beta) - be)
apply(sim.beta, FUN = sd, MARGIN = 2)
plot(colMeans(sim.lambda) - la)
summary(colMeans(sim.lambda) - la)
apply(sim.lambda, FUN = sd, MARGIN = 2)
# Trace plots: chain 1 (red, draws 200-1000) overlaid with chain 2 (blue,
# stored in positions 1200-2000 of the same vector).
plot(200:1000, out$samples[[1]]$nu.1[200:1000], type = "l", col = "red", main = "nu (item 1)",
     xlab = "", ylab = "", frame.plot=F, cex.axis=1.1)
lines(200:1000, out$samples[[1]]$nu.1[1200:2000], col = "blue")
plot(200:1000, out$samples[[1]]$tau[200:1000], type = "l", col = "red", main = "tau",
     xlab = "", ylab = "", frame.plot=F, cex.axis=1.1)
lines(200:1000, out$samples[[1]]$tau[1200:2000], col = "blue")
plot(200:1000, out$samples[[1]]$delta[200:1000], type = "l", col = "red", main = "delta",
     xlab = "", ylab = "", frame.plot=F, cex.axis=1.1)
lines(200:1000, out$samples[[1]]$delta[1200:2000], col = "blue")
plot(200:1000, out$samples[[1]]$sig2k.1[200:1000], type = "l", col = "red", main = "sig2 (item 1)",
     xlab = "", ylab = "", frame.plot=F, cex.axis=1.1)
lines(200:1000, out$samples[[1]]$sig2k.1[1200:2000], col = "blue")
|
11754bb5fad7cff56d1ff72fc5a4025243382843
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/PoweR/man/print.critvalues.Rd
|
01c9158e2af30870a82710dfffc8de6f4394c22a
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,869
|
rd
|
print.critvalues.Rd
|
\name{print.critvalues}
\alias{print.critvalues}
\alias{print.critvalues1}
\title{Latex table for critical values}
\description{Transform the critical values given by function \code{\link{many.crit}}
into a LaTeX code for creating the table of critical values.}
\usage{
\method{print}{critvalues}(x, digits = 3, latex.output = FALSE, template = 1, ...)
}
\arguments{
\item{x }{critical values given by function \code{\link{many.crit}}.}
\item{digits }{integer indicating the number of decimal places to be used.}
\item{latex.output }{logical. If \code{TRUE}, we output LaTeX code for the table of critical values.
If \code{FALSE}, we output this table in the R Console.}
\item{template }{integer, template to use for the (LaTeX) printing of
values. Only \code{template = 1} is defined for the moment.}
\item{... }{further arguments passed to or from other methods.}
}
\references{
Pierre Lafaye de Micheaux, Viet Anh Tran (2016). PoweR: A
Reproducible Research Tool to Ease Monte Carlo Power Simulation
Studies for Goodness-of-fit Tests in R. \emph{Journal of Statistical Software}, \bold{69(3)}, 1--42. doi:10.18637/jss.v069.i03
Puig, P. and Stephens, M. A. (2000), Tests of fit for the Laplace distribution, with applications,
\emph{Technometrics}, \bold{42}, 417--424.
}
\author{P. Lafaye de Micheaux, V. A. Tran}
\seealso{See \code{\link{print.power}}.}
\examples{
## Regenerate Table 1 from Puig (2000) (page 419)
# Take M = 10000 for accurate results
M <- 10
law.index <- 1
vectn <- c(10,15,20,35,50,75,100,1000)
level <- c(0.50,0.25,0.10,0.05,0.025,0.01)
table1 <- many.crit(law.index,stat.indices = c(43),M,vectn,level,
alter = list(stat43=3),law.pars = NULL,parstat = NULL)
print.critvalues(table1,digits=3,latex.output=TRUE)
}
\keyword{print}
|
477a9413ec54e6c261f09a929195f3dcd3f345b1
|
373770603afb01dee47ac4055eb4b584bbfd205c
|
/man/OutlierPosition.Rd
|
dbd3a700a90788ce51af277c0baeaadd63935098
|
[
"BSD-3-Clause"
] |
permissive
|
sshcherbakov/ERTMon-R
|
e1ac5c22be39cd51ba70a81a51433034a6c4e10c
|
75847078568f76bfeb69597775c462f43cd49a34
|
refs/heads/master
| 2020-04-07T10:13:16.505303
| 2019-03-13T08:58:02
| 2019-03-13T08:58:02
| 158,279,216
| 0
| 0
| null | 2018-11-19T19:32:33
| 2018-11-19T19:32:32
| null |
UTF-8
|
R
| false
| true
| 420
|
rd
|
OutlierPosition.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OutlierIdentifiers.R
\name{OutlierPosition}
\alias{OutlierPosition}
\title{Outlier positions finder.}
\usage{
OutlierPosition(dataVec, outlierIdentifier = HampelIdentifierParameters)
}
\arguments{
\item{dataVec}{data vector}
\item{outlierIdentifier}{outlier identifier function}
}
\description{
Find the outlier positions in a data vector
}
|
a0fddbb7fbe9b1b732bbb36bc4222456954742f4
|
56dd5bdd05335517864530de705c6c6b5be17009
|
/man/UserBlocks.Rd
|
dc55723ce78736a36bdfe34a15e09404c52ee8d2
|
[] |
no_license
|
gitter-badger/HybRIDS
|
3ec32da022b5b9f6e243deb3f31b0680e102ffc9
|
092acc68645017624d35ee1b6420f78e2ca59a73
|
refs/heads/master
| 2021-01-21T19:45:19.918535
| 2014-12-09T04:06:28
| 2014-12-09T04:06:28
| 27,845,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
rd
|
UserBlocks.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{class}
\name{UserBlocks}
\alias{UserBlocks}
\title{UserBlocks reference class}
\description{
The UserBlocks reference class is the class used to store and manipulate user defined blocks in HybRIDS.
}
|
e2d91647d080ad5ae1fdf11b9188671e3634a549
|
585ad5e777416942f6a1f1a7b4c2cbb5c0352727
|
/script.R
|
84a88d882693b9526766d70d0c3a32e2f67299fb
|
[] |
no_license
|
ssm123ssm/capstone
|
a82615592eae10416e88d312a15c7b5efb2fbb98
|
825f96db51380a65621e036d81a5cdeaecc55b1d
|
refs/heads/master
| 2022-12-25T15:35:54.133540
| 2020-09-22T01:27:52
| 2020-09-22T01:27:52
| 297,499,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,973
|
r
|
script.R
|
# Capstone script: classify ECG recordings from the UCI arrhythmia data set
# into diagnosis classes 1-4 using three caret models (bagged trees, random
# forest, multi-layer perceptron), with MI-relevant ECG features.
#
# NOTE(review): `if (!require(pkg)) install.packages(pkg)` installs packages
# as a side effect; acceptable for a standalone script, but require() should
# not be used for loading in reusable code.
# Loading the libraries
if (!require(tidyverse)) install.packages('tidyverse')
if (!require(caret)) install.packages('caret')
if (!require(factoextra)) install.packages('factoextra')
if (!require(broom)) install.packages('broom')
if (!require(readr)) install.packages('readr')
if (!require(pROC)) install.packages('pROC')
if (!require(NeuralNetTools)) install.packages('NeuralNetTools')
library(tidyverse)
library(caret)
library(factoextra)
library(broom)
library(readr)
library(pROC)
# Importing the data set (semicolon-separated CSV fetched over HTTP).
uri <- 'http://ssm123ssm.github.io/projects/data_arrhythmia.csv'
original <- read_delim(uri,";", escape_double = FALSE, trim_ws = TRUE)
# Filtering the output categories of 1, 2, 3 and 4
tem <- original %>% filter(diagnosis %in% c('1', '2', '3', '4'))
# Partitioning the data set to train (70%) and test (30%)
set.seed(2000, sample.kind = 'Rounding')
test_ind <- as.vector(createDataPartition(tem$diagnosis, p = .3, list = F))
train_set <- tem[-test_ind,]
test_set <- tem[test_ind,]
# Upsampling the training set to balance class prevalence.
# upSample() drops column 280 (the outcome) and re-adds it as "Class".
train_set <- upSample(train_set[,-280], as.factor(train_set$diagnosis), list = FALSE)
# Selecting features according to ECG changes during an MI.
# Column indices into the 280-column table; index 280 is the outcome
# ("Class" in the upsampled training set, "diagnosis" in the test set).
selected <- c(1,2,15, 161, 162, 167, 171, 172, 177, 181, 182, 187, 191, 192, 197, 201, 202, 207, 211, 212, 217, 221, 222, 227, 231, 232, 237, 241, 242, 247, 251, 252, 257, 261, 262, 267, 271, 272, 277, 16:20, 28:32, 40:44, 52:56, 64:68, 76:80, 88:92, 100:104, 112:116, 124:128, 136:140, 148:152, 280)
# Human-readable names matching `selected`, position by position.
selected_names <- c('age', 'sex', 'HR', 'Q_amp_1', 'R_amp_1', 'T_amp_1','Q_amp_2', 'R_amp_2', 'T_amp_2', 'Q_amp_3', 'R_amp_3', 'T_amp_3', 'Q_amp_avR', 'R_amp_avR', 'T_amp_avR', 'Q_amp_avL', 'R_amp_avL', 'T_amp_avL', 'Q_amp_avF', 'R_amp_avF', 'T_amp_avF', 'Q_amp_v1', 'R_amp_v1', 'T_amp_v1', 'Q_amp_v2', 'R_amp_v2', 'T_amp_v2','Q_amp_v3', 'R_amp_v3', 'T_amp_v3', 'Q_amp_v4', 'R_amp_v4', 'T_amp_v4', 'Q_amp_v5', 'R_amp_v5', 'T_amp_v5', 'Q_amp_v6', 'R_amp_v6', 'T_amp_v6', 'Q_wd_1', 'R_wd_1', 'S_wd_1', 'r_wd_1', 's_wd_1', 'Q_wd_2', 'R_wd_2', 'S_wd_2', 'r_wd_2', 's_wd_2', 'Q_wd_3', 'R_wd_3', 'S_wd_3', 'r_wd_3', 's_wd_3','Q_wd_aVR', 'R_wd_aVR', 'S_wd_aVR', 'r_wd_aVR', 's_wd_aVR', 'Q_wd_aVL', 'R_wd_aVL', 'S_wd_aVL', 'r_wd_aVL', 's_wd_aVL','Q_wd_3aVF', 'R_wd_3aVF', 'S_wd_3aVF', 'r_wd_3aVF', 's_wd_3aVF', 'Q_wd_v1', 'R_wd_v1', 'S_wd_v1', 'r_wd_v1', 's_wd_v1', 'Q_wd_v2', 'R_wd_v2', 'S_wd_v2', 'r_wd_v2', 's_wd_v2', 'Q_wd_v3', 'R_wd_v3', 'S_wd_v3', 'r_wd_v3', 's_wd_v3', 'Q_wd_v4', 'R_wd_v4', 'S_wd_v4', 'r_wd_v4', 's_wd_v4', 'Q_wd_v5', 'R_wd_v5', 'S_wd_v5', 'r_wd_v5', 's_wd_v5', 'Q_wd_v6', 'R_wd_v6', 'S_wd_v6', 'r_wd_v6', 's_wd_v6', 'class')
sel_train <- train_set %>% select(selected)
sel_test <- test_set %>% select(selected)
# Removing near-zero variance columns (indices computed on the training set
# and applied to both sets so their columns stay aligned).
nz <- nzv(sel_train)
names(sel_train) <- selected_names
names(sel_test) <- selected_names
sel_train <- sel_train[,-nz]
sel_test <- sel_test[,-nz]
# Parsing the predictors to a numeric matrix - training data.
# Non-finite entries (NAs from coercion) are zero-filled; the last column is
# the class label, extracted as factor `tl` and then dropped from the matrix.
tm <- sel_train %>% as.matrix()
tm <- apply(tm, 2, as.numeric)
tm[!is.finite(tm)] = 0
tl <- as.factor(paste0('cl_',tm[,ncol(tm)]))
tm <- tm[,-ncol(tm)]
# Parsing the predictors to a numeric matrix - test data (same treatment).
tmm <- sel_test %>% as.matrix()
tmm <- apply(tmm, 2, as.numeric)
tmm[!is.finite(tmm)] = 0
tll <- as.factor(paste0('cl_',tmm[,ncol(tmm)]))
tmm <- tmm[,-ncol(tmm)]
# Using 10-fold cross validation, repeated 10 times.
fitControl <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 10,classProbs = TRUE)
# Training the models, optimized for Kappa after centering and scaling.
# treebag (bagged CART, no tuning parameters)
set.seed(2020)
mod_treebag <- train(tm,tl, method = 'treebag' ,metric = "Kappa", preProcess = c("center", "scale"), trControl = fitControl)
# rf (random forest, tuning mtry over 1, 3, 5, 7, 9)
set.seed(2020)
mod_rf <- train(tm,tl, method = 'rf' ,metric = "Kappa", preProcess = c("center", "scale"), trControl = fitControl, tuneGrid = data.frame(mtry = seq(1,10, by = 2)))
# Neural networks
library(NeuralNetTools)
# Multi-Layer Perceptron, tuning hidden layer size over 1..10.
# NOTE(review): set.seed(2020) is called twice in a row; the duplicate is
# harmless but redundant.
set.seed(2020)
set.seed(2020)
mod_mlp <- train(tm,tl, method = 'mlp' ,metric = "Kappa", preProcess = c("center", "scale"), trControl = fitControl, tuneGrid = data.frame(size = 1:10))
# Plotting the parameter tuning.
plot(mod_rf, plotType = 'line')
plot(mod_mlp)
# The distributions of the evaluation metrics of the cross validation samples
# and the scatter plot matrix for the Kappa statistic of the three models.
# NOTE(review): `resamples` the variable shadows caret::resamples(); works
# here, but a different name would be clearer ("preceptron" is also a typo in
# the list name, kept because it labels the output).
resamples <- resamples(list(treebag = mod_treebag, RF = mod_rf, preceptron = mod_mlp))
theme1 <- trellis.par.get()
theme1$plot.symbol$col = rgb(.2, .2, .2, .4)
theme1$plot.symbol$pch = 16
theme1$plot.line$col = rgb(1, 0, 0, .7)
theme1$plot.line$lwd <- 2
trellis.par.set(theme1)
bwplot(resamples)
dotplot(resamples)
# Confusion matrices of the three models for the predictions on test data.
# NOTE(review): caret::confusionMatrix(data, reference) expects predictions
# first and the observed labels second; here `tll` (truth) is passed as
# `data`, which transposes sensitivity/specificity -- confirm intended.
# Random forests
confusionMatrix(tll, predict(mod_rf, tmm))
# Treebag
confusionMatrix(tll, predict(mod_treebag, tmm))
# Perceptron
confusionMatrix(tll, predict(mod_mlp, tmm))
|
f5cd4845808b899d2cc8e2df33b75e448235c52a
|
efc0045c29a9390a563307e4e76ead9a1414c746
|
/R/data.R
|
b8125b0a8e634d55f3fdbfd7d868af372f290c8b
|
[] |
no_license
|
cran/flatness
|
72f1de6b9e929452e89b57cd67ad38abcb098d72
|
d3cf74f2bd78d070a6f51dccaa0bb07cf47effe4
|
refs/heads/master
| 2023-06-04T10:24:04.326157
| 2021-06-29T06:20:09
| 2021-06-29T06:20:09
| 381,417,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,713
|
r
|
data.R
|
#' Ensemble forecasts of temperature and associated observation
#'
#' This is a dataset containing the forecasts of five ensemble weather
#' prediction models for two-meter temperature from March, 2019 to March, 2021.
#'
#' The five ensemble models are named CWAO (20 perturbed members, from ECCC),
#' DEMS (11 perturbed members, from NCMRWF), ECMF (50 members, from ECMWF),
#' EGRR (17 perturbed members, from UKMO) and RKSL (24 perturbed members, from
#' KMA).
#'
#' The forecasts are the ones at the nearest grid point to Toulouse-Blagnac
#' station (France) in the TIGGE dataset. The observation is the two-meter
#' height temperature measured at this same station, at 06UTC. The forecast
#' initial time is 00UTC, with a 30 hour lead-time.
#'
#' @format A named list with five entries, each containing a \code{data.table}
#' with 731 rows and a varying number of columns (depending on the number of
#' members).
#' \describe{
#'   \item{date_run}{initial condition date (with format YYYY-MM-DD)}
#'   \item{latitude}{latitude of the forecast location (in degrees)}
#'   \item{longitude}{longitude of the forecast location (in degrees)}
#'   \item{1 ... M}{member forecast (M members) in Kelvins}
#'   \item{T}{the measured two-meter air temperature in Kelvins}
#' }
#' @source \url{https://apps.ecmwf.int/datasets/data/tigge/levtype=sfc/type=pf/}
#'
#' \url{https://donneespubliques.meteofrance.fr/?fond=produit&id_produit=91&id_rubrique=32}
"ensembles"
#' Post-processed ensemble forecasts of temperature and associated observation
#'
#' This is a dataset containing the post-processed forecasts of five ensemble
#' weather prediction models for two-meter temperature from March, 2019 to
#' March, 2021.
#'
#' Each ensemble has been post-processed with the non homogeneous regression
#' technique, described in Gneiting et al. (2005). In a nutshell the true
#' distribution is supposed to be Gaussian, with mean and standard deviation
#' being a linear function of the ensemble mean and standard deviation
#' (respectively). The intercept and slope of each regression is determined by
#' minimizing the CRPS over a 60-day sliding window. The forecast in the
#' dataset is a sample of 30 values from this Gaussian distribution.
#'
#' The five ensemble models are named based on the raw ensemble: CWAO
#' (from ECCC), DEMS (from NCMRWF), ECMF (from ECMWF), EGRR (from UKMO) and RKSL
#' (from KMA).
#'
#' The raw forecasts are the ones at the nearest grid point to Toulouse-Blagnac
#' station (France) in the TIGGE dataset. The observation is the two-meter
#' height temperature measured at this same station, at 06UTC. The forecast
#' initial time is 00UTC, with a 30 hour lead-time.
#'
#' @format A named list with five entries, each containing a \code{data.table}
#' with 731 rows.
#' \describe{
#'   \item{date_run}{initial condition date (with format YYYY-MM-DD)}
#'   \item{latitude}{latitude of the forecast location (in degrees)}
#'   \item{longitude}{longitude of the forecast location (in degrees)}
#'   \item{1 ... 30}{forecast in Kelvins sampled from the Gaussian distribution.
#'   The forecasts are sorted in increasing order.}
#'   \item{T}{the measured two-meter air temperature in Kelvins}
#' }
#' @source \url{https://apps.ecmwf.int/datasets/data/tigge/levtype=sfc/type=pf/}
#'
#' \url{https://donneespubliques.meteofrance.fr/?fond=produit&id_produit=91&id_rubrique=32}
#' @references
#' Gneiting, Tilmann, et al. "Calibrated probabilistic forecasting using
#' ensemble model output statistics and minimum CRPS estimation."
#' \emph{Monthly Weather Review} 133.5 (2005): 1098-1118.
#' doi:https://doi.org/10.1175/MWR2904.1
"ppensembles"
|
ba02598252b46588d41a7e93d4dcc738047c0b46
|
e5455f8ffee998491a66117de725fa7245da998b
|
/plot4.R
|
3ad72b83300b6fd3bbc67c520b6dcd1fd8308e78
|
[] |
no_license
|
einmarc/ExData_Plotting1
|
682521a2be8cffd59d1b82d6cbb4bf306f7926ba
|
2da6fae0b2d71d879ca9d0917ab669fe3f3d680c
|
refs/heads/master
| 2021-01-17T22:39:34.221725
| 2016-03-20T22:32:20
| 2016-03-20T22:32:20
| 49,378,267
| 0
| 0
| null | 2016-01-10T17:53:55
| 2016-01-10T17:53:55
| null |
UTF-8
|
R
| false
| false
| 2,445
|
r
|
plot4.R
|
# plot4: reproduce the 2x2 panel figure for household power consumption on
# 2007-02-01 and 2007-02-02 and save it as plot4.png (480x480 pixels).
#
# Args:
#   sourceFile: path to the semicolon-separated household power consumption
#     data file (with a header row).
plot4 <- function(sourceFile) {
  # Load the raw data; fields are ";"-separated with a header row.
  power <- read.table(file = sourceFile, header = TRUE, sep = ";")

  # Parse the Date column, then keep only the two target days
  # (strictly after Jan 31 and strictly before Feb 3, 2007).
  power$Date <- as.Date(power$Date, "%d/%m/%Y")
  power <- power[power$Date > as.Date("2007-01-31") &
                   power$Date < as.Date("2007-02-03"), ]

  # Normalise the Time column to an "HH:MM:SS" character representation.
  power$Time <- strptime(power$Time, "%H:%M:%S")
  power$Time <- format(power$Time, "%H:%M:%S")

  # Measurement columns arrive as text/factors; coerce each one to numeric.
  measure_cols <- c("Global_active_power", "Sub_metering_1", "Sub_metering_2",
                    "Sub_metering_3", "Voltage", "Global_reactive_power")
  for (col in measure_cols) {
    power[[col]] <- as.numeric(as.character(power[[col]]))
  }

  # Combine date and time into a single timestamp for the x-axis.
  power$DateTime <- strptime(paste(power$Date, power$Time, sep = " "),
                             format = "%Y-%m-%d %H:%M:%S")

  # 2x2 layout, panels filled column by column.
  par(mfcol = c(2, 2), mar = c(4, 4, 2, 2))

  # Panel 1 (top-left): global active power over time.
  plot(x = power$DateTime, y = power$Global_active_power,
       type = "l", xlab = "", ylab = "Global Active Power")

  # Panel 2 (bottom-left): the three sub-metering series with a legend.
  plot(x = power$DateTime, y = power$Sub_metering_1,
       type = "l", xlab = "", ylab = "Energy sub metering")
  lines(x = power$DateTime, y = power$Sub_metering_2, col = "red")
  lines(x = power$DateTime, y = power$Sub_metering_3, col = "blue")
  legend("topright",
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"), lwd = 1, bty = "o")

  # Panel 3 (top-right): voltage over time.
  plot(power$DateTime, power$Voltage,
       type = "l", xlab = "datetime", ylab = "Voltage")

  # Panel 4 (bottom-right): global reactive power over time.
  plot(power$DateTime, power$Global_reactive_power,
       type = "l", xlab = "datetime", ylab = "Global_reactive_power")

  # Copy the on-screen figure to a 480x480 PNG and close the device.
  dev.copy(png, 'plot4.png', 480, 480)
  dev.off()
}
|
aed54d4959020006f8dd1b68aae68b540f758793
|
8c3d56ee187a77348c2f112cfcfb110efa0e403c
|
/R/rank_FGSEA.R
|
45df1a59986f4daa252ae01c6acaf3e2d1bece4b
|
[
"MIT"
] |
permissive
|
pphector/scripts
|
1ab0113eff51b851f1e2352663a20ae08f8386f6
|
3ae8224dbd174020e0e04c1a3f230ea60b193aa1
|
refs/heads/master
| 2021-06-11T03:31:59.007558
| 2020-05-07T13:48:38
| 2020-05-07T13:48:38
| 128,252,300
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,044
|
r
|
rank_FGSEA.R
|
#!/usr/bin/env Rscript
# NOTE(review): fgsea and ggplot2 are attached but not used in the code
# below -- confirm they are needed; library(mygene) is loaded twice (once
# with suppressed startup messages), the second call is redundant.
suppressPackageStartupMessages(library(fgsea))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(mygene))
library(ggplot2)
library(mygene)
### Script to generate rank file from DGE output for GSEA analysis
## Requires internet connection for Mygene library
## Produces UNSORTED output in two columns:
## - Column 1: ENTREZ gene IDs
## - Column 2: "Rank" metric obtained by multiplying logFC with -log10 of DEseq p-value
# Create ranks.
# "REPLACE" is a placeholder -- presumably substituted per-sample by an
# external templating step before the script is run; confirm.
infile <- "REPLACE.dge.csv"
outfile <- "REPLACE.rnk"
# Input is tab-separated despite the .csv extension.
dge <- read.csv(infile, sep="\t")
# Rank metric: signed fold change weighted by significance.
dge["rank"] <- dge$log_FC * -log10(dge$deseq.p.value)
# Map Ensembl gene ids to Entrez ids via the MyGene.info web service
# (network call).
entrezFromId <- queryMany(dge$id, scopes="ensembl.gene", fields="entrezgene", species="Human", returnall=FALSE)
entrezKey <- entrezFromId[,c("query", "entrezgene")]
# Inner-join the mapping back onto the DGE table; genes without an Entrez id
# are dropped below.
dge <- merge(x=dge, y=entrezKey, by.x="id", by.y="query")
rankTable <- dge[!is.na(dge$entrezgene), c("entrezgene","rank")]
# Two-column, headerless, tab-separated .rnk file as expected by GSEA.
write.table(rankTable, file=outfile, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
|
036677842fb558e3aa19c5d43b620c40c6a43976
|
ae2df16f6c3c964a6588a7f3177ccb28384b0b8e
|
/coursera/getting&cleaningdata/week2/Quiz.R
|
75adf4b3b5ce700c3bd878dc8c1caf79364e862f
|
[] |
no_license
|
joellove/R
|
3f3d32ca5d780106816065eaab21fc6e64850d1a
|
2be7aa251a700c75fe88fcdc45572d245a6bb8b1
|
refs/heads/master
| 2020-05-18T15:32:41.389683
| 2015-10-25T07:30:52
| 2015-10-25T07:30:52
| 18,982,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,631
|
r
|
Quiz.R
|
# Coursera "Getting and Cleaning Data" Quiz 2 solutions: GitHub API via
# OAuth (Problem 1), sqldf queries on ACS data (Problems 2-3), reading raw
# HTML lines (Problem 4) and fixed-width file parsing (Problem 5).
# Quiz 2.
# Problem 1.
install.packages('httpuv')
install.packages('jsonlite')
install.packages('httr')
library('httpuv')
library('jsonlite')
library('httr')
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. Register an application at https://github.com/settings/applications;
# Use any URL you would like for the homepage URL (http://github.com is fine)
# and http://localhost:1410 as the callback url
#
# Insert your client ID and secret below - if secret is omitted, it will
# look it up in the GITHUB_CONSUMER_SECRET environmental variable.
# SECURITY NOTE(review): a real-looking OAuth client id/secret is hard-coded
# and committed to source control below; it should be revoked and read from
# the environment instead.
myapp <- oauth_app("My application", "ec070735aa24a15c9fb5", "852a282fade63f44f056f89ba9e21466573b44d4")
# 3. Get OAuth credentials (opens an interactive browser flow).
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/rate_limit", gtoken)
stop_for_status(req)
content(req)
# OR:
req <- with_config(gtoken, GET("https://api.github.com/rate_limit"))
stop_for_status(req)
content(req)
# curl -u Access Token:x-oauth-basic "https://api.github.com/users/jtleek/repos"
BROWSE("https://api.github.com/users/jtleek/repos",authenticate("Access Token","x-oauth-basic","basic"))
# Quiz answer: creation time of the hello-world repo.
# 2013-11-07T13:25:07Z
# Problem 2.
install.packages('sqldf')
library(sqldf)
# NOTE(review): setwd() with a machine-specific absolute path is an
# anti-pattern; the script only runs on this one machine.
setwd("C:\\Users\\JL186064\\Documents\\GitHub\\R\\coursera\\getting&cleaningdata\\week2")
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
if(!file.exists("data")){
  dir.create("data")
}
download.file(fileURL, destfile="./data/getdata-data-ss06pid.csv", mode="wb", method="internal")
acs <- read.csv("./data/getdata-data-ss06pid.csv", header=T, sep=",")
head(acs)
# Equivalent to acs[acs$AGEP < 50, "pwgtp1"].
sqldf("select pwgtp1 from acs where AGEP < 50")
# Problem 3.
sqldf("select distinct AGEP from acs")
length(unique(acs$AGEP)) # 91
# Problem 4.
hurl <- "http://biostat.jhsph.edu/~jleek/contact.html"
con <- url(hurl)
htmlCode <- readLines(con)
close(con)
# Character counts of lines 10, 20, 30 and 100 of the raw HTML.
sapply(htmlCode[c(10, 20, 30, 100)], nchar)
#<meta name="Distribution" content="Global" />
# 45
#<script type="text/javascript">
# 31
#})();
#7
#\t\t\t\t<ul class="sidemenu">
# 25
# Problem 5.
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
download.file(fileURL, destfile="./data/getdata-wksst8110.for", mode="wb", method="internal")
list.files()
# Exploratory read only -- the file is fixed-width, not comma-separated;
# the real parse uses read.fwf() below.
data <- read.csv("./data/getdata-wksst8110.for", header=T)
head(data)
dim(data)
file_name <- "./data/getdata-wksst8110.for"
# Negative widths skip columns; the first 4 lines are header/banner rows.
df <- read.fwf(file=file_name, widths=c(-1,9,-5,4,4,-5,4,4,-5,4,4,-5,4,4), skip=4)
head(df)
sum(df[, 4])
# 32426.7
|
235e094bec77862cca6eabafd5a53adc4678a69e
|
9ddd623471e8174ade5b9921dbc1cb1da731e115
|
/man/calc_character.Rd
|
cc76171dee8767cf1e367cbec08853b115b24f9e
|
[] |
no_license
|
zackarno/koborg
|
2eba2f837b51a494b7efcb8d491e800de6ec70d9
|
6312bb3ab0b59b96f91812b90f5afd224d599b04
|
refs/heads/master
| 2022-09-13T17:11:09.884337
| 2020-05-27T09:45:22
| 2020-05-27T09:45:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 375
|
rd
|
calc_character.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_calc_character.R
\name{calc_character}
\alias{calc_character}
\title{Calculation character constructor}
\usage{
calc_character(
x = character(),
relevant = NA,
label = NA,
constraint = NA,
calculation = NA
)
}
\description{
`calc_character()` constructs a calc character vector.
}
|
f013eb57c86ef3988d7d66ca0667adb9b5a7535d
|
2d5461cbc4d27a4fbbbcb24d38b23b9cca128cfe
|
/LeafletApp/ui.R
|
85b8f2850b222f48c712d70dcf1bb216e9c930c0
|
[] |
no_license
|
delanst/AssigmentMarkdownLeaflet
|
f1b5c71a63d0b2fed29b75ae176b70387d537a08
|
7129dd06eda9f2f2894d84326defd640a7582b5f
|
refs/heads/master
| 2021-01-17T18:14:19.621847
| 2016-10-19T14:45:17
| 2016-10-19T14:45:17
| 71,272,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
r
|
ui.R
|
library(shiny)
library(leaflet)

# UI for the "R Markdown and Leaflet" assignment app: a single page with a
# short description, a link to the data source, and a Leaflet map of Belgian
# airports (rendered by output$mymap in server.R).
#
# Fix: a() previously received the URL as a second unnamed argument, which
# htmltools treats as child text -- the link text and URL were rendered side
# by side with no hyperlink. The URL must be the named `href` attribute.
# (Wording typos in the paragraph text are left as-is.)
shinyUI(fluidPage(
  titlePanel("Assignment R markdown and Leaflet"),
  h3("18th of October 2016"),
  p("Here follows a map of all the airports in Belgium."),
  p("The data used for has been download from the following site with their many thanks."),
  a("HDX", href = "https://data.humdata.org/dataset/ourairports-bel"),
  h3("Airports belgium"),
  strong("This is map with all airports in Belgium. These include international airports as well small 'private' landingstrips."),
  leafletOutput("mymap")
))
|
5353a9a73a61c7263a5f46ff161abf6440db0d55
|
a528173483407425c55cbbdf278a2b724830a01e
|
/man/astig.bath.Rd
|
caa77c9137ba2ae15595a5c14b96023314e74ee3
|
[
"MIT"
] |
permissive
|
gmke/zernike
|
7ea52f89dc353f7d72a8385078e03bc2853a22c1
|
397a5d2f316e2f95cc1a1209007780855da16b13
|
refs/heads/master
| 2023-05-28T21:58:50.075555
| 2023-05-10T15:07:23
| 2023-05-10T15:07:23
| 166,230,701
| 0
| 0
|
MIT
| 2021-06-18T12:00:04
| 2019-01-17T13:30:49
|
R
|
UTF-8
|
R
| false
| false
| 867
|
rd
|
astig.bath.Rd
|
\name{astig.bath}
\alias{astig.bath}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Zernike coefficients for astigmatism due to Bath geometry.
}
\description{
Calculates Bath astigmatism coefficients with optional rotation
of phi degrees.
}
\usage{
astig.bath(D, rc, s, lambda = 632.8, phi = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{D}{Diameter}
\item{rc}{Radius of curvature}
\item{s}{separation of reference and test beams}
\item{lambda}{Wavelength}
\item{phi}{angle of image horizontal relative to interferometer axis,
in degrees}
}
\details{
D, rc, s, must have the same units. If those units are mm the source
wavelength lambda should be in nm.
}
\value{
The Zernike coefficients for primary astigmatism terms.
}
\author{M.L. Peck \email{mpeck1@ix.netcom.com}}
\keyword{mathematics}
|
800acdf3913e811c45e45b5779783bd0b2e027f6
|
7a53a053729c895599824fdec9967d461ba04b17
|
/DE_fun.R
|
ce1ca6e5f17fd4c44bff3e29237730c31ddbd16c
|
[] |
no_license
|
feifei/RNA-seq
|
5c7f760acdbf189cd01ef1fc96e1f3b0acebc0f6
|
30c77c0f179ce0183881c322d4af9ac05883310e
|
refs/heads/main
| 2023-06-09T02:20:44.288078
| 2021-07-02T12:25:52
| 2021-07-02T12:25:52
| 382,331,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,402
|
r
|
DE_fun.R
|
library(edgeR)
library(DT)
library(GO.db)
library(goseq)
library(ggplot2)
library(reshape2)
library(pheatmap)
library(KEGGREST)
# Pair each contrast name with its quasi-likelihood F-test result.
#
# Args:
#   fit: fitted object from edgeR::glmQLFit().
#   my.contrasts: contrast matrix from limma::makeContrasts(); contrast
#     names are taken from dimnames(my.contrasts)$Contrasts.
#   ...: unused, kept for backward compatibility with existing callers.
#
# Returns: a named list of glmQLFTest() results, one entry per contrast,
# in the order the contrasts appear in the matrix.
#
# Fix: the original grew the list one element at a time with
# c(qlf_d, list(name = qlf)) inside a loop (quadratic copying, and the
# literal element name "name" was misleading); lapply() builds the same
# list in one pass with identical contents.
qlf_dictionary <- function(fit, my.contrasts, ...) {
  contrast_names <- dimnames(my.contrasts)$Contrasts
  qlf_d <- lapply(contrast_names, function(contrast) {
    glmQLFTest(fit, contrast = my.contrasts[, contrast])
  })
  names(qlf_d) <- contrast_names
  return(qlf_d)
}
# Draw a mean-difference ("smear") plot for one contrast: average log-CPM on
# the x-axis against log-fold change on the y-axis, with significantly DE
# genes highlighted by plotMD() itself.
#
# Args:
#   qlf: glmQLFTest() result for the contrast.
#   contrast: contrast name, used as the plot title.
#   ...: unused, kept for backward compatibility.
#
# Side effects: emits markdown headers via cat() (this file is meant to be
# knitted with results='asis') and draws on the active graphics device.
plot_logFCvslogCPM <- function(qlf, contrast, ...) {
  cat("\n### Log-fold change against log-counts per million\n")
  # NOTE(review): "in te following" is a typo inside the emitted text; left
  # unchanged here because it is runtime output, not a comment.
  cat("The test results are visualized in te following smear plot.
      Genes that are significantly DE with an FDR of 5% are highlighted in red and blue.\n\n")
  plotMD(qlf, main=contrast)
  # Reference lines at logFC = -1 and +1 (i.e. 2-fold change).
  abline(h=c(-1,1), col="blue")
}
# Heatmap of moderated log-CPM values for the DE genes of one contrast.
#
# Args:
#   qlf: glmQLFTest() result for the contrast.
#   summ: summary counts per decision category; entries 1 and 3 are assumed
#     to be the down- and up-regulated counts (entry 2 = not significant) --
#     TODO confirm against the caller.
#   contrast: contrast name, used in the plot title.
#   tag: optional suffix appended to the plot title.
#   ...: unused, kept for backward compatibility.
#
# Relies on the module-level matrix `logCPM` (genes x samples).
# NOTE(review): diverge_hcl() comes from the colorspace package, which is
# not attached by this file's library() calls -- confirm it is loaded
# elsewhere before this function runs.
plot_pheatmap <- function(qlf, summ, contrast, tag = "", ...) {
  cat("\n\n### Heatmap of the moderated log-counts-per-million\n")
  cat("\nHeatmap of the moderated log-counts-per-million of the DE genes in this comparison.")
  # Total number of DE genes; bail out early when there is nothing to plot.
  size <- sum(summ[c(1,3)])
  if (size == 0) return()
  # Top `size` genes ranked by log-fold change.
  DE.top <- topTags(qlf, n=size, sort.by="logFC")
  top.genes <- row.names(DE.top)
  top.logcpm <- logCPM[top.genes, ]
  pheatmap(top.logcpm, cluster_cols = FALSE,
           show_rownames=FALSE, color=diverge_hcl(100), main=paste(contrast, "log-counts-per-million", tag))
}
# Volcano plot for one contrast: logFC (x) against -log10 p-value (y).
# Points are over-plotted in a fixed order so the most specific colour wins:
# all genes (black), then FDR < .05 (red), then |logFC| > 1 (orange), then
# genes meeting both criteria (green).
#
# Args:
#   all.genes.t: table of all genes with columns logFC, PValue and FDR.
#   contrast: contrast name, appended to the plot title.
#   ...: unused, kept for backward compatibility.
#
# NOTE(review): the function name keeps the historical "valcano" spelling
# because renaming would break existing callers.
plot_valcano <- function(all.genes.t, contrast, ...) {
  cat("\n\n### Volcano plot\n")
  cat("A volcano plot the fold change on the x-axis and the statistial significance on the y-axis
      (the -log10 of the p-value. Genes that are highly dysregulated are farther to the left and right sides,
      while highly significant changes appear higher on the plot.
      Genes with FDR<0.05 are colored red, and abs(logFC)>1 are colored orange, and green if both.\n\n")
  with(all.genes.t, plot(logFC, -log10(PValue), pch=20, main=paste("Volcano plot", contrast)))
  # subset() silently drops rows where the condition evaluates to NA.
  with(subset(all.genes.t, FDR<.05 ), points(logFC, -log10(PValue), pch=20, col="red"))
  with(subset(all.genes.t, abs(logFC)>1), points(logFC, -log10(PValue), pch=20, col="orange"))
  with(subset(all.genes.t, FDR<.05 & abs(logFC)>1), points(logFC, -log10(PValue), pch=20, col="green"))
}
# Render an interactive table of the significant DE genes (|logFC| > 1) and
# write the full significant-gene table to a CSV file.
#
# Args:
#   DE.genes.t: data frame of significant DE genes; row names are gene ids.
#     Columns 5-7 are rounded and columns 8+ shown with 2 significant
#     digits in the rendered widget -- TODO confirm the column layout.
#   directory, name, cell, contrast, tag: pieces of the output file name.
#   ...: unused, kept for backward compatibility.
export_DE <- function(DE.genes.t, directory, name, cell, contrast, tag = "", ...) {
  cat("\n\n### Export significant cases\n")
  cat("The table lists all the DE with abs(logFC) > 1 and FDR< 0.05. \n\n")
  colSize = ncol(DE.genes.t)
  # Workaround: wrap the DT widget in htmltools::tagList() so it renders
  # when printed inside a loop in a knitted document.
  print(
    htmltools::tagList(
      datatable(DE.genes.t[abs(DE.genes.t$logFC) > 1, ], rownames = FALSE,
                options = list(scrollX = TRUE)) %>%
        formatStyle(columns = c(1:colSize), fontSize = '80%') %>%
        formatSignif(c(8:colSize), 2) %>%
        formatRound(c(5:7), 2)
    )
  )
  # Emit blank lines so the next markdown section clears the widget
  # (11 lines once the widget paginates at 10 rows, fewer otherwise).
  if (nrow(DE.genes.t) >= 10){
    cat(paste(rep("\n", 11), collapse = ""))
  } else {
    cat(paste(rep("\n", nrow(DE.genes.t)), collapse = ""))
  }
  # format() converts every column to character before writing to disk.
  DE.genes.t <- format(DE.genes.t, trim=TRUE, digits=2, nsmall=2)
  write.csv(DE.genes.t, file=paste(directory, name, ".", cell, ".", contrast, ".sig_results", tag, ".csv", sep=""),
            row.names = FALSE)
}
# Write the full gene table (all genes, not just significant ones) for one
# contrast to a CSV file.
#
# Args:
#   all.genes.t: data frame of all genes for this contrast.
#   directory, name, cell, contrast, tag: pieces of the output file name.
#   ...: unused, kept for backward compatibility.
export_all <- function(all.genes.t, directory, name, cell, contrast, tag = "", ...) {
  # Leading blank lines separate this section from the previous DT widget
  # in the knitted report.
  cat("\n\n\n\n\n\n### Export all genes\n")
  cat("The table lists all the genes. \n\n")
  # format() converts every column to character before writing to disk.
  all.genes.t <- format(all.genes.t, trim=TRUE, digits=2, nsmall=2)
  write.csv(all.genes.t, file=paste(directory, name, ".", cell, ".", contrast, ".all", tag, ".csv", sep=""),
            row.names = FALSE)
}
# Render an interactive table of the log-CPM values of the DE genes and
# write them to a CSV file.
#
# Args:
#   DE.genes.t: data frame of significant DE genes; row names are gene ids.
#   directory, name, cell, contrast, tag: pieces of the output file name.
#   ...: unused, kept for backward compatibility.
#
# Relies on the module-level objects `logCPM` (log-CPM matrix, genes x
# samples) and `group` (sample group labels).
export_logcpm <- function(DE.genes.t, directory, name, cell, contrast, tag = "", ...) {
  cat("\n\n### Export logCPM values of the DE\n")
  cat("Inspect the depth-adjusted reads per million for the top differentially expressed. \n\n")
  DE.cpm <- logCPM[rownames(DE.genes.t),]
  if (nrow(DE.genes.t) == 1) {
    # Subsetting a matrix by a single row returns a plain numeric vector;
    # rebuild a 1-row matrix so column names are preserved downstream.
    DE.cpm <- matrix(DE.cpm, nrow=1)
    colnames(DE.cpm) <- group
  }
  colSize <- ncol(DE.cpm)
  # Workaround: wrap the DT widget in htmltools::tagList() so it renders
  # when printed inside a loop in a knitted document.
  print(
    htmltools::tagList(
      datatable(DE.cpm, options = list(scrollX = TRUE)) %>%
        formatStyle(columns = c(1:colSize), fontSize = '80%') %>%
        formatRound(c(1:colSize), 1)
    )
  )
  # Blank-line padding so the next markdown section clears the widget.
  if (nrow(DE.cpm) >= 10){
    cat(paste(rep("\n", 11), collapse = ""))
  } else {
    cat(paste(rep("\n", nrow(DE.cpm)), collapse = ""))
  }
  # format() converts the matrix to character before writing to disk.
  DE.cpm <- format(DE.cpm, digits=2, nsmall=1)
  write.csv(DE.cpm, file=paste(directory, name, ".", cell, ".", contrast, ".sig_results.logcpm", tag, ".csv", sep=""))
}
# Write the (non-log) counts-per-million values of the DE genes to a CSV
# file.
#
# Args:
#   DE.genes.t: data frame of significant DE genes; row names are gene ids.
#   directory, name, cell, contrast, tag: pieces of the output file name.
#   ...: unused, kept for backward compatibility.
#
# Relies on the module-level objects `CPM` (CPM matrix, genes x samples)
# and `group` (sample group labels).
#
# Fix: the emitted section header said "logCPM" (copy-pasted from
# export_logcpm) although this function exports plain CPM values; the
# message now matches the data actually written.
export_cpm <- function(DE.genes.t, directory, name, cell, contrast, tag = "", ...) {
  cat("\n\n### Export CPM values of the DE\n")
  cat("Inspect the depth-adjusted reads per million for the top differentially expressed. \n\n")
  DE.cpm <- CPM[rownames(DE.genes.t),]
  if (nrow(DE.genes.t) == 1) {
    # Subsetting a matrix by a single row returns a plain numeric vector;
    # rebuild a 1-row matrix so column names are preserved downstream.
    DE.cpm <- matrix(DE.cpm, nrow=1)
    colnames(DE.cpm) <- group
  }
  # format() converts the matrix to character before writing to disk.
  DE.cpm <- format(DE.cpm, digits=2, nsmall=1)
  write.csv(DE.cpm, file=paste(directory, name, ".", cell, ".", contrast, ".sig_results.cpm", tag, ".csv", sep=""))
}
# GO enrichment analysis (goseq) for one direction (UP or DOWN) of the DE
# genes of a contrast: writes significant terms to CSV, renders them as a
# DT widget, prints each term's details from GO.db, and returns a barplot.
#
# Args:
#   genes: named 0/1 vector over all genes (1 = DE in this direction);
#     names are Ensembl gene ids (genome/id hard-coded as "hg38"/"ensGene").
#   lengthData: gene lengths used as bias data for nullp().
#   GOmap: gene -> GO category mapping passed to goseq(gene2cat=).
#   extra_tag: direction label ("UP" or "DOWN"), used in headers/file names.
#   ...: unused, kept for backward compatibility.
#
# NOTE(review): the output file name uses the free variables `directory`,
# `name`, `cell`, `contrast` and `tag`, which must exist in the calling
# environment -- fragile; confirm they are always defined.
#
# Returns: a ggplot barplot of the significant GO terms (NULL, invisibly
# via return(), when nothing is significant).
go_up_down <- function(genes, lengthData, GOmap, extra_tag = "UP", ...){
  cat(paste("\n\n### GO analysis with goseq - ", extra_tag, "\n", sep = ""))
  # Probability weighting function correcting for gene-length bias.
  pwf <- nullp(genes, "hg38", "ensGene", bias.data = lengthData, plot.fit=FALSE)
  GO.wall <- goseq(pwf, gene2cat=GOmap)
  # Benjamini-Hochberg adjustment of the over-representation p-values.
  GO.wall$padj <- p.adjust(GO.wall$over_represented_pvalue, method="BH")
  GO.wall.sig <- subset(GO.wall, GO.wall$padj<.05)
  GO.wall.sig$ratio <- GO.wall.sig$numDEInCat / GO.wall.sig$numInCat
  if (nrow(GO.wall.sig) == 0) return()
  write.csv(GO.wall.sig, file=paste(directory, name, ".", cell, ".", contrast,
                                    ".sig_results.GO.", extra_tag, tag, ".csv", sep=""),
            row.names = FALSE)
  # Workaround: wrap the DT widget in htmltools::tagList() so it renders
  # when printed inside a loop in a knitted document.
  colSize <- ncol(GO.wall.sig)
  print(
    htmltools::tagList(
      datatable(GO.wall.sig, options = list(scrollX = TRUE), rownames = FALSE) %>%
        formatStyle(columns = c(1:colSize), fontSize = '80%')
    )
  )
  # Blank-line padding so the next markdown section clears the widget.
  if (nrow(GO.wall.sig) >= 10){
    cat(paste(rep("\n", 11), collapse = ""))
  } else {
    cat(paste(rep("\n", nrow(GO.wall.sig)), collapse = ""))
  }
  cat("\n------------ Enriched", extra_tag, "-regulated GO ------------\r\n")
  # Print the details of each enriched GO term from GO.db (terms missing
  # from GOTERM are skipped).
  enriched.GO <- GO.wall.sig$category
  print(enriched.GO)
  cat("\n\n")
  for(go in enriched.GO){
    goterm <- GOTERM[[go]]
    if (!is.null(goterm)) {
      cat("GOID: ", GOID(goterm), "\n")
      cat("Term: ", Term(goterm), "\n")
      cat("Ontology: ", Ontology(goterm), "\n")
      cat("Definition: ", Definition(goterm), "\n")
      cat("--------------------------------------\n\n")
    }
  }
  # Plot the GO terms as a horizontal barplot, ordered by decreasing
  # numDEInCat; term labels are truncated to 40 characters.
  GO.wall.sig$term <- reorder(GO.wall.sig$term, GO.wall.sig$numDEInCat)
  GO.wall.sig <- GO.wall.sig[order(GO.wall.sig$numDEInCat, decreasing=TRUE),]
  ggplot(subset(GO.wall.sig, !is.na(ontology)),
         aes(term, numDEInCat, fill=ontology)) +
    geom_bar(stat="identity", position=position_dodge()) +
    coord_flip() + labs(title=paste(contrast, extra_tag, tag)) +
    scale_x_discrete(label=function(x) substr(x, 1, 40))
}
# Look up the human-readable name of a KEGG pathway.
# `x` is a pathway id without the organism prefix; the human ("hsa")
# prefix is prepended before querying KEGG via keggGet().
getPathwayName <- function(x) {
  pathway_id <- paste0("hsa", x)
  keggGet(pathway_id)[[1]]$NAME
}
# Run a KEGG pathway enrichment analysis with goseq for one direction of DE
# genes (UP or DOWN), write significant pathways to CSV, and print them as
# an HTML datatable. Mirrors go_up_down() but for KEGG categories.
#
# genes:      named binary vector over all genes (1 = DE in this direction).
# lengthData: per-gene length vector used as the bias data for nullp.
# KEGGmap:    gene -> KEGG pathway mapping passed to goseq(gene2cat = ).
# extra_tag:  direction label ("UP"/"DOWN") used in headings and file names.
# ...:        absorbs extra arguments from callers (not used directly).
#
# NOTE(review): like go_up_down(), this reads `directory`, `name`, `cell`,
# `contrast` and `tag` as free variables from the calling/global
# environment; they are not taken from `...`. Also calls getPathwayName(),
# which queries KEGG over the network for every significant pathway.
kegg_up_down <- function(genes, lengthData, KEGGmap, extra_tag = "UP", ...){
  cat(paste("\n\n\n### KEGG analysis with goseq - ", extra_tag, "\n", sep = ""))
  pwf <- nullp(genes, "hg38", "ensGene", bias.data = lengthData, plot.fit=FALSE)
  KEGG.wall <- goseq(pwf, gene2cat=KEGGmap)
  # BH-adjust over-representation p-values; keep pathways with padj < 0.05.
  KEGG.wall$padj <- p.adjust(KEGG.wall$over_represented_pvalue, method="BH")
  KEGG.wall.sig <- subset(KEGG.wall, KEGG.wall$padj<.05)
  KEGG.wall.sig$ratio <- KEGG.wall.sig$numDEInCat / KEGG.wall.sig$numInCat
  # Nothing significant: skip all output for this direction.
  if (nrow(KEGG.wall.sig) == 0) return()
  # Add pathway info to the table
  KEGG.wall.sig$term <- unlist(lapply(KEGG.wall.sig$category, getPathwayName))
  write.csv(KEGG.wall.sig, file=paste(directory, name, ".", cell, ".", contrast,
                                      ".sig_results.KEGG.", extra_tag, tag, ".csv", sep=""),
            row.names = FALSE)
  # Workaround in printing table in loop
  # (htmltools::tagList makes DT widgets render inside loops in R Markdown)
  colSize <- ncol(KEGG.wall.sig)
  print(
    htmltools::tagList(
      datatable(KEGG.wall.sig, options = list(scrollX = TRUE), rownames = FALSE) %>%
        formatStyle(columns = c(1:colSize), fontSize = '80%')
    )
  )
  # Emit blank lines so the rendered table does not overlap following output.
  if (nrow(KEGG.wall.sig) >= 10){
    cat(paste(rep("\n", 11), collapse = ""))
  } else {
    cat(paste(rep("\n", nrow(KEGG.wall.sig)), collapse = ""))
  }
}
# Driver for the GO and KEGG enrichment analyses: splits the DE calls into
# up- and down-regulated sets and runs go_up_down()/kegg_up_down() on each.
#
# NOTE(review): operates entirely on globals from the surrounding workflow —
# `y` (object with y$genes$length and gene rownames, DGEList-like), `dt`
# (per-gene DE calls coded -1/0/1, decideTests-style), `GOmap` and `KEGGmap`
# (gene -> category mappings). Confirm these exist before calling.
goseq_analysis <- function(...) {
  # Per-gene lengths, named by gene id, for goseq's length-bias correction.
  lengthData <- y$genes$length
  names(lengthData) <- rownames(y)
  # Flatten the -1/0/1 DE calls into a named vector.
  genes <- as.vector(dt)
  names(genes) <- rownames(y)
  # UPs
  extra_tag <- "UP"
  genes.up <- genes
  genes.up[genes.up == -1] <- 0 # Modify all the -1 to 0, look at the enrichment of the upregulated genes
  go_up_down(genes.up, lengthData, GOmap, extra_tag)
  kegg_up_down(genes.up, lengthData, KEGGmap, extra_tag)
  # DOWNs
  extra_tag <- "DOWN"
  genes.down <- genes
  genes.down[genes.down == 1] <- 0
  genes.down[genes.down == -1] <- 1 # Modify all the -1 to 1, look at the enrichment of the downregulated genes
  go_up_down(genes.down, lengthData, GOmap, extra_tag)
  kegg_up_down(genes.down, lengthData, KEGGmap, extra_tag)
}
|
4d7111c590f6070ffd4b85d983dc98f478b7cb07
|
e14e44b96a5059204142cb31d40cdc404a02a3b3
|
/code/illustrate_perm.R
|
5a6dcc7bf0b830cce5ed72101dbd16642cc80fb4
|
[] |
no_license
|
lihualei71/CPT
|
8695c8c9630c94a5d36acbd04e7e7ded375c9341
|
ef78081acf554436fd27cb1980e17f171f95276d
|
refs/heads/master
| 2021-07-01T11:36:17.214154
| 2020-12-15T19:12:17
| 2020-12-15T19:12:17
| 196,753,743
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,128
|
r
|
illustrate_perm.R
|
# Illustrate the distribution of the O* statistic over all row orderings
# of a small data matrix (8 rows -> 8! permutations).
library("combinat")
library("tidyverse")
library("ggplot2")
source("CPT.R")  # provides solve_optim_eta()

# All permutations of the 8 row indices.
allperms <- combinat::permn(1:8)

# O* for every ordering of a Gaussian data matrix.
set.seed(1)
X <- matrix(rnorm(8 * 3), nrow = 8)
Ostar_vec_normal <- sapply(allperms, function(inds){
    solve_optim_eta(X[inds, ], 1, 1)$Ostar
})

# O* for every ordering of a heavy-tailed (Cauchy) data matrix.
# (Currently computed but not plotted; see the disabled two-panel code below.)
set.seed(1)
X <- matrix(rt(8 * 3, df = 1), nrow = 8)
Ostar_vec_cauchy <- sapply(allperms, function(inds){
    solve_optim_eta(X[inds, ], 1, 1)$Ostar
})

## Two-panel version (normal + Cauchy), disabled:
## Ostars <- data.frame(Ostar_vec_normal,
##                      Ostar_vec_cauchy)
## names(Ostars) <- c("X ~ i.i.d. Normal", "X ~ i.i.d. Cauchy")
Ostar_vec <- data.frame(value = Ostar_vec_normal)
# BUG FIX: the pipeline previously started from `Ostars`, which is only
# defined in the commented-out block above and would raise
# "object 'Ostars' not found"; use the `Ostar_vec` data frame built above.
# Also renamed `plot` -> `p` to avoid masking base::plot.
p <- Ostar_vec %>% gather %>%
    ggplot() +
    geom_histogram(aes(x = value, y = ..density..)) +
    ## facet_grid(~ key) +
    ## scale_y_continuous(expand = c(0, 0)) +
    xlab("Values of O* for different ordering") +
    theme_bw() +
    theme(panel.grid = element_blank(),
          axis.title = element_text(size = 15),
          axis.text = element_text(size = 12.5),
          strip.text = element_text(size = 15))
ggsave(filename = "../figs/ordering_illustrate.pdf", p,
       width = 4, height = 3)
|
68ce13490fe62412f262f253b2d31458fd560c8e
|
4f0b6567a19d2babeafa616c6d8ddf5572004c37
|
/data-science-scripts/zach/S3_gluster.R
|
5f9f8d3df41af869a4d9f4bf90c9c55a9f16e637
|
[] |
no_license
|
mcohenmcohen/DataRobot
|
a9b8fff8b0532e9066a207c1914b12702abbf48c
|
156e548d94d033b032d5027f2e436a13ddb85cf8
|
refs/heads/master
| 2022-09-01T18:14:29.587815
| 2022-08-23T20:54:59
| 2022-08-23T20:54:59
| 134,179,047
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,715
|
r
|
S3_gluster.R
|
#Load Data
# Compare model-benchmark (MBtest) runs executed on S3 vs Gluster storage.
# getLeaderboard() comes from the in-house 'shrinkR' package.
library('shrinkR')
library('data.table')
library('reshape2')
library('ggplot2')
library('ggrepel')
library('scales')
library('jsonlite')
library('pbapply')
library('stringi')
#JIRA Tix:
#https://datarobot.atlassian.net/browse/ENT-2320
#https://datarobot.atlassian.net/browse/ENT-2307
#MBtest IDs
# 577bd878d241bf23a9b68bc0 - S3
# 577bee98d241bf2f00db2111 - Gluster 1
# 577c2301d241bf4cf223c503 - Gluster 2
#Load base MBtest and new run
#Sys.sleep(43200)
suppressWarnings(S3 <- getLeaderboard('577bd878d241bf23a9b68bc0'))
suppressWarnings(G <- getLeaderboard('577bee98d241bf2f00db2111'))
# Tag each leaderboard with its storage environment.
G[, env := 'gluster']
S3[, env := 'S3']
# NOTE(review): only the Gluster run is analysed below; the S3 leaderboard
# is loaded and tagged but never combined into `dat` — confirm whether a
# rbind(S3, G) was intended for a cross-environment comparison.
dat <- G
#rm(S3, G)
setnames(dat, make.names(names(dat), unique=TRUE))
# Quick interactive sanity checks on task/file coverage (printed, not stored).
dat[,sort(table(main_task))]
dat[,sort(table(Filename))]
dat[,table(Filename, Sample_Pct, env)]
#Show max sample size
dat[,list(samp=max(Sample_Pct)), by='Filename']
dat[,list(samp=max(Sample_Pct)), by=c('main_task')][order(samp),]
#Subset data
length(unique(dat$Filename))
setnames(dat, make.names(names(dat)))
# Serialize list-columns to JSON strings so they survive reshaping.
dat[,Blueprint := lapply(Blueprint, toJSON, auto_unbox = T, pretty=T)]
dat[,Task_Info_Extras := lapply(Task_Info_Extras, toJSON, auto_unbox = T, pretty=T)]
#Fix model name
# Strip fit-parameter noise from the task string to get a clean model name.
dat[,model_name := stri_replace_all_regex(main_task, 'wa_fp=[\\p{L}|\\p{N}]+', '')]
dat[,model_name := stri_replace_all_fixed(model_name, 'l=tweedie', '')]
dat[,model_name := stri_replace_all_fixed(model_name, 'l=poisson', '')]
dat[,model_name := stri_replace_all_fixed(model_name, 'character(0)', '')]
dat[,model_name := stri_replace_all_fixed(model_name, 'ESXGBR', '')]
dat[,model_name := stri_replace_all_fixed(model_name, 'ESXGBC', '')]
dat[,model_name := stri_trim(model_name)]
#dat[,table(model_name)]
# Inspect the raw sample sizes behind the 95% sample-percent rows.
dat[Sample_Pct == 95, sort(unique(Sample_Size))]
# Keep only the columns used downstream, deriving human-readable units
# (GB of RAM, MB of storage, minutes of runtime).
# FIX: the original selection listed `metric` and `cv_method` twice each,
# producing duplicated columns in the result; each is now listed once.
dat <- dat[,
           list(
             env,
             metric,
             Filename,
             is_blender,
             main_task,
             Blueprint,
             Sample_Pct,
             cv_method,
             Total_Time_P1,
             Max_RAM_GB = as.numeric(Max_RAM/1024^3),
             Gini.Norm_H,
             RMSE_H,
             max_vertex_storage_size_P1,
             blueprint_storage_size_P1,
             X_tasks,
             Task_Info_Extras,
             blueprint_storage_MB = blueprint_storage_size_P1/(1024^2),
             max_vertex_size_MB = max_vertex_storage_size_P1/(1024^2),
             holdout_time_seconds = holdout_scoring_time/1000,
             holdout_time_minutes = holdout_scoring_time/60000,
             model_training_time_minutes = Total_Time_P1/60
           )]
#Say that anti-predictive == predictive
#(or we could cap)
dat[, Gini.Norm_H := abs(Gini.Norm_H)]
#Sort files by max RAM
# Order Filename factor levels by peak RAM so plots list heaviest first.
f <- dat[,list(run = max(Max_RAM_GB)), by='Filename']
f <- f[order(run, decreasing=TRUE),]
f_lev <- f$Filename
print(f)
dat[,Filename := factor(Filename, levels=f_lev)]
#Sort main tasks by max RAM
t <- dat[,list(run = max(Max_RAM_GB)), by='main_task']
t <- t[order(run, decreasing=TRUE),]
t_lev <- t$main_task
dat[,main_task := factor(main_task, levels=t_lev)]
#Write sample pct vs runtime vs ram
# Collapse the JSON blueprint to a single string key for grouping.
dat[,Blueprint := as.character(lapply(Blueprint, paste, collapse=' '))]
dat[,sample_number := NULL]
# Number the autopilot stages (sample percents) within each file/blueprint.
dat[, sample_number := as.integer(factor(Sample_Pct)), by=c('Filename', 'Blueprint')]
#dat[,table(main_task, sample_number)]
dat[,table(sample_number)]
dat[,sample_number := paste0('autopilot_', sample_number)]
#Reshape
#Can also do RMSE_H
# Melt to long format: one row per (id, measured metric).
id_vars <- c("Filename", "main_task", "cv_method", "metric", "X_tasks", "Blueprint", "Sample_Pct", "sample_number", 'env')
measure_vars <- c('Max_RAM_GB', 'model_training_time_minutes', 'holdout_time_minutes', 'Gini.Norm_H', "blueprint_storage_MB", "max_vertex_size_MB")
dat <- melt.data.table(dat[,c(id_vars, measure_vars), with=FALSE], measure.vars=measure_vars)
dat[,variable := factor(variable, levels=measure_vars)]
dat <- dat[!is.na(value),]
sort(sapply(dat, function(x) length(unique(x))))
#color scale:
#http://colorbrewer2.org/
# Manual palette: distinct colors for the biggest datasets, grey for the rest.
colors <- c(
  "#1f78b4", "#ff7f00", "#6a3d9a", "#33a02c", "#e31a1c", "#b15928",
  "#a6cee3", "#fdbf6f", "#cab2d6", "#b2df8a", "#fb9a99", "#ffff99", "black",
  "grey", "grey", "grey", "grey", "grey", "grey", "grey", "grey", "grey", "grey"
)
colors[duplicated(colors)]
length(unique(colors))
#Dat
# Headline numbers per environment: max training hours and peak RAM.
dat[variable == 'model_training_time_minutes', max(value) / 60, by='env']
dat[variable == 'Max_RAM_GB', max(value), by='env']
#Plot overall stats
# NOTE(review): the 95%-sample subset on the next line is immediately
# overwritten by `plt <- dat` — presumably a manual toggle; confirm which
# subset the final figure should use.
plt <- dat[Sample_Pct == 95,]
plt <- dat
plt[variable == 'model_training_time_minutes', max(value) / 60]
plt[variable == 'Max_RAM_GB', max(value)]
ggplot(plt, aes(x=value, y=main_task, col=Filename, label=Filename)) +
  geom_point() +
  theme_bw() +
  scale_colour_manual(values=colors) +
  theme(legend.position = "bottom") +
  facet_wrap(~variable, scales='free') +
  theme(axis.text.y = element_text(size=6)) +
  guides(col = guide_legend(nrow = 5, byrow = TRUE))
|
59adf0fcc44d1a3dcee7c15dcaed88113b1ce7f7
|
abac881414111f12c5fdecf253d13f749c02c65c
|
/analysis/acoustics.R
|
fd7c28dfb8b8bf3aedaaf9aaf9188a31ec326e03
|
[] |
no_license
|
ElOceanografo/rimfire
|
3d22f8402079fece8aeedcbd9753e08c86213585
|
8f9094fa4c68ada173893310a7fcf2e1858c65ac
|
refs/heads/master
| 2021-03-16T08:28:58.216775
| 2019-08-02T23:47:55
| 2019-08-02T23:47:55
| 48,252,183
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,931
|
r
|
acoustics.R
|
# Acoustic survey analysis: classify 120/710 kHz echosounder data into
# fish vs zooplankton, convert backscatter to biomass, and compare with
# net samples. Inputs are pre-processed .Rdata/CSV files in this directory.
library(ggplot2)
library(scales)
library(viridis)
library(reshape2)
library(dplyr)
library(tidyr)
library(lubridate)
library(gridExtra)
load("acoustics.Rdata")   # provides `echo` (per-cell Sv by frequency)
load("net_data.Rdata")    # provides `counts`, `net.meta` (net sample data)
zoop.ts <- read.csv("nets/zoop_ts.csv", stringsAsFactors = F)
fish.ts <- read.csv("fish_TS.csv", stringsAsFactors = F)
# Average a vector of decibel values correctly: convert to the linear
# (power) domain, take the arithmetic mean, and convert back to dB.
# FIX: spelled out TRUE instead of the reassignable shorthand `T`.
dB.mean <- function(x, na.rm = TRUE) 10 * log10(mean(10^(x / 10), na.rm = na.rm))
# Community-weighted mean target strength (TS) per trip/lake/frequency
# (averaged in the linear domain, weighted by taxon proportion), spread
# by frequency to show the 120-710 kHz dB difference used for
# classification below. Printed for inspection, not stored.
zoop.ts %>%
  group_by(trip, Lake, freq) %>%
  summarize(TS = 10*log10(mean(10^(TS/10) * proportion))) %>%
  dcast(trip + Lake ~ freq, value.var="TS") %>%
  mutate(delta = `120` - `710`)
# ---- Classify each echogram cell as fish or zooplankton -------------------
echo <- mutate(echo,
               hour = hour(datetime),
               minute = minute(datetime))
# Per-interval mean position (dcast below would otherwise drop lat/lon).
latlon <- echo %>%
  group_by(trip,Lake,Interval) %>%
  summarise(Lat_M = mean(Lat_M, na.rm=T),
            Lon_M = mean(Lon_M, na.rm=T))
# One row per cell with the two frequencies as columns `120` and `710`.
echo <- dcast(echo,
              trip+Lake+Date_M+hour+minute+datetime+Interval+Layer_depth_max ~ freq,
              value.var="Sv_mean")
echo <- left_join(echo, latlon)
# Default everything to zooplankton; dB-difference rule reclassifies below.
echo <- mutate(echo,
               Sv_120 = `120`,
               Sv_710 = `710`,
               delta = Sv_120 - Sv_710,
               class = "Zooplankton",
               Sv_zoop = Sv_710,
               Sv_fish = Sv_120)
# Fish: small 120-710 dB difference (delta > -10) or strong 120 kHz echo.
echo$class[echo$delta > -10 | echo$Sv_120 > -60] <- "Fish"
# Apply top-threshold since there's only 710 kHz data from Independence
echo$class[echo$Lake=="Independence" & echo$Sv_710 > -75] <- "Fish"
# Mask out the other class's channel in each cell.
echo$Sv_zoop[echo$class == "Fish"] <- NA
echo$Sv_fish[echo$class == "Zooplankton" | echo$Sv_fish < -90] <- -Inf
echo <- filter(echo, Sv_fish < -40) # eliminate a few noise spikes I didn't catch in EV
# ---- Diagnostic echogram figures ------------------------------------------
p <- ggplot(echo, aes(x=Interval, y=Layer_depth_max, fill=delta)) +
  geom_tile() + scale_y_reverse(limits=c(50, 0)) + scale_fill_gradient2() +
  facet_grid(Lake ~ trip)
ggsave("graphics/dB_difference.png", p, width=12.5, height=7.61, units="in")
p <- ggplot(echo, aes(x=Interval, y=Layer_depth_max, fill=class)) +
  geom_tile() +
  scale_y_reverse(limits=c(30, 0)) + ylab("Depth (m)") +
  facet_grid(Lake ~ trip)
ggsave("graphics/echo_classification.png", p, width=12.5, height=7.61, units="in")
p <- ggplot(echo, aes(x=Interval, y=Layer_depth_max, fill=Sv_zoop)) +
  geom_tile() + scale_y_reverse(limits=c(30, 0)) + scale_fill_viridis() +
  facet_grid(Lake ~ trip, scales="free_x") +
  ylab("Depth (m)") + ggtitle("Zooplankton")
ggsave("graphics/echo_zooplankton.png", p, width=12.5, height=7.61, units="in")
p <- ggplot(echo, aes(x=Interval, y=Layer_depth_max, fill=Sv_fish)) +
  geom_tile() +
  scale_y_reverse(limits=c(60, 0)) +
  scale_fill_viridis(limits=c(-80, -40), oob=squish) +
  facet_grid(Lake ~ trip, scales="free_x") +
  ylab("Depth (m)") + ggtitle("Fish")
ggsave("graphics/echo_fish.png", p, width=12.5, height=7.61, units="in")
# Example echogram plot
# Three-panel publication figure for one Cherry Lake transect:
# (a) 120 kHz Sv, (b) 710 kHz Sv, (c) dB-difference classification.
echogram.120 <- read.csv("../acoustics/Exports/cherry.2014.06.24.120kHz.echogram_figure.csv") %>%
  mutate(freq = 120) %>%
  mutate(datetime = ymd_hms(paste(Date_M, Time_M), tz="UTC")) %>%
  filter(Interval > 3700)
echogram.710 <- read.csv("../acoustics/Exports/cherry.2014.06.24.710kHz.echogram_figure.csv") %>%
  mutate(freq = 710) %>%
  mutate(datetime = ymd_hms(paste(Date_M, Time_M), tz="UTC")) %>%
  filter(Interval > 3700)
save(echogram.120, echogram.710, file="example_echogram_data.Rdata")
# Along-track distance of the example transect (printed for reference).
with(echogram.120,
     oce::geodDist(first(Lon_M), first(Lat_M), last(Lon_M), last(Lat_M))
)
# Stack frequencies; shift times to local (UTC-7) and zero the distance axis.
echogram <- rbind(echogram.120, echogram.710) %>%
  mutate(width = 2,
         datetime = datetime - hm("07:00"),
         Dist_M = Dist_M - min(Dist_M))
# dB difference per cell for the classification panel.
db.120 <- select(echogram.120, Layer_depth_max, datetime, Dist_M, Sv_mean)
db.710 <- select(echogram.710, Layer_depth_max, datetime, Dist_M, Sv_mean)
db.diff <- left_join(db.120, db.710, by=c("Layer_depth_max", "datetime", "Dist_M")) %>%
  mutate(delta = Sv_mean.x - Sv_mean.y,
         class = "Zoop",
         datetime = datetime - hm("07:00"),
         Dist_M = Dist_M - min(Dist_M))
db.diff$class[db.diff$delta > 0] <- "Fish"
db.diff$class[db.diff$Sv_mean.y < -80] <- "Empty"
# White-to-dark "magma" scale for Sv; black fill marks the lake bottom.
echo.col <- c("#FFFFFF", rev(viridis(24, option="A")))
bottom.col <- "black" # "grey50"
p1 <- ggplot() +
  geom_tile(aes(x=Dist_M, y=Layer_depth_max, fill=Sv_mean, width=width),
            data=filter(echogram, freq == 120)) +
  scale_fill_gradientn(colors=echo.col, limits=c(-80, -50), oob=squish, name=expression(S[v~120])) +
  # Overplot strong scatterers as points so single fish stay visible.
  geom_point(aes(x=Dist_M, y=Layer_depth_max, color=Sv_mean),
             data=filter(echogram, freq == 120, Sv_mean > -75),
             shape=15, size=0.7) +
  scale_color_gradientn(colors=echo.col, limits=c(-80, -50), oob=squish, guide=F) +
  scale_x_continuous(expand=c(0, 0), name="Distance (m)") +
  scale_y_reverse(limits=c(20, 1.5), expand=c(0, 0), name="Depth (m)") +
  ggtitle("a") + theme_bw() +
  theme(plot.title=element_text(hjust=0),
        panel.background=element_rect(fill=bottom.col),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p2 <- filter(echogram, freq==710) %>%
  ggplot(aes(x=Dist_M, y=Layer_depth_max, fill=Sv_mean, width=width)) +
  geom_tile() +
  scale_x_continuous(expand=c(0, 0), name="Distance (m)") +
  scale_y_reverse(limits=c(20, 1.5), expand=c(0, 0), name="Depth (m)") +
  scale_fill_gradientn(colors=echo.col, limits=c(-80, -50), oob=squish, name=expression(S[v~710])) +
  ggtitle("b") + theme_bw() +
  theme(plot.title=element_text(hjust=0),
        panel.background=element_rect(fill=bottom.col),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p3 <- ggplot() +
  geom_tile(aes(x=Dist_M, y=Layer_depth_max, fill=class, width=2), data=db.diff) +
  scale_fill_manual(labels=c("", "F", "Z"), name="Class",
                    values=c("white", "red", "lightskyblue")) +
  geom_point(aes(x=Dist_M, y=Layer_depth_max), data=filter(db.diff, class=="Fish"),
             color="red", shape=15, size=0.7) +
  scale_x_continuous(expand=c(0, 0), name="Distance (m)") +
  scale_y_reverse(limits=c(20, 1.5), expand=c(0, 0), name="Depth (m)") +
  ggtitle("c") + theme_bw() +
  theme(plot.title=element_text(hjust=0),
        panel.background=element_rect(fill=bottom.col),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Write the figure in both PNG (drafts) and TIFF (journal) formats.
png("graphics/example_echograms.png", width=7, height=6, units="in", res=300)
grid.arrange(p1, p2, p3, ncol=1)
dev.off()
tiff("graphics/example_echograms.tiff", width=7, height=6, units="in", res=300)
grid.arrange(p1, p2, p3, ncol=1)
dev.off()
################################################################################
# Depth profiles
################################################################################
# Community-mean acoustic properties of the zooplankton at 710 kHz:
# backscattering cross-section (sigma, linear), TS (dB), and per-individual
# weight/volume, each weighted by taxon proportion.
mean.ts.zoop <- zoop.ts %>%
  filter(freq == 710) %>%
  group_by(trip, Lake) %>%
  summarize(sigma = sum(10^(TS/10) * proportion),
            TS = 10*log10(sigma),
            weight = sum(weight * proportion),
            volume = sum(volume * proportion)) %>%
  mutate(class = "Sv_zoop") %>%
  as.data.frame()
# Wide display table of per-taxon TS/weight/counts for the manuscript.
ts.display.table <- zoop.ts %>%
  filter(freq == 710) %>%
  mutate(percent = round(proportion*100),
         weight = round(weight * 1e6, 1),
         TS = round(TS, 1)) %>%
  select(-freq, -total, -proportion) %>%
  melt(measure.vars = c("TS", "weight", "n", "percent")) %>%
  dcast(trip + Lake ~ model + variable) %>%
  left_join(select(mean.ts.zoop, trip, Lake, TS, weight)) %>%
  mutate(TS = round(TS, 1),
         weight = round(weight * 1e6, 1))
write.csv(ts.display.table, "nets/ts.display.table.csv")
# Fish TS table already has per-trip means; align columns with the zoop table.
mean.ts.fish <- fish.ts %>%
  select(trip, Lake, sigma, TS, weight) %>%
  mutate(volume = NA, class = "Sv_fish")
mean.ts <- rbind(mean.ts.zoop, mean.ts.fish)
# Mean Sv by depth layer for each class, then convert to numerical density
# (sv / sigma) and biomass (density * mean individual weight).
profiles <- echo %>%
  group_by(trip, Lake, Layer_depth_max) %>%
  summarise(Sv_zoop = dB.mean(Sv_zoop, na.rm=T),
            Sv_fish = dB.mean(Sv_fish, na.rm=T)) %>%
  melt(measure.vars=c("Sv_zoop", "Sv_fish"), variable.name="class", value.name="Sv") %>%
  mutate(sv = 10^(Sv / 10),
         class = as.character(class))
profiles$sv[is.na(profiles$sv)] <- 0
profiles <- left_join(profiles, mean.ts)
profiles <- mutate(profiles,
                   density = sv / sigma,
                   biomass = density * weight)
# aggregate the fish in bigger bins
profiles.fish <- profiles %>%
  filter(class=="Sv_fish") %>%
  mutate(depth = round(Layer_depth_max / 5 ) * 5) %>%
  group_by(Lake, trip, depth) %>%
  summarise(density=mean(density),
            biomass = mean(biomass))
# Two-panel figure: zooplankton (top, 0-25 m) and fish (bottom, 0-50 m).
p1 <- filter(profiles, class=="Sv_zoop", Layer_depth_max <= 25) %>%
  ggplot(aes(y=Layer_depth_max, x=biomass, linetype=Lake, shape=Lake)) +
  geom_point(size=1) + geom_path() +
  facet_wrap(~trip, nrow=1, scales="free_x") +
  scale_y_reverse(limits=c(25, 0), expand=c(0, 0)) +
  ylab("Depth (m)") + xlab(expression(Zooplankton~biomass~(g~m^-3))) +
  theme_minimal() + theme(panel.border = element_rect(fill="#00000000", colour="grey"))
p2 <- ggplot(profiles.fish, aes(y=depth, x=biomass, linetype=Lake, shape=Lake)) +
  geom_point() + geom_path() +
  facet_wrap(~trip, nrow=1, scales="free_x") +
  scale_y_reverse(limits=c(50, 0), expand=c(0, 0)) +
  ylab("Depth (m)") + xlab(expression(Fish~biomass~(g~m^-3))) +
  theme_minimal() + theme(panel.border = element_rect(fill="#00000000", colour="grey"))
p <- cowplot::plot_grid(p1, p2, ncol=1, rel_heights = c(31.5/50, 1))
ggsave("graphics/profiles.png", p, width=8, height=7, units="in")
ggsave("graphics/profiles.eps", p, width=8, height=7, units="in")
################################################################################
# Average densities and biomasses by lake
################################################################################
lake_areas = data.frame(
  Lake = c("Tahoe", "Independence", "Cherry", "Eleanor"),
  area = c(490, 2.6, 6.3, 3.9) # in km^2,
)
lake_areas <- mutate(lake_areas,
                     area = area * 1000^2, # convert areas to m^2
                     Lake = as.character(Lake))
# Per lake/trip/class: mean volumetric density & biomass, depth-integrated
# areal biomass, and whole-lake totals (kg) via the surface area.
lake.biomass <- profiles %>%
  filter(! (class=="Sv_zoop" & Layer_depth_max > 30)) %>%
  group_by(Lake, trip, class) %>%
  summarise(density = mean(density),
            biomass = mean(biomass),
            area.biomass = sum(biomass)) %>%
  left_join(lake_areas, by="Lake") %>%
  mutate(total = signif(area.biomass * area / 1e3, 3),# convert g to kg
         density = signif(density, 3),
         area.biomass = signif(area.biomass, 3),
         biomass = signif(biomass, 3)) %>%
  select(-area)
# Wide views printed for inspection (not stored).
reshape2::dcast(lake.biomass, Lake + class ~ trip, value.var="density")
reshape2::dcast(lake.biomass, Lake + class ~ trip, value.var="biomass")
reshape2::dcast(lake.biomass, Lake + class ~ trip, value.var="total")
reshape2::dcast(lake.biomass, trip + Lake ~ class, value.var="biomass")
# Manuscript table: whole-lake zoop/fish totals and their ratio.
lake.biomass.display <- lake.biomass %>%
  reshape2::dcast(Lake + trip ~ class, value.var="total") %>%
  mutate(ratio = Sv_zoop / Sv_fish) %>%
  arrange(trip, Lake) %>%
  rename(Zoop = Sv_zoop, Fish = Sv_fish) %>%
  mutate(Zoop = signif(Zoop, 2),
         Fish = signif(Fish, 2),
         ratio = signif(ratio, 2)) %>%
  select(trip, Lake, Zoop, Fish, ratio)
write.csv(lake.biomass.display, "lake_biomass_display.csv", row.names = F)
# Net-sample totals per trip/lake (excluding non-crustacean taxa), joined
# with the acoustic lake biomass for side-by-side comparison.
net.totals <- counts %>%
  filter(! Group %in% c("Rotifers", "Nematode", "Hydroids")) %>%
  group_by(trip, Lake) %>%
  summarize(count = sum(Count)) %>%
  ungroup() %>%
  left_join(net.meta) %>%
  mutate(num.density = count / VolFiltered / Dilution,
         biovolume = Biovolume / VolFiltered / TERCSplitFactor) %>%
  select(trip, Lake, count, biovolume, num.density)
net.totals <- left_join(net.totals, lake.biomass, by=c("trip", "Lake"))
# placeholder rows in data frame so all bars in barplot have same width
net.totals.barplot <- net.totals
net.totals.barplot$biovolume[is.na(net.totals.barplot$biovolume)] <- 0.1
net.totals.dummy <- net.totals.barplot[grepl("2014", net.totals.barplot$trip), ]
net.totals.dummy$Lake <- gsub("Cherry", "Tahoe", net.totals.dummy$Lake)
net.totals.dummy$Lake <- gsub("Eleanor", "Independence", net.totals.dummy$Lake)
net.totals.dummy$biomass <- 0
net.totals.dummy$biovolume <- 0
net.totals.barplot <- rbind(net.totals.barplot,
                            net.totals.dummy)
ii <- net.totals.barplot$Lake == "Tahoe" & net.totals.barplot$trip == "2013-10"
# nudge the Tahoe 2013-10 values up just slightly so the bar shows up on the plot
net.totals.barplot$biomass[ii] <- net.totals.barplot$biomass[ii] + 0.005
net.totals.barplot$biovolume[ii] <- net.totals.barplot$biovolume[ii] + 0.02
# Two-panel seasonal comparison: (a) net biovolume, (b) acoustic biomass.
p1 <- ggplot(net.totals.barplot, aes(x=trip, y=biovolume, fill=Lake)) +
  geom_bar(stat="identity", position="dodge") +
  xlab("") + ylab(expression(Net~biovolume~(mL~m^-3))) + ggtitle("a") +
  scale_fill_grey(start=0.8, end=0) +
  theme_minimal() +
  theme(panel.border = element_rect(fill="#00000000", colour="grey"),
        plot.title=element_text(hjust=0))
p2 <- ggplot(net.totals.barplot, aes(x=trip, y=biomass, fill=Lake)) +
  geom_bar(stat="identity", position="dodge") +
  scale_fill_grey(start=0.8, end=0) +
  xlab("") + ylab(expression(Acoustic~biomass~(g~m^-3))) + ggtitle("b") +
  theme_minimal() +
  theme(panel.border = element_rect(fill="#00000000", colour="grey"),
        plot.title=element_text(hjust=0))
p <- gridExtra::grid.arrange(p1, p2, ncol=1)
ggsave("graphics/seasonal_biomass.png", p, width=6, height=5, units="in")
ggsave("graphics/seasonal_biomass.eps", p, width=6, height=5, units="in")
# Regression of net and acoustic biomass
# Rebuild net totals, this time floor-filling missing biovolume with 0.1.
net.totals <- counts %>%
  filter(! Group %in% c("Rotifers", "Nematode", "Hydroids")) %>%
  group_by(trip, Lake) %>%
  summarize(count = sum(Count)) %>%
  ungroup() %>%
  left_join(net.meta) %>%
  mutate(Biovolume = replace(Biovolume, is.na(Biovolume), 0.1)) %>%
  mutate(num.density = count / VolFiltered / Dilution,
         biovolume = Biovolume / VolFiltered / TERCSplitFactor) %>%
  select(trip, Lake, count, biovolume, num.density)
net.times <- read.csv("nets/net_times.csv") %>%
  mutate(net.time = ymd_hm(net.time))
# Spot-check one echogram interactively (printed, not stored).
filter(echo, Lake=="Eleanor", trip=="2014-04") %>%
  ggplot(aes(x=Interval, y=Layer_depth_max, fill=Sv_zoop)) +
  geom_tile() + scale_y_reverse() + scale_fill_viridis()#limits=c(-100, -70))
# Locate each net tow on the acoustic track by matching its timestamp.
net.locs <- echo %>%
  mutate(net.time = ymd_hm(paste(Date_M, hour, minute))) %>%
  select(trip, Lake, net.time, Lat_M, Lon_M) %>%
  inner_join(net.times) %>%
  group_by(trip, Lake) %>%
  summarize(net.time=first(net.time), net.lat=first(Lat_M), net.lon=first(Lon_M)) %>%
  ungroup()
# Acoustic zooplankton biomass in the cells nearest each net tow
# (< 25 m away, < 10 min before the tow, top 30 m), paired with the
# corresponding net totals and model-based biomass/biovolume estimates.
net.echo <- echo %>%
  left_join(net.locs) %>%
  group_by(trip, Lake) %>%
  mutate(net.dist = oce::geodDist(Lon_M, Lat_M, net.lon, net.lat),
         timedelta = net.time - datetime) %>%
  ungroup() %>%
  filter(class=="Zooplankton",
         net.dist < .025,
         timedelta < 10*60,
         Layer_depth_max < 30) %>%
  select(trip, Lake, datetime, Layer_depth_max, Sv_zoop) %>%
  left_join(mean.ts.zoop) %>%
  mutate(sv_zoop = 10^(Sv_zoop/10),
         density_acoustic = sv_zoop / sigma,
         biomass_acoustic = density_acoustic * weight) %>%
  group_by(trip, Lake, Layer_depth_max) %>%
  summarize(sv = mean(sv_zoop, na.rm=T),
            density_acoustic = mean(density_acoustic, na.rm=T),
            biomass_acoustic = mean(biomass_acoustic, na.rm=T)) %>%
  ungroup() %>%
  group_by(trip, Lake) %>%
  summarize(sv = mean(sv, na.rm=T),
            density_acoustic = mean(density_acoustic, na.rm=T),
            biomass_acoustic = mean(biomass_acoustic, na.rm=T)) %>%
  ungroup() %>%
  left_join(net.totals) %>%
  left_join(mean.ts.zoop) %>%
  mutate(biovolume.calc = num.density * volume ,
         biomass.calc = num.density * weight) %>%
  rename(biovolume_gc = biovolume)
net.echo
pairs(~ sv + density_acoustic + biomass_acoustic + biovolume_gc + num.density + biovolume.calc,
      net.echo, pch=16, cex=2)
# Zero-intercept regressions, with and without the Eleanor 2014-04 outlier.
net.echo.sub <- filter(net.echo, !(Lake=="Eleanor" & trip=="2014-04"))
mod.net.acoustic <- lm(biovolume_gc ~ 0 + biomass_acoustic, net.echo.sub)
summary(mod.net.acoustic)
confint(mod.net.acoustic)
mod.net.acoustic.outlier <- lm(biovolume_gc ~ 0 + biomass_acoustic, net.echo)
summary(mod.net.acoustic.outlier)
confint(mod.net.acoustic.outlier)
## Geometric mean regression
# Slope = geometric mean of the y~x and inverse of the x~y slopes.
(mod.net.acoustic.yx <- lm(biomass_acoustic ~ 0 + biovolume_gc, net.echo.sub))
m <- sqrt(coef(mod.net.acoustic) * (1/coef(mod.net.acoustic.yx)))
b <- with(net.echo.sub, mean(biovolume_gc) - m * mean(biomass_acoustic))
(mod.net.acoustic.yx.out <- lm(biomass_acoustic ~ 0 + biovolume_gc, net.echo))
m.outlier <- sqrt(coef(mod.net.acoustic.outlier) * (1/coef(mod.net.acoustic.yx.out)))
b.outlier <- with(net.echo, mean(biovolume_gc) - m.outlier * mean(biomass_acoustic))
# Hand-tuned label offsets so point labels don't overlap in the figure.
net.echo$label.y <- net.echo$biovolume_gc
net.echo$label.x <- net.echo$biomass_acoustic
select(net.echo, Lake, trip, label.x, label.y)
net.echo$label.y[3] <- net.echo$label.y[3] + 0.03
net.echo$label.y[2] <- net.echo$label.y[2] - 0.03
net.echo$label.y[1] <- net.echo$label.y[1] - 0.15
net.echo$label.x[1] <- net.echo$label.x[1] - 0.02
net.echo$label.x[5] <- net.echo$label.x[5] - 3
# Scatter of acoustic biomass vs. net biovolume with the zero-intercept
# fits (solid: outlier excluded; dashed: all points). Written as PNG and
# EPS versions of the same figure.
# FIX: the PNG device block was previously duplicated verbatim, so the same
# file was drawn and overwritten twice; the duplicate has been removed.
png("graphics/net_vs_acoustics.png", width=1000, height=700, pointsize = 24)
mar.default <- par("mar")
par(mar=c(5, 5, 3, 2))
plot(biovolume_gc ~ biomass_acoustic, net.echo.sub, xlim=c(0, 12), ylim=c(-0.1, 3),
     pch=16, bty='n', xlab=expression(Acoustic~biomass~(g~m^-3)),
     ylab=expression(Net~biovolume~(mL~m^-3)))
points(biovolume_gc ~ biomass_acoustic, net.echo[5, ], pch=1)  # outlier, open circle
text(net.echo$label.x, net.echo$label.y, paste(net.echo$Lake, net.echo$trip),
     pos=4, cex=0.8, col="#666666")
lines(0:15, predict(mod.net.acoustic, newdata=list(biomass_acoustic=0:15)))
lines(0:15, predict(mod.net.acoustic.outlier, newdata=list(biomass_acoustic=0:15)), lty=2)
par(mar=mar.default)
dev.off()
setEPS()
postscript("graphics/net_vs_acoustics.eps")
mar.default <- par("mar")
par(mar=c(5, 5, 3, 2))
plot(biovolume_gc ~ biomass_acoustic, net.echo.sub, xlim=c(0, 12), ylim=c(-0.1, 3),
     pch=16, bty='n', xlab=expression(Acoustic~biomass~(g~m^-3)),
     ylab=expression(Net~biovolume~(mL~m^-3)))
points(biovolume_gc ~ biomass_acoustic, net.echo[5, ], pch=1)
text(net.echo$label.x, net.echo$label.y, paste(net.echo$Lake, net.echo$trip),
     pos=4, cex=0.8, col="#666666")
lines(0:15, predict(mod.net.acoustic, newdata=list(biomass_acoustic=0:15)))
lines(0:15, predict(mod.net.acoustic.outlier, newdata=list(biomass_acoustic=0:15)), lty=2)
par(mar=mar.default)
dev.off()
################################################################################
# Track lines
################################################################################
# Per-interval (along-track) biomass for mapping; zooplankton restricted to
# the top 30 m as in the lake-wide averages.
echo$Sv_zoop[echo$Layer_depth_max > 30] <- NA
tracks <- echo %>%
  group_by(trip, Lake, Interval) %>%
  summarise(Lat_M = mean(Lat_M, na.rm=T),
            Lon_M = mean(Lon_M, na.rm=T),
            Sv_zoop = dB.mean(Sv_zoop, na.rm=T),
            Sv_fish = dB.mean(Sv_fish, na.rm=T),
            bottom = max(Layer_depth_max)) %>%
  melt(measure.vars=c("Sv_zoop", "Sv_fish"), variable.name="class", value.name="Sv") %>%
  mutate(sv = 10^(Sv / 10))
# tracks$sv[is.na(tracks$sv)] <- 0
# Convert backscatter to density and biomass using the mean TS tables.
tracks <- left_join(tracks, mean.ts)
tracks <- mutate(tracks,
                 density = sv / sigma,
                 biomass = density * weight)
# Drop intervals without a valid GPS fix before saving for the map script.
tracks <- filter(tracks, is.finite(Lon_M), is.finite(Lat_M))
save(tracks, file="tracks.Rdata")
|
d9098600b9bf6c40d0f7551b0e26c452b993a576
|
b339ae2c3ac541f070b7b074cfc77cebdd6ae8d3
|
/face2gender.R
|
80de6546e254e2239551ce59cd384bdc21be0988
|
[] |
no_license
|
jkortner/clarifai
|
54f875ada795a9b23cb8169e2341ca8125933adc
|
68dff924b71e55eaad8796eef7dd4afb10f685e8
|
refs/heads/master
| 2023-02-19T09:18:17.654697
| 2021-01-12T14:18:29
| 2021-01-12T14:18:29
| 286,798,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,032
|
r
|
face2gender.R
|
# --------------------------------------------------------------------------- #
# face2gender
# --------------------------------------------------------------------------- #
# Query the Clarifai demographics API (via a Python helper loaded with
# reticulate) for the apparent gender of a face in a JPEG image.
library(httr)
library(readr)
library(progress)
library(reticulate)
use_python('/usr/local/opt/python@3.8/bin/python3', required = T)
py_config()
# --------------------------------------------------------------------------- #
# Exposes clarifai_demographics() in the R session.
source_python("clarifai/clarifai.py")
api_key <- read_file('clarifai/clarifai-api-key-new.txt')
# function
# Returns a Python dict mapping the two top gender-appearance concept names
# to their confidence values for the first detected face region.
#
# NOTE(review): the zero-based `[0]`/`[1]` indexing below is Python-style;
# it is only valid if `output` is an *unconverted* reticulate Python object.
# If source_python() converts results to R lists (its default), these
# should be `[[1]]`/`[[2]]` — verify against clarifai.py's return type.
face2gender <- function(jpg_path, api_key) {
  output <- clarifai_demographics(jpg_path, api_key, 'gender-appearance')
  demographics <- dict()
  # top two concepts from the 'gender-appearance' model
  # (the previous comment said "race / multicultural appearance", which
  # did not match the model actually queried above)
  demographics[[output$data$regions[0]$data$concepts[0]$name]] <- output$data$regions[0]$data$concepts[0]$value
  demographics[[output$data$regions[0]$data$concepts[1]$name]] <- output$data$regions[0]$data$concepts[1]$value
  return(demographics)
}
|
fc551fdc115563fb95750ce2158b3f64ac5f4d97
|
4c7159fd76fd372717900030dc85ffad62cc49c0
|
/man/diagnositcs.Rd
|
26af46c7ce5165fa97b1478d48250006841ab133
|
[
"MIT"
] |
permissive
|
miheerdew/cbce
|
0bc296a0d99e377d76ce5df6b5f5a5d4a57c4ee0
|
58422a6106f7ed69737055a5f806dfd52ff46262
|
refs/heads/master
| 2023-08-31T20:09:25.290422
| 2023-08-20T20:24:47
| 2023-08-20T20:24:47
| 102,528,314
| 4
| 1
|
NOASSERTION
| 2023-06-10T19:18:28
| 2017-09-05T20:46:57
|
R
|
UTF-8
|
R
| false
| true
| 399
|
rd
|
diagnositcs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnostics.R
\name{diagnostics_none}
\alias{diagnostics_none}
\title{Don't collect any diagnostics from the method.}
\usage{
diagnostics_none(event, e = parent.frame())
}
\description{
Don't collect any diagnostics from the method.
}
\section{Functions}{
\itemize{
\item \code{diagnostics_none}:
}}
\keyword{internal}
|
0e45d184d8f0e8b084c0a92565be7bf4cbf47d5f
|
85860d715fd99de4c0d1da9b8a98596a7dba1067
|
/codigo/Exercicio9.R
|
4d4367a62977a15f4d4749e28564a70aa0a526d6
|
[] |
no_license
|
JVFayad/Estatastica-Basica
|
7cd14c70400298bb47f90b65fdd51a4d8c0abf66
|
16716ad4573b989271796c21b7ccd1397274feda
|
refs/heads/master
| 2020-03-30T14:47:30.737324
| 2018-10-09T13:39:05
| 2018-10-09T13:39:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 563
|
r
|
Exercicio9.R
|
# Exercise 9: histogram and frequency distribution of a salary column
# read from an Excel sheet.
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("C:/Estatistica Basica/Estat-stica-B-sica")
# install.packages("gridExtra")
library(xlsx)
library(gridExtra)
library(grid)
tabela <- read.xlsx("dados/exercicio9.xls", sheetName = "Plan1")
# NOTE(review): palette() has only a handful of entries, so palette()[661]
# is NA and the histogram bars are drawn unfilled — likely a valid color
# (or colors()[i] with i in range) was intended; confirm and fix.
color = palette()[661]
jpeg('graficos/exercicio9_graf1.jpg')
# Column name "SalÃ.rios" is mojibake for "Salários" (encoding mismatch
# between the .xls file and the R session) — it must match read.xlsx's
# output exactly, so it is left untouched here.
histg <- hist(tabela$"SalÃ.rios", main = "Histograma", xlab = "Salario", ylab = "Frequencia", breaks=(max(tabela) - min(tabela)) / 5, col = color)
dev.off()
# Frequency table over the same class intervals the histogram used.
intervals <- cut(tabela$"SalÃ.rios", breaks = histg$breaks, right = FALSE, include.lowest = TRUE)
dist_freq <- cbind(table(intervals))
dist_freq
|
289b243a4111a7b4d63eda9f0b585430f8d114b5
|
593c0dc1fd751b61f1eb481d43f3543461c89560
|
/cluster_analysis.R
|
69a5e04a8cd39d412df01381682f96205d73e9aa
|
[] |
no_license
|
mcc-apsis/coal_tree
|
cab7120eaa3c60aeb2708efafada04b6ceda5185
|
a2e71a19123aae1d705d7985a369a07ca8c681a1
|
refs/heads/master
| 2020-04-11T07:41:05.097779
| 2017-07-03T12:29:29
| 2017-07-03T12:29:29
| 161,618,978
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,526
|
r
|
cluster_analysis.R
|
#http://www.statmethods.net/advstats/cluster.html
library(fpc)
library(pvclust)
library(mclust)
library(cluster)
mydata <- v_dataShort %>%
spread(variable, value) %>%
filter(year >= 1971, year <= 2012) %>%
filter(iso %in% u_iso) %>%
select(-iso, -year, -E_CC) %>%
select_("E_CP", "E_CIm", "E_CEx", "GDPpc", "P", "EE", "GDP_Ind", "GDP_Ser", "GDP_Tra")
# Prepare Data
mydata <- na.omit(mydata) # listwise deletion of missing
mydata <- scale(mydata) # standardize variables
#==== Partitioning ===============================================
# K-means clustering is the most popular partitioning method. It
# requires the analyst to specify the number of clusters to extract.
# A plot of the within groups sum of squares by number of clusters
# extracted can help determine the appropriate number of clusters.
# The analyst looks for a bend in the plot similar to a scree test
# in factor analysis. See Everitt & Hothorn (pg. 251).
# Determine number of clusters
wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(mydata, centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
# K-Means Cluster Analysis
fit <- kmeans(mydata, 5) # 5 cluster solution
# get cluster means
aggregate(mydata,by=list(fit$cluster),FUN=mean)
# append cluster assignment
mydata <- data.frame(mydata, fit$cluster)
# A robust version of K-means based on mediods can be invoked by
# using pam() instead of kmeans(). The function pamk() in the fpc
# package is a wrapper for pam that also prints the suggested number
# of clusters based on optimum average silhouette width.
#==== Hierarchical Agglomerative =================================
# There are a wide range of hierarchical clustering approaches.
# I have had good luck with Ward's method described below.
# Ward Hierarchical Clustering
d <- dist(mydata, method = "euclidean") # distance matrix
fit <- hclust(d, method="ward")
plot(fit) # display dendogram
groups <- cutree(fit, k=5) # cut tree into 5 clusters
# draw dendogram with red borders around the 5 clusters
rect.hclust(fit, k=5, border="red")
# The pvclust( ) function in the pvclust package provides p-values
# for hierarchical clustering based on multiscale bootstrap resampling.
# Clusters that are highly supported by the data will have large p values.
# Interpretation details are provided Suzuki. Be aware that pvclust
# clusters columns, not rows. Transpose your data before using.
# Ward Hierarchical Clustering with Bootstrapped p values
fit <- pvclust(mydata, method.hclust="ward",
method.dist="euclidean")
plot(fit) # dendogram with p values
# add rectangles around groups highly supported by the data
pvrect(fit, alpha=.95)
#==== Model based ================================================
# Model based approaches assume a variety of data models and apply
# maximum likelihood estimation and Bayes criteria to identify the
# most likely model and number of clusters. Specifically, the Mclust()
# function in the mclust package selects the optimal model according
# to BIC for EM initialized by hierarchical clustering for
# parameterized Gaussian mixture models. (phew!). One chooses the model
# and number of clusters with the largest BIC.
# See help(mclustModelNames) to details on the model chosen as best.
# Model Based Clustering
fit <- Mclust(mydata)
plot(fit) # plot results
summary(fit) # display the best model
#==== Plotting Cluster Solutions =================================
# It is always a good idea to look at the cluster results.
# K-Means Clustering with 5 clusters
fit <- kmeans(mydata, 10)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
clusplot(mydata, fit$cluster, color=TRUE, shade=TRUE,
labels=2, lines=0)
# Centroid Plot against 1st 2 discriminant functions
plotcluster(mydata, fit$cluster)
#==== Validating cluster solutions ===============================
# The function cluster.stats() in the fpc package provides a mechanism
# for comparing the similarity of two cluster solutions using a variety
# of validation criteria (Hubert's gamma coefficient, the Dunn index
# and the corrected rand index)
# comparing 2 cluster solutions
cluster.stats(d, fit1$cluster, fit2$cluster)
# where d is a distance matrix among objects, and fit1$cluster and
# fit$cluster are integer vectors containing classification results
# from two different clusterings of the same data.
|
d836ccdafc7fba6d07c2fd0f098dc56d5d736212
|
17b6dbd8acf2ce8556684754dc5f48a9373f7c96
|
/R/test_predictor.R
|
a290a35943a8929f062300b003651d1f8b2049d5
|
[] |
no_license
|
leekgroup/phenopredict
|
f53a517c670a9670041825c79456874367d92327
|
ab34f6ca3c0aeb90d2c672837175d7a13c308ca5
|
refs/heads/master
| 2021-09-06T23:59:31.970592
| 2018-02-13T19:24:14
| 2018-02-13T19:24:14
| 66,372,434
| 16
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,010
|
r
|
test_predictor.R
|
#' Test accuracy of predictor on known phenotypes
#'
#' This function takes the expression data input to
#' build_predictor() and the coefficient estimates from
#' build_predictor() for phenotype prediction. The known
#' phenotypes are also input for comparison and
#' asseessment of predictor accuracy.
#'
#' @param inputdata output from filter_regions() \code{inputdata}
#' @param phenodata data set with phenotype information; samples in rows,
#' variables in columns \code{phenodata}
#' @param phenotype phenotype of interest \code{phenotype}
#' @param type The class of the phenotype of interest (numeric, binary, factor)
#' \code{type}
#' @param covariates Which covariates to include in model \code{covariates}
#' @param predictordata object output from build_predictor \code{predictordata}
#'
#' @return list of actual and predicted phenotype, and summarization of output
#'
#' @keywords phenotype, prediction, test
#'
#' @export
#'
#' @examples
#'
#' library('GenomicRanges')
#' library('dplyr')
#'
#' ## Make up some some region data
#' regions <- GRanges(seqnames = 'chr2', IRanges(
#' start = c(28971710:28971712, 29555081:29555083, 29754982:29754984),
#' end = c(29462417:29462419, 29923338:29923340, 29917714:29917716)))
#'
#' ## make up some expression data for 9 rows and 30 people
#' data(sysdata, package='phenopredict')
#' ## includes R object 'cm'
#' exp= cm[1:length(regions),1:30]
#'
#' ## generate some phenotype information
#' sex = as.data.frame(rep(c("male","female"),each=15))
#' age = as.data.frame(sample(1:100,30))
#' pheno = dplyr::bind_cols(sex,age)
#' colnames(pheno) <- c("sex","age")
#'
#' ## select regions to be used to build the predictor
#' inputdata <- filter_regions(expression=exp, regiondata=regions,
#' phenodata=pheno, phenotype="sex",
#' covariates=NULL,type="factor", numRegions=2)
#'
#' ## build phenotype predictor
#' predictor<-build_predictor(inputdata=inputdata ,phenodata=pheno,
#' phenotype="sex", covariates=NULL,type="factor", numRegions=2)
#'
#' ## determine resubstitution error
#' ## carry out prediction in training data set
#' predictions_test<-test_predictor(inputdata=inputdata ,phenodata=pheno,
#' phenotype="sex", covariates=NULL,type="factor",predictordata=predictor)
test_predictor <- function(inputdata=NULL ,phenodata=NULL,
phenotype=NULL, covariates=NULL,type="factor",predictordata=NULL){
type <- match.arg(type,c("factor","binary", "numeric") )
if(is.null(inputdata)) {
stop('Must specify inputdata to use.
This is the output from filter_regions()')
}
if(is.null(phenodata)) {
stop('Must include phenotype file.')
}
if(is.null(predictordata)) {
stop('Must specify predictor data to use.
This is the output from build_predictor()')
}
predictor = predictordata
## to chose max value, but assign NA if max is 0
which.highest <- function(x){
if(max(x)!=0){
return(which.max(x))
}else{
return(length(possibles)+1)
}
}
#extract regions
expressiondata = inputdata$covmat[predictor$trainingProbes,]
regiondata = inputdata$regiondata[predictor$trainingProbes]
ov = GenomicRanges::findOverlaps(inputdata$regiondata,
predictor$regiondata)
## predictions
if(type=="factor"){
# define possible predictions
possibles = levels(droplevels(as.factor(phenodata[,phenotype])))
possNA = c(possibles,"Unassigned")
# make predictions
projectCellType(Y=expressiondata,
coefCellType=predictor$coefEsts) -> predictions
maxs <- apply(predictions,1,max)
esttype = apply(predictions,1,which.highest)
predicted <- possNA[esttype]
}
if(type=="numeric"){
## prepare data
## ensure regions are named the same way as in build_predictor
expressiondata = as.data.frame(t(expressiondata))
colnames(expressiondata) <- paste0("exp_",1:ncol(expressiondata))
knots_picked = predictor$knots_picked
# Prepare model
# Fit ns(expression, 5) for each expressed region
l=5
Xnew = model.matrix(as.formula(paste0("~",paste(
paste0(" splines::ns(",colnames(expressiondata),",df=",
l,", knots=knots_picked[,\'",colnames(knots_picked),"\'])"),
collapse="+"))), data=expressiondata)
## generate predictions
predicted = as.numeric(as.matrix(t(predictor$coefEsts))%*% t(Xnew))
}
#summarize data
actual <- phenodata[,phenotype]
number_sites = length(predictor$trainingProbes)
if(type=="factor"){
number_match <- sum(predicted==actual)
perc_correct = sum(predicted==actual)/length(actual)
summarized = cbind(number_sites,number_match, perc_correct)
colnames(summarized) <- c("sites_tested",
"number_correct", "percent_correct")
}
if(type=="numeric"){
correlation = stats::cor(predicted, actual)
mean_diff = mean(abs(predicted-actual))
summarized = cbind(number_sites, correlation, mean_diff)
colnames(summarized) <- c("sites_tested", "correlation","mean_diff")
}
res <- list(actual = actual, predicted=predicted, summarized=summarized)
return(res)
}
|
ea33ce3f4f06df70aa9570b2aad8740f9f2b976b
|
2395d3e954c587ac39025fe4ca3e36e6ec74b15d
|
/DataFrameManipulation/ChangeColumnNames.R
|
b681e0be13944b00fccd2ee3848baa0fd8707d42
|
[] |
no_license
|
CamEJ/R-basics
|
32426728fcf2661cedfc7e06fb5c3772667c8cf5
|
d9a309ed96e37b9f22728a5d624c475fa71e3ac0
|
refs/heads/master
| 2023-07-24T15:27:23.472944
| 2023-07-13T16:00:38
| 2023-07-13T16:00:38
| 83,305,312
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 284
|
r
|
ChangeColumnNames.R
|
# Change column names
# if you want to change the column name based on the old column name
# instead of saying a number which column it is.
colnames(dataframe)[which(names(dataframe) == "columnName")] <- "newColumnName"
# by column number
colnames(dataframe)[2] <- "NewColName"
|
814d37c86963464921126134c215fe3ed7224a96
|
2cf25dbb82c95b55dfb95eff59f62208ea7aa434
|
/man/mrmctools.Rd
|
d75bfb61de85f7a3fe222659f5e36f54bcb83f4e
|
[] |
no_license
|
rickeycarter/mrmctools
|
b7ab940fe0ef9bb44ac58207a8e7252f9b5cf009
|
27b4215b26ef2d371bfd50085a7bcd3305bc673b
|
refs/heads/master
| 2021-09-10T06:57:51.512979
| 2018-03-22T00:49:48
| 2018-03-22T00:49:48
| 120,644,400
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 378
|
rd
|
mrmctools.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mrmctools.R
\docType{package}
\name{mrmctools}
\alias{mrmctools}
\alias{mrmctools-package}
\title{\code{mrmctools} package}
\description{
Package mrmctools to support multireader, multicase study designs
}
\details{
See the README on
\href{https://github.com/rickeycarter/mrmctools#readme}{GitHub}
}
|
b3ab6c6179b5655710edae60a52ed779c82469c8
|
5857eaa429f8228a2380fcf86102242f47a4f1aa
|
/Plot1.R
|
9cb73e6d7d8deef0e67082be9778c45b1246c402
|
[] |
no_license
|
darykov/ExData_Plotting1
|
b3a657ffaf84d598825bd88d0c975d9b7ba4b4f2
|
42cc6a811deebdd99990840d96d8f90f38119de9
|
refs/heads/master
| 2021-01-24T21:53:43.623347
| 2016-04-04T04:24:41
| 2016-04-04T04:24:41
| 55,380,377
| 0
| 0
| null | 2016-04-04T02:28:07
| 2016-04-04T02:28:07
| null |
UTF-8
|
R
| false
| false
| 1,047
|
r
|
Plot1.R
|
file<-"household_power_consumption.txt"
Electric_power<-read.table(file, sep = ";", header = TRUE, stringsAsFactors = F)
Electric_power$Date<-as.Date(Electric_power$Date, format = "%d/%m/%Y")
date <- Electric_power$Date >= as.Date("2007-02-01") & Electric_power$Date <= as.Date("2007-02-02")
Electric_power<-Electric_power[date,]
Electric_power$Global_active_power<-as.numeric(Electric_power$Global_active_power)
Electric_power$Global_reactive_power<-as.numeric(Electric_power$Global_reactive_power)
Electric_power$Voltage<-as.numeric(Electric_power$Voltage)
Electric_power$Global_intensity<-as.numeric(Electric_power$Global_intensity)
Electric_power$Sub_metering_1<-as.numeric(Electric_power$Sub_metering_1)
Electric_power$Sub_metering_2<-as.numeric(Electric_power$Sub_metering_2)
Electric_power$Sub_metering_3<-as.numeric(Electric_power$Sub_metering_3)
par(mfrow=c(1,1))
hist(Electric_power$Global_active_power, col="red", main ="Global Active Power", xlab = "Global Active Power (kilowatts)" )
dev.copy(png, filename = "Plot1.png")
dev.off()
|
0e6a6dbaa36376d7f2c2086b54cb48f397b3c0e3
|
eeea10b971ed75bf87305d7b4163cf355eac1240
|
/RF LOWESS sim/Results/Graphics.R
|
e5fb2a148014ea1030c307b3b9d015b0e1b8fa7b
|
[] |
no_license
|
AndrewjSage/RF-Robustness
|
42e0caa6cc5c1f46031f6a3b77e33a56dc4fc83b
|
bace62de6a191832c1a9d19462c140686a15bf1b
|
refs/heads/master
| 2022-11-21T12:09:04.041716
| 2020-07-24T04:52:51
| 2020-07-24T04:52:51
| 106,871,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,713
|
r
|
Graphics.R
|
setwd("~/OneDrive - Lawrence University/Research/Robust RF/RFLOWESS sim/Results")
library(tidyverse)
ntechs <- 16
nprops <- 6
p <- c(rep(c(0, 0.05, 0.10, 0.15, 0.20, 0.25),each=ntechs))
Techs=c("Y-bar", "RF", "QRF", "Li-Martin(Tukey)", "Li-Martin(Huber)","Mean-Med.", "Med.-Med.", "Med.-Mean","LOWESS-6","LOWESS-U","LOWESS-UA", "LOWESS-RF", "LOWESS-RFA", "LOWESS-L", "LOWESS-LA", "Truth")
Technique <- c(rep(Techs, nprops))
dodge <- position_dodge(width=0.01)
################################################################################################
#Roy-Larocque 1
load("RL1_Results.Rdata")
df <- RL1m1df
RL1m1 <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line()+
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("a) s=0.20")+ theme(plot.title = element_text(hjust = 0.5))
RL1m1a <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("a) s=0.20")+ theme(plot.title = element_text(hjust = 0.5))
df <- RL1m2df
RL1m2 <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("b) s=0.40")+ theme(plot.title = element_text(hjust = 0.5))
RL1m2a <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("b) s=0.40")+ theme(plot.title = element_text(hjust = 0.5))
df <- RL1m3df
RL1m3 <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("c) s=0.60")+ theme(plot.title = element_text(hjust = 0.5))
RL1m3a <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("c) s=0.60")+ theme(plot.title = element_text(hjust = 0.5))
df <- RL1m4df
RL1m4 <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("d) s=0.80")+ theme(plot.title = element_text(hjust = 0.5))
RL1m4a <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("d) s=0.80")+ theme(plot.title = element_text(hjust = 0.5))
################################################################################################
#Roy-Larocque 2
load("RL2_Results.Rdata")
df <- RL2m1df
RL2m1 <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("a) s=0.15")+ theme(plot.title = element_text(hjust = 0.5))
RL2m1a <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("a) s=0.15")+ theme(plot.title = element_text(hjust = 0.5))
df <- RL2m2df
RL2m2 <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("b) s=0.30")+ theme(plot.title = element_text(hjust = 0.5))
RL2m2a <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("b) s=0.30")+ theme(plot.title = element_text(hjust = 0.5))
df <- RL2m3df
RL2m3 <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("c) s=0.45")+ theme(plot.title = element_text(hjust = 0.5))
RL2m3a <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("c) s=0.45")+ theme(plot.title = element_text(hjust = 0.5))
df <- RL2m4df
RL2m4 <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("d) s=0.60")+ theme(plot.title = element_text(hjust = 0.5))
RL2m4a <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("d) s=0.60")+ theme(plot.title = element_text(hjust = 0.5))
################################################################################################
#Li-Martin Results
load("LM_Results.Rdata")
#Eliminate RF from graphic since it's so big makes others hard to see
df <- LM1mdf %>% filter(Technique !="RF")
LM1p <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("a) Uncorrelated Predictors")+ theme(plot.title = element_text(hjust = 0.5))
LM1pa <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("b) Uncorrelated Predictors")+ theme(plot.title = element_text(hjust = 0.5))
df <- LM2mdf %>% filter(Technique !="RF")
LM2p <- ggplot(df, aes(x = p, y = MSPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMSPE, ymax = UpperMSPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("a) Correlated Predictors")+ theme(plot.title = element_text(hjust = 0.5))
LM2pa <- ggplot(df, aes(x = p, y = MAPE, color = Technique)) + geom_line() +
geom_errorbar(aes(ymin = LowerMAPE, ymax = UpperMAPE), position = dodge, width = 0.05) + theme(legend.position = "bottom")+ggtitle("b) Correlated Predictors")+ theme(plot.title = element_text(hjust = 0.5))
#########################################################################
#Function to create plots with one legend
library(gridExtra)
#Get legend at bottom of all plots
g_legend<-function(a.gplot){
tmp <- ggplot_gtable(ggplot_build(a.gplot))
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)}
###################################################################################
# Plots
#RL1
mylegend<-g_legend(RL1m1)
p <- grid.arrange(arrangeGrob(RL1m1 + theme(legend.position="none"),
RL1m2 + theme(legend.position="none"),
RL1m3 + theme(legend.position="none"),
RL1m4 + theme(legend.position="none"),
nrow=2),
mylegend, nrow=2,heights=c(10, 2))
ggsave(filename="RL1MSPE_Plots.eps", plot=p, device="eps", width= 6, height=6, units="in", dpi=300)
ggsave(filename="RL1MSPE_Plots.jpeg", plot=p, device="jpeg", width= 6, height=6, units="in", dpi=300)
#RL1
mylegend<-g_legend(RL1m1)
p <- grid.arrange(arrangeGrob(RL1m1a + theme(legend.position="none"),
RL1m2a + theme(legend.position="none"),
RL1m3a + theme(legend.position="none"),
RL1m4a + theme(legend.position="none"),
nrow=2),
mylegend, nrow=2,heights=c(10, 2))
ggsave(filename="RL1MAPE_Plots.eps", plot=p, device="eps", width= 6, height=6, units="in", dpi=300)
ggsave(filename="RL1MAPE_Plots.jpeg", plot=p, device="jpeg", width= 6, height=6, units="in", dpi=300)
#RL2
mylegend<-g_legend(RL1m1)
p <- grid.arrange(arrangeGrob(RL2m1 + theme(legend.position="none"),
RL2m2 + theme(legend.position="none"),
RL2m3 + theme(legend.position="none"),
RL2m4 + theme(legend.position="none"),
nrow=2),
mylegend, nrow=2,heights=c(10, 2))
ggsave(filename="RL2MSPE_Plots.eps", plot=p, device="eps", width= 6, height=6, units="in", dpi=300)
ggsave(filename="RL2MSPE_Plots.jpeg", plot=p, device="jpeg", width= 6, height=6, units="in", dpi=300)
#RL2
mylegend<-g_legend(RL2m1)
p <- grid.arrange(arrangeGrob(RL2m1a + theme(legend.position="none"),
RL2m2a + theme(legend.position="none"),
RL2m3a + theme(legend.position="none"),
RL2m4a + theme(legend.position="none"),
nrow=2),
mylegend, nrow=2,heights=c(10, 2))
ggsave(filename="RL2MAPE_Plots.eps", plot=p, device="eps", width= 6, height=6, units="in", dpi=300)
ggsave(filename="RL2MAPE_Plots.jpeg", plot=p, device="jpeg", width= 6, height=6, units="in", dpi=300)
#LM1-MSPE
mylegend<-g_legend(LM1p)
p <- grid.arrange(arrangeGrob(LM1p + theme(legend.position="none")+ylim(c(7.5,14))+ggtitle("a) Uncorrelated Predictors")+ theme(plot.title = element_text(hjust = 0.5)) ,
LM2p + theme(legend.position="none")+ylim(c(7.5,14))+ggtitle("b) Correlated Predictors")+ theme(plot.title = element_text(hjust = 0.5)) ,
nrow=1),
mylegend, nrow=2,heights=c(10, 2))
ggsave(filename="LMMSPE_Plots.eps", plot=p, device="eps", width= 6, height=3, units="in", dpi=300)
ggsave(filename="LMMSPE_Plots.jpeg", plot=p, device="jpeg", width= 6, height=3, units="in", dpi=300)
#LM1-MAPE
mylegend<-g_legend(LM1p)
p <- grid.arrange(arrangeGrob(LM1pa + theme(legend.position="none")+ylim(c(2.05,2.45))+ggtitle("a) Uncorrelated Predictors")+ theme(plot.title = element_text(hjust = 0.5)) ,
LM2pa + theme(legend.position="none")+ylim(c(1.8,2.15))+ggtitle("b) Correlated Predictors")+ theme(plot.title = element_text(hjust = 0.5)) ,
nrow=1),
mylegend, nrow=2,heights=c(10, 2))
ggsave(filename="LMMAPE_Plots.eps", plot=p, device="eps", width= 6, height=3, units="in", dpi=300)
ggsave(filename="LMMAPE_Plots.jpeg", plot=p, device="jpeg", width= 6, height=3, units="in", dpi=300)
############################################################################################
#Parameter Tuning Results
load("RL2.Rdata")
#Used to create ERR by p plots for each type of tuning
TuningResFunc <- function(p,m,r,Crit){
ERR <- rep(NA,3)
if(Crit=="MSPE"){
ERR[1] <- df[3,p,m,r][[1]][[6]][1,1] #all cases no weighting in CV
ERR[2] <- df[3,p,m,r][[1]][[6]][1,3] #all cases with weighted CV
ERR[3] <- df[3,p,m,r][[1]][[6]][3,1] #only non-outliers (ideal scenario)
} else{
ERR[1] <- df[3,p,m,r][[1]][[6]][1,2] #all cases no weighting in CV
ERR[2] <- df[3,p,m,r][[1]][[6]][1,4] #all cases with weighted CV
ERR[3] <- df[3,p,m,r][[1]][[6]][3,2] #only non-outliers (ideal scenario)
}
return(ERR)
}
#Used to plot avg. error against alpha
ERRbyAlpha <- function(p,m,r,Crit){
if(Crit=="MSPE"){
df1 <- df[3,p,m,r][[1]][[2]][,1]
} else{
df1 <- df[3,p,m,r][[1]][[2]][,2]
}
return(df1)
}
ntechs <- 3
nprops <- 6
p <- c(rep(c(0, 0.05, 0.10, 0.15, 0.20, 0.25),each=ntechs))
Techs=c("UCV", "WCV", "NC")
Technique <- c(rep(Techs, nprops))
df <- RL2
#s=1
MSPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=1, Crit="MSPE")})
MAPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=1, Crit="MAPE")})
MSPE <- c(apply(MSPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
MAPE <- c(apply(MAPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
df <- data.frame(p, Technique, MSPE, MAPE)
df$Technique <- rep(c("UCV", "WCV", "NC"),6)
RL2m1CVdf <- df
RL2m1CV <- ggplot(RL2m1CVdf, aes(x = p, y = MSPE, color = Technique)) + geom_line()+geom_point()+ theme(legend.position = "bottom")+ggtitle("a) m=0.15")+ theme(plot.title = element_text(hjust = 0.5))
RL2m1CVa <- ggplot(RL2m1CVdf, aes(x = p, y = MAPE, color = Technique)) + geom_line()+geom_point()+ theme(legend.position = "bottom")+ggtitle("a) m=0.15")+ theme(plot.title = element_text(hjust = 0.5))
#m=2
df <- RL2
MSPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=2, Crit="MSPE")})
MAPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=2, Crit="MAPE")})
MSPE <- c(apply(MSPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
MAPE <- c(apply(MAPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
df <- data.frame(p, Technique, MSPE, MAPE)
df$Technique <- rep(c("UCV", "WCV", "NC"),6)
RL2m2CVdf <- df
RL2m2CV <- ggplot(RL2m2CVdf, aes(x = p, y = MSPE, color = Technique)) + geom_line()+geom_point()+ theme(legend.position = "bottom")+ggtitle("a) m=0.3")+ theme(plot.title = element_text(hjust = 0.5))
RL2m2CVa <- ggplot(RL2m2CVdf, aes(x = p, y = MAPE, color = Technique)) + geom_line()+geom_point()+ theme(legend.position = "bottom")+ggtitle("a) m=0.3")+ theme(plot.title = element_text(hjust = 0.5))
#m=3
df <- RL2
MSPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=3, Crit="MSPE")})
MAPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=3, Crit="MAPE")})
MSPE <- c(apply(MSPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
MAPE <- c(apply(MAPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
df <- data.frame(p, Technique, MSPE, MAPE)
df$Technique <- rep(c("UCV", "WCV", "NC"),6)
RL2m3CVdf <- df
RL2m3CV <- ggplot(RL2m3CVdf, aes(x = p, y = MSPE, color = Technique)) + geom_line()+geom_point()+ theme(legend.position = "bottom")+ggtitle("a) m=0.45")+ theme(plot.title = element_text(hjust = 0.5))
RL2m3CVa <- ggplot(RL2m3CVdf, aes(x = p, y = MAPE, color = Technique)) + geom_line()+geom_point()+ theme(legend.position = "bottom")+ggtitle("a) m=0.45")+ theme(plot.title = element_text(hjust = 0.5))
#m=4
df <- RL2
MSPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=4, Crit="MSPE")})
MAPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=4, Crit="MAPE")})
MSPE <- c(apply(MSPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
MAPE <- c(apply(MAPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
df <- data.frame(p, Technique, MSPE, MAPE)
df$Technique <- rep(c("UCV", "WCV", "NC"),6)
RL2m4CVdf <- df
RL2m4CV <- ggplot(RL2m4CVdf, aes(x = p, y = MSPE, color = Technique)) + geom_line()+geom_point() + theme(legend.position = "bottom")+ggtitle("a) m=0.6")+ theme(plot.title = element_text(hjust = 0.5))
RL2m4CVa <- ggplot(RL2m4CVdf, aes(x = p, y = MAPE, color = Technique)) + geom_line()+geom_point() + theme(legend.position = "bottom")+ggtitle("a) m=0.6")+ theme(plot.title = element_text(hjust = 0.5))
load("LM1.Rdata")
df <- LM1
MSPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=1, Crit="MSPE")})
MAPEarray <- sapply(1:6, simplify="array", function(prop){sapply(1:500, FUN=TuningResFunc, p=prop, m=1, Crit="MAPE")})
MSPE <- c(apply(MSPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
MAPE <- c(apply(MAPEarray,c(1,3),mean)) #Row1=unwt CV, row2= wt. CV, row3=nonoutliers, cols index p
df <- data.frame(p, Technique, MSPE, MAPE)
df$Technique <- rep(c("UCV", "WCV", "NC"),6)
LMCVdf <- df
LMCV <- ggplot(LMCVdf, aes(x = p, y = MSPE, color = Technique)) + geom_line()+geom_point()+ theme(plot.title = element_text(hjust = 0.5)) + theme(legend.position = "bottom")
LMCVa <- ggplot(LMCVdf, aes(x = p, y = MAPE, color = Technique)) + geom_line()+geom_point()+ theme(plot.title = element_text(hjust = 0.5)) + theme(legend.position = "bottom")
ggsave(filename="LMCV_MSPE.eps", plot=LMCV, device="eps", width= 3, height=3, units="in", dpi=300)
ggsave(filename="LMCV_MSPE.jpeg", plot=LMCV, device="jpeg", width= 3, height=3, units="in", dpi=300)
ggsave(filename="LMCV_MAPE.eps", plot=LMCVa, device="eps", width= 3, height=3, units="in", dpi=300)
ggsave(filename="LMCV_MAPE.jpeg", plot=LMCVa, device="jpeg", width= 3, height=3, units="in", dpi=300)
##########################################################################################################
#Plot ERR by alpha
load("LM1.Rdata")
alpha <- seq(from=1, to=30, by=0.25)
df <- LM1
#for m=1, p=6
AlphERR <- sapply(1:500, FUN=ERRbyAlpha, p=6, m=1, Crit="MSPE")
MSPE <- apply(AlphERR, 1, mean)[3:119]
df1 <- data.frame(MSPE, alpha)
pl <- ggplot(df1, aes(x = alpha, y = MSPE)) + geom_line()+geom_line() +ylim(c(10,15)) +xlab(expression(alpha))
pl
ggsave(filename="ERR_Curve.eps", plot=pl, device="eps", width= 3, height=3, units="in", dpi=300)
ggsave(filename="ERR_Curve.jpeg", plot=pl, device="jpeg", width= 6, height=6, units="in", dpi=300)
############################################################################################################
# Boxplots of the tuned alpha values by CV technique.
# Extract the tuned alpha pair (unweighted, weighted) for one replicate from
# the nested results object.
#
# Args:
#   df:      results object indexed as [statistic, p, m, r]; element [[3]] of
#            the stored list is a 1-row matrix of alphas whose columns are
#            (unwt MSPE, unwt MAPE, wt MSPE, wt MAPE).
#   p, m, r: indices selecting the setting and replicate.
#   Crit:    "MSPE" selects columns 1 and 3; anything else selects 2 and 4.
# Returns: numeric vector c(unweighted alpha, weighted alpha).
GetAlpha <- function(df, p, m, r, Crit){
  alpha_row <- df[3, p, m, r][[1]][[3]]
  picked <- if (Crit == "MSPE") c(1, 3) else c(2, 4)
  c(alpha_row[1, picked[1]], alpha_row[1, picked[2]])
}
# Gather the (unweighted, weighted) alpha pair for each of the 500 replicates
# at p=6, m=1; values alternate UCV/WCV, matching the Technique labels below.
Alphas <- c(sapply(1:500, FUN=GetAlpha, df=LM1, p=6, m=1, Crit="MSPE"))
Technique <- rep(c("UCV", "WCV"),500)
df <- data.frame(Alphas, Technique)
# Boxplot of selected alpha by technique.
pl <- ggplot(df, aes(x = Technique, y = Alphas)) + geom_boxplot(outlier.shape=16, outlier.size=1)+ylim(c(0,30))+ylab(expression(alpha))
pl
ggsave(filename="Alpha_BP.eps", plot=pl, device="eps", width= 3, height=3, units="in", dpi=300)
ggsave(filename="Alpha_BP.jpeg", plot=pl, device="jpeg", width= 6, height=6, units="in", dpi=300)
|
52ac179c5a82f88242dcad5a0411467d39d72aae
|
1fc75d5c1d2ae986fd44b2b4c1f3981227a388b4
|
/R/shortcut.R
|
8cdb3666c2a4dfa1f0dd6f24638329f8220d15b4
|
[] |
no_license
|
bcipolli/rprojroot
|
1853390dce73b8b4035420542f2f69a587639605
|
71bd742a4e4ba4e246e4f580697e5a1702117ccc
|
refs/heads/master
| 2023-01-06T22:37:05.193625
| 2017-06-13T08:42:34
| 2017-06-13T08:42:34
| 107,057,897
| 0
| 1
| null | 2017-10-15T23:50:39
| 2017-10-15T23:50:39
| null |
UTF-8
|
R
| false
| false
| 357
|
r
|
shortcut.R
|
# Convenience aliases: each exported find_*_root_file() is simply the
# find_file() method of the corresponding root-criterion object (defined
# elsewhere in this package), so callers can resolve paths relative to a
# project root without constructing the criterion themselves.
#' @rdname find_root_file
#' @export
find_rstudio_root_file <- is_rstudio_project$find_file
#' @rdname find_root_file
#' @export
find_package_root_file <- is_r_package$find_file
#' @rdname find_root_file
#' @export
find_remake_root_file <- is_remake_project$find_file
#' @rdname find_root_file
#' @export
find_testthat_root_file <- is_testthat$find_file
|
f88dce5cdce05a976e5084683c810b406ccb6661
|
dbd98b2572d2043ef924cfb4d3f3d7cfecf91773
|
/R/remove_constant_columns.R
|
786fd9f64e669b2f9709bc66568b5a4abc691aeb
|
[] |
no_license
|
btaschler/scl_replicate
|
bedb52b778df3481b0ae428e94340723e0547a9a
|
b4ddeddf715d26254e75f4798e4a375539cee89b
|
refs/heads/master
| 2023-04-06T16:03:21.909567
| 2021-03-31T20:40:47
| 2021-03-31T20:40:47
| 232,549,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,499
|
r
|
remove_constant_columns.R
|
#' Remove constant columns
#'
#' Drops columns whose variance is zero in either the observational or the
#' interventional data, or whose fraction of zeros in the interventional data
#' is at least 0.8, then prunes interventions whose target gene was removed.
#'
#' @param obs_data Observational data
#' @param int_data Interventional data (rows assumed to correspond 1:1 to
#'   \code{int_names})
#' @param int_names Names or indices of the genes targeted by the interventions
#' @param gene_names All gene names or indices
#' @return A list with the filtered observational/interventional data, the
#'   surviving intervention indices and names, the surviving gene names, and
#'   the indices of the kept columns.
#' @export
#'
remove_constant_columns = function(obs_data, int_data, int_names, gene_names = NULL) {
  if (is.null(gene_names)) {
    if (is.character(int_names)) stop("Please provide all gene names")
    # seq_len() is safe even for zero-column input (1:ncol would give c(1, 0)).
    if (is.numeric(int_names)) gene_names = seq_len(ncol(int_data))
  }
  # Keep only columns that vary in both data sets and are mostly non-zero.
  obs_var = apply(obs_data, 2, var)
  int_var = apply(int_data, 2, var)
  frac_zeros = colMeans(int_data == 0)
  keep_ids = which(obs_var != 0 & int_var != 0 & frac_zeros < 0.8)
  obs_data = obs_data[ , keep_ids, drop=FALSE]
  int_data = int_data[ , keep_ids, drop=FALSE]
  gene_names = gene_names[keep_ids]
  # Mark interventions whose target gene was filtered out (vectorized
  # replacement of the original element-wise loop; identical result).
  int_names[!(int_names %in% gene_names)] = NA
  int_to_keep = !is.na(int_names)
  int_names = int_names[int_to_keep]
  # Each interventional row is tied to its target; drop rows of removed targets.
  int_data = int_data[int_to_keep, ,drop=FALSE]
  int_indices = get_int_indices(int_names, gene_names)
  return( list(obs_data = obs_data,
               int_data = int_data,
               int_indices = int_indices,
               int_names = int_names,
               gene_names = gene_names,
               keep_ids = keep_ids) )
}
|
a4540aed7e3ec8636b08c6fafe881f574a61199c
|
a78ab6d5a719e4eb59b03bcad2e3474d9e40b1b3
|
/analysis/R/code.R
|
3d0f9b1aaa43694356c6e385ff5ffbc764d0bc2b
|
[] |
no_license
|
KaJaeHyeob/KOSTAT
|
372f25aa8f2d792f6e3bb001f9df1ebc654637ed
|
b81fbe0c417f047121821c774ba08969a8c3bb73
|
refs/heads/master
| 2023-06-12T16:27:14.520448
| 2021-07-06T06:32:08
| 2021-07-06T06:32:08
| 270,701,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,319
|
r
|
code.R
|
## Topic: factors behind regional (si/do) differences in crime rates,
##        focused on comparing 2010 and 2017.
## Method: Principal Component Analysis (PCA)
# delete all objs
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged --
# they clobber the caller's session; consider project-relative paths instead.
rm(list=ls())
# set working directory
getwd()
setwd("~/Downloads/kostat_intern/analysis/refine_data")
# Load the raw data: one Excel sheet per indicator.
# install.packages("xlsx")
library(xlsx)
AdmArea = read.xlsx("AdmArea.xlsx", sheetIndex=1)
Aging = read.xlsx("Aging.xlsx", sheetIndex=1)
Arrest = read.xlsx("Arrest.xlsx", sheetIndex=1)
Car_num = read.xlsx("Car_num.xlsx", sheetIndex=1)
CrimePerThousand = read.xlsx("CrimePerThousand.xlsx", sheetIndex = 1)
EcoAct = read.xlsx("EcoAct.xlsx", sheetIndex = 1)
EcoDev = read.xlsx("EcoDev.xlsx", sheetIndex=1)
FM = read.xlsx("FM.xlsx", sheetIndex=1)
ForeignerPerThousand = read.xlsx("ForeignerPerThousand.xlsx", sheetIndex=1)
Grdp = read.xlsx("GRDP.xlsx", sheetIndex=1)
Houseprice = read.xlsx("Houseprice.xlsx", sheetIndex = 1)
HyDrink = read.xlsx("HyDrink.xlsx", sheetIndex = 1)
Hystress = read.xlsx("Hystress.xlsx", sheetIndex=1)
Moving_rate = read.xlsx("Moving_rate.xlsx", sheetIndex = 1)
Park = read.xlsx("Park.xlsx", sheetIndex=1)
PolicePop = read.xlsx("PolicePop.xlsx", sheetIndex = 1)
PopDensity = read.xlsx("PopDensity.xlsx", sheetIndex=1)
PopGrowth = read.xlsx("PopGrowth.xlsx", sheetIndex=1)
Unemply_rate = read.xlsx("Unemply_rate.xlsx", sheetIndex=1)
OldmanWelfare = read.xlsx("OldmanWelfare.xlsx", sheetIndex=1)
Suicide = read.xlsx("Suicide.xlsx", sheetIndex=1)
Fat = read.xlsx("Fat.xlsx", sheetIndex=1)
Smoke = read.xlsx("Smoke.xlsx", sheetIndex=1)
# 데이터 정리 (2010~2017, 세종시 제외, NA 제거, 타입 맞추기)
str(AdmArea)
AdmArea = AdmArea[-c(1,18,19), -c(10:59)]
str(Aging)
Aging = Aging[-c(1,9),]
Aging$X2010 = as.numeric(Aging$X2010)
Aging$X2011 = as.numeric(Aging$X2011)
str(Arrest)
Arrest = Arrest[-1,]
str(Car_num)
Car_num = Car_num[-c(1,9,19,20), -c(10:58)]
Car_num$X2010 = as.numeric(Car_num$X2010)
Car_num$X2011 = as.numeric(Car_num$X2011)
str(CrimePerThousand)
CrimePerThousand = CrimePerThousand[-c(1,9),]
str(EcoAct)
EcoAct = EcoAct[-c(1,9,19,20), -c(10:35)]
EcoAct$X2010 = as.numeric(EcoAct$X2010)
EcoAct$X2011 = as.numeric(EcoAct$X2011)
EcoAct$X2012 = as.numeric(EcoAct$X2012)
EcoAct$X2013 = as.numeric(EcoAct$X2013)
EcoAct$X2014 = as.numeric(EcoAct$X2014)
EcoAct$X2015 = as.numeric(EcoAct$X2015)
EcoAct$X2016 = as.numeric(EcoAct$X2016)
str(EcoDev)
EcoDev = EcoDev[-c(1,18,19,20), -c(10:35)]
str(FM)
FM = FM[-c(1,9,19,20),]
FM$X2010 = as.numeric(FM$X2010)
FM$X2011 = as.numeric(FM$X2011)
str(ForeignerPerThousand)
ForeignerPerThousand = ForeignerPerThousand[-c(1,9),]
ForeignerPerThousand$X2010 = as.numeric(ForeignerPerThousand$X2010)
ForeignerPerThousand$X2011 = as.numeric(ForeignerPerThousand$X2011)
str(Grdp)
Grdp = Grdp[-c(1,9,19,20), -c(10:35)]
str(Houseprice)
Houseprice = Houseprice[-c(1,9,19,20), -c(10:58)]
Houseprice$X2010 = as.numeric(Houseprice$X2010)
Houseprice$X2011 = as.numeric(Houseprice$X2011)
Houseprice$X2012 = as.numeric(Houseprice$X2012)
str(HyDrink)
HyDrink = HyDrink[-c(1,9,19,20), -c(10:35)]
HyDrink$X2010 = as.numeric(HyDrink$X2010)
HyDrink$X2011 = as.numeric(HyDrink$X2011)
str(Hystress)
Hystress = Hystress[-c(1,9,19,20), -c(10:35)]
Hystress$X2010 = as.numeric(Hystress$X2010)
Hystress$X2011 = as.numeric(Hystress$X2011)
str(Moving_rate)
Moving_rate = Moving_rate[-c(1,9),]
str(Park)
Park = Park[-c(1,9,19,20), -c(10:58)]
Park$X2010 = as.numeric(Park$X2010)
Park$X2011 = as.numeric(Park$X2011)
str(PolicePop)
PolicePop = PolicePop[-c(1,9),]
PolicePop$X2010 = as.numeric(PolicePop$X2010)
PolicePop$X2011 = as.numeric(PolicePop$X2011)
PolicePop$X2012 = as.numeric(PolicePop$X2012)
PolicePop$X2013 = as.numeric(PolicePop$X2013)
PolicePop$X2014 = as.numeric(PolicePop$X2014)
PolicePop$X2015 = as.numeric(PolicePop$X2015)
PolicePop$X2016 = as.numeric(PolicePop$X2016)
PolicePop$X2017 = as.numeric(PolicePop$X2017)
str(PopDensity)
PopDensity = PopDensity[-c(1,9),]
str(PopGrowth)
PopGrowth = PopGrowth[-c(1,9,19), -10]
PopGrowth$X2010 = as.numeric(PopGrowth$X2010)
PopGrowth$X2011 = as.numeric(PopGrowth$X2011)
PopGrowth$X2012 = as.numeric(PopGrowth$X2012)
str(Unemply_rate)
Unemply_rate = Unemply_rate[-c(1,18,19,20), -c(10:58)]
str(OldmanWelfare)
OldmanWelfare = OldmanWelfare[-c(1,18,19,20), -c(10:58)]
str(Suicide)
Suicide = Suicide[-c(1,18,19,20), -c(10:58)]
str(Fat)
Fat = Fat[-c(1,18,19,20), -c(10:58)]
Fat$X2010 = as.numeric(Fat$X2010)
Fat$X2011 = as.numeric(Fat$X2011)
Fat$X2012 = as.numeric(Fat$X2012)
Fat$X2013 = as.numeric(Fat$X2013)
Fat$X2014 = as.numeric(Fat$X2014)
Fat$X2015 = as.numeric(Fat$X2015)
Fat$X2016 = as.numeric(Fat$X2016)
Fat$X2017 = as.numeric(Fat$X2017)
str(Smoke)
Smoke = Smoke[-c(1,18,19,20),-c(10:58)]
Smoke$X2010 = as.numeric(Smoke$X2010)
Smoke$X2011 = as.numeric(Smoke$X2011)
Smoke$X2012 = as.numeric(Smoke$X2012)
Smoke$X2013 = as.numeric(Smoke$X2013)
Smoke$X2014 = as.numeric(Smoke$X2014)
Smoke$X2015 = as.numeric(Smoke$X2015)
Smoke$X2016 = as.numeric(Smoke$X2016)
Smoke$X2017 = as.numeric(Smoke$X2017)
# --- Merge the indicators into one data frame per year (data_2010..data_2017) ---
# The original script repeated the same 30-line stanza eight times; this loop
# builds identical objects: a Region factor column followed by one column per
# indicator, taken from that indicator's X<year> column, with region row names.
region_names <- c("Seoul", "Busan", "Daegu", "Incheon",
                  "Gwangju", "Daejeon", "Ulsan", "Gyeonggi",
                  "Gangwon", "Chungbuk", "Chungnam", "Jenbuk",
                  "Jennam", "Gyengbuk", "Gyengnam", "Jeju")
# Column order below matches the original assignment order exactly.
source_tables <- list(
  AdmArea = AdmArea, Aging = Aging, Arrest = Arrest, Car_num = Car_num,
  CrimePerThousand = CrimePerThousand, EcoAct = EcoAct, EcoDev = EcoDev,
  FM = FM, ForeignerPerThousand = ForeignerPerThousand, Grdp = Grdp,
  Houseprice = Houseprice, HyDrink = HyDrink, Hystress = Hystress,
  Moving_rate = Moving_rate, Park = Park, PolicePop = PolicePop,
  PopDensity = PopDensity, PopGrowth = PopGrowth, Unemply_rate = Unemply_rate,
  OldmanWelfare = OldmanWelfare, Suicide = Suicide, Fat = Fat, Smoke = Smoke
)
for (yr in 2010:2017) {
  year_col <- paste0("X", yr)
  yearly <- data.frame(Region = factor(region_names))
  rownames(yearly) <- region_names
  for (var_name in names(source_tables)) {
    yearly[[var_name]] <- source_tables[[var_name]][[year_col]]
  }
  # assign() keeps the original interface: later code uses data_2010 etc.
  assign(paste0("data_", yr), yearly)
  str(yearly)
}
# ANOVA to verify the premise of the study:
# H0: crime rates are equal across regions
# H1: crime rates differ across regions
data_all = rbind(data_2010,data_2011,data_2012,data_2013,
data_2014,data_2015,data_2016,data_2017)
View(data_all)
colSums(is.na(data_all))
data_all = na.omit(data_all)
aov_all = aov(CrimePerThousand~Region, data=data_all)
summary(aov_all)
# Recorded output of the ANOVA above (Region effect highly significant):
# Df Sum Sq Mean Sq F value Pr(>F)
# Region 15 2542.5 169.50 25.93 <2e-16 ***
# Residuals 112 732.1 6.54
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Address multicollinearity among predictors:
# first inspect pairwise correlations (column 1 = Region factor is dropped).
X = data_all[,-1]
cor_all = cor(X)
cor_all
# install.packages("PerformanceAnalytics")
library(PerformanceAnalytics)
chart.Correlation(X, histogram=T, col="grey10", pch=1)
# Correlation heatmap for easier reading.
# install.packages("GGally")
library(GGally)
ggcorr(X, name="corr", label=T)
# Inspect variance inflation factors (VIF).
# install.packages("fmsb")
library(fmsb)
# Iteratively remove the variable with the highest variance inflation factor
# (VIF) until all remaining VIFs fall below `thresh`.
#
# Args:
#   in_frame: data.frame (or coercible) of candidate predictors.
#   thresh:   VIF threshold at/above which variables are removed (default 10).
#   trace:    if TRUE, print the VIF table at each step.
#   ...:      further arguments passed on to lm().
# Returns: character vector of variable names that survive the pruning.
vif_func = function(in_frame, thresh=10, trace=F, ...){
  require(fmsb)
  # inherits() is robust when class(in_frame) has length > 1
  # (the original `class(x) != 'data.frame'` comparison is not).
  if(!inherits(in_frame, 'data.frame')){
    in_frame = data.frame(in_frame)
  }
  # One VIF per column: regress the column on all the others.
  compute_vifs = function(dat){
    vapply(names(dat), function(val){
      VIF(lm(formula(paste(val, ' ~ .')), data=dat, ...))
    }, numeric(1))
  }
  # Print a named numeric vector of VIFs as a one-column matrix.
  # (The original trace path called prmatrix() on a bare list and used
  # nrow(<list>), which is NULL and errors in rep(); fixed here.)
  print_vifs = function(vifs){
    prmatrix(matrix(vifs, ncol=1, dimnames=list(names(vifs), 'vif')), quote=F)
    cat('\n')
  }
  vif_init = compute_vifs(in_frame)
  vif_max = max(vif_init)
  if(vif_max < thresh){
    if(trace == T){
      print_vifs(vif_init)
      cat(paste('All variables have VIF < ',thresh,', max VIF ',round(vif_max,2),sep=''), '\n\n')
    }
    return(names(in_frame))
  }
  in_dat = in_frame
  while(vif_max >= thresh){
    vif_vals = compute_vifs(in_dat)
    worst = names(vif_vals)[which.max(vif_vals)]
    vif_max = max(vif_vals)
    if(vif_max < thresh){
      break
    }
    if(trace == T){
      print_vifs(vif_vals)
      cat('removed: ', worst, vif_max, '\n\n')
      flush.console()
    }
    # drop=FALSE keeps a data.frame even when only one column remains.
    in_dat = in_dat[, !(names(in_dat) %in% worst), drop=FALSE]
  }
  names(in_dat)
}
# Run the stepwise VIF filter (threshold 10, tracing on) and list survivors.
X_vif = vif_func(X,thresh=10,trace=T)
X_vif
|
004197ce6f8ff0da1cbe36e39a7a9bc4de1c61fe
|
28acfcc2beaffa7a6ce72d605ec9995ab1e969ce
|
/tests/testthat/test-vctrs-math.R
|
b7f3d0fffa828f1c2bd11f372ad7a19028d845ee
|
[
"MIT"
] |
permissive
|
minghao2016/bignum
|
f25fa2c79fefdff5482e66a8ed401c1cb83f32ea
|
58cef3a361852b58a286f5f758c9124ccc647eb9
|
refs/heads/master
| 2023-05-21T17:59:08.626420
| 2021-06-13T16:15:48
| 2021-06-13T16:15:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,863
|
r
|
test-vctrs-math.R
|
test_that("sum() works", {
  # sum() over bignum vectors must match base R, including NA propagation.
  vals <- c(2, 3, NA)
  expected <- sum(vals, na.rm = TRUE)
  expect_equal(sum(biginteger(vals), na.rm = TRUE), biginteger(expected))
  expect_equal(sum(biginteger(vals), na.rm = FALSE), NA_biginteger_)
  expect_equal(sum(bigfloat(vals), na.rm = TRUE), bigfloat(expected))
  expect_equal(sum(bigfloat(vals), na.rm = FALSE), NA_bigfloat_)
})
test_that("prod() works", {
  # prod() over bignum vectors must match base R, including NA propagation.
  vals <- c(2, 3, NA)
  expected <- prod(vals, na.rm = TRUE)
  expect_equal(prod(biginteger(vals), na.rm = TRUE), biginteger(expected))
  expect_equal(prod(biginteger(vals), na.rm = FALSE), NA_biginteger_)
  expect_equal(prod(bigfloat(vals), na.rm = TRUE), bigfloat(expected))
  expect_equal(prod(bigfloat(vals), na.rm = FALSE), NA_bigfloat_)
})
test_that("mean() works", {
  x <- c(2, 3, NA)
  ans <- mean(x, na.rm = TRUE)
  # mean() of a biginteger promotes to bigfloat; NA propagates unless removed.
  expect_equal(mean(biginteger(x), na.rm = TRUE), bigfloat(ans))
  expect_equal(mean(biginteger(x), na.rm = FALSE), NA_bigfloat_)
  expect_equal(mean(biginteger(x[!is.na(x)]), na.rm = FALSE), bigfloat(ans))
  expect_equal(mean(bigfloat(x), na.rm = TRUE), bigfloat(ans))
  expect_equal(mean(bigfloat(x), na.rm = FALSE), NA_bigfloat_)
  expect_equal(mean(bigfloat(x[!is.na(x)]), na.rm = FALSE), bigfloat(ans))
})
test_that("ceiling() works", {
  x <- c(2.5, -2.5)
  ans <- ceiling(x)
  # biginteger input is pre-rounded (biginteger() of a fraction would warn).
  expect_equal(ceiling(biginteger(ceiling(x))), biginteger(ans))
  expect_equal(ceiling(bigfloat(x)), bigfloat(ans))
})
test_that("floor() works", {
  x <- c(2.5, -2.5)
  ans <- floor(x)
  expect_equal(floor(biginteger(floor(x))), biginteger(ans))
  expect_equal(floor(bigfloat(x)), bigfloat(ans))
})
test_that("trunc() works", {
  x <- c(2.5, -2.5)
  ans <- trunc(x)
  expect_equal(trunc(biginteger(trunc(x))), biginteger(ans))
  expect_equal(trunc(bigfloat(x)), bigfloat(ans))
})
test_that("special value math works", {
  x <- c(1, NA, NaN, Inf, -Inf)
  # biginteger coercion of non-finite values warns (like as.integer), hence
  # the suppressWarnings(); the predicate results must still agree.
  expect_equal(is.nan(suppressWarnings(biginteger(x))), is.nan(suppressWarnings(as.integer(x))))
  expect_equal(is.nan(bigfloat(x)), is.nan(x))
  expect_equal(is.finite(suppressWarnings(biginteger(x))), is.finite(suppressWarnings(as.integer(x))))
  expect_equal(is.finite(bigfloat(x)), is.finite(x))
  expect_equal(is.infinite(suppressWarnings(biginteger(x))), is.infinite(suppressWarnings(as.integer(x))))
  expect_equal(is.infinite(bigfloat(x)), is.infinite(x))
  # A value far beyond double range stays finite as a bigfloat.
  x <- bigfloat("1e1000")
  expect_true(is.finite(x))
  expect_false(is.infinite(x))
})
test_that("math returning same type works", {
  # Type-preserving math functions must agree with base R on both bignum types.
  verify <- function(input, fun, ...) {
    reference <- suppressWarnings(fun(input, ...))
    expect_equal(as.integer(fun(biginteger(input), ...)), reference)
    expect_equal(as.double(fun(bigfloat(input), ...)), reference)
  }
  signed <- c(-2, 2)
  for (f in list(abs, sign)) {
    verify(signed, f)
  }
  running <- c(2, 3, NA, 1)
  for (f in list(cumsum, cumprod, cummax, cummin)) {
    verify(running, f)
  }
})
test_that("math returning float works", {
  # Float-returning math functions must agree with base R on both bignum
  # types; allow_lossy_cast() silences the expected precision-loss condition.
  verify <- function(input, fun, ...) {
    reference <- suppressWarnings(fun(input, ...))
    expect_equal(allow_lossy_cast(as.double(fun(biginteger(input), ...))), reference)
    expect_equal(allow_lossy_cast(as.double(fun(bigfloat(input), ...))), reference)
  }
  vals <- c(2, 3, NA, -1)
  for (f in list(sqrt, exp, expm1)) {
    verify(vals, f)
  }
  # log() with positional and named base argument.
  verify(vals, log)
  verify(vals, log, 2)
  verify(vals, log, base = 2)
  for (f in list(log10, log2, log1p,
                 cos, sin, tan,
                 cosh, sinh, tanh,
                 cospi, sinpi, tanpi)) {
    verify(vals, f)
  }
  # acos/asin need inputs inside [-1, 1].
  verify(c(-1, 0, 1, NA), acos)
  verify(c(-1, 0, 1, NA), asin)
  for (f in list(atan, acosh, asinh, atanh,
                 gamma, lgamma, digamma)) {
    verify(vals, f)
  }
  verify(c(1, NA), trigamma)
})
|
9ad81c50cad72af32a2721a8eb53ff644a23e762
|
73381b0e7466a0df3a9b7689288084cf5dadb6c5
|
/R/ssgsrt.gene_set_rank_test.R
|
aa5043a09ecaed45f6bac5ef14d1cf055f902d70
|
[] |
no_license
|
dolchan/ssgsrt
|
78ce6fdc6aea0d3f9dd95d0a65c41bcc52da0969
|
8ebf2aa4be0f1b5e3072e51cc421d021f0783bf0
|
refs/heads/master
| 2016-09-13T09:07:51.586243
| 2016-04-20T21:35:00
| 2016-04-20T21:35:00
| 56,541,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,677
|
r
|
ssgsrt.gene_set_rank_test.R
|
#' Perform a rank-based single sample gene set analysis.
#'
#' This function performs a rank-based single sample gene set analysis for
#' a set of samples (\code{data}) and a list of gene sets (\code{geneset_list}).
#'
#' @param data Either a set of samples in \code{data.frame} format
#'   or a sample in a named array format.
#' @param geneset_list a named list of gene sets, each of list element is named
#'   and a list of genes.
#' @param row_names Optional gene identifiers used when \code{data} is a single
#'   sample; for data.frames the row names of \code{data} are used instead.
#' @param alternative see 'a_sample_gene_set_rank_test' for detail.
#' @param test.method \code{c('ks.test', 'wilcox.test')}. Defaults to 'ks'.
#' @return A list with one gene set analysis result per sample (named by the
#'   sample's column name when \code{data} is a data.frame).
#' @export
#' @examples
#' gene_set_rank_test(data, a_geneset_list)
gene_set_rank_test <- function(data, geneset_list, row_names = NULL, alternative = "two.sided", test.method = "ks") {
  if (is.data.frame(data)) {
    n_samples <- ncol(data)
    # BUG FIX: was vector("list", length(n_samples)) -- n_samples is a scalar,
    # so that allocated a length-1 list which then silently grew per iteration.
    gsas_list <- vector("list", n_samples)
    row_names_x <- row.names(data)
    for (ii in seq_len(n_samples)) {
      x <- data[, ii]
      a_gsa <- a_sample_gene_set_rank_test(x, geneset_list,
                                           row_names = row_names_x,
                                           alternative = alternative,
                                           test.method = test.method)
      gsas_list[[ii]] <- a_gsa
      # Progress report, one line per processed sample.
      print(sprintf("%s (%d / %d) processed", colnames(data)[ii], ii, n_samples))
    }
    names(gsas_list) <- colnames(data)
    gsas_list
  } else {
    # Single-sample input: wrap the one result in a list for a uniform return.
    list(a_sample_gene_set_rank_test(data, geneset_list,
                                     row_names = row_names,
                                     alternative = alternative,
                                     test.method = test.method))
  }
}
|
fefe6df05ae6b7fb30688f5771a3c78727ae7da9
|
198de39536eb0f81bbed80aab7781d662cffd859
|
/Algoritmos/FX-C.R
|
264c48e657129f86ccb5a8de8293cb2c6fc1ebbe
|
[] |
no_license
|
avidaurref/stock-prices
|
539d11911501263abdbc10635245ae02d4ef1ada
|
306d8ba8d987714a2e0c70270a389c3bc7285b4e
|
refs/heads/master
| 2020-04-16T11:37:00.189356
| 2019-01-13T19:14:41
| 2019-01-13T19:14:41
| 165,543,981
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 97
|
r
|
FX-C.R
|
# Build forexC from forexB by keeping the columns whose names appear in
# rmColumns, then drop the temporaries.
# NOTE(review): the name "rmColumns" suggests columns to REMOVE, in which
# case this filter should be negated: !(names(forexC00) %in% rmColumns).
# Confirm intent against how rmColumns is constructed upstream.
forexC00 <- forexB
forexC <- forexC00[,(names(forexC00) %in% rmColumns)]
rm(forexC00,rmColumns)
|
d935683c5377839f79740989d8280dfbcbd04a8a
|
25c5d243ffac4b4f4f9efcd6a28cb41d51b23c90
|
/src/test/scripts/functions/aggregate/GroupedAggregate.R
|
324a94596d975a023634764108b392fc9e5dbd5a
|
[
"Apache-2.0"
] |
permissive
|
apache/systemds
|
5351e8dd9aa842b693e8c148cf3be151697f07a7
|
73555e932a516063c860f5d05c84e6523cc7619b
|
refs/heads/main
| 2023-08-31T03:46:03.010474
| 2023-08-30T18:25:59
| 2023-08-30T18:34:41
| 45,896,813
| 194
| 167
|
Apache-2.0
| 2023-09-13T08:43:37
| 2015-11-10T08:00:06
|
Java
|
UTF-8
|
R
| false
| false
| 1,920
|
r
|
GroupedAggregate.R
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
library("Matrix")
library("moments")

# Read the value matrix A and the grouping vector B from MatrixMarket files.
A <- as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
B <- as.matrix(readMM(paste(args[1], "B.mtx", sep="")));
fn = as.integer(args[2]);

# Aggregate the values of A per group id in B with the given function,
# keeping only the aggregated column; extra arguments are forwarded to FUN.
# (Replaces eight copy-pasted aggregate() calls.)
groupedAgg = function(FUN, ...)
{
  aggregate(as.vector(A), by=list(as.vector(B)), FUN=FUN, ...)[,2]
}

# fn selects the aggregation: 0=sum, 1=count, 2=mean, 3=variance,
# 4=3rd central moment, 5=4th central moment, 6=min, 7=max.
# As in the original, an fn outside 0..7 leaves C undefined and writeMM fails.
if( fn==0 )
{
  C = groupedAgg(sum)
}
if( fn==1 )
{
  C = groupedAgg(length)
}
if( fn==2 )
{
  C = groupedAgg(mean)
}
if( fn==3 )
{
  C = groupedAgg(var)
}
if( fn==4 )
{
  C = groupedAgg(moment, order=3, central=TRUE)
}
if( fn==5 )
{
  C = groupedAgg(moment, order=4, central=TRUE)
}
if ( fn==6 )
{
  C = groupedAgg(min)
}
if ( fn==7 )
{
  C = groupedAgg(max)
}
writeMM(as(C, "CsparseMatrix"), paste(args[3], "C", sep=""));
|
b7e3e767eff894c83c301c0408792bba5db754ab
|
b75b290b2dd161e4c850858ecf7abee486bdede8
|
/tests/testthat/test_student_npr_history.R
|
34d594375421939f3cf66edea84e9de4e52864e3
|
[] |
no_license
|
rabare/mapvizieR
|
7d7bffb8691f6045e678d822f9e461e748038cd3
|
1a344ec1376ee41e85dcda0407f8bc63cfb95a82
|
refs/heads/master
| 2020-12-25T16:14:33.669385
| 2015-07-14T21:24:27
| 2015-07-14T21:24:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,172
|
r
|
test_student_npr_history.R
|
context("student_npr_history_plot tests")

# Make sure that constants used below (e.g. mapviz, cdf) exist.
testing_constants()

# Unique studentids for the 8th grade at Mt. Bachelor Middle School,
# as of the Spring 2013-2014 term; reused by the tests below.
studentids <- mapviz$roster %>%
  dplyr::filter(
    grade == 8,
    schoolname == "Mt. Bachelor Middle School",
    termname == "Spring 2013-2014"
  ) %>%
  dplyr::select(studentid) %>%
  unique()
test_that("student_npr_history_plot errors when handed an improper mapviz object", {
  # A bare cdf is not a mapvizieR object, so the input check should fail.
  expected_msg <- "The object you passed is not a conforming mapvizieR object"
  expect_error(
    student_npr_history_plot(cdf, studentids$studentid, "Reading"),
    expected_msg
  )
})
test_that("student_npr_history_plot produces proper plot with a grade level of kids", {
  # Build the plot for the first 40 students and inspect its layer data.
  sample_ids <- studentids$studentid[1:40]
  p <- student_npr_history_plot(mapviz, sample_ids, "Mathematics")
  expect_true(is.ggplot(p))

  p_build <- ggplot_build(p)
  # NOTE(review): the expected values below (160 rows, 4 cols, sum of 320)
  # are snapshot constants from the test fixture — update them if the
  # fixture data changes.
  expect_equal(nrow(p_build$data[[1]]), 160)
  expect_equal(ncol(p_build$data[[2]]), 4)
  expect_equal(sum(p_build$data[[3]][, 2]), 320, tolerance = .001)
})
test_that("fuzz test student_npr_history_plot plot", {
  # Run the generic plot fuzzer 5 times; every run should succeed
  # (i.e. produce a valid plot without erroring).
  fuzz_results <- fuzz_test_plot(
    "student_npr_history_plot",
    n = 5,
    additional_args = list("measurementscale" = "Mathematics")
  )
  expect_true(all(unlist(fuzz_results)))
})
|
40245453593835015daae59c4ce757d5991b6fdf
|
02d17420f72d67c2623cc7b37a013fa97dd205fb
|
/Project4.Imaging/code/03_plots_TC.R
|
09b52ca18bed7735081325abc7738cc039fb0dfb
|
[] |
no_license
|
rachaelvp/ph244-big_data
|
7a8641e1270dab39c572cc6fd73bc8ec89454962
|
b22d34e9860fe765c03de1d847aab4f47ba3040d
|
refs/heads/master
| 2021-09-13T23:52:07.207524
| 2018-05-06T04:32:55
| 2018-05-06T04:32:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,617
|
r
|
03_plots_TC.R
|
# Code to create summary plots of raw data.

library(ggplot2)
library(reshape)

# Read in the raw CSV.
# NOTE(review): hard-coded absolute path -- parameterize before sharing.
train_summary_raw <- read.csv("/Users/Tommy/Desktop/ph244-big_data/Project4.Imaging/data/train_summ.csv")

# There's one patient with a whole row of NA's... remove them.
# NOTE(review): dropping by magic row index is brittle; confirm row 374 is
# still the all-NA patient if the CSV is ever regenerated.
train_summary_raw <- train_summary_raw[-c(374), ]

# Subset the important columns.
# BUG FIX: every subset is taken from the raw frame. The original code
# overwrote train_summary with only the HU_* columns first and then tried to
# select Kurtosis/Variance/Skew/Mean/Median from it, which errors with
# "undefined columns selected".
hu_cols <- paste0("HU_", LETTERS[1:17])   # HU_A .. HU_Q
id_cols <- c("id", "cancer")
train_summary     <- train_summary_raw[, c(hu_cols, id_cols)]
train_summary_sub <- train_summary_raw[, c("HU_A", "HU_F", "HU_K", "HU_L", "HU_M",
                                           "HU_N", "HU_O", "HU_P", "HU_Q", id_cols)]
train_summary_sum <- train_summary_raw[, c("Kurtosis", "Variance", "Skew",
                                           "Mean", "Median", id_cols)]

# Melt each subset to long format for side-by-side boxplots.
melted         <- melt(train_summary,     id.vars = id_cols)
melted_sub     <- melt(train_summary_sub, id.vars = id_cols)
melted_sub_sum <- melt(train_summary_sum, id.vars = id_cols)

# Cancer status as a factor so ggplot treats the fill as discrete.
melted$cancer         <- factor(melted$cancer)
melted_sub$cancer     <- factor(melted_sub$cancer)
melted_sub_sum$cancer <- factor(melted_sub_sum$cancer)

# Shared fill scale / legend for all three plots (was duplicated 3x).
cancer_fill <- scale_fill_manual(
  values = rainbow(10),
  name   = "Lung Cancer Status",
  breaks = c(0, 1),
  labels = c("No Cancer", "Cancer")
)

# All HU bins, flipped so the long axis labels stay readable.
summ_plot <- ggplot(data = melted, aes(x = variable, y = value)) +
  geom_boxplot(aes(fill = cancer))
summ_plot + coord_flip() + cancer_fill +
  ylab("Percent Hounsfield Unit Makeup") +
  xlab("Various Hounsfield Bins") +
  ggtitle("Frequency Percentages of HU")

# Reduced set of HU bins.
summ_plot_sub <- ggplot(data = melted_sub, aes(x = variable, y = value)) +
  geom_boxplot(aes(fill = cancer))
summ_plot_sub + cancer_fill +
  ylab("Percent Hounsfield Unit Makeup") +
  xlab("Various Hounsfield Bins") +
  ggtitle("Frequency Percentages of HU")

# Summary statistics (kurtosis, variance, skew, mean, median).
summ_plot_sub_sum <- ggplot(data = melted_sub_sum, aes(x = variable, y = value)) +
  geom_boxplot(aes(fill = cancer))
summ_plot_sub_sum + coord_flip() + cancer_fill +
  ylab("Hounsfield Unit Makeup") +
  xlab("Various Hounsfield Summaries") +
  ggtitle("Frequency Percentages of HU")
|
1b46a4a00b86824c5d2c0fe17c684467b5325f62
|
d1a388b98b8c248c5f0388672189acedf96b0a93
|
/estimationJob/m_t_new/GJR_GARCH/GJR_Gaussian_Esscher_returns/Function_Pricer_VIX_GJR.R
|
e0ddafb36b5abd4539322ddbc466d32d5a7645a3
|
[] |
no_license
|
Fanirisoa/dynamic_pricing
|
1951438ea282358cf0aa90a04b1273846cd70836
|
66722ae75f446a3c9e2a672890ae808c679f72cd
|
refs/heads/master
| 2023-04-09T04:36:15.646529
| 2021-04-20T19:38:35
| 2021-04-20T19:38:35
| 212,493,315
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,820
|
r
|
Function_Pricer_VIX_GJR.R
|
######################################################################
###### Compute option prices using MC simulation ##
######################################################################
Pricer <- function(N_v, para_h1, Data.N) {
  ## Compute option prices by Monte-Carlo simulation, delegating to the
  ## helpers Matrice_ret / MC_Sim_St / Mar_St / P_T (defined elsewhere).
  ##   N_v     : number of simulated paths
  ##   para_h1 : GJR-GARCH volatility parameters c(a0, a1, a2, b1, lamda0, ro)
  ##   Data.N  : contract data with fields T (maturity in years), S (spot),
  ##             K (strike) and r (annual interest rate)
  ## Returns list(P, Yt, St, St_Mar): option prices, simulated returns,
  ## simulated price paths and their martingalized version.

  # Unpack the volatility parameters.
  # NOTE(review): a0..ro, S and K are never referenced again inside this
  # body, and the helper functions called below are defined outside Pricer
  # so they cannot see these locals -- confirm whether the helpers read
  # same-named globals instead, otherwise this is dead code.
  a0 <- para_h1[1]; a1 <- para_h1[2]; a2 <- para_h1[3]
  b1 <- para_h1[4]; lamda0 <- para_h1[5]; ro <- para_h1[6]

  S  <- Data.N$S        # underlying spot price
  K  <- Data.N$K        # strike price
  r  <- Data.N$r / 250  # annual rate -> daily rate
  Z1 <- length(r)       # number of contracts to price

  # Time to maturity: years -> rounded trading days.
  # (The original assigned T <- Data.N$T twice; collapsed to one statement.)
  T <- round(Data.N$T * 250, 0)

  ######################################
  ## Step 1 : Sampling the returns   ##
  ######################################
  # seq_len() is safe when Z1 == 0, where 1:Z1 would iterate over c(1, 0).
  Y_t <- lapply(seq_len(Z1), function(x) Matrice_ret(x, N_v))

  #####################################################
  ## Step 2 : turning returns into Monte-Carlo prices #
  #####################################################
  St <- MC_Sim_St(Y_t)

  ##############################################
  ## Step 3 : Martingalisation of the sample ##
  ##############################################
  St_Mar <- Mar_St(St, N_v)

  ################################################
  ## Step 4 : Computation of the option prices ##
  ################################################
  P <- P_T(St_Mar)

  return(list(P = P, Yt = Y_t, St = St, St_Mar = St_Mar))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.