blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
867fef54f480b93efb1adf1b4583d9475837a044
|
e17f3877ad5e350e63d8dc785df6ef6773e087f2
|
/Rcommands.R
|
4c79bca83dc07a8f9189eef1c4230b7576a44614
|
[] |
no_license
|
stillme/altmetrics
|
241c02ae5476964561c8c210548edf6dbef06cd4
|
4c52ea324b22ae467f9ca49b2c070393c7dec3e7
|
refs/heads/master
| 2021-01-01T19:06:43.927717
| 2015-09-16T18:17:59
| 2015-09-16T18:17:59
| 42,600,064
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,380
|
r
|
Rcommands.R
|
# Exploratory analysis of the altmetrics raw counts data.
counts_raw <- read.delim("data/counts-raw.txt.gz")
dim(counts_raw)
head(counts_raw)
tail(counts_raw)
counts_raw[1, 10]
counts_raw[1:3, 10:12]
counts_raw[1:3, ]
counts_raw[1:10, "pmid"]
str(counts_raw$daysSincePublished)
head(counts_raw$daysSincePublished / 7)
is.numeric(counts_raw$daysSincePublished)
str(counts_raw$journal)
levels(counts_raw$journal)
counts_raw$authorsCount[1:10]
is.na(counts_raw$authorsCount[1:10])
anyNA(counts_raw$authorsCount[1:10])
summary(counts_raw$wosCountThru2011)
mean(counts_raw$wosCountThru2011)
hist(counts_raw$wosCountThru2011)
hist(sqrt(counts_raw$wosCountThru2011))
plot(counts_raw$daysSincePublished, counts_raw$wosCountThru2011)
counts_raw$authorsCount[1:10] > 7
counts_raw$authorsCount[1:10]
# Subsetting rows by journal and by subject tag.
dim(counts_raw[counts_raw$journal == "pone", ])
dim(counts_raw[counts_raw$journal %in% c("pone", "pbio", "pgen"), ])
dim(counts_raw[grepl("Immunology", counts_raw$plosSubjectTags), ])
head(counts_raw$plosSubjectTags)
# Simple for-loop demo: print 1 through 10.
for (i in 1:10) {
  print(i)
}
# Add 1 to each citation count.
# Preallocate instead of growing the vector with c() inside the loop
# (the original was O(n^2) from repeated copying), and use seq_along()
# which also handles a zero-length column safely.
x <- numeric(length(counts_raw$wosCountThru2011))
for (i in seq_along(counts_raw$wosCountThru2011)) {
  x[i] <- counts_raw$wosCountThru2011[i] + 1
}
levels(counts_raw$journal)
# Mean citation count (through 2011) per journal.
results <- numeric(length = length(levels(counts_raw$journal)))
results
names(results) <- levels(counts_raw$journal)
for (j in levels(counts_raw$journal)) {
  results[j] <- mean(counts_raw$wosCountThru2011[counts_raw$journal == j])
}
results
|
de5c1e2e9234dd5826e990f20559264a00e4d682
|
0f5fc517c7beb08b4a11fd85749d0d1a50c28f5b
|
/man/zi_fit_pms.Rd
|
43cdda845185aee96f8a896245f039ca7a7f6aaa
|
[] |
no_license
|
sqyu/ZiDAG
|
544de482c6e7a3e35968408826c6136e57d2cb25
|
d893be61690031b13ced18b18a7e7c98d4b78804
|
refs/heads/master
| 2023-02-13T19:04:25.840259
| 2021-01-13T08:16:53
| 2021-01-13T08:16:53
| 239,381,238
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 8,584
|
rd
|
zi_fit_pms.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zero_fit_pms.R
\name{zi_fit_pms}
\alias{zi_fit_pms}
\title{Fits a Hurdle conditional model with pms parametrization of specified degree.}
\usage{
zi_fit_pms(
V,
Y,
left,
right,
extra_regressors = NULL,
extra_reg_pen_factors = NULL,
p_V_degree = 1,
p_Y_degree = 1,
p_Y_V_degree = 1,
mu_V_degree = 1,
mu_Y_degree = 1,
mu_Y_V_degree = 1,
value_only = TRUE,
tol = 1e-08,
maxit = 1e+05,
seed = NULL,
penalize_decider = function(X) { ncol(X) >= nrow(X)/2 },
nfits = 10,
runs = 2
)
}
\arguments{
\item{V}{A matrix of 0/1s, equal to Y != 0.}
\item{Y}{A data matrix of the same size as \code{V}.}
\item{left}{An integer between 1 and \code{ncol(Y)}. The index of the variable to be fit.}
\item{right}{A vector of integers between 1 and \code{ncol(Y)} different from \code{left}. Indices of the "regressors".}
\item{extra_regressors}{A matrix with the same number of rows as \code{V} and \code{Y}, extra regressors to be included in both regressions (conditional log odds/conditional mean). Defaults to \code{NULL}.}
\item{extra_reg_pen_factors}{A vector of non-negative numbers, defaults to \code{NULL}. Penalty factors for \code{extra_regressors}. If the main design matrix has \code{d} columns, \code{c(rep(1, d), extra_reg_pen_factors)} will be passed as the \code{penalty.factor} argument to \code{glmnet::glmnet()}. If \code{intercept == TRUE}, a \code{0} will also be prepended.}
\item{p_V_degree}{A non-negative integer, the degree for the \code{Vo} in the Hurdle polynomial for the conditional log odds. Defaults to 1.}
\item{p_Y_degree}{A non-negative integer, the degree for the \code{Yo} in the Hurdle polynomial for the conditional log odds. Defaults to 1.}
\item{p_Y_V_degree}{A non-negative integer, the degree for interaction between \code{Vo} and \code{Yo} in the Hurdle polynomial for the conditional log odds. Defaults to 1. If equal to 1, no interaction will be included (since it would be either a pure \code{V} term or a pure \code{Y} term).}
\item{mu_V_degree}{A non-negative integer, the degree for the \code{Vo} in the Hurdle polynomial for the conditional mean. Defaults to 1.}
\item{mu_Y_degree}{A non-negative integer, the degree for the \code{Yo} in the Hurdle polynomial for the conditional mean. Defaults to 1.}
\item{mu_Y_V_degree}{A non-negative integer, the degree for interaction between \code{Vo} and \code{Yo} in the Hurdle polynomial for the conditional mean. Defaults to 1. If equal to 1, no interaction will be included (since it would be either a pure \code{V} term or a pure \code{Y} term).}
\item{value_only}{If \code{TRUE}, returns the minimized negative log likelihood only. Defaults to \code{TRUE}.}
\item{tol}{A number, tolerance. Defaults to \code{1e-8}. Passed to \code{stats::glm()} for penalized logistic regressions, or as the \code{thresh} argument to \code{glmnet::glmnet()} for both logistic and linear regressions if penalized.}
\item{maxit}{An integer, the maximum number of iterations. Defaults to \code{100000}. Passed to \code{stats::glm()} for penalized logistic regressions, or to \code{glmnet::glmnet()} for both logistic and linear regressions if penalized.}
\item{seed}{A number, the random seed passed to \code{zi_fit_lm()} for both regressions (conditional log odds/conditional mean).}
\item{penalize_decider}{A logical or a function that takes a design matrix and returns a logical. Defaults to \code{function(X){ncol(X)>=nrow(X)/2}}. Used to decide whether to use penalized l2 (ridge) regression (if \code{TRUE}) when fitting each conditional distribution. Note that for either regression (conditional log odds/conditional mean), if the fits for unpenalized regressions are almost perfect, penalized regressions will be automatically used.}
\item{nfits}{A positive integer, defaults to \code{10}. Used for penalized regressions, as number of folds if \code{CV_BIC == TRUE} (\code{nfits} argument to \code{glmnet::cv.glmnet()}, with \code{nlambda} set to \code{100}), or the number of lambdas if \code{BIC == FALSE} (as the \code{nlambda} argument to \code{glmnet::glmnet()}).}
\item{runs}{A positive integer, the number of reruns. The fit with the maximum likelihood will be returned. Defaults to \code{2}.}
}
\value{
If \code{value_only == TRUE}, returns the minimized negative log likelihood only. Otherwise, returns
\item{nll}{A number, the minimized negative log likelihood.}
\item{par}{A vector of length \code{4*length(right)+3}, the fitted parameters, in the order of: the intercept for the \code{a} (a scalar), linear coefficients on \code{V[,right]} for \code{a}, linear coefficients on \code{Y[,right]} for \code{a}, the intercept for the \code{b} (a scalar), linear coefficients on \code{V[,right]} for \code{b}, linear coefficients on \code{Y[,right]} for \code{b}.}
\item{n}{An integer, the sample size.}
\item{effective_df}{\code{4*length(right)+3}, the effective degree of freedom.}
}
\description{
Fits a Hurdle conditional model with pms parametrization of specified degree.
}
\details{
A Hurdle conditional model with pms parametrization for the \code{left} node given those in \code{right} has log density with respect to the sum of the Lebesgue measure and a point mass at 0 equal to (in terms of \code{y})
\eqn{\log(1-p)}{log(1-p)} if \code{y == 0}, or \eqn{\log(p)-(y-mu)^2/2/sigmasq}{log(p)-(y-mu)^2/2/sigmasq} otherwise. That is, it is a mixture of a binomial with probability of success \code{p} and a Gaussian with conditional mean \code{mu} and conditional variance \code{sigmasq}.
Here \code{sigmasq} is assumed constant, and parameters \code{log(p/(1-p))} and \code{mu} are Hurdle polynomials, i.e. polynomials in the values for \code{right} and their indicators.
This function thus fits such a model using \code{Y[,left]}, \code{Y[,right]} and \code{V[,right] = (Y[,right] != 0)}, using a logistic for the log odds \code{log(p/(1-p))} and a linear regression for \code{mu}.
Writing \code{Yo <- Y[,right]}, a Hurdle polynomial in parents \code{Yo} is a polynomial in \code{Yo} and their 0/1 indicators \code{Vo}.
The \code{V_degree} of a term that is a product of some columns of \code{Vo} only is the number of parents that appears in it. For example, \code{V1 * V2 * V3} has \code{V_degree} equal to 3. Note that \code{V1^p} is equal to \code{V1} for any \code{p >= 1} so it does not make sense to include a power.
The \code{Y_degree} of a term that is a product of powers of some columns of \code{Yo} only is the degree of a polynomial in its usual sense. For example, \code{Y1^2 * Y2 * Y3^3} has \code{Y_degree} equal to 2+1+3=6.
The \code{Y_V_degree} of a term that involves both some columns of \code{Vo} and some of \code{Yo} is the sum of the \code{V_degree} of the \code{V} part and the \code{Y_degree} of the \code{Y} part. For example, \code{Y1^2 * V2 * Y3^3 * V4 * V5} has \code{Y_V_degree} equal to 2+1+3+1+1=8.
The design matrix thus includes all possible terms with \code{V_degree}, \code{Y_degree}, \code{Y_V_degree} less than or equal to those specified.
For example, if \code{Vo} and \code{Yo} has two columns and \code{V_degree == 2}, \code{Y_degree == 2}, \code{Y_V_degree == 2}, the design matrix has columns \code{V1}, \code{V2}, \code{V1*V2}, \code{Y1}, \code{Y2}, \code{Y1*Y2}, \code{Y1^2}, \code{Y2^2}, \code{Y1*V2}, \code{Y2*V1}. Note that terms like \code{V1*Y1} are not included as it is equivalent to \code{Y1}.
Parameters \code{p_V_degree}, \code{p_Y_degree}, \code{p_Y_V_degree}, \code{mu_V_degree}, \code{mu_Y_degree}, and \code{mu_Y_V_degree} specify these degrees for the regressions for the log odds \code{log(p/(1-p))} and the conditional mean \code{mu}, respectively.
For automatically choosing a uniform degree <= a specified maximum degree, please use \code{zi_fit_pms_choose_degree()}.
}
\examples{
m <- 3; n <- 1000
adj_mat <- make_dag(m, "complete")
dat <- gen_zero_dat(1, "pms", adj_mat, n, k_mode=1, min_num=10, gen_uniform_degree=1)
extra_regressors <- matrix(rnorm(n * 4), nrow=n)
extra_reg_pen_factors <- c(1, 2, 3, 4) / sum(c(1, 2, 3, 4))
zi_fit_pms(dat$V, dat$Y, 3, 1:2, extra_regressors=extra_regressors,
extra_reg_pen_factors=extra_reg_pen_factors, p_V_degree=2, p_Y_degree=2,
p_Y_V_degree=2, mu_V_degree=2, mu_Y_degree=2, mu_Y_V_degree=2, value_only=TRUE)
zi_fit_pms(dat$V, dat$Y, 3, 1:2, extra_regressors=extra_regressors,
extra_reg_pen_factors=extra_reg_pen_factors, p_V_degree=2, p_Y_degree=2,
p_Y_V_degree=2, mu_V_degree=2, mu_Y_degree=2, mu_Y_V_degree=2, value_only=FALSE)
}
|
35bf60fca3b79205db54d94c97d1b656b42eb8be
|
38c720a2af6d3d1bd8df15eaa42e87e88f1973e5
|
/man/as_TSP.Rd
|
a1c89645b35e473a16b5fa8f7b8c86023f5fba83
|
[] |
no_license
|
mllg/tspmeta
|
a5c2cc6570342ab2e5790f5834a839e34ddd24d4
|
db6a458781268a835203e07af9f1b9ece7bcc78c
|
refs/heads/master
| 2020-12-24T23:18:30.852333
| 2016-06-15T08:32:43
| 2016-06-15T08:32:43
| 61,191,420
| 0
| 0
| null | 2016-06-15T08:33:05
| 2016-06-15T08:33:05
| null |
UTF-8
|
R
| false
| false
| 375
|
rd
|
as_TSP.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/tsp_instance.R
\name{as_TSP}
\alias{as_TSP}
\title{Convert to TSP instance object of package TSP.}
\usage{
as_TSP(x)
}
\arguments{
\item{x}{[\code{\link{tsp_instance}}]\cr
TSP instance.}
}
\value{
[\code{\link[TSP]{TSP}}].
}
\description{
Convert to TSP instance object of package TSP.
}
|
d41dd1876b312043ea6e91a0ac2300ddba41a9f3
|
36e610c417776307c63228a461cdffd9dda8cc20
|
/man/annual.precipitation.totals.Madison.Rd
|
2b9899e9e403f50414ca862ac5368c7396e3fcff
|
[] |
no_license
|
cran/climtrends
|
79ee1153c09144ed0b575a11e0182ec6f29ab8df
|
d588ac8e4a1883cfead7579068226b6d1e9afab2
|
refs/heads/master
| 2021-01-21T14:04:48.389588
| 2016-05-26T17:56:25
| 2016-05-26T17:56:25
| 48,078,006
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 979
|
rd
|
annual.precipitation.totals.Madison.Rd
|
\name{annual.precipitation.totals.Madison}
\alias{annual.precipitation.totals.Madison}
\title{Annual precipitation totals in inches Madison (Wisconsin) }
\usage{annual.precipitation.totals.Madison}
\description{\code{annual.precipitation.totals.Madison} contains the annual precipitation totals in inches from Madison (Wisconsin) - courtesy of Madison Climate Page/State Climatology Homepage.
}
\format{
This data frame contains 2 columns (year and precipitationInches)
and 135 rows of data.
}
\source{
Madison Climate Page/State Climatology Homepage
Annual precipitation totals in inches
\url{http://www.aos.wisc.edu/~sco/clim-history/stations/msn/msn-pcpn.html}
}
\references{
Madison Climate Page/State Climatology Homepage
Annual precipitation totals in inches
\url{http://www.aos.wisc.edu/~sco/clim-history/stations/msn/msn-pcpn.html}
}
\author{Jose Gama}
\examples{
data(annual.precipitation.totals.Madison)
str(annual.precipitation.totals.Madison)
}
\keyword{datasets}
|
69c35f13b4d400f7ee393dc09b59928cb781dbd9
|
b9ed9dfe570c8f7e9baedd176886c9aee8868bca
|
/Linear_Model.R
|
b7feaaccdb987b3b8266d6128d3b0242090b3062
|
[
"MIT"
] |
permissive
|
Jun4871/R_programing
|
2b7b182ef2b2b5594356ca76f7870c881b030443
|
147dba41a62c0523f5aadd1240c5b67bab303576
|
refs/heads/master
| 2020-12-10T11:53:14.106811
| 2020-04-25T10:14:49
| 2020-04-25T10:14:49
| 233,586,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 166
|
r
|
Linear_Model.R
|
# Fit a simple linear model of diamond price on carat weight and inspect it.
head(diamonds)
lmDiamond <- lm(price ~ carat, data = diamonds)
summary(lmDiamond)
# Bug fix: the original used `par(mfrow <- c(2, 2))`, which creates a
# global variable `mfrow` and passes c(2, 2) positionally to par()
# instead of setting the mfrow graphical parameter. `=` is required
# for named arguments inside a call.
par(mfrow = c(2, 2))
plot(lmDiamond)  # four diagnostic plots in the 2x2 layout
|
9bc4dcbdbcde9e56d964efe2ea1eda4138a13c8b
|
411ab0a304cf6445f9189e3e1cd9a760f545ea1a
|
/man/str_entre.Rd
|
07398ca685dbd35a121ba0fe104d593d9d33b07f
|
[
"MIT"
] |
permissive
|
caayala/desuctools
|
ea059f0f0673eab4ba2c17f10a00f8d008881b43
|
7200f6d0a392967ce6eb7399690c94c66954e5b1
|
refs/heads/master
| 2023-08-29T04:46:53.667717
| 2023-08-07T21:26:01
| 2023-08-07T21:26:01
| 291,334,718
| 0
| 0
|
NOASSERTION
| 2020-08-29T19:20:58
| 2020-08-29T19:20:57
| null |
UTF-8
|
R
| false
| true
| 520
|
rd
|
str_entre.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers_variables.R
\name{str_entre}
\alias{str_entre}
\title{Extrae string entre dos textos}
\usage{
str_entre(text, ini = "", fin = "")
}
\arguments{
\item{text}{string. Puede ser un named string.}
\item{ini}{string, desde donde se extrae el texto}
\item{fin}{string, hasta donde se extrae el texto}
}
\value{
string
}
\description{
Extrae string entre dos textos
}
\examples{
str_entre('a (between) z', ini = '\\\\(', fin = '\\\\)')
}
|
b63eb799112fa0640ca83d02cced9fa33f24ea89
|
bdbfe5e0501ccf1dc6bf26d6939d8cddf353160f
|
/NewRScript.R
|
57a4054c335cb3b5fd585f944a2bf51eec02dd65
|
[] |
no_license
|
MickeyGitHub/Rcollaboration
|
acf5ec99fa000f1567f42e1c91d6673a8da564aa
|
647c9d43d5ea61eab7eb6e1699f485310df053df
|
refs/heads/master
| 2020-04-01T21:54:25.016602
| 2018-10-18T20:21:48
| 2018-10-18T20:21:48
| 153,681,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 106
|
r
|
NewRScript.R
|
# this is a new test script
# Prints "x is bigger" when the first argument exceeds the second;
# otherwise does nothing (the if-expression returns NULL invisibly).
# NOTE(review): "myfucition" looks like a typo for "myfunction", but the
# name is the public interface and is kept so existing callers still work.
myfucition <- function(x, y) {
  if (x > y) {
    print("x is bigger")
  }
}
|
9bbd85dc2a66652e4d333f41320df03b9e85d503
|
f70a41e996e76adbe3bb29f40c47fe7046e9b7d3
|
/Interns/ClaireMarie/slope_aspect_elevation.R
|
c9962eea86f012dd68c4cd3c0304a34849210f27
|
[] |
no_license
|
DrJonYearsley/Phenograss
|
5c541e25fafff1ee6d1f746f5a4e40129b1abd2a
|
d3cce1fa799939f6f84201561a7b08907c56ea7f
|
refs/heads/master
| 2022-08-12T10:50:32.685141
| 2022-07-15T14:37:31
| 2022-07-15T14:37:31
| 221,275,083
| 2
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,082
|
r
|
slope_aspect_elevation.R
|
# Slope aspect of SRTM elevation
#
# Claire-Marie Alla
# 29/06/2021
# ++++++++++++++++++++++++++++++++++++++++++++++
rm(list = ls())  # NOTE(review): clearing the workspace in a script is discouraged
library(sf)
library(stars)
library(raster)
library(ggplot2)
library(dplyr)
library(biwavelet)
library(grid)

# Input/output locations
elevation_dir = '~/Stage/Data_created/elevation_SRTM3_square'
modisPath = '~/Stage/Data/MODIS'
dataDir = '~/Stage/Data/MODIS/Phenophase_estimates'
outputDir = '~/Stage/Data_created'
input_file_preffix = 'phenology'

# Import data --------
squaresList = c(20)
year = 2019

# Method 1: contour lines from the elevation raster.
# NOTE: `contours` (renamed from `c`, which shadowed base::c and was
# later clobbered by a counter) holds the contours of the LAST square.
for (e in seq_along(squaresList)) {
  filename = paste0('square_', squaresList[e], '.tif')
  square = read_stars(file.path(elevation_dir, filename))
  filename2 = paste0(input_file_preffix, '_square_', squaresList[e], '_', year, '.RData')
  load(file.path(dataDir, filename2))
  contours = st_contour(square, breaks = c(100, 150))
  print(contours)
  # Create the gradient from the outline?
  # But we don't have the expression of the function
}

# Method 2
# Sobel filter: calculate for each point the gradient, the direction and
# the norm of the gradient of the raster.
# `filename` is the last file produced by the loop above.
square2 = raster(file.path(elevation_dir, filename))
square2 = as.matrix(square2)

# The two Sobel gradient kernels (horizontal and vertical derivatives)
deriv_hor = matrix(c(-1, -2, -1, 0, 0, 0, 1, 2, 1), nrow = 3)
deriv_vert = matrix(c(1, 0, -1, 2, 0, -2, 1, 0, -1), nrow = 3)

# Replace NA by 0
#square2[is.na(square2)] = 0
print(class(square2))
print(length(square2))

Gx = convolve2D(square2, deriv_hor, type = "open")
Gy = convolve2D(square2, deriv_vert, type = "open")

# Norm of the gradient
norm_grad = sqrt(Gx**2 + Gy**2)

# Direction of the gradient: atan2 returns radians in (-pi, pi].
# Bug fix: radians convert to degrees with 180/pi; the original used
# 360/pi (doubling every angle), which is why it "didn't work".
direction_grad = atan2(Gx, Gy)
direction_grad = direction_grad * (180 / pi)
# Bug fix: negate only the negative entries. The original assigned the
# WHOLE matrix to the subset (`[mask] = -direction_grad`), recycling
# unrelated values into the masked positions.
direction_grad[direction_grad < 0] = -direction_grad[direction_grad < 0]
print(direction_grad)
plot(contours, reset = FALSE)
#contour(square_1, add = TRUE) # contour plot

# Read in MODIS grid
# modis = read_stars(file.path(modisPath, 'modis_grid_ireland.tif'))
# crs_modis = st_crs(modis)
#
# #direction_grad = st_as_stars(direction_grad, crs = crs_modis)
#
# #norm_grad = st_as_stars(norm_grad, crs = crs_modis)
#
# output_smoothed = st_as_sf(output_smoothed, coords = c("x_MODIS", "y_MODIS"),
#                            crs = crs_modis)
#
# output_smoothed$grp = sapply(st_equals(output_smoothed$geometry), max)
# output_smoothed = output_smoothed %>% group_by(grp, pixelID) %>% summarize(t = mean(t))

# Create a dataframe with coord / grad dir and grad norm.
# Bug fix: the original grew an empty data.frame through a counter that
# started at 0 (R is 1-indexed, so the first assignment failed) with a
# hard-coded 44x44 size. Preallocate a vector sized from the matrix
# dimensions instead, then build the data.frame once.
n_rows = nrow(square2)
n_cols = ncol(square2)
geometry = numeric(n_rows * n_cols)
idx = 1
for (i in seq_len(n_rows)) {
  for (j in seq_len(n_cols)) {
    geometry[idx] = square2[[i, j]]
    idx = idx + 1
  }
}
slope_aspect = data.frame(geometry = geometry)

# if between 45 and 135 degrees => North
# if between 135 and 225 degrees => West
# if between 225 and 315 degrees => South
# if between 315 and 360, 0 and 45 => East
# display with ggplot2 + geom_segment and arrow (coord of origin point, norm and direction)
#ggplot(data=as.data.frame(direction_grad))
|
b5650e57a0c367c088efe0da8562fbb2716ab1a3
|
423e53b3ca3e81220813d88be963cb4b8b3fd9b2
|
/man/print.survFitCstExp.Rd
|
91fcadd7a12d75fa8c61cb61ef905b1392d99f5d
|
[] |
no_license
|
cran/morse
|
9715ca0a55cdf7c42ecfd13039065a88a273f6dd
|
262ed591e1b80190e1cea7a3ae93164b0d030df2
|
refs/heads/master
| 2022-11-08T06:29:36.523446
| 2022-10-28T10:45:09
| 2022-10-28T10:45:09
| 20,999,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 942
|
rd
|
print.survFitCstExp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.survFitCstExp.R
\name{print.survFitCstExp}
\alias{print.survFitCstExp}
\title{Print of \code{survFit} object}
\usage{
\method{print}{survFitCstExp}(x, ...)
}
\arguments{
\item{x}{An object of class \code{survFitCstExp}}
\item{\dots}{Further arguments to be passed to generic methods.}
}
\value{
print the model text and the Jags Computing information
}
\description{
This is the generic \code{print} S3 method for the \code{survFitCstExp} class.
It prints the underlying JAGS model and some information on the Bayesian
inference procedure.
}
\examples{
# (1) Load the data
data(propiconazole)
# (2) Create an object of class 'survData'
dat <- survData(propiconazole)
\donttest{
# (3) Run the survFit function with TKTD model 'SD' or 'IT'
out <- survFit(dat, quiet = TRUE, model_type="SD")
# (4) Print the survFit object
print(out)
}
}
\keyword{print}
|
69378e7b7d30a833bb5bf624028b7ee1f8f8cd0f
|
ad522819f54aa659c951ff39fff1dda0fff0f89f
|
/R/tranforms.R
|
3180108ff48d864f1347af79f697e475b0c896fe
|
[
"MIT"
] |
permissive
|
davidbrae/torchaudio
|
4dbc4e12067b14dedd8fa785a6b753719e39b0d3
|
d20ccc237a8eff58e77bb8e3f08ef24150a4fc4e
|
refs/heads/master
| 2023-07-20T16:06:59.791249
| 2021-08-29T19:16:50
| 2021-08-29T19:16:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,364
|
r
|
tranforms.R
|
#' Spectrogram
#'
#' Computes a spectrogram, or a batch of spectrograms, from a raw audio
#' signal. Depending on `power`, the result is magnitude-based or the
#' complex spectrum.
#'
#' @param pad (integer): Two sided padding of signal
#' @param window_fn (tensor or function): Window tensor that is applied/multiplied to each
#' frame/window or a function that generates the window tensor.
#' @param n_fft (integer): Size of FFT
#' @param hop_length (integer): Length of hop between STFT windows
#' @param win_length (integer): Window size
#' @param power (numeric): Exponent for the magnitude spectrogram, (must be > 0) e.g.,
#' 1 for energy, 2 for power, etc. If NULL, then the complex spectrum is returned instead.
#' @param normalized (logical): Whether to normalize by magnitude after stft
#' @param ... (optional) Arguments for window function.
#'
#' @details forward param:
#' waveform (tensor): Tensor of audio of dimension (..., time)
#'
#' @return tensor: Dimension (..., freq, time), freq is n_fft %/% 2 + 1 and n_fft is the
#' number of Fourier bins, and time is the number of window hops (n_frame).
#'
#' @export
transform_spectrogram <- torch::nn_module(
  "Spectrogram",
  initialize = function(n_fft = 400,
                        win_length = NULL,
                        hop_length = NULL,
                        pad = 0L,
                        window_fn = torch::torch_hann_window,
                        power = 2,
                        normalized = FALSE,
                        ...) {
    self$n_fft = n_fft
    # The one-sided STFT returns n_fft %/% 2 + 1 frequency bins.
    # Defaults: win_length falls back to n_fft, hop_length to half the window.
    self$win_length = win_length %||% n_fft
    self$hop_length = hop_length %||% (self$win_length %/% 2)
    # Materialize the window once and register it as a module buffer so it
    # follows the module across devices.
    win = window_fn(window_length = self$win_length, dtype = torch::torch_float(), ...)
    self$register_buffer('window', win)
    self$pad = pad
    self$power = power
    self$normalized = normalized
  },
  forward = function(waveform) {
    # All computation is delegated to the functional implementation.
    functional_spectrogram(
      waveform = waveform,
      pad = self$pad,
      n_fft = self$n_fft,
      window = self$window,
      hop_length = self$hop_length,
      win_length = self$win_length,
      power = self$power,
      normalized = self$normalized
    )
  }
)
#' Mel Scale
#'
#' Turn a normal STFT into a mel frequency STFT, using a conversion
#' matrix. This uses triangular filter banks.
#'
#' @param n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
#' @param sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
#' @param f_min (float, optional): Minimum frequency. (Default: ``0.``)
#' @param f_max (float or NULL, optional): Maximum frequency. (Default: ``sample_rate // 2``)
#' @param n_stft (int, optional): Number of bins in STFT. Calculated from first input
#' if NULL is given. See ``n_fft`` in :class:`Spectrogram`. (Default: ``NULL``)
#'
#' @details forward param:
#' specgram (Tensor): Tensor of audio of dimension (..., freq, time).
#'
#' @return `tensor`: Mel frequency spectrogram of size (..., ``n_mels``, time).
#'
#' @export
transform_mel_scale <- torch::nn_module(
  "MelScale",
  initialize = function(
    n_mels = 128,
    sample_rate = 16000,
    f_min = 0.0,
    f_max = NULL,
    n_stft = NULL
  ) {
    self$n_mels = n_mels
    self$sample_rate = sample_rate
    # f_max defaults to the Nyquist frequency (sample_rate %/% 2).
    self$f_max = f_max %||% as.numeric(sample_rate %/% 2)
    self$f_min = f_min
    # The mel frequency range must be non-degenerate.
    if(self$f_min > self$f_max) value_error(glue::glue("Require f_min: {self$f_min} < f_max: {self$f_max}"))
    # If n_stft is unknown at construction time, register an EMPTY buffer
    # as a sentinel; forward() lazily builds the filter bank from the
    # first input's frequency dimension.
    fb = if(is.null(n_stft)) {
      torch::torch_empty(0)
    } else {
      functional_create_fb_matrix(
        n_freqs = n_stft,
        f_min = self$f_min,
        f_max = self$f_max,
        n_mels = self$n_mels,
        sample_rate = self$sample_rate
      )
    }
    self$register_buffer('fb', fb)
  },
  forward = function(specgram) {
    # pack batch: collapse all leading dims into one so the input is
    # (batch, freq, time) regardless of its original rank
    shape = specgram$size()
    ls = length(shape)
    specgram = specgram$reshape(list(-1, shape[ls-1], shape[ls]))
    if(self$fb$numel() == 0) {
      # Lazy initialization: build the filter bank from this input's
      # frequency dimension and store it in the sentinel buffer in place.
      tmp_fb = functional_create_fb_matrix(
        n_freqs = specgram$size(2),
        f_min = self$f_min,
        f_max = self$f_max,
        n_mels = self$n_mels,
        sample_rate = self$sample_rate
      )
      self$fb$resize_(tmp_fb$size())
      self$fb$copy_(tmp_fb)
    }
    # (channel, frequency, time).transpose(...) dot (frequency, n_mels)
    # -> (channel, time, n_mels).transpose(...)
    mel_specgram = torch::torch_matmul(specgram$transpose(2L, 3L), self$fb$to(device = specgram$device))$transpose(2L, 3L)
    # unpack batch: restore the original leading dims around the new
    # (n_mels, time) trailing dims
    lspec = length(mel_specgram$shape)
    mel_specgram = mel_specgram$reshape(c(shape[-((ls-1):ls)], mel_specgram$shape[(lspec-1):lspec]))
    return(mel_specgram)
  }
)
#' Amplitude to DB
#'
#' Converts a tensor from the power/amplitude scale to the decibel scale.
#'
#' This output depends on the maximum value in the input tensor, and so
#' may return different values for an audio clip split into snippets vs. a
#' a full clip.
#'
#' @param stype (str, optional): scale of input tensor ('power' or 'magnitude'). The
#' power being the elementwise square of the magnitude. (Default: ``'power'``)
#' @param top_db (float or NULL, optional): Minimum negative cut-off in decibels. A reasonable number
#' is 80. (Default: ``NULL``)
#'
#' @details forward param:
#' x (Tensor): Input tensor before being converted to decibel scale
#'
#' @return `tensor`: Output tensor in decibel scale
#'
#' @export
transform_amplitude_to_db <- torch::nn_module(
  "AmplitudeToDB",
  initialize = function(stype = 'power', top_db = NULL) {
    self$stype = stype
    # top_db is a cut-off width, so it must be non-negative when given.
    if(!is.null(top_db) && top_db < 0) value_error("top_db must be positive value")
    self$top_db = top_db
    # dB scaling: 10*log10 for power quantities, 20*log10 for magnitudes.
    self$multiplier = if(stype == 'power') 10.0 else 20.0
    self$amin = 1e-10      # numerical floor, avoids log10(0)
    self$ref_value = 1.0   # reference amplitude for 0 dB
    self$db_multiplier = log10(max(self$amin, self$ref_value))
  },
  forward = function(x) {
    # Delegate to the functional implementation with the stored settings.
    functional_amplitude_to_db(
      x = x,
      multiplier = self$multiplier,
      amin = self$amin,
      db_multiplier = self$db_multiplier,
      top_db = self$top_db
    )
  }
)
#' Mel Spectrogram
#'
#' Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram
#' and MelScale.
#'
#' @param sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
#' @param win_length (int or NULL, optional): Window size. (Default: ``n_fft``)
#' @param hop_length (int or NULL, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
#' @param n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
#' @param f_min (float, optional): Minimum frequency. (Default: ``0.``)
#' @param f_max (float or NULL, optional): Maximum frequency. (Default: ``NULL``)
#' @param pad (int, optional): Two sided padding of signal. (Default: ``0``)
#' @param n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
#' @param window_fn (function, optional): A function to create a window tensor
#' that is applied/multiplied to each frame/window. (Default: ``torch_hann_window``)
#' @param power (float, optional): Power of the norm. (Default: to ``2.0``)
#' @param normalized (logical): Whether to normalize by magnitude after stft (Default: ``FALSE``)
#' @param ... (optional): Arguments for window function.
#'
#' @details forward param:
#' waveform (Tensor): Tensor of audio of dimension (..., time).
#'
#' @return `tensor`: Mel frequency spectrogram of size (..., ``n_mels``, time).
#'
#' @section Sources:
#' - [https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe]()
#' - [https://timsainb.github.io/spectrograms-mfccs-and-inversion-in-python.html]()
#' - [https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html]()
#'
#' @examples #' Example
#' \dontrun{
#'
#' if(torch::torch_is_installed()) {
#' mp3_path <- system.file("sample_audio_1.mp3", package = "torchaudio")
#' sample_mp3 <- transform_to_tensor(tuneR_loader(mp3_path))
#' # (channel, n_mels, time)
#' mel_specgram <- transform_mel_spectrogram(sample_rate = sample_mp3[[2]])(sample_mp3[[1]])
#' }
#' }
#'
#' @export
transform_mel_spectrogram <- torch::nn_module(
  "MelSpectrogram",
  initialize = function(
    sample_rate = 16000,
    n_fft = 400,
    win_length = NULL,
    hop_length = NULL,
    f_min = 0.0,
    f_max = NULL,
    pad = 0,
    n_mels = 128,
    window_fn = torch::torch_hann_window,
    power = 2.,
    normalized = FALSE,
    ...
  ) {
    self$sample_rate = sample_rate
    self$n_fft = n_fft
    # Same default chain as transform_spectrogram: win_length falls back
    # to n_fft, hop_length to half the window.
    self$win_length = win_length %||% n_fft
    self$hop_length = hop_length %||% (self$win_length %/% 2)
    self$pad = pad
    self$power = power
    self$normalized = normalized
    self$n_mels = n_mels # number of mel frequency bins
    self$f_max = f_max
    self$f_min = f_min
    # Stage 1: waveform -> STFT spectrogram.
    self$spectrogram = transform_spectrogram(
      n_fft = self$n_fft,
      win_length = self$win_length,
      hop_length = self$hop_length,
      pad = self$pad,
      window_fn = window_fn,
      power = self$power,
      normalized = self$normalized,
      ...
    )
    # Stage 2: spectrogram -> mel scale. n_stft is known up front
    # (one-sided STFT has n_fft %/% 2 + 1 bins), so the filter bank is
    # built eagerly rather than lazily on first forward().
    self$mel_scale = transform_mel_scale(
      n_mels = self$n_mels,
      sample_rate = self$sample_rate,
      f_min = self$f_min,
      f_max = self$f_max,
      n_stft = (self$n_fft %/% 2) + 1
    )
  },
  forward = function(waveform) {
    # Compose the two stages: STFT first, then mel projection.
    specgram = self$spectrogram(waveform)
    mel_specgram = self$mel_scale(specgram)
    return(mel_specgram)
  }
)
#' Mel-frequency Cepstrum Coefficients
#'
#' Create the Mel-frequency cepstrum coefficients from an audio signal.
#'
#' @param sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
#' @param n_mfcc (int, optional): Number of mfc coefficients to retain. (Default: ``40``)
#' @param dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``)
#' @param norm (str, optional): norm to use. (Default: ``'ortho'``)
#' @param log_mels (bool, optional): whether to use log-mel spectrograms instead of db-scaled. (Default: ``FALSE``)
#' @param ... (optional): arguments for [torchaudio::transform_mel_spectrogram].
#'
#'
#' @details forward param:
#' waveform (tensor): Tensor of audio of dimension (..., time)
#'
#' By default, this calculates the MFCC on the DB-scaled Mel spectrogram.
#' This output depends on the maximum value in the input spectrogram, and so
#' may return different values for an audio clip split into snippets vs. a
#' a full clip.
#'
#' @return `tensor`: specgram_mel_db of size (..., ``n_mfcc``, time).
#'
#' @export
transform_mfcc <- torch::nn_module(
  "MFCC",
  initialize = function(
    sample_rate = 16000,
    n_mfcc = 40,
    dct_type = 2,
    norm = 'ortho',
    log_mels = FALSE,
    ...
  ) {
    # Only DCT type II is implemented; reject anything else up front.
    supported_dct_types = c(2)
    if(!dct_type %in% supported_dct_types) {
      value_error(paste0('DCT type not supported:', dct_type))
    }
    self$sample_rate = sample_rate
    self$n_mfcc = n_mfcc
    self$dct_type = dct_type
    self$norm = norm
    self$top_db = 80.0
    self$amplitude_to_db = transform_amplitude_to_db('power', self$top_db)
    # Extra arguments (e.g. n_fft, n_mels) are forwarded to the mel
    # spectrogram sub-module.
    self$mel_spectrogram = transform_mel_spectrogram(sample_rate = self$sample_rate, ...)
    # Cannot keep more cepstral coefficients than there are mel bins.
    if(self$n_mfcc > self$mel_spectrogram$n_mels) value_error('Cannot select more MFCC coefficients than # mel bins')
    # Precompute the (n_mels, n_mfcc) DCT matrix and register it as a
    # buffer so it moves with the module across devices.
    dct_mat = functional_create_dct(
      n_mfcc = self$n_mfcc,
      n_mels = self$mel_spectrogram$n_mels,
      norm = self$norm
    )
    self$register_buffer('dct_mat', dct_mat)
    self$log_mels = log_mels
  },
  forward = function(waveform) {
    # pack batch: collapse all leading dims so the waveform is (batch, time)
    shape = waveform$size()
    ls = length(shape)
    waveform = waveform$reshape(list(-1, shape[ls]))
    mel_specgram = self$mel_spectrogram(waveform)
    if(self$log_mels) {
      # Natural-log mel spectrogram; small offset avoids log(0).
      log_offset = 1e-6
      mel_specgram = torch::torch_log(mel_specgram + log_offset)
    } else {
      # Default path: dB-scaled mel spectrogram.
      mel_specgram = self$amplitude_to_db(mel_specgram)
    }
    # (channel, n_mels, time).transpose(...) dot (n_mels, n_mfcc)
    # -> (channel, time, n_mfcc).transpose(...)
    mfcc = torch::torch_matmul(mel_specgram$transpose(2, 3), self$dct_mat)$transpose(2, 3)
    # unpack batch: restore the original leading dims around the new
    # (n_mfcc, time) trailing dims
    lspec = length(mfcc$shape)
    mfcc = mfcc$reshape(c(shape[-ls], mfcc$shape[(lspec-1):lspec]))
    return(mfcc)
  }
)
#' Inverse Mel Scale
#'
#' Solve for a normal STFT from a mel frequency STFT, using a conversion
#' matrix. This uses triangular filter banks.
#'
#' @param n_stft (int): Number of bins in STFT. See ``n_fft`` in [torchaudio::transform_spectrogram].
#' @param n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
#' @param sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
#' @param f_min (float, optional): Minimum frequency. (Default: ``0.``)
#' @param f_max (float or NULL, optional): Maximum frequency. (Default: ``sample_rate %/% 2``)
#' @param max_iter (int, optional): Maximum number of optimization iterations. (Default: ``100000``)
#' @param tolerance_loss (float, optional): Value of loss to stop optimization at. (Default: ``1e-5``)
#' @param tolerance_change (float, optional): Difference in losses to stop optimization at. (Default: ``1e-8``)
#' @param ... (optional): Arguments passed to the SGD optimizer. When no arguments are
#'    supplied, ``list(lr = 0.1, momentum = 0.9)`` is used; ``lr`` defaults to 0.1 if
#'    not specified.
#'
#' @details forward param:
#' melspec (Tensor): A Mel frequency spectrogram of dimension (..., ``n_mels``, time)
#'
#' It minimizes the euclidian norm between the input mel-spectrogram and the product between
#' the estimated spectrogram and the filter banks using SGD.
#'
#' @return Tensor: Linear scale spectrogram of size (..., freq, time)
#'
#' @export
transform_inverse_mel_scale <- torch::nn_module(
  "InverseMelScale",
  initialize = function(
    n_stft,
    n_mels = 128,
    sample_rate = 16000,
    f_min = 0.,
    f_max = NULL,
    max_iter = 100000,
    tolerance_loss = 1e-5,
    tolerance_change = 1e-8,
    ...
  ) {
    self$n_mels = n_mels
    self$sample_rate = sample_rate
    self$f_max = f_max %||% as.numeric(sample_rate %/% 2)
    self$f_min = f_min
    self$max_iter = max_iter
    self$tolerance_loss = tolerance_loss
    self$tolerance_change = tolerance_change
    # Bug fix: `list(...)` is never NULL (it is `list()` when empty), so the
    # original `list(...) %||% list(lr = 0.1, momentum = 0.9)` could never
    # apply its defaults and the momentum default was silently lost.
    self$sgdargs = list(...)
    if (length(self$sgdargs) == 0) {
      self$sgdargs = list('lr' = 0.1, 'momentum' = 0.9)
    }
    self$sgdargs$lr = self$sgdargs$lr %||% 0.1 # lr is required for torch::optim_sgd()
    if(f_min > self$f_max)
      value_error(glue::glue('Require f_min: {f_min} < f_max: {self$f_max}'))
    # Mel filter bank of shape (freq, n_mels): the linear model is
    # melspec ~ specgram %*% fb, solved for specgram by SGD in forward().
    fb = functional_create_fb_matrix(
      n_freqs = n_stft,
      f_min = self$f_min,
      f_max = self$f_max,
      n_mels = self$n_mels,
      sample_rate = self$sample_rate
    )
    self$fb <- fb
  },
  forward = function(melspec) {
    # pack batch: collapse leading dims, (..., n_mels, time) -> (batch, n_mels, time)
    shape = melspec$size()
    ls = length(shape)
    melspec = melspec$view(c(-1, shape[ls-1], shape[ls]))
    n_mels = shape[ls-1]
    time = shape[ls]
    freq = self$fb$size(1) # (freq, n_mels)
    melspec = melspec$transpose(-1, -2)
    if(self$n_mels != n_mels) runtime_error("self$n_mels != n_mels")
    # Random non-negative initial guess for the linear spectrogram, refined by SGD.
    specgram = torch::torch_rand(melspec$size()[1], time, freq, requires_grad=TRUE,
                                 dtype=melspec$dtype, device=melspec$device)
    self$sgdargs$params <- specgram
    optim <- do.call(torch::optim_sgd, self$sgdargs)
    loss = Inf
    for(i in seq.int(self$max_iter)){
      optim$zero_grad()
      diff = melspec - specgram$matmul(self$fb$to(device = melspec$device))
      # take sum over mel-frequency then average over other dimensions
      # so that loss threshold is applied per unit timeframe
      new_loss = diff$pow(2)$sum(dim=-1)$mean()
      new_loss$backward()
      optim$step()
      # Project back onto the feasible set: power spectra are non-negative.
      specgram$set_data(specgram$data()$clamp(min=0))
      new_loss = new_loss$item()
      # Scalar `||` (short-circuit) instead of the vectorized `|` of the
      # original: both operands are length-1 doubles here.
      if(new_loss < self$tolerance_loss || abs(loss - new_loss) < self$tolerance_change)
        break
      loss = new_loss
    }
    specgram$requires_grad_(FALSE)
    specgram = specgram$clamp(min=0)$transpose(-1, -2)
    # unpack batch: restore the original leading dims around (freq, time)
    specgram = specgram$view(c(shape[-c(ls-1, ls)], freq, time))
    return(specgram)
  }
)
#' Mu Law Encoding
#'
#' Encode signal based on mu-law companding. For more info see
#' the [Wikipedia Entry](https://en.wikipedia.org/wiki/M-law_algorithm)
#'
#' @param quantization_channels (int, optional): Number of channels. (Default: ``256``)
#'
#' @details forward param:
#' x (Tensor): A signal to be encoded.
#'
#' @return x_mu (Tensor): An encoded signal.
#'
#' @details
#' This algorithm assumes the signal has been scaled to between -1 and 1 and
#' returns a signal encoded with values from 0 to quantization_channels - 1.
#'
#' @export
transform_mu_law_encoding <- torch::nn_module(
  "MuLawEncoding",
  initialize = function(quantization_channels = 256) {
    # Number of discrete mu-law levels produced by the encoder.
    self$quantization_channels <- quantization_channels
  },
  forward = function(x) {
    # Thin module wrapper: all work happens in the functional implementation.
    functional_mu_law_encoding(x, self$quantization_channels)
  }
)
#' Mu Law Decoding
#'
#' Decode mu-law encoded signal. For more info see the
#' [Wikipedia Entry](https://en.wikipedia.org/wiki/M-law_algorithm)
#'
#' This expects an input with values between 0 and quantization_channels - 1
#' and returns a signal scaled between -1 and 1.
#'
#' @param quantization_channels (int, optional): Number of channels. (Default: ``256``)
#'
#' @details forward param:
#' x_mu (Tensor): A mu-law encoded signal which needs to be decoded.
#'
#' @return Tensor: The signal decoded.
#'
#' @export
transform_mu_law_decoding <- torch::nn_module(
  "MuLawDecoding",
  initialize = function(quantization_channels = 256) {
    # Number of discrete mu-law levels expected in the encoded input.
    self$quantization_channels <- quantization_channels
  },
  forward = function(x_mu) {
    # Thin module wrapper: all work happens in the functional implementation.
    functional_mu_law_decoding(x_mu, self$quantization_channels)
  }
)
#' Signal Resample
#'
#' Resample a signal from one frequency to another. A resampling method can be given.
#'
#' @param orig_freq (float, optional): The original frequency of the signal. (Default: ``16000``)
#' @param new_freq (float, optional): The desired frequency. (Default: ``16000``)
#' @param resampling_method (str, optional): The resampling method. (Default: ``'sinc_interpolation'``)
#'
#' @details forward param:
#' waveform (Tensor): Tensor of audio of dimension (..., time).
#'
#' @return Tensor: Output signal of dimension (..., time).
#'
#' @export
transform_resample <- torch::nn_module(
  "Resample",
  initialize = function(
    orig_freq = 16000,
    new_freq = 16000,
    resampling_method = 'sinc_interpolation'
  ) {
    self$orig_freq <- orig_freq
    self$new_freq <- new_freq
    self$resampling_method <- resampling_method
  },
  forward = function(waveform) {
    # Only sinc interpolation is implemented; reject anything else at call time,
    # matching the original behaviour of erroring inside forward().
    if (self$resampling_method != 'sinc_interpolation') {
      value_error(glue::glue('Invalid resampling method: {self$resampling_method}'))
    }
    # pack batch: collapse every leading dimension into a single batch dim.
    orig_shape <- waveform$size()
    nd <- length(orig_shape)
    flat <- waveform$view(c(-1, orig_shape[nd]))
    resampled <- kaldi_resample_waveform(flat, self$orig_freq, self$new_freq)
    # unpack batch: restore the leading dims around the new time length.
    new_len <- resampled$shape[length(resampled$shape)]
    resampled$view(c(orig_shape[-nd], new_len))
  }
)
#' Complex Norm
#'
#' Compute the norm of complex tensor input.
#'
#' @param power (float, optional): Power of the norm. (Default: to ``1.0``)
#'
#' @details forward param:
#' complex_tensor (Tensor): Tensor shape of `(..., complex=2)`.
#'
#' @return Tensor: norm of the input tensor, shape of `(..., )`.
#'
#' @export
transform_complex_norm <- torch::nn_module(
  "ComplexNorm",
  initialize = function(power = 1.0) {
    # Exponent applied to the complex magnitude.
    self$power <- power
  },
  forward = function(complex_tensor) {
    # Thin module wrapper around the functional implementation.
    functional_complex_norm(complex_tensor, self$power)
  }
)
#' Delta Coefficients
#'
#' Compute delta coefficients of a tensor, usually a spectrogram.
#'
#' @param win_length (int): The window length used for computing delta. (Default: ``5``)
#' @param mode (str): Mode parameter passed to padding. (Default: ``'replicate'``)
#'
#' @details forward param:
#' specgram (Tensor): Tensor of audio of dimension (..., freq, time).
#'
#' See [torchaudio::functional_compute_deltas] for more details.
#'
#' @return Tensor: Tensor of deltas of dimension (..., freq, time).
#'
#' @export
transform_compute_deltas <- torch::nn_module(
  "ComputeDeltas",
  initialize = function(win_length = 5, mode = "replicate") {
    # Window length and padding mode are simply stored and forwarded.
    self$win_length <- win_length
    self$mode <- mode
  },
  forward = function(specgram) {
    # Thin module wrapper around the functional implementation.
    functional_compute_deltas(specgram, win_length = self$win_length, mode = self$mode)
  }
)
#' Time Stretch
#'
#' Stretch stft in time without modifying pitch for a given rate.
#'
#' @param hop_length (int or NULL, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
#' @param n_freq (int, optional): number of filter banks from stft. (Default: ``201``)
#' @param fixed_rate (float or NULL, optional): rate to speed up or slow down by.
#'  If NULL is provided, rate must be passed to the forward method. (Default: ``NULL``)
#'
#' @details forward param:
#' complex_specgrams (Tensor): complex spectrogram (..., freq, time, complex=2).
#'
#' overriding_rate (float or NULL, optional): speed up to apply to this batch.
#'  If no rate is passed, use ``self$fixed_rate``. (Default: ``NULL``)
#'
#' @return Tensor: Stretched complex spectrogram of dimension (..., freq, ceil(time/rate), complex=2).
#'
#' @export
transform_time_stretch <- torch::nn_module(
  "TimeStretch",
  initialize = function(
    hop_length = NULL,
    n_freq = 201,
    fixed_rate = NULL
  ) {
    self$fixed_rate <- fixed_rate
    n_fft <- (n_freq - 1) * 2
    if (is.null(hop_length)) {
      hop_length <- n_fft %/% 2
    }
    # Expected phase advance per hop for each frequency bin, shaped (n_freq, 1).
    self$register_buffer(
      'phase_advance',
      torch::torch_linspace(0, pi * hop_length, n_freq)[.., NULL]
    )
  },
  forward = function(complex_specgrams, overriding_rate = NULL) {
    last_dim <- length(complex_specgrams$size())
    if (complex_specgrams$size()[last_dim] != 2)
      value_error("complex_specgrams should be a complex tensor, shape (..., complex=2)")
    # Per-call rate overrides the module-level fixed rate.
    rate <- if (is.null(overriding_rate)) self$fixed_rate else overriding_rate
    if (is.null(rate))
      value_error("If no fixed_rate is specified, must pass a valid rate to the forward method.")
    # A rate of exactly 1.0 is the identity; skip the vocoder entirely.
    if (rate == 1.0) {
      return(complex_specgrams)
    }
    functional_phase_vocoder(complex_specgrams, rate, self$phase_advance)
  }
)
#' Fade In/Out
#'
#' Add a fade in and/or fade out to an waveform.
#'
#' @param fade_in_len (int, optional): Length of fade-in (time frames). (Default: ``0``)
#' @param fade_out_len (int, optional): Length of fade-out (time frames). (Default: ``0``)
#' @param fade_shape (str, optional): Shape of fade. Must be one of: "quarter_sine",
#' "half_sine", "linear", "logarithmic", "exponential". (Default: ``"linear"``)
#'
#' @details forward param:
#' waveform (Tensor): Tensor of audio of dimension (..., time).
#'
#' @return Tensor: Tensor of audio of dimension (..., time).
#'
#' @export
transform_fade <- torch::nn_module(
  "Fade",
  initialize = function(
    fade_in_len = 0,
    fade_out_len = 0,
    fade_shape = "linear"
  ) {
    self$fade_in_len <- fade_in_len
    self$fade_out_len <- fade_out_len
    self$fade_shape <- fade_shape
  },
  forward = function(waveform) {
    nd <- length(waveform$size())
    waveform_length <- waveform$size()[nd]
    dev <- waveform$device
    # Multiply both envelopes into the signal; each is 1 outside its fade region.
    fade_in_mask <- self$.fade_in(waveform_length)$to(device = dev)
    fade_out_mask <- self$.fade_out(waveform_length)$to(device = dev)
    fade_in_mask * fade_out_mask * waveform
  },
  .fade_in = function(waveform_length) {
    # Linear ramp 0 -> 1 over the fade-in region, then reshaped by fade_shape.
    ramp <- torch::torch_linspace(0, 1, self$fade_in_len)
    tail_ones <- torch::torch_ones(waveform_length - self$fade_in_len)
    ramp <- switch(
      self$fade_shape,
      exponential  = torch::torch_pow(2, (ramp - 1)) * ramp,
      logarithmic  = torch::torch_log10(.1 + ramp) + 1,
      quarter_sine = torch::torch_sin(ramp * pi / 2),
      half_sine    = torch::torch_sin(ramp * pi - pi / 2) / 2 + 0.5,
      # "linear" (and any unrecognized shape) keeps the raw linear ramp,
      # exactly as the original if-chain did.
      ramp
    )
    torch::torch_cat(list(ramp, tail_ones))$clamp_(0, 1)
  },
  .fade_out = function(waveform_length) {
    # Ramp over the fade-out region, reshaped by fade_shape so it decays 1 -> 0.
    ramp <- torch::torch_linspace(0, 1, self$fade_out_len)
    head_ones <- torch::torch_ones(waveform_length - self$fade_out_len)
    ramp <- switch(
      self$fade_shape,
      linear       = -ramp + 1,
      exponential  = torch::torch_pow(2, -ramp) * (1 - ramp),
      logarithmic  = torch::torch_log10(1.1 - ramp) + 1,
      quarter_sine = torch::torch_sin(ramp * pi / 2 + pi / 2),
      half_sine    = torch::torch_sin(ramp * pi + pi / 2) / 2 + 0.5,
      # Unrecognized shapes fall through unchanged, as in the original if-chain.
      ramp
    )
    torch::torch_cat(list(head_ones, ramp))$clamp_(0, 1)
  }
)
#' Axis Masking
#'
#' Apply masking to a spectrogram.
#'
#' @param mask_param (int): Maximum possible length of the mask.
#' @param axis (int): What dimension the mask is applied on.
#' @param iid_masks (bool): Applies iid masks to each of the examples in the batch dimension.
#'  This option is applicable only when the input tensor is 4D.
#'
#' @details forward param:
#' specgram (Tensor): Tensor of dimension (..., freq, time).
#'
#' mask_value (float): Value to assign to the masked columns.
#'
#' @return Tensor: Masked spectrogram of dimensions (..., freq, time).
#'
#' @export
transform__axismasking <- torch::nn_module(
  "_AxisMasking",
  initialize = function(mask_param, axis, iid_masks) {
    self$mask_param <- mask_param
    self$axis <- axis
    self$iid_masks <- iid_masks
  },
  forward = function(specgram, mask_value = 0.) {
    # iid masking only applies when requested AND the input has a batch dim (4D);
    # the iid variant shifts the axis by one to account for that batch dim.
    use_iid <- self$iid_masks && specgram$dim() == 4
    if (use_iid) {
      functional_mask_along_axis_iid(specgram, self$mask_param, mask_value, self$axis + 1L)
    } else {
      functional_mask_along_axis(specgram, self$mask_param, mask_value, self$axis)
    }
  }
)
#' Frequency-domain Masking
#'
#' Apply masking to a spectrogram in the frequency domain.
#'
#' @param freq_mask_param (int): maximum possible length of the mask.
#'  Indices uniformly sampled from [0, freq_mask_param).
#' @param iid_masks (bool, optional): whether to apply different masks to each
#'  example/channel in the batch. (Default: ``FALSE``)
#'  This option is applicable only when the input tensor is 4D.
#'
#' @return not implemented yet.
#'
#' @export
transform_frequencymasking <- function(freq_mask_param, iid_masks) {
  # Placeholder: the _AxisMasking-based implementation has not been ported yet;
  # both arguments are currently ignored and this always raises.
  not_implemented_error("Class _AxisMasking to be implemented yet.")
}
# R6::R6Class(
# "FrequencyMasking",
# inherit = transform__axismasking,
# initialize = function(freq_mask_param, iid_masks = FALSE) {
# # super(FrequencyMasking, self).__init__(freq_mask_param, 1, iid_masks)
# # https://pytorch.org/audio/_modules/torchaudio/transforms.html#FrequencyMasking
# }
# )
#' Time-domain Masking
#'
#' Apply masking to a spectrogram in the time domain.
#'
#' @param time_mask_param (int): maximum possible length of the mask.
#'  Indices uniformly sampled from [0, time_mask_param).
#' @param iid_masks (bool, optional): whether to apply different masks to each
#'  example/channel in the batch. (Default: ``FALSE``)
#'  This option is applicable only when the input tensor is 4D.
#'
#' @return not implemented yet.
#'
#' @export
transform_timemasking <- function(time_mask_param, iid_masks) {
  # Placeholder: the _AxisMasking-based implementation has not been ported yet;
  # both arguments are currently ignored and this always raises.
  not_implemented_error("Class _AxisMasking to be implemented yet.")
}
# torchaudio::transform_axismasking(
# "TimeMasking",
# initialize = function(time_mask_param, iid_masks = FALSE) {
# # super(TimeMasking, self).__init__(time_mask_param, 2, iid_masks)
# # https://pytorch.org/audio/_modules/torchaudio/transforms.html#TimeMasking
# not_implemented_error("Class _AxisMasking to be implemented yet.")
# }
# )
#' Add a volume to an waveform.
#'
#' @param gain (float): Interpreted according to the given gain_type:
#'  If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio.
#'  If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared).
#'  If ``gain_type`` = ``db``, ``gain`` is in decibels.
#' @param gain_type (str, optional): Type of gain. One of: ``amplitude``, ``power``, ``db`` (Default: ``amplitude``)
#'
#' @details forward param:
#' waveform (Tensor): Tensor of audio of dimension (..., time).
#'
#' @return Tensor: Tensor of audio of dimension (..., time).
#'
#' @export
transform_vol <- torch::nn_module(
  "Vol",
  initialize = function(
    gain,
    gain_type = 'amplitude'
  ) {
    self$gain <- gain
    self$gain_type <- gain_type
    # Amplitude ratios and powers must be non-negative; dB gains may be negative.
    if (gain_type %in% c('amplitude', 'power') && gain < 0) {
      value_error("If gain_type = amplitude or power, gain must be positive.")
    }
  },
  forward = function(waveform) {
    # Apply the gain according to its type, then clamp into the valid [-1, 1] range.
    adjusted <- switch(
      self$gain_type,
      amplitude = waveform * self$gain,
      db        = functional_gain(waveform, self$gain),
      power     = functional_gain(waveform, 10 * log10(self$gain)),
      # Unknown gain types leave the signal untouched, as the original if-chain did.
      waveform
    )
    torch::torch_clamp(adjusted, -1, 1)
  }
)
#' sliding-window Cepstral Mean Normalization
#'
#' Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
#'
#' @param cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
#' @param min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
#'  Only applicable if center == ``FALSE``, ignored if center==``TRUE`` (int, default = 100)
#' @param center (bool, optional): If ``TRUE``, use a window centered on the current frame
#'  (to the extent possible, modulo end effects). If ``FALSE``, window is to the left. (bool, default = ``FALSE``)
#' @param norm_vars (bool, optional): If ``TRUE``, normalize variance to one. (bool, default = ``FALSE``)
#'
#' @details forward param:
#' waveform (Tensor): Tensor of audio of dimension (..., time).
#'
#' @return Tensor: Tensor of audio of dimension (..., time).
#'
#' @export
transform_sliding_window_cmn <- torch::nn_module(
  "SlidingWindowCmn",
  initialize = function(
    cmn_window = 600,
    min_cmn_window = 100,
    center = FALSE,
    norm_vars = FALSE
  ) {
    # All options are stored unchanged and forwarded to the functional call.
    self$cmn_window <- cmn_window
    self$min_cmn_window <- min_cmn_window
    self$center <- center
    self$norm_vars <- norm_vars
  },
  forward = function(waveform) {
    # Thin module wrapper around the functional implementation.
    functional_sliding_window_cmn(
      waveform,
      self$cmn_window,
      self$min_cmn_window,
      self$center,
      self$norm_vars
    )
  }
)
#' Voice Activity Detector
#'
#' Voice Activity Detector. Similar to SoX implementation.
#'
#' Attempts to trim silence and quiet background sounds from the ends of recordings of speech.
#' The algorithm currently uses a simple cepstral power measurement to detect voice,
#' so may be fooled by other things, especially music.
#'
#' The effect can trim only from the front of the audio,
#' so in order to trim from the back, the reverse effect must also be used.
#'
#' @param sample_rate (int): Sample rate of audio signal.
#' @param trigger_level (float, optional): The measurement level used to trigger activity detection.
#'    This may need to be changed depending on the noise level, signal level,
#'    and other characteristics of the input audio. (Default: 7.0)
#' @param trigger_time (float, optional): The time constant (in seconds)
#'    used to help ignore short bursts of sound. (Default: 0.25)
#' @param search_time (float, optional): The amount of audio (in seconds)
#'    to search for quieter/shorter bursts of audio to include prior
#'    the detected trigger point. (Default: 1.0)
#' @param allowed_gap (float, optional): The allowed gap (in seconds) between
#'    quieter/shorter bursts of audio to include prior
#'    to the detected trigger point. (Default: 0.25)
#' @param pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve
#'    before the trigger point and any found quieter/shorter bursts. (Default: 0.0)
#' @param boot_time (float, optional) The algorithm (internally) uses adaptive noise
#'    estimation/reduction in order to detect the start of the wanted audio.
#'    This option sets the time for the initial noise estimate. (Default: 0.35)
#' @param noise_up_time (float, optional) Time constant used by the adaptive noise estimator
#'    for when the noise level is increasing. (Default: 0.1)
#' @param noise_down_time (float, optional) Time constant used by the adaptive noise estimator
#'    for when the noise level is decreasing. (Default: 0.01)
#' @param noise_reduction_amount (float, optional) Amount of noise reduction to use in
#'    the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35)
#' @param measure_freq (float, optional) Frequency of the algorithm’s
#'    processing/measurements. (Default: 20.0)
#' @param measure_duration (float, optional) Measurement duration. (Default: Twice the measurement period; i.e. with overlap.)
#' @param measure_smooth_time (float, optional) Time constant used to smooth spectral measurements. (Default: 0.4)
#' @param hp_filter_freq (float, optional) "Brick-wall" frequency of high-pass filter applied
#'    at the input to the detector algorithm. (Default: 50.0)
#' @param lp_filter_freq (float, optional) "Brick-wall" frequency of low-pass filter applied
#'    at the input to the detector algorithm. (Default: 6000.0)
#' @param hp_lifter_freq (float, optional) "Brick-wall" frequency of high-pass lifter used
#'    in the detector algorithm. (Default: 150.0)
#' @param lp_lifter_freq (float, optional) "Brick-wall" frequency of low-pass lifter used
#'    in the detector algorithm. (Default: 2000.0)
#'
#' @details forward param:
#' waveform (Tensor): Tensor of audio of dimension `(..., time)`
#'
#' @references
#' - [http://sox.sourceforge.net/sox.html]()
#'
#' @return torch::nn_module()
#'
#' @export
transform_vad <- torch::nn_module(
  "Vad",
  initialize = function(
    sample_rate,
    trigger_level = 7.0,
    trigger_time = 0.25,
    search_time = 1.0,
    allowed_gap = 0.25,
    pre_trigger_time = 0.0,
    boot_time = .35,
    noise_up_time = .1,
    noise_down_time = .01,
    noise_reduction_amount = 1.35,
    measure_freq = 20.0,
    measure_duration = NULL,
    measure_smooth_time = .4,
    hp_filter_freq = 50.,
    lp_filter_freq = 6000.,
    hp_lifter_freq = 150.,
    lp_lifter_freq = 2000.
  ) {
    self$sample_rate = sample_rate
    self$trigger_level = trigger_level
    self$trigger_time = trigger_time
    self$search_time = search_time
    self$allowed_gap = allowed_gap
    self$pre_trigger_time = pre_trigger_time
    self$boot_time = boot_time
    self$noise_up_time = noise_up_time
    # Bug fix: the original assigned `noise_up_time` here as well, silently
    # discarding the user's `noise_down_time` argument.
    self$noise_down_time = noise_down_time
    self$noise_reduction_amount = noise_reduction_amount
    self$measure_freq = measure_freq
    self$measure_duration = measure_duration
    self$measure_smooth_time = measure_smooth_time
    self$hp_filter_freq = hp_filter_freq
    self$lp_filter_freq = lp_filter_freq
    self$hp_lifter_freq = hp_lifter_freq
    self$lp_lifter_freq = lp_lifter_freq
  },
  forward = function(waveform) {
    # All stored options are forwarded verbatim to the functional implementation.
    return(functional_vad(
      waveform=waveform,
      sample_rate=self$sample_rate,
      trigger_level=self$trigger_level,
      trigger_time=self$trigger_time,
      search_time=self$search_time,
      allowed_gap=self$allowed_gap,
      pre_trigger_time=self$pre_trigger_time,
      boot_time=self$boot_time,
      noise_up_time=self$noise_up_time,
      # Bug fix: was `self$noise_up_time`, which made noise_down_time a no-op.
      noise_down_time=self$noise_down_time,
      noise_reduction_amount=self$noise_reduction_amount,
      measure_freq=self$measure_freq,
      measure_duration=self$measure_duration,
      measure_smooth_time=self$measure_smooth_time,
      hp_filter_freq=self$hp_filter_freq,
      lp_filter_freq=self$lp_filter_freq,
      hp_lifter_freq=self$hp_lifter_freq,
      lp_lifter_freq=self$lp_lifter_freq
    ))
  }
)
|
a3eb9b63b553488218dfe4e96c8c9ddadec33c82
|
fa44e7a7a231c54078a56ead85da8cd26eef2e64
|
/Codici/04 - Veneto/01.1 - Creazione frontiera/Codice/Functions.R
|
9f2f9d358c6eb1e418ee816b604247d1fb3fbe5a
|
[] |
no_license
|
GabrieleMazza/TesiDiLaurea
|
1018d2d7aeaba3894f4042488a04f8923bb0a759
|
952e34355c2e718f180c4b79b55f1e48af33c887
|
refs/heads/master
| 2021-01-23T07:21:28.876038
| 2015-04-24T13:38:10
| 2015-04-24T13:38:10
| 26,230,512
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 13,332
|
r
|
Functions.R
|
# FUNZIONI DI APPOGGIO
SquareEuclideanDistance = function (p1,p2)
{
    # Euclidean distance between two 2-D points p1 = (x, y) and p2 = (x, y).
    # Bug fix: the original computed sqrt((p1[1]+p2[1])^2 + (p1[1]+p2[1])^2),
    # i.e. it ADDED the coordinates instead of subtracting them and used the
    # x coordinate twice. This version returns the actual distance.
    # NOTE(review): despite the name, the original already applied sqrt(), so
    # the (corrected) non-squared distance is kept for caller compatibility.
    dist = sqrt((p1[1] - p2[1])^2 + (p1[2] - p2[2])^2)
    return(dist)
}
CleanPoints = function (Triang_old, IDDelete, x_old, y_old)
{
    # Remove the triangles listed in IDDelete from the triangulation, drop the
    # points that are no longer referenced by any remaining triangle, and
    # renumber the surviving vertex indices accordingly.
    #
    # Args:
    #   Triang_old: matrix with one row per triangle, 3 columns of vertex indices.
    #   IDDelete:   row indices of the triangles to remove.
    #   x_old, y_old: point coordinates, indexed by the triangulation.
    #
    # Returns: list(x, y, Triang) with the kept coordinates (in their original
    # order) and the renumbered triangulation (same row order as the surviving
    # rows of Triang_old).
    #
    # drop = FALSE keeps the matrix shape even when a single triangle survives;
    # the original code collapsed that case to a vector and then crashed on
    # dim(Triang)[1].
    Triang_kept <- Triang_old[-IDDelete, , drop = FALSE]
    # A point survives iff at least one kept triangle references it.
    keep <- seq_along(x_old) %in% as.vector(Triang_kept)
    # Old index -> new index map; NA marks dropped points (this replaces the
    # original O(n^2) vector-growing construction of `Corrispondenze`).
    mapping <- rep(NA_real_, length(x_old))
    mapping[keep] <- seq_len(sum(keep))
    x <- x_old[keep]
    y <- y_old[keep]
    # Rewrite the triangulation in terms of the new indices, preserving shape.
    Triang <- Triang_kept
    Triang[] <- mapping[Triang_kept]
    dimnames(Triang) <- NULL
    L <- list(x, y, Triang)
    return(L)
}
Intersections = function(x,y)
{
    # Determine whether the closed polygon with vertices (x, y) is simple or
    # complex (self-intersecting). Returns NULL for a simple polygon; otherwise
    # a matrix whose rows hold the endpoint indices of each interacting edge
    # pair plus a label describing the contact. The labels are runtime strings
    # kept in Italian for caller compatibility: "Intersezione" (crossing),
    # "Intersezione ad un estremo" (touching at an endpoint),
    # "Segmenti sovrapposti" (overlapping collinear segments).
    # Edge k runs from vertex k to vertex k+1 (the last edge wraps to vertex 1);
    # only non-adjacent edge pairs (j >= i + 2) are tested.
    Intersect<-NULL
    if(length(x)!=length(y))
    {
        stop('Lengths of vectors is not the same')
    }
    if(length(x)<=3)
    {
        # A triangle (or less) cannot self-intersect.
        return(Intersect)
    }
    for(i in 1:(length(x)-2))
    {
        if(i==1)
        {
            # Skip the closing edge: it is adjacent to edge 1 (shares vertex 1).
            maximumj=length(x)-1
        } else
        {
            maximumj=length(x)
        }
        for(j in (i+2):maximumj)
        {
            # First find the end point of the edge starting at j
            # (the last edge wraps around to vertex 1).
            ind=j+1
            if(j==length(x))
            {
                ind=1
            }
            #### CASE: BOTH SEGMENTS VERTICAL ####
            if((x[i]==x[i+1])&&(x[j]==x[ind]))
            {
                # Both segments are vertical, hence parallel.
                # Check whether they overlap.
                if(x[i]==x[j])
                {
                    # They lie on the same vertical line;
                    # check whether the y ranges overlap.
                    if(y[i]>y[i+1])
                    {
                        yimax=y[i]
                        yimin=y[i+1]
                    } else
                    {
                        yimax=y[i+1]
                        yimin=y[i]
                    }
                    if(y[j]>y[ind])
                    {
                        yjmax=y[j]
                        yjmin=y[ind]
                    } else
                    {
                        yjmax=y[ind]
                        yjmin=y[j]
                    }
                    # They interact if, taking the higher segment, we have:
                    if (yimax>=yjmax)
                    {
                        if(yjmax>yimin)
                        {
                            Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Segmenti sovrapposti"))
                        } else
                        {
                            if(yjmax==yimin)
                            {
                                Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione"))
                            }
                        }
                    } else
                    {
                        if(yimax>yjmin)
                        {
                            Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Segmenti sovrapposti"))
                        } else
                        {
                            if(yimax==yjmin)
                            {
                                Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione"))
                            }
                        }
                    }
                }
            } else
            {
                #### CASE: ONLY THE SEGMENT STARTING AT i IS VERTICAL ####
                if(x[i]==x[i+1])
                {
                    # Check whether segment j crosses the vertical line x = x[i].
                    if(x[j]>x[ind])
                    {
                        xjmax=x[j]
                        xjmin=x[ind]
                    } else
                    {
                        xjmax=x[ind]
                        xjmin=x[j]
                    }
                    if((x[i]>xjmin)&&(x[i]<xjmax))
                    {
                        # x is strictly within range: check for an actual crossing.
                        # Line through segment j.
                        mj=(y[j]-y[ind])/(x[j]-x[ind])
                        qj=y[j]-mj*x[j]
                        yint=mj*x[i]+qj
                        # Does this yint lie on segment i?
                        if(y[i]>y[i+1])
                        {
                            yimax=y[i]
                            yimin=y[i+1]
                        } else
                        {
                            yimax=y[i+1]
                            yimin=y[i]
                        }
                        if((yint>=yimin)&&(yint<=yimax))
                        {
                            # Strict inequalities mean an interior intersection.
                            if((yint>yimin)&&(yint<yimax))
                            {
                                Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione"))
                            } else
                            {
                                Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione ad un estremo"))
                            }
                        }
                    }
                } else
                {
                    #### CASE: ONLY THE SEGMENT STARTING AT j IS VERTICAL ####
                    if(x[j]==x[ind])
                    {
                        # Check whether segment i crosses the vertical line x = x[j].
                        if(x[i]>x[i+1])
                        {
                            ximax=x[i]
                            ximin=x[i+1]
                        } else
                        {
                            ximax=x[i+1]
                            ximin=x[i]
                        }
                        if((x[j]>ximin)&&(x[j]<ximax))
                        {
                            # x is strictly within range: check for an actual crossing.
                            # Line through segment i.
                            mi=(y[i]-y[i+1])/(x[i]-x[i+1])
                            qi=y[i]-mi*x[i]
                            yint=mi*x[j]+qi
                            # Does this yint lie on segment j?
                            if(y[j]>y[ind])
                            {
                                yjmax=y[j]
                                yjmin=y[ind]
                            } else
                            {
                                yjmax=y[ind]
                                yjmin=y[j]
                            }
                            if((yint>=yjmin)&&(yint<=yjmax))
                            {
                                # Strict inequalities mean an interior intersection.
                                if((yint>yjmin)&&(yint<yjmax))
                                {
                                    Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione"))
                                } else
                                {
                                    Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione ad un estremo"))
                                }
                            }
                        }
                    } else
                    {
                        # General case: neither edge is vertical.
                        # Line through the edge starting at i.
                        mi=(y[i]-y[i+1])/(x[i]-x[i+1])
                        qi=y[i]-mi*x[i]
                        # Line through the edge starting at j.
                        mj=(y[j]-y[ind])/(x[j]-x[ind])
                        qj=y[j]-mj*x[j]
                        # Find the intersection of the two lines.
                        if(mi!=mj)
                        {
                            # Non-parallel lines: solve for the crossing point.
                            Delta=mi-mj
                            xint=-(qi-qj)/Delta
                            #yint=-(mi*qj-mj*qi)/Delta
                            # The x coordinate suffices; check it lies within both segments.
                            if(x[i]>x[i+1])
                            {
                                ximax=x[i]
                                ximin=x[i+1]
                            } else
                            {
                                ximax=x[i+1]
                                ximin=x[i]
                            }
                            if(x[j]>x[ind])
                            {
                                xjmax=x[j]
                                xjmin=x[ind]
                            } else
                            {
                                xjmax=x[ind]
                                xjmin=x[j]
                            }
                            if((xint>=ximin)&&(xint<=ximax)&&(xint>=xjmin)&&(xint<=xjmax))
                            {
                                # Strict inequalities mean an interior intersection.
                                if ((xint>ximin)&&(xint<ximax)&&(xint>xjmin)&&(xint<xjmax))
                                {
                                    Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione"))
                                } else
                                {
                                    Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione ad un estremo"))
                                }
                            }
                        } else
                        {
                            # The segments are parallel. Do they overlap?
                            # Exclude the case of parallel but distinct lines.
                            if(qi==qj)
                            {
                                # Collinear segments: check whether the x ranges overlap.
                                if(x[i]>x[i+1])
                                {
                                    ximax=x[i]
                                    ximin=x[i+1]
                                } else
                                {
                                    ximax=x[i+1]
                                    ximin=x[i]
                                }
                                if(x[j]>x[ind])
                                {
                                    xjmax=x[j]
                                    xjmin=x[ind]
                                } else
                                {
                                    xjmax=x[ind]
                                    xjmin=x[j]
                                }
                                # They interact if, taking the wider segment, we have:
                                if (ximax>=xjmax)
                                {
                                    if(xjmax>ximin)
                                    {
                                        Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Segmenti sovrapposti"))
                                    } else
                                    {
                                        if(xjmax==ximin)
                                        {
                                            Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione"))
                                        }
                                    }
                                } else
                                {
                                    if(ximax>xjmin)
                                    {
                                        Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Segmenti sovrapposti"))
                                    } else
                                    {
                                        if(ximax==xjmin)
                                        {
                                            Intersect<-rbind(Intersect,c(i,i+1,j,ind,"Intersezione"))
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    return(Intersect)
}
# Identify triangles whose three vertices all lie on the boundary.
#
# Args:
#   Triangulation: matrix with one triangle per row; columns 1:3 hold
#     vertex indices.
#   Boundaries: matrix whose [1, 1] entry is the first (smallest) vertex
#     index belonging to the boundary — boundary vertices are assumed to
#     be numbered from Boundaries[1, 1] upwards.
#
# Returns:
#   Integer vector of row indices of triangles whose three vertices are
#   all boundary vertices, or NULL when there are none (matching the
#   original loop-based contract).
BorderTriangles <- function(Triangulation, Boundaries) {
  firstindex <- Boundaries[1, 1]
  # Vectorized replacement for the original per-row counting loop:
  # flag, per row, which of the first 3 vertex indices are boundary
  # vertices, then keep rows where all 3 qualify.
  onBoundary <- Triangulation[, 1:3, drop = FALSE] >= firstindex
  hits <- which(rowSums(onBoundary) == 3)
  if (length(hits) == 0) NULL else hits
}
|
f22d2ce77413d0d01308b67dfb9e9d2813d4140b
|
9b97439cf690d1ee155625b954984115ea2d30d6
|
/Exploratory DA/plot2.R
|
59f74cd9acf86006c0709311866f9c8534a7baf5
|
[] |
no_license
|
skdery/datasciencecoursera
|
b2dfc405bb360fff84833dd12e2fb0e7ac4f0531
|
f33c711ad1b671bf0e0df319b3605b67fe4a1b38
|
refs/heads/master
| 2020-05-31T15:07:18.769269
| 2015-01-25T17:45:22
| 2015-01-25T17:45:22
| 27,982,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,133
|
r
|
plot2.R
|
## This program uses data from the National Emissions Inventory (NEI) to answer the question:
## Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
## Use the base plotting system to make a plot answering this question.
##
# set working directory
# NOTE(review): machine-specific absolute path; a relative path would be more portable.
setwd("~/Documents/Courses/Coursera/Exploratory Data Analysis/Assignment and Project/exdata-data-NEI_data")
# read the data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Have total emissions from PM2.5 decreased in Baltimore City, Maryland from 1999 to 2008?
# Select only data from Baltimore City
Balti.emissions <- NEI[NEI$fips == "24510", ]
# group emissions by year
Balti.emissions.year <- aggregate(Emissions ~ year, Balti.emissions, sum)
png('plot2.png')
# BUG FIX: the original referenced 'Balti.emissions.by.year', which is never
# defined anywhere in this script; the aggregated data frame created above is
# 'Balti.emissions.year', so the script would stop with "object not found".
barplot(height = Balti.emissions.year$Emissions,
        names.arg = Balti.emissions.year$year,
        xlab = "years", ylab = expression('Total PM'[2.5]*' emission'),
        main = expression('Total PM'[2.5]*' emissions in Baltimore City, '*
                          'Maryland from 1999 - 2008'), col = "blue")
dev.off()
|
6fa73cd5d2f1b2896bf4a5eff942931c12c38f97
|
91a3c67cf3ffb822d6978c36eda17b9df6b602dc
|
/RCode/plotDiffCombinedResults.R
|
3cf0a56caf0085804e3ea82dea90c0dd816d2c43
|
[] |
no_license
|
bhklab/SkinFibrosis
|
eab2a6d355923c18067f391cb4be95857b832f72
|
05bc8b8d2f345214daf8b4c0602fdeb4912af2eb
|
refs/heads/master
| 2021-03-27T10:11:01.847777
| 2020-04-24T18:11:17
| 2020-04-24T18:11:17
| 76,301,956
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 958
|
r
|
plotDiffCombinedResults.R
|
# Compare connectivity scores from two precomputed analyses and write a
# scatter plot to PDF.
#
# Loads two .RData files (each is expected to define a matrix/data frame
# `res` with a "Connectivity" column and compound row names — TODO confirm),
# aligns their rows by compound name, removes "caffeic acid", and plots the
# two connectivity-score columns against each other.
#
# Side effects: writes ../Output/resDiffCombined-qSig850-ppar-glyco.pdf.
# Returns: nothing useful (last value is dev.off()'s invisible result).
plotDiffCombinedResults <- function() {
load("../Output/qSig850-diffExpAnalysis.RData")
# NOTE(review): this iix is overwritten below (after `comb` is built) before
# it is ever used, so this first assignment is dead code.
iix <- which(rownames(res)=="caffeic acid")
resdiff <- res
load("../Output/qSig-KEGG.RData")
# Interactive leftover: result of this which() is discarded.
which(rownames(res)=="caffeic acid")
# # #[1] 307
resCombined <- res
#
# Align the combined results to the row order of the diff-exp results.
ii <- match(rownames(resdiff), rownames(resCombined))
# NOTE(review): sanity check — its TRUE/FALSE result is not captured or
# asserted, so a misalignment would pass silently.
all(rownames(resCombined)[ii] == rownames(resdiff))
resCombined <- resCombined[ii,]
comb <- cbind(resdiff[,"Connectivity"], resCombined[,"Connectivity"])
colnames(comb) <- c("resDiff","resCombined")
iix <- which(rownames(comb)=="caffeic acid")
# Counts of positive-connectivity compounds (values discarded; historical
# results recorded in the trailing comments).
sum(comb[,1]>0) ## 267
sum(comb[,2]>0) ## 491
i1 <- comb[,1]>0
i2 <- comb[,2]>0
ix <- i1 & i2
# Compounds positive in both analyses — computed but never used afterwards.
topdiffandcombined <- comb[ix,]
######
rownames(comb)[iix]
# Pull caffeic acid out separately and drop it from the plotted matrix.
cafacid <- comb[iix,]
comb <- comb[-iix,]
pdf("../Output/resDiffCombined-qSig850-ppar-glyco.pdf")
plot(comb, xlab = "conn score (human diff exps)", ylab = "conn score (ppar-glyco)", ylim=c(-0.6,0.6), xlim=c(-0.6,0.6))
dev.off()
}
|
3a8f00213e358d5576d98064560c5bb31fa779ed
|
d2753667d25bd6a6052c56227aea3fc0dcdbed04
|
/randomization/Block Randomization - Stratified and assigned to dataset.R
|
89bcad0f88ce079c099efd5f88ab1521da5d4d28
|
[] |
no_license
|
tkappen/MiscCodeR
|
e73f47aaab7e6c81f7f1e7851f736ccd036cc6bc
|
614a93852f1d0c3be4a0c33475eaafe484527ae4
|
refs/heads/master
| 2021-01-20T19:40:23.919224
| 2020-06-30T15:31:53
| 2020-06-30T15:31:53
| 60,108,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 969
|
r
|
Block Randomization - Stratified and assigned to dataset.R
|
#############################################################################
# Randomize cohort per stratum as blocks and assign the value to the dataset
#############################################################################
library(blockrand)
library(dplyr)
library(tidyr)
# Create a function that takes a set of records as input and returns a
# block randomization of the appropriate length.
# The input is the set of records for a single stratum.
# Draw a block randomization sized to match one stratum's records.
#
# Args:
#   x: a two-dimensional object (one row per record in the stratum).
#   ...: forwarded to blockrand() (e.g. levels, block.sizes).
#
# Returns:
#   The first nrow(x) rows of the blockrand() assignment table, i.e. one
#   treatment assignment per record.
block.assign <- function(x, ...) {
  if (length(dim(x)) != 2) stop('not the right number of dimensions')
  n.records <- nrow(x)
  # blockrand may generate more assignments than needed to complete a
  # block; truncate to exactly one row per record.
  assignments <- blockrand(n.records, ...)
  assignments[1:n.records, ]
}
# Apply function to each stratum and then unnest
# Assumes a data frame `d` (with columns staffrole, specialty, vunetid,
# EmailAddress) exists in the workspace — it is not created in this script.
# Seed fixed so the randomization is reproducible.
set.seed(3)
# NOTE(review): dplyr::do() and bare unnest() are superseded idioms; a
# group_modify()/list-column approach would be the modern equivalent.
dx <- d %>%
group_by(staffrole,specialty) %>%
select(vunetid,EmailAddress) %>%
do(vunetid = .$vunetid, EmailAddress = .$EmailAddress, treatment = block.assign(., levels=c('email','aims'),
                                                      block.sizes = 2)) %>%
unnest()
# Check randomization
# Cross-tabulate treatment arms within each stratum to eyeball balance.
table(dx$treatment, dx$staffrole, dx$specialty)
|
03f2e9ae2f864415aba86a441f4d29454d1d089f
|
519de33eed25dab10c39472442dc733cc63e4dd5
|
/plot4.R
|
fb077aabbe1a630ec4c8ca20db95540d291f8c9e
|
[] |
no_license
|
gdaliva/ExData_Plotting1
|
2dd45192f63cd567372b158e03207028a80e7716
|
bb16102037fd090270faa5dbb28f6e75b78e5e50
|
refs/heads/master
| 2021-08-07T19:13:30.497183
| 2017-11-08T20:11:56
| 2017-11-08T20:11:56
| 108,753,201
| 0
| 0
| null | 2017-10-29T16:58:44
| 2017-10-29T16:58:44
| null |
UTF-8
|
R
| false
| false
| 804
|
r
|
plot4.R
|
# Draw a 2x2 panel figure of household power-consumption series.
# mfcol fills column-major: panels go top-left, bottom-left, top-right,
# bottom-right.
# NOTE(review): relies on a pre-built data frame `data2` (with a DateTime
# column and the power/sub-metering columns) existing in the calling
# environment — it is not created in this script; confirm it is loaded first.
par(mfcol=c(2,2))
# Panel 1 (top-left): global active power over time.
plot(data2$DateTime, as.numeric(as.character(data2$Global_active_power)),type='l',ylab="Global Active Power", xlab="")
# Panel 2 (bottom-left): the three sub-metering series overlaid, with legend.
plot(data2$DateTime, as.numeric(as.character(data2$Sub_metering_1)),type='l', xlab="",ylab ="Energy sub metering")
lines(data2$DateTime, as.numeric(as.character(data2$Sub_metering_2)),type='l', col='red')
lines(data2$DateTime, data2$Sub_metering_3,type='l', col="blue")
legend('topright', c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1),col=c("black","red","blue"))
# Panel 3 (top-right): voltage.
plot(data2$DateTime, as.numeric(as.character(data2$Voltage)),type='l', ylab="Voltage",xlab="datetime" )
# Panel 4 (bottom-right): global reactive power.
plot(data2$DateTime, as.numeric(as.character(data2$Global_reactive_power)),type='l', ylab="Global_reactive_power",xlab="datetime" )
# Copy the on-screen device to plot4.png (480x480 default).
# NOTE(review): plotting directly into png() before drawing would avoid the
# dependency on an open screen device.
dev.copy(png, file = "plot4.png")
dev.off()
|
d1b337def185c03f412ac2863aeffe98a84bb997
|
9333d3d6013f8745635590278330e899feaca70e
|
/plot2.R
|
626502b3813f6f704ba8fd878e68555e3348a1b1
|
[] |
no_license
|
jasanglay/ExData_Plotting1
|
11cb8744a3ce19f908912f1a2cdf0bc332d28df5
|
6a73056fd41140625550a43c0be140e6177d372e
|
refs/heads/master
| 2021-03-19T15:31:41.519632
| 2018-02-27T14:59:13
| 2018-02-27T14:59:13
| 123,110,865
| 0
| 0
| null | 2018-02-27T10:09:54
| 2018-02-27T10:09:53
| null |
UTF-8
|
R
| false
| false
| 706
|
r
|
plot2.R
|
# Download and unzip the UCI household power consumption data, subset the
# two target days (1-2 Feb 2007), and plot Global_active_power over time
# to plot2.png.
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipfile <- "./Data.zip"
txtfile <- "./Data/household_power_consumption.txt"
download.file(fileUrl,zipfile)
unzip(zipfile,overwrite = TRUE,exdir = "./Data")
# NOTE(review): this dataset encodes missing values as "?"; without
# na.strings = "?" the numeric columns load as factors/characters, which is
# why as.numeric(as.character(...)) is needed below — confirm intended.
alldata <- read.table(txtfile,sep = ";",header = TRUE)
# Keep only rows for 1/2/2007 and 2/2/2007.
pdata <- rbind(alldata[as.character(alldata$Date) == "1/2/2007",],alldata[as.character(alldata$Date) == "2/2/2007",])
png(filename = "plot2.png")
# Combine Date + Time into POSIXlt timestamps for the x axis.
plot(strptime(paste(as.character(pdata$Date),as.character(pdata$Time),sep = " "),"%d/%m/%Y %H:%M:%S"),as.numeric(as.character(pdata$Global_active_power)),type = "l",col = "black",xlab = " ",ylab = "Global Active Power (kilowatts)")
dev.off()
|
e0f8018bf7ae8825257f43bc7352cd460ac5972d
|
c3352c6e4471c5e7c8682825b84dd8270b31507d
|
/R/mesh-plot.R
|
07437ed2fb5315381ad195e078035265000fd668
|
[] |
no_license
|
MilesMcBain/quadmesh
|
2f6f7a17276a55ffb74fc2fdfa39d12a41dd54b3
|
712dfa1d9c05d936222c0e5768866ed118690623
|
refs/heads/master
| 2020-04-07T04:44:14.081500
| 2018-11-18T09:57:59
| 2018-11-18T09:57:59
| 158,069,412
| 1
| 0
| null | 2018-11-18T09:49:24
| 2018-11-18T09:49:24
| null |
UTF-8
|
R
| false
| false
| 7,126
|
r
|
mesh-plot.R
|
# Rescale a numeric vector onto [0, 1] using its finite range.
# NAs are ignored when computing the range but propagate in the output.
scl <- function(x) {
  lims <- range(x, na.rm = TRUE)
  (x - lims[1]) / (lims[2] - lims[1])
}
#' Plot as a mesh
#'
#' Convert to a quadmesh and plot in efficient vectorized form using 'grid'.
#'
#' The mesh may be reprojected prior to plotting using the 'crs' argument to
#' define the target map projection in 'PROJ string' format. (There is no
#' "reproject" function for quadmesh, this is performed directly on the x-y
#' coordinates of the 'quadmesh' output). The 'colfun' argument is used to
#' generate colours which are mapped to the input object data as in 'image'.
#' @param x object to convert to mesh and plot
#' @param crs target map projection
#' @param colfun colour function to use, `viridis` is the default
#' @param add add to existing plot or start a new one
#' @param ... ignored
#'
#' @return nothing, used for the side-effect of creating or adding to a plot
#' @export
#'
#' @examples
#' mesh_plot(worldll)
#' ## crop otherwise out of bounds from PROJ
#' mesh_plot(raster::crop(worldll, raster::extent(-179, 179, -89, 89)), crs = "+proj=laea")
#' mesh_plot(worldll, crs = "+proj=moll")
#' prj <- "+proj=lcc +datum=WGS84 +lon_0=147 +lat_0=-40 +lat_1=-55 +lat_2=-20"
#' mesh_plot(etopo, crs = prj, add = FALSE, colfun = function(n = 20) grey(seq(0, 1, length = n)))
#' mesh_plot(worldll, crs = prj, add = TRUE)
# S3 generic: dispatches on class(x) to the mesh_plot.* methods below.
# Defaults declared here are inherited by methods that do not override them.
mesh_plot <- function(x, crs = NULL, colfun = NULL, add = FALSE, ...) {
  UseMethod("mesh_plot")
}
#' @name mesh_plot
#' @export
mesh_plot.BasicRaster <- function(x, crs = NULL, colfun = NULL, add = FALSE, ...) {
  # A multi-layer raster is plotted via its first layer only.
  # Use message() (not print()) so the status text goes to stderr and can
  # be suppressed.
  message("converting to single RasterLayer")
  # BUG FIX: the original called mesh_plot(x[[1]]) and silently dropped the
  # caller's crs/colfun/add/... arguments; forward them so the method is a
  # faithful delegate.
  mesh_plot(x[[1]], crs = crs, colfun = colfun, add = add, ...)
}
#' @name mesh_plot
#' @export
# Core method: convert the raster to a quadmesh, optionally reproject the
# quad vertices, and draw all quads in one vectorized grid.polygon() call.
mesh_plot.RasterLayer <- function(x, crs = NULL, colfun = NULL, add = FALSE, ...) {
# Quadmesh keeps NA cells so vertex/quad indexing stays aligned with values.
qm <- quadmesh::quadmesh(x, na.rm = FALSE)
if (is.null(colfun)) colfun <- viridis::viridis
# ib: 4 x nquad index matrix; vb rows 1:2 are vertex x/y coordinates.
ib <- qm$ib
xy <- t(qm$vb[1:2, ])
isLL <- raster::isLonLat(x)
if (!is.null(crs) ) {
# Reprojection goes via longlat: unproject first if the source is projected.
if (!isLL) {
xy <- proj4::project(xy, raster::projection(x), inv = TRUE)
}
if (!raster::isLonLat(crs)) xy <- proj4::project(xy, crs)
}
## we have to remove any infinite vertices
## as this affects the entire thing
bad <- !is.finite(xy[,1]) | !is.finite(xy[,2])
## but we must identify the bad xy in the index
# NOTE(review): `bad` flags vertices, but ib[,-which(bad)] drops quad
# *columns* at those positions rather than quads that reference a bad
# vertex — verify this is the intended behavior.
if (any(bad)) ib <- ib[,-which(bad)]
# Flatten quad vertex coordinates in index order (4 vertices per quad).
xx <- xy[c(ib),1]
yy <- xy[c(ib),2]
## an identifier grouping each set of 4 vertices into one polygon
id <- rep(seq_len(ncol(ib)), each = nrow(ib))
## we also have to deal with any values that are NA
## because they propagate to destroy the id
# Map cell values onto a 100-color ramp; NA values yield NA colors.
cols <- colfun(100)[scl(values(x)) * 99 + 1]
if (any(is.na(cols))) {
# Expand per-quad colors to per-vertex so vertices of NA quads drop too.
colsna <- rep(cols, each = nrow(ib))
bad2 <- is.na(colsna)
xx <- xx[!bad2]
yy <- yy[!bad2]
id <- id[!bad2]
cols <- cols[!is.na(cols)]
}
x <- list(x = xx, y = yy, id = id, col = cols)
if (!add) {
# Start a base plot so grid output can be aligned to its coordinates;
# aspect corrects for longitude shrink at the mean latitude when lon/lat.
graphics::plot.new()
graphics::plot.window(xlim = range(x$x, finite = TRUE), ylim = range(x$y, finite = TRUE), asp = if (isLL) 1/cos(mean(x$y, na.rm = TRUE) * pi/180) else 1 )
}
# Draw all polygons in one call inside the base plot's viewport.
vps <- gridBase::baseViewports()
grid::pushViewport(vps$inner, vps$figure, vps$plot)
grid::grid.polygon(x$x, x$y, x$id, gp = grid::gpar(col = NA, fill = x$col),
default.units = "native")
grid::popViewport(3)
invisible(NULL)
}
## still not working right, triangulating the centres works but triangulating the quads makes a mush
# # @name mesh_plot
# # @export
# # @importFrom grDevices grey
# # @examples
# # f = normalizePath("~/Git/rasterwise/extdata/get1index_64/test.nc")
# # library(stars)
# # x <- read_stars(f, curvilinear = c("lon", "lat"))
# # mesh_plot(x, qtile = 56)
# # # mesh_plot(x, colfun = palr::sstPal, qtile = 67)
# # # mesh_plot(x, colfun = palr::sstPal, qtile = 67, crs = "+proj=laea +lat_0=-30")
# mesh_plot.stars <- function(x, crs = NULL, colfun = NULL, add = FALSE, ..., qtile = FALSE) {
# if (is.null(colfun)) colfun <- function(n) grDevices::grey(seq(0, 1, length.out = n))
# ## whoa, we might not be curvilinear
# if (is.null(st_dimensions(x)$x$values) || is.null(st_dimensions(x)$x$values)) {
# stop("not a curvilinear stars object")
# }
# if (is.null(dim(st_dimensions(x)$x$values)) || is.null(dim(st_dimensions(x)$x$values))) {
# ## looks rectilinear
# coords <- as.matrix(expand.grid(st_dimensions(x)$x$values, st_dimensions(x)$y$values))
# } else {
# ## looks curvilinear
# coords <- cbind(as.vector(st_dimensions(x)$x$values),
# as.vector(st_dimensions(x)$y$values))
# } # else
# ## fail, we need helpers for affine grids to values ...
#
# if (!is.null(crs)) {
# coords <- rgdal::project(coords, crs) ## assume forwards
# }
#
# # tri1 <- RTriangle::triangulate(RTriangle::pslg(P = cbind(rep(seq_len(nrow(x)), ncol(x)),
# # rep(seq_len(ncol(x)), each = nrow(x)))))
#
# tri <- list(T = t(triangulate_quads(quadmesh(raster::raster(extent(0, ncol(x) - 1, 0, nrow(x)-1), nrows = nrow(x)-1,
# ncols = ncol(x)-1))$ib)))
# XY <- coords[t(tri$T), ]
# #XY <- coords[t(tri), ]
# ID <- rep(1:nrow(tri$T), each = 3)
# one_slice <- function(x) {
# xx <- x[[1]]
# ## might need de-uniting here
# dm <- dim(xx)
# if (length(dm) > 2) xx <- xx[,,1, drop = TRUE]
# xx
# }
# ## watch out here, we need a triangle vertex to get the colours in the right order
# vals <- as.vector(one_slice(x))
# if (qtile > 0) {
# if (isTRUE(qtile)) qtile <- 12
# vals <- findInterval(vals, quantile(vals, prob = seq(0, 1, length = qtile)))
# }
# COL <- colfun(27)[scales::rescale(vals[tri$T[,1]], c(1, 12))]
# isLL <- !is.null(crs) ## assume it is
# if (!add ) {
# plot(cbind(range(coords[,1]), range(coords[,2])), type = "n", asp = if (isLL) 1/cos(mean(coords[,2]) * pi/180) else 1,
# xlab = "", ylab = "")
# }
# vps <- gridBase::baseViewports()
# grid::pushViewport(vps$inner, vps$figure, vps$plot)
# grid::grid.polygon(XY[,1], XY[,2], ID, gp = grid::gpar(col = NA, fill = COL),
# default.units = "native")
# grid::popViewport(3)
# #mm <- rnaturalearth::ne_countries(scale = "medium", returnclass="sp")
# #if (!is.null(crs)) mm <- sp::spTransform(mm, crs)
# #sp::plot(mm, add = TRUE, border = 'firebrick')
#
# }
# coord_plot <- function(x, coords) {
# ## triangulate the index (rather than quadmesh)
#
# tri <- RTriangle::triangulate(RTriangle::pslg(P = cbind(raster::colFromCell(x, seq_len(raster::ncell(x))),
# raster::rowFromCell(x, seq_len(raster::ncell(x))))))
#
#
# o <- structure(list(vb = t(cbind(coords, raster::values(x), 1)),
# it = t(tri$T),
# primitivetype = "triangle",
# material = list(),
# normals = NULL,
# texcoords = NULL), class = c("mesh3d", "shape3d"))
#
# rgl::open3d()
# cols <- viridis::viridis(56)[scales::rescale(o$vb[3,o$it], c(1, 56))]
#
# #print(str(cols))
# rgl::shade3d(o, col = cols)
# rgl::aspect3d(1, 1, .0001)
#
# invisible(o)
# }
|
a71c1d08cf4c87feb1aafa974c5f5d2d1c4f196c
|
0e84ee8922b96bd526883e3b7dcab258c278d84e
|
/R/data_consumption_income.R
|
a647f93b2f895f56a09b1d9ba2ef246bbceef1f6
|
[] |
no_license
|
zhaoxue-xmu/RDA
|
8f9f68620d9c1393c66e0efd1c9ccda7e1008ad6
|
ea8ed94680c1964f491bbbe17e22c9a52659f39c
|
refs/heads/master
| 2021-01-17T16:00:27.495392
| 2017-03-24T15:42:06
| 2017-03-24T15:42:06
| 82,945,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
data_consumption_income.R
|
#'Dataset of income and consumption in chapter 12
#'
#'A dataset containing year, consumption and income (3 variables) for 36 observations
#'
#'@format a data frame with 36 rows and 3 variables
#'\describe{
#' \item{Y}{year}
#' \item{C}{consumption}
#' \item{I}{income}
#'}
"consumption_income"
|
a24e1d0c083503c9a7628dc3cea570a412888dc4
|
b735abf24f9d9f86f2b00041b860674a888ca69f
|
/model/predict_returns.R
|
441b4db37e19f84949d458ff76ed589229fd8f4f
|
[] |
no_license
|
hanlin891016/trade-the-tweet
|
1a7c30afd2cb57dd95097f34ecdcbc629cc94a85
|
9beebd74530f049220da02a2fc81c6b5e2ffbbd0
|
refs/heads/master
| 2021-06-03T03:56:42.057528
| 2016-08-20T03:42:04
| 2016-08-20T03:42:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 678
|
r
|
predict_returns.R
|
predict_returns <- function(Y, returns, y) {
  # Given a historical matrix of word frequencies and historical returns,
  # computes the predicted return.
  #
  # Args:
  #   Y: weighted term-document matrix
  #   returns: the matrix of historical returns
  #   y: the vector of term frequencies we'd like to predict
  #
  # Returns:
  #   A vector of predicted returns
  source('solve_Y.R')  # provides learn_UW()
  library(tm)
  # hard-coded hyper parameters #
  d <- 10
  a <- .4
  lambda <- a
  mu <- 1 - a
  rho <- 10
  ###############################
  # BUG FIX: the original called learn_UW(trainR, trainY, ...), reading
  # global variables and ignoring this function's own arguments entirely;
  # pass the supplied `returns` and `Y` so the function is self-contained.
  model.output <- learn_UW(returns, Y, d, lambda, mu, rho)
  predicted.return <- model.output$U %*% model.output$W %*% y
  return(predicted.return)
}
|
7c149f195e310396aea0f3a1af648d352c537710
|
8cf4416f7e4c9016d85a616aaae3fbf0d48cf9a4
|
/r/Old/Sparrow20090803.r
|
e6b72021ed576380c2c3243ae87a2eec57b15b5a
|
[] |
no_license
|
willbmisled/MRB1
|
35f9bb4ef9279f55b1348b8b3fbda6543ddbc70d
|
af39fb697255df15ae41131d76c6fcf552a55a70
|
refs/heads/master
| 2020-07-20T08:43:00.460675
| 2017-06-14T14:09:38
| 2017-06-14T14:09:38
| 94,337,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,229
|
r
|
Sparrow20090803.r
|
rm(list=ls(all=T)) #clear workspace
# Read data-****Make Sure the Path Is Correct****
require(RODBC) #Package RODBC must be installed
con <- odbcConnectAccess("//AA.AD.EPA.GOV/ORD/NAR/USERS/EC2/wmilstea/Net MyDocuments/EPA/Data/Sparrow/MRB1Sparrow/MRB1Sparrow.mdb")
get <- sqlQuery(con, "
SELECT MRB1_WBIDLakes.WB_ID, tblJoinNLAID_WBID.NLA_ID, Round([AlbersAreaM],0) AS area,
Round([AlbersAreaM]*[DEPTHMAX]/3,0) AS ConeVolume, tblGISLakeVolume.GISVol AS GISVolume,
tblSparrowLoads.OutflowM3_yr AS Outflow,
tblNLA_AnalysisTeamData20090421.DEPTHMAX, [Area]/[ConeVolume] AS z_Cone, [Area]/[GISVolume] AS z_GIS,
[OutflowM3_yr]/[ConeVolume] AS rho_Cone, [OutflowM3_yr]/[GISVolume] AS rho_GIS, 1/[rho_Cone] AS tau_Cone,
1/[rho_GIS] AS tau_GIS, tblNLA_WaterQualityData.NTL AS TN, [N_Load_kg_yr]*1000000/[AlbersAreaM] AS LN,
1000*[N_Conc_Load_mg_l] AS CN, tblNLA_WaterQualityData.PTL AS TP,
[P_Load_kg_yr]*1000000/[AlbersAreaM] AS LP, 1000*[P_Conc_Load_mg_l] AS CP,
1000*[P_Conc_Outflow_mg_l] AS CP_out, tblNLA_WaterQualityData.CHLA, tblNLA_WaterQualityData.SECMEAN
FROM ((((MRB1_WBIDLakes INNER JOIN tblJoinNLAID_WBID ON MRB1_WBIDLakes.WB_ID = tblJoinNLAID_WBID.WB_ID) INNER JOIN tblNLA_AnalysisTeamData20090421 ON tblJoinNLAID_WBID.NLA_ID = tblNLA_AnalysisTeamData20090421.SITEID) INNER JOIN tblSparrowLoads ON MRB1_WBIDLakes.WB_ID = tblSparrowLoads.WB_ID) INNER JOIN tblNLA_WaterQualityData ON (tblNLA_AnalysisTeamData20090421.VISITNO = tblNLA_WaterQualityData.VISIT_NO) AND (tblNLA_AnalysisTeamData20090421.SITEID = tblNLA_WaterQualityData.SITE_ID)) INNER JOIN tblGISLakeVolume ON MRB1_WBIDLakes.WB_ID = tblGISLakeVolume.WBID
WHERE (((tblSparrowLoads.N_Percent)=1) AND ((tblNLA_AnalysisTeamData20090421.VISITNO)=1))
")
MRB1<-data.frame(get)
close(con)
attach(MRB1)
logTP=log10(TP)
logCP=log10(CP)
logCP_out=log10(CP_out)
logTN=log10(TN)
logCN=log10(CN)
logCHLA=log10(CHLA)
logSECMEAN=log10(SECMEAN)
NPRatio=TN/TP
plot.new()
par(mfrow=c(2,2))
#Compare Observed NLA TN with Sparrow N concentration
test=summary(lm(logTN~logCN))
plot(logCN, logTN, xlab="Sparrow Observed Log Nitrogen Load Concentration", ylab="NLA Measured Log Total Nitrogen")
title(main = "NLA TN vs. Sparrow CN",
sub=paste('Without Volume Estimate; r-squared=',round(test$r.squared,4)))
abline(test, lwd=2)
#Estimate TN from Sparrow CN with Conic Volume
#Welch & Jacoby Fig 7.1 P.180-Nitrogen Load Concentration logPN=log10(CN)/(1+(1.17*tau_Cone**.45))
#Estimate parameters
estimate <- nls(logTN ~ log10(CN)/(1+(beta1*tau_Cone**beta2)),
start=list(beta1 = 1.17, beta2 = .45), trace=T)
keep=summary(estimate)
keep
#Add parameter estimates to model
One=keep$coefficients[1,1] #beta1
Two=keep$coefficients[2,1] #beta2
#Predict log Total Nitrogen (LogPN) from Sparrow Concentration
logPN=log10(CN)/(1+(One*tau_Cone**Two))
test=summary(lm(logTN~logPN))
plot(logPN, logTN, xlab="Sparrow Predicted Log Total Nitrogen", ylab="NLA Measured Log Total Nitrogen")
abline(lm(logTN~logPN), lwd=2)
title(main = paste("log10(CN)/(1+(",round(One,2),"*HRT**", round(Two,2),"))"),
sub=paste('With Conic Volume Estimate; r-squared=',round(test$r.squared,4)))
#Estimate TN from Sparrow CN with GIS Volume
#Welch & Jacoby Fig 7.1 P.180-Nitrogen Load Concentration logPN=log10(CN)/(1+(1.17*tau_GIS**.45))
#Estimate parameters
estimate <- nls(logTN ~ log10(CN)/(1+(beta1*tau_GIS**beta2)),
start=list(beta1 = 1.17, beta2 = .45), trace=T)
keep=summary(estimate)
keep
#Add parameter estimates to model
One=keep$coefficients[1,1] #beta1
Two=keep$coefficients[2,1] #beta2
#Predict log Total Nitrogen (LogPN) from Sparrow Concentration
logPN=log10(CN)/(1+(One*tau_GIS**Two))
test=summary(lm(logTN~logPN))
plot(logPN, logTN, xlab="Sparrow Predicted Log Total Nitrogen", ylab="NLA Measured Log Total Nitrogen")
abline(lm(logTN~logPN), lwd=2)
title(main = paste("log10(CN)/(1+(",round(One,2),"*HRT**", round(Two,2),"))"),
sub=paste('With GIS Volume Estimate; r-squared=',round(test$r.squared,4)))
plot.new()
par(mfrow=c(2,2))
#Compare Observed NLA TP with Sparrow P outflow concentration
test=summary(lm(logTP~logCP_out), lwd=2)
plot(logCP_out, logTP, xlab="Sparrow Observed Log Phosporus Outflow Concentration", ylab="NLA Measured Log Total Phosporus")
title(main = "NLA TP vs. Sparrow CP_out",
sub=paste('Without Volume Estimate; r-squared=',round(test$r.squared,4)))
abline(test, lwd=2)
#Estimate TP from Sparrow CP_out with Conic Volume
#Welch & Jacoby Fig 7.1 P.180-Phosporus Load Concentration logPP=log10(CP_out)/(1+(1.17*tau_Cone**.45))
#Estimate parameters
estimate <- nls(logTP ~ log10(CP_out)/(1+(beta1*tau_Cone**beta2)),
start=list(beta1 = 1.17, beta2 = .45), trace=T)
keep=summary(estimate)
keep
#Add parameter estimates to model
One=keep$coefficients[1,1] #beta1
Two=keep$coefficients[2,1] #beta2
#Predict log Total Phosporus (LogPP) from Sparrow Concentration
logPP=log10(CP_out)/(1+(One*tau_Cone**Two))
test=summary(lm(logTP~logPP))
plot(logPP, logTP, xlab="Sparrow Predicted Log Total Phosporus", ylab="NLA Measured Log Total Phosporus")
abline(lm(logTP~logPP), lwd=2)
title(main = paste("log10(CP_out)/(1+(",round(One,2),"*HRT**", round(Two,2),"))"),
sub=paste('With Conic Volume Estimate; r-squared=',round(test$r.squared,4)))
#Estimate TP from Sparrow CP_out with GIS Volume
#Welch & Jacoby Fig 7.1 P.180-Phosporus Load Concentration logPP=log10(CP_out)/(1+(1.17*tau_GIS**.45))
#Estimate parameters
estimate <- nls(logTP ~ log10(CP_out)/(1+(beta1*tau_GIS**beta2)),
start=list(beta1 = 1.17, beta2 = .45), trace=T)
keep=summary(estimate)
keep
#Add parameter estimates to model
One=keep$coefficients[1,1] #beta1
Two=keep$coefficients[2,1] #beta2
#Predict log Total Phosporus (LogPP) from Sparrow Concentration
logPP=log10(CP_out)/(1+(One*tau_GIS**Two))
test=summary(lm(logTP~logPP))
plot(logPP, logTP, xlab="Sparrow Predicted Log Total Phosporus", ylab="NLA Measured Log Total Phosporus")
abline(lm(logTP~logPP), lwd=2)
title(main = paste("log10(CP_out)/(1+(",round(One,2),"*HRT**", round(Two,2),"))"),
sub=paste('With GIS Volume Estimate; r-squared=',round(test$r.squared,4)))
#From Ken Reckhow Eutromod
logTP_mgl=log10(TP/1000)
TP_mgl=TP/1000
Pin=CP_out/1000 #CP_out=outflow P conc. in ug/l-convert to mg/l
hrt=tau_GIS #hydraulic residence time
z=z_GIS #mean depth
c1 = 12.26; c2 = -.55; c3=-.16; c4=.5 #coefficients from Eutromod
spam=data.frame(logTP_mgl,TP_mgl,Pin,hrt,z)
#test Eutromod coefficients with Sparrow data
logPP=log10(Pin/(1+(c1*(hrt**c2)*(z**c3)*(Pin**c4))))
test=summary(lm(logTP_mgl~logPP))
test
estimate <- nlrob(logTP_mgl ~ log10(Pin/(1+(c1*(hrt**c2)*(z**c3)*(Pin**c4)))),
start=list(c1 = 12.26, c2 = -.55, c3=-.16,c4=.5),
data=spam,algorithm = "default", trace=T)
#Add parameter estimates to model
c1=estimate$coefficients[1] #beta1
c2=estimate$coefficients[2] #beta2
c3=estimate$coefficients[3] #beta2
c4=estimate$coefficients[4] #beta2
#Predict log Total Phosporus (LogPP) from Sparrow Concentration
logPP=(Pin/(1+(c1*(hrt**c2)*(z**c3)*(Pin**c4))))
testNLRob=summary(lm(logTP_mgl~logPP))
testNLRob
|
b04335b96a052245f845d9a3c3c2d70c3fd2adbf
|
29eb088d0563d95616a53ba6984882c70671fb7c
|
/man/allDags.Rd
|
e22063095344e6c9a979576360520ca871e65749
|
[] |
no_license
|
SharonLutz/DisentangleSNP
|
6936f3a23d494db342fd48ea7f716071867c5dea
|
95f911e86760f7c9973587bd110343406423adfb
|
refs/heads/master
| 2021-09-14T20:06:00.996421
| 2018-05-18T16:04:03
| 2018-05-18T16:04:03
| 103,295,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
rd
|
allDags.Rd
|
\name{allDags}
\alias{allDags}
\docType{data}
\title{
allDags
}
\description{
A logical matrix with 25 rows and 9 columns (see the Format section below).
}
\usage{data("allDags")}
\format{
The format is:
logi [1:25, 1:9] FALSE FALSE FALSE FALSE FALSE FALSE ...
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\keyword{datasets}
|
812701adf10370e6adbe1f26501aef85a6cd7adc
|
3ea8066910a8b32d9a4b3204e720a45f06405efb
|
/R/LDTFPsurvival.R
|
f9a27d0092eb63ba53b8e9e2cde15bed030a5af5
|
[] |
no_license
|
cran/DPpackage
|
ae76a06a3f35dc88d1f18476e2470473a67e2277
|
33af05b258c49ae4826655dd196d0ecbf5a008b1
|
refs/heads/master
| 2020-05-16T23:58:39.158728
| 2018-01-06T07:39:08
| 2018-01-06T07:39:08
| 17,678,687
| 3
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,145
|
r
|
LDTFPsurvival.R
|
### LDTFPsurvival.R
### Fit a linear dependent TF process for survival data.
###
### Copyright: Alejandro Jara, 2011-2012.
### Last modification: 11-11-2011.
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or (at
### your option) any later version.
###
### This program is distributed in the hope that it will be useful, but
### WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
###
### The authors' contact information:
###
### Alejandro Jara
### Department of Statistics
### Facultad de Matematicas
### Pontificia Universidad Catolica de Chile
### Casilla 306, Correo 22
### Santiago
### Chile
### Voice: +56-2-3544506 URL : http://www.mat.puc.cl/~ajara
### Fax : +56-2-3547729 Email: atjara@uc.cl
###
"LDTFPsurvival" <-
function(y,x,xtf,prediction,prior,mcmc,state,status,grid=seq(0.01,60,len=100),compute.band=FALSE,type.band="PD",
data=sys.frame(sys.parent()),na.action=na.fail,work.dir=NULL)
UseMethod("LDTFPsurvival")
"LDTFPsurvival.default" <-
function(y,
x,
xtf,
prediction,
prior,
mcmc,
state,
status,
grid=seq(0.01,60,len=100),
compute.band=FALSE,
type.band="PD",
data=sys.frame(sys.parent()),
na.action=na.fail,
work.dir=NULL)
{
#########################################################################################
# call parameters
#########################################################################################
m <- mcall <- cl <- match.call()
#########################################################################################
# data structure
#########################################################################################
nrec <- length(y[,1])
xce <- x
pce <- ncol(xce)
ptf <- ncol(xtf)
type <- rep(2,nrec)
type[y[,1]==-999] <- 1
type[y[,2]==-999] <- 3
type[y[,1]==y[,2]] <- 4
#########################################################################################
# change working directory (if requested..)
#########################################################################################
if(!is.null(work.dir))
{
cat("\n Changing working directory to ",work.dir,"\n")
old.dir <- getwd() # by default work in current working directory
setwd(work.dir)
}
#########################################################################################
# prediction
#########################################################################################
ngrid <- length(grid)
xcepred <- prediction$xdenpred
xtfpred <- prediction$xtfdenpred
npredden <- nrow(xcepred)
xcepredm <- prediction$xmedpred
xtfpredm <- prediction$xtfmedpred
npredmed <- nrow(xcepredm)
quans <- prediction$quans
if(is.null(quans)) quans <- c(0.03,0.50,0.97)
cband <- 0
if(compute.band)
{
cband <- 1
}
tband <- 0
if(type.band=="HPD")
{
tband <- 1
}
#########################################################################################
# MLE analysis
#########################################################################################
ymat2 <- y
ymat2[ymat2[,1]==-999,1] <- 0
ymat2[ymat2[,2]==-999,2] <- 10^10
ll <- ymat2[,1]
rr <- ymat2[,2]
#library(survival)
fit0 <- survreg(formula = Surv(time=ll,time2=rr,type="interval2") ~ xce-1, dist = "lognormal")
betace <- coefficients(fit0)
cgkvar <- vcov(fit0)[1:pce,1:pce]
sigma2 <- fit0$scale
#########################################################################################
# Prior information
#########################################################################################
maxm <- prior$maxm
ntprob <- 0
ntlr <- 0
for(i in 1:maxm)
{
ntprob <- ntprob + 2**i
ntlr <- ntlr +2**(i-1)
}
if(is.null(prior$a0))
{
a0b0 <- c(-1,-1)
alpha <- prior$alpha
}
else
{
a0b0 <- c(prior$a0,prior$b0)
alpha <- 1
}
betacepm <- prior$mub
precce <- solve(prior$Sb)
if(is.null(prior$tau1))
{
tau <- c(-1,-1)
}
else
{
tau <- c(prior$tau1,prior$tau2)
}
tfprior <- prior$tfprior
if(is.null(tfprior))tfprior <- 1
if(tfprior==1)
{
gprior <- 2*nrec*solve(t(xtf)%*%xtf)
}
if(tfprior==2)
{
gprior <- 2*(1/nrec)*t(xtf)%*%xtf
}
if(tfprior==3)
{
gprior <- diag(1000,ptf)
}
#########################################################################################
# mcmc specification
#########################################################################################
mcmcvec <- c(mcmc$nburn,mcmc$nskip,mcmc$ndisplay,cband,tband)
nsave <- mcmc$nsave
#########################################################################################
# output
#########################################################################################
cpo <- matrix(0,nrow=nrec,ncol=2)
densm <- matrix(0,nrow=npredden,ncol=ngrid)
densl <- matrix(0,nrow=npredden,ncol=ngrid)
densu <- matrix(0,nrow=npredden,ncol=ngrid)
survmm <- matrix(0,nrow=npredden,ncol=ngrid)
survml <- matrix(0,nrow=npredden,ncol=ngrid)
survmu <- matrix(0,nrow=npredden,ncol=ngrid)
qmm <- matrix(0,nrow=npredmed,3)
qml <- matrix(0,nrow=npredmed,3)
qmu <- matrix(0,nrow=npredmed,3)
thetasave <- matrix(0, nrow=nsave, ncol=(pce+2))
randsave <- matrix(0, nrow=nsave, ncol=((ntlr-1)*ptf))
#########################################################################################
# parameters depending on status
#########################################################################################
if(status)
{
z <- rep(0,nrec)
for(i in 1:nrec)
{
if(type[i]==1) z[i] <- log(y[i,2]/2)
if(type[i]==2) z[i] <- log((y[i,1]+y[i,2])/2)
if(type[i]==3) z[i] <- log(y[i,1]+1)
if(type[i]==4) z[i] <- log(y[i,1])
}
betatf <- matrix(0,nrow=ntlr,ncol=ptf)
}
else
{
z <- state$z
alpha <- state$alpha
betace <- state$betace
sigma2 <- state$sigma2
betatf <- state$betatf
}
#########################################################################################
# working space
#########################################################################################
seed <- c(sample(1:29000,1),sample(1:29000,1))
iflag <- rep(0,pce)
iflagtf <- rep(0,ptf)
nobsbc <- rep(0,ntprob)
obsbc <- matrix(0,nrow=ntprob,ncol=nrec)
c0 <- matrix(0,nrow=ptf,ncol=ptf)
workm1 <- matrix(0,nrow=pce,ncol=pce)
workvh1 <- rep(0,(pce*(pce+1)/2))
workv1 <- rep(0,pce)
worksam <- rep(0,nsave)
worksam2 <- matrix(0,nrow=nsave,ncol=ngrid)
worksam3 <- matrix(0,nrow=nsave,ncol=npredmed)
fs <- rep(0,ngrid)
workm2 <- matrix(0,nrow=ptf,ncol=ptf)
workvh2 <- rep(0,(ptf*(ptf+1)/2))
workv2 <- rep(0,ptf)
workv3 <- rep(0,ptf)
workv4 <- rep(0,ptf)
k <- rep(0,maxm)
prob <- rep(0,2**maxm)
probc <- rep(0,2**maxm)
#########################################################################################
# calling the fortran code
#########################################################################################
foo <- .Fortran("ldtfpsurvival",
nrec = as.integer(nrec),
ptf = as.integer(ptf),
pce = as.integer(pce),
interind = as.integer(type),
xtf = as.double(xtf),
xce = as.double(xce),
y = as.double(y),
ngrid = as.integer(ngrid),
npredden = as.integer(npredden),
npredmed = as.integer(npredmed),
grid = as.double(grid),
xtfpred = as.double(xtfpred),
xcepred = as.double(xcepred),
xtfpredm = as.double(xtfpredm),
xcepredm = as.double(xcepredm),
quans = as.double(quans),
maxm = as.integer(maxm),
ntprob = as.integer(ntprob),
ntlr = as.integer(ntlr),
a0b0 = as.double(a0b0),
betacepm = as.double(betacepm),
gprior = as.double(gprior),
precce = as.double(precce),
tau = as.double(tau),
alpha = as.double(alpha),
betace = as.double(betace),
betatf = as.double(betatf),
sigma2 = as.double(sigma2),
z = as.double(z),
mcmc = as.integer(mcmcvec),
nsave = as.integer(nsave),
seed = as.integer(seed),
cpo = as.double(cpo),
densm = as.double(densm),
densl = as.double(densl),
densu = as.double(densu),
qmm = as.double(qmm),
qml = as.double(qml),
qmu = as.double(qmu),
survmm = as.double(survmm),
survml = as.double(survml),
survmu = as.double(survmu),
thetasave = as.double(thetasave),
randsave = as.double(randsave),
iflag = as.integer(iflag),
iflagtf = as.integer(iflagtf),
nobsbc = as.integer(nobsbc),
obsbc = as.integer(obsbc),
c0 = as.double(c0),
workm1 = as.double(workm1),
workvh1 = as.double(workvh1),
workv1 = as.double(workv1),
worksam = as.double(worksam),
worksam2 = as.double(worksam2),
worksam3 = as.double(worksam3),
fs = as.double(fs),
workm2 = as.double(workm2),
workvh2 = as.double(workvh2),
workv2 = as.double(workv2),
workv3 = as.double(workv3),
workv4 = as.double(workv4),
k = as.integer(k),
prob = as.double(prob),
probc = as.double(probc),
PACKAGE = "DPpackage")
#########################################################################################
# save state
#########################################################################################
if(!is.null(work.dir))
{
cat("\n\n Changing working directory back to ",old.dir,"\n")
setwd(old.dir)
}
model.name <- "Linear dependent TF process model for time-to-event data"
cpom <- matrix(foo$cpo,nrow=nrec,ncol=2)
cpo <- cpom[,1]
fso <- cpom[,2]
densm <- matrix(foo$densm,nrow=npredden,ncol=ngrid)
densl <- NULL
densu <- NULL
qmm <- matrix(foo$qmm,nrow=npredmed,3)
qml <- NULL
qmu <- NULL
survmm <- matrix(foo$survmm,nrow=npredden,ncol=ngrid)
survml <- NULL
survmu <- NULL
if(compute.band)
{
densl <- matrix(foo$densl,nrow=npredden,ncol=ngrid)
densu <- matrix(foo$densu,nrow=npredden,ncol=ngrid)
qml <- matrix(foo$qml,nrow=npredmed,3)
qmu <- matrix(foo$qmu,nrow=npredmed,3)
survml <- matrix(foo$survml,nrow=npredden,ncol=ngrid)
survmu <- matrix(foo$survmu,nrow=npredden,ncol=ngrid)
}
thetasave <- matrix(foo$thetasave,nrow=mcmc$nsave, ncol=(pce+2))
randsave <- matrix(foo$randsave,nrow=mcmc$nsave, ncol=((ntlr-1)*ptf))
colnames(thetasave) <- c(colnames(xce),"sigma2","alpha")
coeff <- apply(thetasave,2,mean)
colnames(randsave) <- rep(colnames(xtf),(ntlr-1))
state <- list(alpha=foo$alpha,
betace=foo$betace,
sigma2=foo$sigma2,
betatf=matrix(foo$betatf,nrow=ntlr,ncol=ptf),
z=foo$z,
nobsbc=foo$nobsbc,
obsbc=matrix(foo$obsbc,nrow=ntprob,ncol=nrec))
save.state <- list(thetasave=thetasave,
randsave=randsave)
z <- list(modelname=model.name,
coefficients=coeff,
call=cl,
compute.band=compute.band,
cpo=cpo,
fso=fso,
prior=prior,
mcmc=mcmc,
state=state,
save.state=save.state,
nrec=foo$nrec,
pce=foo$pce,
ptf=foo$ptf,
y=y,
x=xce,
xtf=xtf,
ngrid=ngrid,
npredden=npredden,
npredmed=npredmed,
grid=grid,
densm=densm,
densl=densl,
densu=densu,
qmm=qmm,
qml=qml,
qmu=qmu,
survmm=survmm,
survml=survml,
survmu=survmu)
cat("\n\n")
class(z) <- c("LDTFPsurvival")
z
}
###
### Tools for LDTFPsurvival: print, summary, plot
###
### Copyright: Alejandro Jara, 2011
### Last modification: 11-11-2011.
"print.LDTFPsurvival" <- function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\n",x$modelname,"\n\nCall:\n", sep = "")
print(x$call)
cat("\n")
cat("Posterior Predictive Distributions (log):\n")
print.default(format(summary(log(x$cpo)), digits = digits), print.gap = 2,
quote = FALSE)
cat("\nPosterior Inference of Parameters:\n")
print.default(format(x$coefficients, digits = digits), print.gap = 2,
quote = FALSE)
cat("\nNumber of Observations:",x$nrec)
cat("\nNumber of Predictors for the Median:",x$pce)
cat("\nNumber of Predictors for the Tailfree Probabilities:",x$ptf,"\n")
cat("\n\n")
invisible(x)
}
"plot.LDTFPsurvival"<-function(x, hpd=TRUE, ask=TRUE, nfigr=2, nfigc=2, param=NULL, col="#bdfcc9", ...)
{
fancydensplot1<-function(x, hpd=TRUE, npts=200, xlab="", ylab="", main="",col="#bdfcc9", ...)
# Author: AJV, 2007
#
{
dens <- density(x,n=npts)
densx <- dens$x
densy <- dens$y
meanvar <- mean(x)
densx1 <- max(densx[densx<=meanvar])
densx2 <- min(densx[densx>=meanvar])
densy1 <- densy[densx==densx1]
densy2 <- densy[densx==densx2]
ymean <- densy1 + ((densy2-densy1)/(densx2-densx1))*(meanvar-densx1)
if(hpd==TRUE)
{
alpha<-0.05
alow<-rep(0,2)
aupp<-rep(0,2)
n<-length(x)
a<-.Fortran("hpd",n=as.integer(n),alpha=as.double(alpha),x=as.double(x),
alow=as.double(alow),aupp=as.double(aupp),PACKAGE="DPpackage")
xlinf<-a$alow[1]
xlsup<-a$aupp[1]
}
else
{
xlinf <- quantile(x,0.025)
xlsup <- quantile(x,0.975)
}
densx1 <- max(densx[densx<=xlinf])
densx2 <- min(densx[densx>=xlinf])
densy1 <- densy[densx==densx1]
densy2 <- densy[densx==densx2]
ylinf <- densy1 + ((densy2-densy1)/(densx2-densx1))*(xlinf-densx1)
densx1 <- max(densx[densx<=xlsup])
densx2 <- min(densx[densx>=xlsup])
densy1 <- densy[densx==densx1]
densy2 <- densy[densx==densx2]
ylsup <- densy1 + ((densy2-densy1)/(densx2-densx1))*(xlsup-densx1)
plot(0.,0.,xlim = c(min(densx), max(densx)), ylim = c(min(densy), max(densy)),
axes = F,type = "n" , xlab=xlab, ylab=ylab, main=main, cex=1.2)
xpol<-c(xlinf,xlinf,densx[densx>=xlinf & densx <=xlsup],xlsup,xlsup)
ypol<-c(0,ylinf,densy[densx>=xlinf & densx <=xlsup] ,ylsup,0)
polygon(xpol, ypol, border = FALSE,col=col)
lines(c(min(densx), max(densx)),c(0,0),lwd=1.2)
segments(min(densx),0, min(densx),max(densy),lwd=1.2)
lines(densx,densy,lwd=1.2)
segments(meanvar, 0, meanvar, ymean,lwd=1.2)
segments(xlinf, 0, xlinf, ylinf,lwd=1.2)
segments(xlsup, 0, xlsup, ylsup,lwd=1.2)
axis(1., at = round(c(xlinf, meanvar,xlsup), 2.), labels = T,pos = 0.)
axis(1., at = round(seq(min(densx),max(densx),length=15), 2.), labels = F,pos = 0.)
axis(2., at = round(seq(0,max(densy),length=5), 2.), labels = T,pos =min(densx))
}
if(is(x, "LDTFPsurvival"))
{
if(is.null(param))
{
coef.p <- x$coefficients
n <- length(coef.p)
pnames <- names(coef.p)
par(ask = ask)
layout(matrix(seq(1,nfigr*nfigc,1), nrow=nfigr , ncol=nfigc ,byrow=TRUE))
for(i in 1:length(coef.p))
{
title1 <- paste("Trace of",pnames[i],sep=" ")
title2 <- paste("Density of",pnames[i],sep=" ")
plot(ts(x$save.state$thetasave[,i]),main=title1,xlab="MCMC scan",ylab=" ")
fancydensplot1(x$save.state$thetasave[,i],hpd=hpd,main=title2,xlab="values", ylab="density",col=col)
}
for(i in 1:x$npredden)
{
if(x$compute.band)
{
title1 <- paste("Survival Prediction #",i,sep=" ")
plot(x$grid,x$survmu[i,],main=title1,lty=2,type='l',lwd=2,xlab="time",ylab="survival",ylim=c(0,1))
lines(x$grid,x$survml[i,],lty=2,lwd=2)
lines(x$grid,x$survmm[i,],lty=1,lwd=3)
}
else
{
title1 <- paste("Survival Prediction #",i,sep=" ")
plot(x$grid,x$survmm[i,],main=title1,lty=1,type='l',lwd=2,xlab="time",ylab="survival",ylim=c(0,1))
}
}
}
else
{
coef.p <- x$coefficients
n <- length(coef.p)
pnames <- names(coef.p)
poss <- 0
for(i in 1:n)
{
if(pnames[i]==param)poss=i
}
if(poss==0 && param !="predictive")
{
stop("This parameter is not present in the original model.\n")
}
par(ask = ask)
layout(matrix(seq(1,nfigr*nfigc,1), nrow=nfigr, ncol=nfigc, byrow = TRUE))
if(param !="predictive")
{
title1 <- paste("Trace of",pnames[poss],sep=" ")
title2 <- paste("Density of",pnames[poss],sep=" ")
plot(ts(x$save.state$thetasave[,poss]),main=title1,xlab="MCMC scan",ylab=" ")
fancydensplot1(x$save.state$thetasave[,poss],hpd=hpd,main=title2,xlab="values", ylab="density",col=col)
}
else
{
for(i in 1:x$npredden)
{
if(x$compute.band)
{
title1 <- paste("Survival Prediction #",i,sep=" ")
plot(x$grid,x$survmu[i,],main=title1,lty=2,type='l',lwd=2,xlab="time",ylab="survival",ylim=c(0,1))
lines(x$grid,x$survml[i,],lty=2,lwd=2)
lines(x$grid,x$survmm[i,],lty=1,lwd=3)
}
else
{
title1 <- paste("Survival Prediction #",i,sep=" ")
plot(x$grid,x$survmm[i,],main=title1,lty=1,type='l',lwd=2,xlab="time",ylab="survival",ylim=c(0,1))
}
}
}
}
}
}
"summary.LDTFPsurvival" <- function(object, hpd=TRUE, ...)
{
stde<-function(x)
{
n<-length(x)
return(sd(x)/sqrt(n))
}
hpdf<-function(x)
{
alpha<-0.05
vec<-x
n<-length(x)
alow<-rep(0,2)
aupp<-rep(0,2)
a<-.Fortran("hpd",n=as.integer(n),alpha=as.double(alpha),x=as.double(vec),
alow=as.double(alow),aupp=as.double(aupp),PACKAGE="DPpackage")
return(c(a$alow[1],a$aupp[1]))
}
pdf<-function(x)
{
alpha<-0.05
vec<-x
n<-length(x)
alow<-rep(0,2)
aupp<-rep(0,2)
a<-.Fortran("hpd",n=as.integer(n),alpha=as.double(alpha),x=as.double(vec),
alow=as.double(alow),aupp=as.double(aupp),PACKAGE="DPpackage")
return(c(a$alow[2],a$aupp[2]))
}
thetasave <- object$save.state$thetasave
ans <- c(object[c("call", "modelname")])
### CPO
ans$cpo <- object$cpo
### Median information
dimen1 <- object$pce
if(dimen1==1)
{
mat <- matrix(thetasave[,1],ncol=1)
}
else
{
mat <- thetasave[,1:dimen1]
}
coef.p <- object$coefficients[1:dimen1]
coef.m <- apply(mat, 2, median)
coef.sd <- apply(mat, 2, sd)
coef.se <- apply(mat, 2, stde)
if(hpd){
limm <- apply(mat, 2, hpdf)
coef.l <- limm[1,]
coef.u <- limm[2,]
}
else
{
limm <- apply(mat, 2, pdf)
coef.l <- limm[1,]
coef.u <- limm[2,]
}
coef.table <- cbind(coef.p, coef.m, coef.sd, coef.se , coef.l , coef.u)
if(hpd)
{
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.", "Naive Std.Error",
"95%HPD-Low","95%HPD-Upp"))
}
else
{
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.", "Naive Std.Error",
"95%CI-Low","95%CI-Upp"))
}
ans$coeff <- coef.table
### Baseline Information
mat <- matrix(thetasave[,(dimen1+1)],ncol=1)
coef.p <- object$coefficients[(dimen1+1)]
coef.m <- apply(mat, 2, median)
coef.sd <- apply(mat, 2, sd)
coef.se <- apply(mat, 2, stde)
if(hpd){
limm <- apply(mat, 2, hpdf)
coef.l <- limm[1,]
coef.u <- limm[2,]
}
else
{
limm <- apply(mat, 2, pdf)
coef.l <- limm[1,]
coef.u <- limm[2,]
}
coef.table <- cbind(coef.p, coef.m, coef.sd, coef.se , coef.l , coef.u)
if(hpd)
{
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.", "Naive Std.Error",
"95%HPD-Low","95%HPD-Upp"))
}
else
{
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.", "Naive Std.Error",
"95%CI-Low","95%CI-Upp"))
}
ans$base <- coef.table
### Precision parameter
if(is.null(object$prior$a0))
{
ans$prec <- NULL
}
else
{
mat <- matrix(thetasave[,(dimen1+2)],ncol=1)
coef.p <- object$coefficients[(dimen1+2)]
coef.m <- apply(mat, 2, median)
coef.sd <- apply(mat, 2, sd)
coef.se <- apply(mat, 2, stde)
if(hpd){
limm <- apply(mat, 2, hpdf)
coef.l <- limm[1,]
coef.u <- limm[2,]
}
else
{
limm <- apply(mat, 2, pdf)
coef.l <- limm[1,]
coef.u <- limm[2,]
}
coef.table <- cbind(coef.p, coef.m, coef.sd, coef.se , coef.l , coef.u)
if(hpd)
{
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.", "Naive Std.Error",
"95%HPD-Low","95%HPD-Upp"))
}
else
{
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.", "Naive Std.Error",
"95%CI-Low","95%CI-Upp"))
}
ans$prec <- coef.table
}
ans$nrec <- object$nrec
ans$pce <- object$pce
ans$ptf <- object$ptf
class(ans) <- "summaryLDTFPsurvival"
return(ans)
}
"print.summaryLDTFPsurvival"<-function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\n",x$modelname,"\n\nCall:\n", sep = "")
print(x$call)
cat("\n")
cat("Posterior Predictive Distributions (log):\n")
print.default(format(summary(log(as.vector(x$cpo))), digits = digits), print.gap = 2,
quote = FALSE)
cat("\nPosterior Inference of Median Regression Parameters:\n")
print.default(format(x$coeff, digits = digits), print.gap = 2,
quote = FALSE)
cat("\nPosterior Inference of Baseline Variance:\n")
print.default(format(x$base, digits = digits), print.gap = 2,
quote = FALSE)
if (length(x$prec)) {
cat("\nPrecision parameter:\n")
print.default(format(x$prec, digits = digits), print.gap = 2,
quote = FALSE)
}
cat("\nNumber of Observations:",x$nrec)
cat("\nNumber of Predictors for the Median:",x$pce)
cat("\nNumber of Predictors for the Tailfree Probabilities:",x$pce,"\n")
cat("\n\n")
invisible(x)
}
|
d7e9cd840078b6f15c23eb16cb709dfb994bdb0b
|
dd1af48ab0f0e40b531c8f4d55c2b1ddebbef887
|
/R/RcppExports.R
|
3fdbd1e8c04d05fe700259c64d65d667a5c17d24
|
[] |
no_license
|
dfalbel/testerror
|
b295947b132e8d8e86608146ed1fd3a2aaf4cea6
|
8ee827bd80239b836e0f78ca704320b46fec1bf1
|
refs/heads/master
| 2020-04-20T11:43:52.669750
| 2019-02-02T11:28:47
| 2019-02-02T11:28:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 327
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is generated by Rcpp::compileAttributes(); prefer
# regenerating over hand-editing.
# Invokes the registered C++ routine '_testerror_test_error' and returns its
# result invisibly. Presumably the routine signals an error from C++ (the
# package is named "testerror") -- confirm in src/.
test_error <- function() {
invisible(.Call('_testerror_test_error', PACKAGE = 'testerror'))
}
# Invokes the registered C++ routine '_testerror_test_error2' and returns its
# result invisibly. Presumably a second error-raising test hook -- confirm
# against the C++ sources.
test_error2 <- function() {
invisible(.Call('_testerror_test_error2', PACKAGE = 'testerror'))
}
|
fffe992ead267222d7695a5e1527fc9b961f3b2c
|
fb509a77664ed87f5a0d1c7114154cc181411757
|
/C_country_pred.r
|
bd40d560f257c25fb50ed750bdba7b3a72221818
|
[] |
no_license
|
wbickelmann/MLProjects
|
e91fefb5af33544fd91829087320e796b4233694
|
9c2acb40d247591510ec2f11e45d06c6598c7623
|
refs/heads/master
| 2020-03-11T05:50:48.711112
| 2018-05-01T05:07:15
| 2018-05-01T05:07:15
| 129,814,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,397
|
r
|
C_country_pred.r
|
# Predict household poverty status for country C: merge household- and
# individual-level data, prune predictors, select features with RFE, and fit
# xgboost / LogitBoost / gbm classifiers via caret.
library(caret)
library(tidyverse)
library(xgboost)

# NOTE(review): hard-coded absolute working directory; adjust per machine.
setwd('C:/Users/Willi/OneDrive/Documents/MLProjects')

# Household-level and individual-level training data.
combi_c <- read.csv("C_hhold_train.csv", stringsAsFactors = TRUE, header = TRUE)
combi_c_indiv <- read.csv("C_indiv_train.csv", stringsAsFactors = TRUE, header = TRUE)

# Drop individual identifiers/outcome and keep one row per household id.
combi_c_indiv <- subset(combi_c_indiv, select = -c(iid, poor, country))
combi_c_indiv <- combi_c_indiv[!duplicated(combi_c_indiv$id), ]

# Merge individual predictors onto households, then drop the join key.
combi_c <- plyr::join(combi_c, combi_c_indiv, by = 'id', type = 'inner')
combi_c$id <- NULL
combi_c1 <- na.omit(combi_c)

# Pull the outcome out as a factor before predictor screening.
poor <- data.frame(as.factor(combi_c1$poor))
names(poor) <- ("poor")
combi_c1$poor <- NULL

# Remove near-zero-variance predictors, then reattach the outcome (last column).
nzv <- nearZeroVar(combi_c1)
combi_c1 <- combi_c1[, -nzv]
dim(combi_c1)
combi_c1 <- cbind(combi_c1, poor)

# TRUE when a column has a tree-friendly number of factor levels (<= 34).
levels_detect <- function(x) {
  length(levels(x)) <= 34
}
excess_levels <- lapply(combi_c1, levels_detect)
name_removec <- names(which(excess_levels == FALSE))
combi_c1 <- combi_c1[, !(names(combi_c1) %in% name_removec)]

# Recursive feature elimination with random forests (10-fold CV).
control <- rfeControl(functions = rfFuncs, method = "cv", number = 10)
resultsc <- rfe(combi_c1[, 1:(length(colnames(combi_c1)) - 1)],
                combi_c1[, length(colnames(combi_c1))],
                rfeControl = control, metric = "Accuracy")
topvarsc <- predictors(resultsc)
plot(resultsc, type = c("g", "o"))

# Keep only the selected predictors plus the outcome, then center/scale.
combi_c2 <- combi_c1 %>% select(topvarsc, 'poor')
preProcValues <- preProcess(combi_c2, method = c("center", "scale"))
dfc <- predict(preProcValues, combi_c2)

# BUG FIX: the original ran `dfc2 = gather(dfc2)` (using dfc2 before it was
# defined) followed by the unparseable-in-intent `as.data.frame(dfc2=!NULL)`.
# Everything downstream expects the preprocessed wide data frame with 'poor'
# as its last column, so that is what dfc2 must be.
dfc2 <- dfc

# 80/20 stratified train/test split.
# (The original indexed columns with `1:dim(dfc2)[2]-1`, which by operator
# precedence is 0:(ncol-1) and only worked because index 0 is silently
# dropped; seq_len makes the intent explicit.)
inTrain <- createDataPartition(dfc2$poor, p = .80, list = FALSE)
Xtrainc <- dfc2[inTrain, seq_len(ncol(dfc2) - 1)]
Ytrainc <- dfc2[inTrain, ncol(dfc2)]
Xtestc <- dfc2[-inTrain, seq_len(ncol(dfc2) - 1)]
Ytestc <- dfc2[-inTrain, ncol(dfc2)]
trainc <- cbind(Xtrainc, Ytrainc)
names(trainc)[ncol(dfc2)] <- "poor"

# 5-fold CV optimizing ROC (class probabilities required).
fitControl <- trainControl(method = "cv",
                           number = 5,
                           savePredictions = TRUE,
                           classProbs = TRUE,
                           summaryFunction = twoClassSummary)

dfc.xgb <- caret::train(poor ~ ., data = trainc, method = "xgbTree",
                        trControl = fitControl, metric = "ROC")
dfc.logit <- caret::train(poor ~ ., data = trainc, method = "LogitBoost")
dfc.gbm <- caret::train(poor ~ ., data = trainc, method = "gbm")

# Class balance and xgboost tuning diagnostics.
qplot(data = dfc, x = poor, geom = 'bar')
trellis.par.set(caretTheme())
plot(dfc.xgb)

# Hold-out confusion matrix for the LogitBoost model.
confusionMatrix(Ytestc, predict(dfc.logit, newdata = Xtestc))
|
c65fba069704a6aa67913bb7fdd1212846250421
|
3df087ccb93de55ddde9fc02a1805432a9a21343
|
/man-roxygen/variable-rf.R
|
d61aec9905da57cf0dbae193183078e3f34b312a
|
[] |
no_license
|
thismely/ExpectedReturns
|
efc587e92f60cc2bf966a6e477b68025ed5c7806
|
ba2507aa1572b2a27d2f6639d45f98e5b2533ece
|
refs/heads/master
| 2023-07-06T20:13:46.863687
| 2021-07-11T17:33:30
| 2021-07-11T17:33:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
variable-rf.R
|
#' @section RF variable:
#'
#' The `RF` variable refers to the *risk-free rate*. It depends on the period being
#' considered and on the country. For example, for U.S. monthly data series it is the
#' one-month *T-Bill* return.
#' The `RF` data series distributed by K. R. French with the Fama-French factors
#' data are usually obtained from *Ibbotson Associates Inc. (Morningstar)*.
|
588cea514132aad23d5434b0ef2dee865fc169f1
|
4de2f9cdcd44de1e2b323b2d640c2231c4bf6010
|
/main.R
|
2440592ab205be9921a9edbfef812fb1a854ef13
|
[] |
no_license
|
mjenniferli02/shiny_health_tracker
|
9c9a5a5d1a87d8174d95239ea2df3a039fccaeca
|
759280f632c4e5be32bfa9b162f95f6f78563ca0
|
refs/heads/master
| 2021-04-16T00:45:12.149802
| 2020-03-27T00:24:15
| 2020-03-27T00:24:15
| 249,313,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
r
|
main.R
|
library(data.table)
library(ggplot2)
library(lubridate)
library(tidyverse)

# Load the sleep-score export and inspect the first rows.
# (Renamed from `data`, which shadows base::data().)
sleep_scores <- fread("data/sleep-score/sleep_score.csv")
head(sleep_scores)

# Keep only the date part of the timestamp (first 10 chars, "YYYY-MM-DD").
sleep_scores <- sleep_scores %>%
  mutate(timestamp = ymd(substr(timestamp, 1, 10)))

# Overall sleep score over time.
ggplot(sleep_scores) +
  geom_line(aes(x = timestamp, y = overall_score))

str(sleep_scores)
|
3a7af803b21352483b76397b50b26dd0039181e6
|
aa7618d72787ca663c3dc461df3cfb111b7fde2f
|
/ReadingData.R
|
66bb2a4ff40041859b42173b87649c7c53737b65
|
[] |
no_license
|
PelzKo/VisMetabo
|
57105af244ca56a06794738db1e1a5a20b6433c3
|
0c3061c93eeeca9cd732bf86e48843bb7417bb2d
|
refs/heads/master
| 2023-03-28T04:03:00.473836
| 2021-03-25T03:00:43
| 2021-03-25T03:00:43
| 276,921,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,539
|
r
|
ReadingData.R
|
library("openxlsx")
#dftest <- "C:/Users/Konstantin/Desktop/Uni/6Semester/BachelorArbeit/BeispielDaten/metabExampleMaleFemale.xlsx"
#df <- read.xlsx("C:/Users/Konstantin/Desktop/Uni/6Semester/BachelorArbeit/BeispielDaten/QMDiab_metabolomics_Preprocessed.xlsx", sheet = 1)
# Read a metabolomics data file (.xlsx, .tsv, or anything else as .csv) and
# locate two structural features:
#   * an ID column: the FIRST column whose values are all unique, and
#   * the metabolite columns: the longest consecutive run of double-typed
#     columns (first such run wins ties).
# Returns list(values = full data frame, id = ID column index,
#              metab_start / metab_end = bounds of the metabolite run).
# Stops with an error if no ID column exists, or if the ID column falls
# inside the metabolite run.
readFile <- function(filePath, fileSheet = 1){
ext <- tools::file_ext(filePath)
if (ext == "xlsx"){
df <- read.xlsx(filePath, sheet = fileSheet)
} else if (ext == "tsv"){
df <- read.csv(filePath,sep = "\t")
} else {
df <- read.csv(filePath)
}
#df <- df[complete.cases(df),]
# id_col == 0 means "not found yet". The current run of double columns is
# [metab_start, metab_end]; the best (longest) run seen so far is
# [metab_start_final, metab_end_final].
id_col <- 0
need_id <- TRUE
metab_start <- 1
metab_end <- 1
metab_start_final <- 1
metab_end_final <- 1
#iterating through all columns
for (col in seq_len(ncol(df))){
#if column is unique and we have not found an id column yet
if (need_id&&length(df[[col]])==length(unique(df[[col]]))){
id_col<-col
need_id<-FALSE
}
if (typeof(df[[col]])=="double"){
#extend the current metabolite interval if the previous column was a double value as well
if (col==metab_end+1){
metab_end<-col
}
else {
#if the current run is longer than the longest so far, remember it, then start a new run here
if (metab_end-metab_start>metab_end_final-metab_start_final){
metab_start_final<-metab_start
metab_end_final<-metab_end
}
metab_start<-col
metab_end<-col
}
}
#columns start with a non-double column, otherwise it would be included in first column interval
#(the 0/-1 sentinel makes the empty run lose every length comparison)
else if (col==1){
metab_start <- 0
metab_end <- -1
metab_start_final <- 0
metab_end_final <- -1
}
}
#check once at the end: the final run may be the longest one
if (metab_end-metab_start>metab_end_final-metab_start_final){
metab_start_final<-metab_start
metab_end_final<-metab_end
}
if (id_col==0){
stop("NO ID COLUMN FOUND")
}
if (id_col>=metab_start_final&&id_col<=metab_end_final){
stop("ID COLUMN FOUND IN METABOLITES")
}
print("Done reading the file")
#metab <- df[c(metab_start_final:metab_end_final)]
#pheno <- df[-c(metab_start_final:metab_end_final)]
#return(list(metab,pheno))
return (list(values = df,id = id_col,metab_start = metab_start_final,metab_end = metab_end_final))
}
# Read a phenotype table, dispatching on the file extension:
# .xlsx via openxlsx::read.xlsx, .tsv as tab-separated text, and anything
# else as comma-separated text. Returns the resulting data frame.
readPhenoFile <- function(filePath, fileSheet = 1){
  ext <- tools::file_ext(filePath)
  if (ext == "xlsx") {
    return(read.xlsx(filePath, sheet = fileSheet))
  }
  if (ext == "tsv") {
    return(read.csv(filePath, sep = "\t"))
  }
  read.csv(filePath)
}
|
a437434a3a501eb1afe88bdfc51500055c3937d6
|
67dcbff196434716013f8dfcf7a18e28b8db65d1
|
/625-2-MLE.R
|
51bf656ddc95a929d6fb988bfe70e96ea132399b
|
[] |
no_license
|
clabornd/survival_MLE
|
0b85e6bec85239412502baa5ab84ddb7b3879068
|
f311c965bacc56d21d69718ef3d8e4c3b29cb5e3
|
refs/heads/master
| 2021-08-20T10:18:50.721266
| 2017-11-28T21:48:14
| 2017-11-28T21:48:14
| 111,720,399
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,526
|
r
|
625-2-MLE.R
|
library(ggplot2)
library(purrr)
#############Plot Likelihood functions###########
l_true <- function(lambda, x){(lambda^length(x)*exp(-lambda*sum(x)))/max(lambda^length(x)*exp(-lambda*sum(x)))}
# Right-censored exponential likelihood over a grid of rates `lambda`,
# rescaled so its maximum equals 1. Observations with x < censor_time
# contribute a density term; censored observations (censor_time <= x)
# contribute a survival term exp(-lambda * censor_time).
l_censored <- function(lambda, x, censor_time) {
  # Recycle a scalar censoring time across all observations.
  if (length(censor_time) == 1) {
    censor_time <- rep(censor_time, length(x))
  }
  observed <- x[x < censor_time]
  censored <- censor_time[censor_time <= x]
  lik <- lambda^length(observed) * exp(-lambda * sum(observed)) *
    exp(-lambda * sum(censored))
  lik / max(lik)
}
n <- 100

# Simulate `n` survival times from `dist_function` and right-censor them at a
# fixed administrative censoring time. Returns list(true times, observed times).
rsurv <- function(dist_function, n, censor_time, parameter, ...) {
  times <- dist_function(n, parameter, ...)
  observed <- replace(times, times > censor_time, censor_time)
  list(times, observed)
}
# Simulate `n` survival times with a uniform random censoring time per
# subject (drawn within one sd of the sample mean), mirroring the random-
# censoring pattern used in the script below.
#
# BUG FIX: the original body was left unfinished --
# `censor_times <- replace(surv_times, surv_times > )` is a parse error that
# prevented the whole file from being sourced. Completed to return
# list(true times, observed times) like rsurv().
rsurv_rand <- function(dist_function, n, parameter, ...) {
  surv_times <- dist_function(n, parameter, ...)
  censor_draws <- runif(n, mean(surv_times) - sd(surv_times),
                        mean(surv_times) + sd(surv_times))
  censor_times <- pmin(surv_times, censor_draws)
  list(surv_times, censor_times)
}
s_times <- rexp(n,1)
foo <- runif(n, 0.5, 1.5)
c_times <- s_times
c_times[which(foo < c_times)]<-foo[which(foo < c_times)]
#generate informative censoring times
s_times <- rexp(n,1)
i_times <- 0
i_times[which(s_times <1)]<-runif(length(which(s_times <1)), 0 ,1)
i_times[which(s_times >= 1)]<-runif(length(which(s_times >= 1)), 1 ,2)
c_times <- s_times
c_times[which(i_times < c_times)]<-i_times[which(i_times < c_times)]
######generate 500 survival and censoring times######
# NOTE(review): the banner says 500 but each loop below generates 50
# replicates of n = 100 subjects; each successive loop OVERWRITES
# s_list/c_list from the previous one, so only the last scheme that is
# run survives — confirm that is intentional.
s_list <- list()
c_list <- list()
obs_list <- list()
# Administrative censoring: everyone still alive at t = 1 is censored at 1.
for(i in 1:50){
s_times <- rexp(n,1)
c_times <- s_times
c_times[which(c_times > 1)] <- 1
s_list[[i]]<-s_times
c_list[[i]]<-c_times
}
#random censoring
# Independent Uniform(0.5, 1.5) censoring time per subject; `obs_list`
# stores the censoring draws themselves.
for(i in 1:50){
s_times <- rexp(n,1)
foo <- runif(100, 0.5, 1.5)
c_times <- s_times
c_times[which(foo < c_times)]<-foo[which(foo < c_times)]
s_list[[i]]<-s_times
c_list[[i]]<-c_times
obs_list[[i]]<-foo
}
#informative censoring
# Censoring distribution depends on the survival time itself (violates the
# non-informative censoring assumption on purpose).
for(i in 1:50){
s_times <- rexp(n,1)
i_times <- 0
i_times[which(s_times <1)] <- runif(length(which(s_times <1)), 0 ,1)
i_times[which(s_times >= 1)] <- runif(length(which(s_times >= 1)), 1 ,2)
c_times <- s_times
c_times[which(i_times < c_times)]<-i_times[which(i_times < c_times)]
s_list[[i]] <- s_times
c_list[[i]] <- c_times
obs_list[[i]] <- i_times
}
###calculate mean or sd of MLE's###
# MLE of the exponential rate from fully observed data: n / sum(x).
sd(map_dbl(s_list, ~length(.x)/sum(.x)))
# Censored-data MLE: events (times not equal to their censoring draw)
# divided by total observed time at risk.
sd(map2_dbl(c_list, obs_list, ~(length(.x)-length(which(.x==.y)))/sum(.x)))
##########Create Lists of Stat_function calls and plot all 1500 curves###########
# One ggplot2 stat_function layer per replicate: black = likelihood from
# fully observed data, red = naive likelihood treating censored times as
# events, green = correct censored-data likelihood.
mle1.plots <- map(s_list, ~stat_function(fun = l_true, args = list(.x)))
mle2.plots <- map(c_list, ~stat_function(fun = l_true, col = "red", args = list(.x)))
mle3.plots <- map2(s_list, obs_list, ~stat_function(fun = l_censored, col = "green", args = list(.x, .y)))
plots <- c(mle2.plots, mle3.plots, mle1.plots)
# Pair each replicate's survival times with its censoring times so a
# single-argument helper can be mapped over them.
samplist <- list()
for(i in 1:50){
samplist[[i]] <- list(s_list[[i]], obs_list[[i]])
}
# Build one ggplot2 stat_function layer per sample in `samples_list`, all
# evaluating the likelihood function `func`; extra arguments (e.g. col) are
# forwarded unchanged to stat_function().
multifun_getplots <- function(func, samples_list, ...) {
  map(samples_list, function(sample_args) {
    stat_function(fun = func, args = list(sample_args), ...)
  })
}
# Rebuild the three layer lists via the helper.
# NOTE(review): `plots` (added to the ggplot below) was assembled from the
# EARLIER map() calls, so these three assignments are never plotted — confirm.
# NOTE(review): samplist elements are 2-element lists passed as a single
# `args` entry to l_censored(lambda, x, censor_time), which expects x and
# censor_time separately — verify this produces the intended curves.
mle1.plots <- multifun_getplots(l_censored, samplist, col = "green")
mle2.plots <- multifun_getplots(l_true, c_list, col = "red")
mle3.plots <- multifun_getplots(l_true, s_list)
# Draw all scaled-likelihood curves over lambda in [0, 3].
ggplot(data = data.frame(x=c(0,3)), mapping = aes(x = x)) +
xlab("MLE estimates of lambda")+
ylab("Scaled Likelihood")+
plots
|
9d3421c63d13de3fb247fa598d5fb39ac454c762
|
8f549e33631a13e2b3c05fd02605f31a6f5c079c
|
/R/EstimateInHospitalMortality.R
|
a4e3a78d110af8cd13567010f1cb9bbcaa4c6a30
|
[
"MIT"
] |
permissive
|
martingerdin/bengaltiger
|
07e60275560af5ed3c6df090f94a8d427796e29e
|
2662bb36540699a51e6558b542008d07035a98e1
|
refs/heads/master
| 2021-07-03T12:29:20.911428
| 2020-02-25T11:45:47
| 2020-02-25T11:45:47
| 144,838,020
| 3
| 4
|
MIT
| 2020-09-02T10:24:17
| 2018-08-15T10:12:31
|
R
|
UTF-8
|
R
| false
| false
| 8,237
|
r
|
EstimateInHospitalMortality.R
|
#' Estimate in hospital mortality
#'
#' Estimates the proportion of patients who died in hospital with a bootstrap
#' confidence interval if requested.
#' @param study.sample Data frame. The study sample. No default.
#' @param variable.name Character vector of length 1. The name of the in
#'     hospital mortality variable. Defaults to "m24h".
#' @param died.level Character vector of length 1. The level of the in hospital
#'     mortality variable that indicates in hospital mortality. Defaults to
#'     "Yes".
#' @param digits Numeric vector of length 1. Must be a positive integer. The
#'     number of digits to use when rounding the proportion, and if applicable,
#'     the lower and upper bounds of the confidence interval. Defaults to 3.
#' @param bootstrap.confidence.interval Logical vector of length 1. If TRUE a
#'     confidence interval is estimated using an empirical bootstrap. Defaults
#'     to TRUE.
#' @param bootstrap.samples.exist Logical vector of length 1. If TRUE bootstrap
#'     samples are assumed to have been created using CreateBootstrapSamples,
#'     and are therefore read from the file bootstrap.samples.Rds. Defaults to
#'     FALSE.
#' @param random.seed.already.set Logical vector of length 1. If TRUE
#'     random.seed does not need to be set within this function as it indicates
#'     that this has been done (which is good practice) earlier in the
#'     code. Defaults to FALSE.
#' @param random.seed Numeric vector of length 1. Has to be an integer. The seed
#'     to use for random number generation. Only used if
#'     bootstrap.confidence.interval is TRUE and random.seed.already.set is
#'     FALSE. Defaults to NULL.
#' @param number.of.bootstrap.samples Numeric vector of length 1. Has to be a
#'     positive integer. The number of bootstrap samples to use. Only used if
#'     bootstrap.confidence.interval is TRUE. Defaults to 1000.
#' @param save.to.results Logical vector of length 1. If TRUE the table object
#'     is saved to a results file on disk using SaveToResults. Defaults to TRUE.
#' @param print.result Logical vector of length 1. If TRUE the result is
#'     printed so that you see what is saved to results. Defaults to TRUE.
#' @param return.result Logical vector of length 1. If TRUE the result is
#'     returned to the parent environment. Defaults to FALSE.
#' @export
EstimateInHospitalMortality <- function(study.sample,
                                        variable.name = "m24h",
                                        died.level = "Yes",
                                        digits = 3,
                                        bootstrap.confidence.interval = TRUE,
                                        bootstrap.samples.exist = FALSE,
                                        random.seed.already.set = FALSE,
                                        random.seed = NULL,
                                        number.of.bootstrap.samples = 1000,
                                        save.to.results = TRUE,
                                        print.result = TRUE,
                                        return.result = FALSE) {
    ## Error handling: validate every argument up front and fail fast.
    if (!is.data.frame(study.sample))
        stop("study.sample has to be a data.frame")
    if (!is.character(variable.name) | !IsLength1(variable.name))
        stop("variable.name has to be a character vector of length 1")
    if (!is.character(died.level) | !IsLength1(died.level))
        stop("died.level has to be a character vector of length 1")
    if (!is.numeric(digits) | !IsLength1(digits) | digits < 0 | as.integer(digits) != digits)
        stop("digits has to be a positive integer")
    if (!is.logical(bootstrap.confidence.interval) | !IsLength1(bootstrap.confidence.interval))
        stop("bootstrap.confidence.interval has to be a logical vector of length 1")
    if (!is.logical(bootstrap.samples.exist) | !IsLength1(bootstrap.samples.exist))
        stop("bootstrap.samples.exist has to be a logical vector of length 1")
    if (!is.logical(random.seed.already.set) | !IsLength1(random.seed.already.set))
        stop("random.seed.already.set has to be a logical vector of length 1")
    if (!is.null(random.seed))
        if (!is.numeric(random.seed) | !IsLength1(random.seed) | as.integer(random.seed) != random.seed)
            stop("random.seed has to be an integer")
    if (!is.numeric(number.of.bootstrap.samples) | !IsLength1(number.of.bootstrap.samples) | number.of.bootstrap.samples < 0 | as.integer(number.of.bootstrap.samples) != number.of.bootstrap.samples)
        stop("number.of.bootstrap.samples has to be a positive integer")
    if (!is.logical(save.to.results) | !IsLength1(save.to.results))
        stop("save.to.results has to be a logical vector of length 1")
    if (!is.logical(print.result) | !IsLength1(print.result))
        stop("print.result has to be a logical vector of length 1")
    if (!is.logical(return.result) | !IsLength1(return.result))
        stop("return.result has to be a logical vector of length 1")
    ## Calculate proportion of in hospital mortality point estimate
    in.hospital.mortality <- study.sample[, variable.name]
    estimates <- list(point.estimate = mean(in.hospital.mortality == died.level))
    ## Estimate bootstrap confidence interval
    if (bootstrap.confidence.interval) {
        if (is.null(random.seed) & !random.seed.already.set & !bootstrap.samples.exist)
            stop("Please provide a random seed to estimate a bootstrap confidence interval")
        if (!is.null(random.seed) & random.seed.already.set)
            stop ("If a random seed has already been set you should not provide a new one. Run this function again but remove your random.seed argument or set random.seed.already.set to FALSE if that is really the case.")
        if (!is.null(random.seed))
            set.seed(random.seed)
        ## Get bootstrap samples
        if (bootstrap.samples.exist) {
            ## Reuse pre-generated samples from disk, keeping only the
            ## mortality column of each.
            bootstrap.samples <- readRDS("bootstrap.samples.Rds")
            bootstrap.samples <- lapply(bootstrap.samples, function(sample) {
                return(sample[, variable.name])
            })
        } else {
            ## Get row indices to use to generate bootstrap samples
            ## (sampling with replacement, same size as the original data).
            row.indices <- lapply(1:number.of.bootstrap.samples, function(i) {
                sample(1:length(in.hospital.mortality), length(in.hospital.mortality), replace = TRUE)
            })
            ## Generate bootstrap samples
            bootstrap.samples <- lapply(row.indices, function(i) {
                in.hospital.mortality[i]
            })
        }
        ## Estimate bootstrap estimates
        bootstrap.estimates <- unlist(lapply(bootstrap.samples, function(in.hospital.mortality.sample) {
            mean(in.hospital.mortality.sample == died.level)
        }))
        ## Use the empirical bootstrap method
        ## The probs are deliberately reversed (0.975 first): subtracting the
        ## 97.5th percentile of the deviations from the point estimate yields
        ## the LOWER bound and vice versa; min/max below makes the ordering
        ## explicit.
        deviations <- estimates$point.estimate - bootstrap.estimates
        quantiles <- quantile(deviations, probs = c(0.975, 0.025))
        bounds <- estimates$point.estimate - quantiles
        estimates$lower.bound <- min(bounds)
        estimates$upper.bound <- max(bounds)
    }
    ## Round estimates
    estimates <- lapply(estimates, round, digits = digits)
    ## Prepare result strings for known in-hospital mortality variable names
    prepared.strings <- list(m24h = "The 24-hour in hospital mortality was ",
                             m30d = "The 30-day in hospital mortality was ")
    ## Format result
    result.string <- "The in hospital mortality was "
    if (variable.name %in% names(prepared.strings))
        result.string <- prepared.strings[[variable.name]]
    content <- paste0(result.string, estimates$point.estimate)
    if (bootstrap.confidence.interval)
        content <- paste0(content, " (95% CI ", estimates$lower.bound, "-", estimates$upper.bound, ")")
    content <- paste0(content, ".")
    ## Save to results
    if (save.to.results)
        SaveToResults(content, paste0(variable.name, ".in.hospital.mortality.estimate"))
    ## Print result
    if (print.result)
        cat(content, "\n")
    ## Return result
    if (return.result)
        return(list(result = content,
                    estimates = estimates))
}
|
926559f3e361d500a4ee03c36d95667f655025a7
|
a3a59ebe1a41f1bc23d641e0f26673c684ecf72b
|
/tests/testthat.R
|
5cd0178e157a78a638ba1f43394cc8f8059ea27a
|
[] |
no_license
|
gabrielodom/pathwayPCA
|
78c801aaf51c6f16eaac1e2bbbd7c7bb743492c8
|
552e1f378040e6080aa3ac13a7f8a302e579532d
|
refs/heads/master
| 2023-07-08T14:17:13.486479
| 2023-06-28T17:29:22
| 2023-06-28T17:29:22
| 107,602,989
| 12
| 2
| null | 2019-03-28T19:43:40
| 2017-10-19T21:57:30
|
R
|
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# testthat entry point: executed by R CMD check to discover and run every
# unit test under tests/testthat/ for the pathwayPCA package.
library(testthat)
library(pathwayPCA)
test_check("pathwayPCA")
|
0e685dfc3bce332bdeac302c5a726be2cf67bd4c
|
9b57bf7e2fb3f68221875091db1bd3e93cc85e41
|
/old/full_1_5_2nd/create_main_files.R
|
1f1f883976e4ecef9cab0030e0537e953c52b3ae
|
[] |
no_license
|
mastoffel/imputation_eddie
|
7d617ee7f1badf5c69e91cc53161d6ff2b9fb760
|
8945fab33ec30a8b7c317d9ae7f8243ea77537c6
|
refs/heads/master
| 2020-05-31T15:38:51.923234
| 2020-05-25T15:13:24
| 2020-05-25T15:13:24
| 190,361,962
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,988
|
r
|
create_main_files.R
|
# Creating the genotype files and other files for AlphaImpute
# this script outputs three files:
# (1) Genotypes.txt: complete genotype data, can be filter for chromosomes
# (2) to_be_imputed_index.txt: index of SNPs which have to be masked and imputed
# (3) to_be_imputed.txt: names of SNPs
# (4) AlphaImputeLinux
# (5) Pedigree.txt
library(snpStats)
library(tidyverse)
library(data.table)
source("create_spec_file_merged.R")
#library(gdata)
#library("WGCNA")
# browseVignettes("snpStats")
# capture command line arguments
# either 1-27 for the chromosomes or all_chr for full data
# Fixes over the original: the `chr_num <- NULL` assignments after stop()
# were unreachable dead code; scalar conditions now use && instead of &;
# a non-numeric argument is routed to the informative error instead of
# failing with `if (NA)`.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
    stop("Provide a chromosome number as argument")
} else if (args[[1]] == "all_chr") { # command for full dataset
    stop("Only accepts chromosome numbers from 1-26 at the moment")
} else if (!is.na(suppressWarnings(as.numeric(args[[1]]))) &&
           as.numeric(args[[1]]) >= 1 &&
           as.numeric(args[[1]]) < 27) { # do not allow the sex chromosome for now
    chr_num <- as.numeric(args[[1]])
} else {
    stop("command line arguments specified wrongly, check R script")
}
### INPUT FOLDER ###
### Contains PLINK FILES, AlphaImputeLinux and Pedigree.txt ###
# on mac
#plink_geno_path <- "../sheep/data/SNP_chip/"
# on eddie, also contains the AlphaImputeLinux file and the Pedigree.txt
plink_geno_path <- "/exports/csce/eddie/biology/groups/pemberton/martin/plink_genotypes/"
####################
### OUTPUT FOLDER ###
# on mac
#output_path_chr <- paste0("all_chr_cv/chr_", chr_num)
#output_path_main_files <- paste0(output_path_chr, "/AI_main_files/")
# on eddie
output_path_chr <- paste0("/exports/eddie/scratch/v1mstoff/full_1_5_2nd/chr_", chr_num) # main folder
output_path_main_files <- paste0(output_path_chr, "/AI_main_files/") # main files for chr1
# Create the per-chromosome output directories on first run.
if (!dir.exists(output_path_chr)) dir.create(output_path_chr, recursive = TRUE)
if (!dir.exists(output_path_main_files)) dir.create(output_path_main_files)
#####################
# plink name
sheep_plink_name <- "merged_sheep_geno"
# read merged plink data (snpStats::read.plink needs all three PLINK files)
sheep_bed <- paste0(plink_geno_path, sheep_plink_name, ".bed")
sheep_bim <- paste0(plink_geno_path, sheep_plink_name, ".bim")
sheep_fam <- paste0(plink_geno_path, sheep_plink_name, ".fam")
full_sample <- read.plink(sheep_bed, sheep_bim, sheep_fam)
# filter names of snps on one chromosome
all_chr_snps <- full_sample$map %>% filter(chromosome == chr_num) %>% .$snp.name
# filter those snps from full dataset and coerce from raw to numeric
sheep_geno <- as(full_sample$genotypes[, all_chr_snps], Class = "numeric")
# plink puts double ids when merging, extract unique ids here
# (row names look like "<prefix>.<id>"; keep the part after the first dot)
sheep_ids <- unlist(lapply(str_split(rownames(full_sample$genotypes), "\\.", 2), function(x) x[[2]]))
rownames(sheep_geno) <- sheep_ids
# clear some space
rm(full_sample)
# make tibble and put rownames as ID column
sheep_geno <- as_tibble(sheep_geno, rownames = "ID")
###### merge individuals on both ld and hd chip #####
#dup_ids <- which(duplicated(sheep_ids))
setDT(sheep_geno)
# function to merge SNP data from the same individual, when it is both
# in the HD and LD chip
# if genotype is missing on one chip, take the existing genotype
# if genotypes differ between chips, set NA
# Merge SNP calls for an individual genotyped on more than one chip.
# Rules (generalised to any number of duplicate records):
#   - all calls missing          -> NA
#   - exactly one distinct call  -> that call
#   - conflicting calls          -> NA (the conflict is printed for review)
# The original version only handled 1-2 records per ID; an ID present three
# or more times fell through every branch and silently returned NULL, which
# would corrupt the data.table aggregation.
merge_geno <- function(vec) {
  vec <- as.numeric(vec)
  if (length(vec) == 1) return(vec)
  observed <- vec[!is.na(vec)]
  if (length(observed) == 0) return(as.numeric(NA))
  if (length(unique(observed)) == 1) return(observed[1])
  # Genotypes disagree between chips: log the conflict and set missing.
  print(vec)
  as.numeric(NA)
}
# Collapse duplicate records per individual: apply merge_geno to every SNP
# column within each ID group.
sheep_geno_merged <- sheep_geno[, lapply(.SD, merge_geno), by=ID]
##### create spec file / has to source create_spec_file_AI.R
# this is specific to every chromosome
if (!exists("create_spec_file")) stop("Make sure create_spec_file_AI.R script is in the directory")
# ncol - 1 because the ID column is not a SNP.
create_spec_file(output_path_main_files, ncol(sheep_geno_merged)-1)
#####
# which SNPs are present in the LD but not the HD SNP chip and have to be imputed?
geno_missing <- colSums(is.na(sheep_geno_merged))
# which SNPs are missing in more than 50% individuals (LD chip SNPs)
to_be_imputed <- names(geno_missing[geno_missing > 0.5 * nrow(sheep_geno_merged)])
write_lines(to_be_imputed, path = paste0(output_path_main_files,"to_be_imputed.txt"))
# AlphaImpute wants 9 instead of NA
setDT(sheep_geno_merged)
# replace NAs with 9
# Replace every NA in the data.table with 9 (AlphaImpute's missing-value
# code), modifying DT in place by reference via data.table::set().
repl_na <- function(DT) {
  for (col_idx in seq_len(ncol(DT))) {
    na_rows <- which(is.na(DT[[col_idx]]))
    set(DT, na_rows, col_idx, 9)
  }
}
# Recode all missing genotypes to 9 in place (AlphaImpute convention).
repl_na(sheep_geno_merged)
# filter individuals which are not in pedigree due to some ID error
not_in_ped <- as.character(c(39,4302,9240,10446,10448,10449,10450,
10451,11076,11077,11079,11388))
sheep_geno_filt <- sheep_geno_merged[!(ID %chin% not_in_ped)]
# write to file with col names for masking script
fwrite(sheep_geno_filt, paste0(output_path_main_files, "Genotypes.txt"),
sep = " ", col.names = TRUE)
|
7f56078375522854e267cd3c43af38d0d8a1ab00
|
16b3a68ca34ca6eaf6b5e5264a8944355ff9fef2
|
/TestingFile2.R
|
5b03b732a13ce740b46e2dd1187ebca445ae354f
|
[] |
no_license
|
sanjayram77003/Testing
|
78a2d4ba41d234f1984bfce03ae32af6f15b1a7d
|
73442ec6f2b03b8370f2c867365ccf7eea9e90d3
|
refs/heads/master
| 2022-12-04T17:38:30.910883
| 2020-08-26T06:04:53
| 2020-08-26T06:04:53
| 290,405,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19
|
r
|
TestingFile2.R
|
# R is case-sensitive: the base function is print(), not Print(), so the
# original line failed with 'could not find function "Print"'.
print("Successful")
|
e433de97facaf7eef0efe9ec68204ec7241ea3f2
|
4fb31dff9ccd46ff6c0020c208a5d204cea619c1
|
/data_validation.R
|
85f00e2664f48f2b8a4d039a06ebd294f6cac679
|
[
"Apache-2.0"
] |
permissive
|
ctsit/nSOFA_calculation
|
4e99784fb75ac86e3461c51972815d10e86da55a
|
c433d743be2a52930735283ef7d63dd9b59c5df6
|
refs/heads/master
| 2023-04-18T22:50:48.275326
| 2020-09-28T16:34:33
| 2020-09-28T16:34:33
| 259,997,057
| 0
| 2
|
Apache-2.0
| 2020-07-10T17:53:36
| 2020-04-29T17:37:39
|
R
|
UTF-8
|
R
| false
| false
| 2,625
|
r
|
data_validation.R
|
# IMPORTANT: make_nsofa_dataset.R must first be run to create the nsofa_scores dataset
source("functions.R")
load_libraries()
# compare to nsofa data provided by irb ---------------------------------------
# data from 2018 onwards
# Keep only the latest record within each patient/hour bucket.
read_irb_nsofa <- get_data("nsofa_scores.csv") %>%
  mutate(q1hr = floor_date(recorded_time, "1 hour")) %>%
  group_by(child_mrn_uf, q1hr) %>%
  filter(recorded_time == max(recorded_time)) %>%
  ungroup()
unique_id <- read_irb_nsofa %>%
  distinct(child_mrn_uf)
# Carry component scores forward in time, then derive the cardiovascular
# (cv) sub-score from the inotrope/steroid combination.
filtered_irb_nsofa <- read_irb_nsofa %>%
  filter(child_mrn_uf %in% unique_id$child_mrn_uf) %>%
  select(child_mrn_uf, q1hr, inotropes:steroids) %>%
  fill(c(platelets, steroids, inotropes, oxygenation),
       .direction = "down") %>%
  mutate(cv = case_when(inotropes == 0 & steroids == 0 ~ 0,
                        inotropes == 0 & steroids == 1 ~ 1,
                        inotropes == 1 & steroids == 0 ~ 2,
                        (inotropes >= 2 & steroids == 0) |
                          (inotropes == 1 & steroids == 1) ~ 3,
                        inotropes >= 2 & steroids == 1 ~ 4)) %>%
  # NOTE(review): how is nSOFA calculated when the inotropic score is used?
  # Confirm that summing platelets + oxygenation + cv is the intended formula.
  mutate(nsofa_score = platelets + oxygenation + cv)
# Read input dataset if not already in environment
read_nsofa_scores <- vroom(here("output", "nsofa_scores.csv"), delim = ",")
nsofa_scores <- read_nsofa_scores %>%
  rename(inotropes = inotrope_score) %>%
  select(-number_inotropic_drugs)
# this only compares data beginning in 2018
# Join IRB and CTSI scores on patient + hour; suffixes distinguish sources.
compare_irb_ctsi <- filtered_irb_nsofa %>%
  inner_join(nsofa_scores, by = c("child_mrn_uf", "q1hr"), suffix = c("_irb", "_ctsi")) %>%
  select(child_mrn_uf, q1hr, starts_with("inotrope"),
         starts_with("oxygenation"),
         starts_with("platelets"), starts_with("steroids"),
         starts_with("nsofa")
         )
write.xlsx(compare_irb_ctsi, here("output", "compare_irb_ctsi.xlsx"), na = "")
# compare scores
# Each ratio below is the fraction of joined rows where the two sources
# disagree on that component (printed interactively).
nsofa_score_compare <- compare_irb_ctsi %>%
  filter(nsofa_score_irb != nsofa_score_ctsi)
nrow(nsofa_score_compare)/nrow(compare_irb_ctsi)
inotropes_compare <- compare_irb_ctsi %>%
  filter(inotropes_irb != inotropes_ctsi)
nrow(inotropes_compare)/nrow(compare_irb_ctsi)
oxygenation_compare <- compare_irb_ctsi %>%
  filter(oxygenation_irb != oxygenation_ctsi)
nrow(oxygenation_compare)/nrow(compare_irb_ctsi)
platelets_compare <- compare_irb_ctsi %>%
  filter(platelets_irb != platelets_ctsi)
nrow(platelets_compare)/nrow(compare_irb_ctsi)
steroids_compare <- compare_irb_ctsi %>%
  filter(steroids_irb != steroids_ctsi)
nrow(steroids_compare)/nrow(compare_irb_ctsi)
|
bed951cb1215acea62368848b1b2fdf72ca3f5e7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rbokeh/examples/tool_wheel_zoom.Rd.R
|
579392e789bc7564b377cc84895e1fc4feff7dd3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 269
|
r
|
tool_wheel_zoom.Rd.R
|
library(rbokeh)
### Name: tool_wheel_zoom
### Title: Add "wheel_zoom" tool to a Bokeh figure
### Aliases: tool_wheel_zoom
### ** Examples
## No test:
# only zoom on x axis
# NOTE(review): the comment above says "x axis" but dimensions = "height"
# restricts wheel zooming to the height (y) dimension — confirm against the
# rbokeh tool_wheel_zoom documentation.
figure() %>% ly_points(1:10) %>%
  tool_wheel_zoom(dimensions = "height")
## End(No test)
|
5904e08326347db541f75014d4bc0b05ac97bb25
|
e0b165551ab06067e6eb33199a058a54922e6471
|
/dom.R
|
80c2bfedc2dbddc153c571429188dd519c74aadc
|
[] |
no_license
|
brunocarlin/R_Faculdade
|
62b2a8e08201c4e4bed26bf3b16234dbfc8ed202
|
649df8cb1470a6347ac049bff040b75d70c558dc
|
refs/heads/master
| 2020-03-25T11:19:42.240008
| 2018-10-25T12:35:18
| 2018-10-25T12:35:18
| 143,727,900
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,463
|
r
|
dom.R
|
# PNAD 2015 households (Brazilian national household sample survey)
# NOTE(review): rm(list = ls()) wipes the whole workspace and is discouraged
# in scripts — consider removing.
rm(list = ls())
library(tidyverse)
# careful with the width of the third field
# Fixed-width column widths and names for the DOM2015 microdata file.
col_sizes <- c(4, 2, 6, 3, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1, 12, 12, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 4, 2, 12, 3, 12, 6, 9, 3, 5, 12, 4, 7, 7, 2, 12, 2, 1, 8)
col_names <- c("V0101", "UF", "V0102", "V0103", "V0104", "V0105", "V0106", "V0201", "V0202", "V0203", "V0204", "V0205", "V0206", "V0207", "V0208", "V0209", "V0210", "V0211", "V0212", "V0213", "V0214", "V0215", "V0216", "V2016", "V0217", "V0218", "V0219", "V0220", "V2020", "V0221", "V0222", "V0223", "V0224", "V0225", "V0226", "V0227", "V02270", "V02271", "V02272", "V02273", "V02274", "V2027", "V0228", "V0229", "V0230", "V0231", "V0232", "V02321", "V02322", "V02323", "V02324", "V02325", "V02326", "V02327", "V02424", "V02425", "V02426", "V2032", "V4105", "V4107", "V4600", "V4601", "V4602", "V4604", "V4605", "V4606", "V4607", "V4608", "V4609", "V4610", "V4611", "V4614", "UPA", "V4617", "V4618", "V4620", "V4621", "V4622", "V4624", "V9992")
dom <- read_fwf("DOM2015.txt", fwf_widths(col_sizes, col_names))
# Lookup table: federative-unit (state) code -> state name.
unidades <- tibble(UF = c(11, 12, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33, 35, 41, 42, 43, 50, 51, 52, 53),
unidade = c("Rondônia", "Acre", "Amazonas", "Roraima", "Pará", "Amapá", "Tocantins", "Maranhão", "Piauí", "Ceará", "Rio Grande do Norte", "Paraíba", "Pernambuco", "Alagoas", "Sergipe", "Bahia", "Minas Gerais", "Espírito Santo", "Rio de Janeiro", "São Paulo", "Paraná", "Santa Catarina", "Rio Grande do Sul", "Mato Grosso do Sul", "Mato Grosso", "Goiás", "Distrito Federal"))
# Lookup table: dwelling-type code (V0202) -> dwelling type label.
tipo_domicilios <- tibble(cod = c(2, 4, 6),
tipo_dom = c("Casa", "Apartamento", "Cômodo"))
# Stacked bar chart: dwelling-type composition by state.
dom %>%
#filter(UF %in% c(14, 15)) %>%
left_join(unidades,by = "UF") %>%
left_join(tipo_domicilios, by = c("V0202" = "cod")) %>%
select(UF,unidade,tipo_dom) %>%
filter(!is.na(tipo_dom)) %>%
ggplot() +
aes(unidade, fill = tipo_dom) +
geom_bar(position = "fill") +
coord_flip() +
theme_bw()
# Same breakdown as a table of within-state percentages.
dom %>%
#filter(UF %in% c(14, 15)) %>%
left_join(unidades,by = "UF") %>%
left_join(tipo_domicilios, by = c("V0202" = "cod")) %>%
select(UF,unidade,tipo_dom) %>%
filter(!is.na(tipo_dom)) %>%
group_by(UF,tipo_dom) %>%
summarise(n = n()) %>%
mutate(perc = n/sum(n) * 100) %>%
View()
|
c9f740a0db3e87e01e1ef44ecb36eba7f4e84bad
|
31ea8595b1b023988c18875d71ce2a5202c5f3ea
|
/exdata/PA1/plot2.R
|
5ce22173132c20e5a19003596ff3e18b8d2bcab7
|
[] |
no_license
|
datawrecker/datasciencecoursera
|
3fef8322c062442e2a8222e36bdf187462c295b3
|
ce1d0940fec6c0f4123d48b51a30598c24bbf074
|
refs/heads/master
| 2020-04-05T15:20:08.066152
| 2015-03-21T15:10:58
| 2015-03-21T15:10:58
| 31,636,947
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 819
|
r
|
plot2.R
|
# Plot Global Active Power over time for 2007-02-01/02 (rows 66637-69516
# of the raw semicolon-separated file; "?" encodes missing values).
epc <- read.table("household_power_consumption.txt",
                  header = TRUE, sep = ";", colClasses =
                      c("character", "character", "numeric",
                        "numeric", "numeric", "numeric",
                        "numeric", "numeric", "numeric"),
                  na.strings = "?"
                  )[66637:(66637 + 2879), ]
dt <- paste(epc$Date, epc$Time)
# strptime() is vectorised, so the whole character vector can be parsed in
# one call. The original element-by-element loop appending with c() was
# O(n^2) and needlessly slow for 2880 rows.
Datetime <- strptime(dt, "%e/%m/%Y %H:%M:%S")
nepc <- cbind(Datetime, epc)
png("plot2.png", height = 480, width = 480)
plot(nepc$Global_active_power ~ nepc$Datetime,
     type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
bd87fb2a8f4c0ddd32406de327ee9cd99fff32c5
|
a2401f4ec2060730abf0f8ce3d98dd1f19800e1f
|
/Annotation/UCSC_hg19_sequences.R
|
c6d092de11816236a9798daa3392379bce73cac4
|
[
"CC0-1.0"
] |
permissive
|
ahalfpen727/Bioconductor-Resources
|
a7705d4d66dedf01a4359cf1c27fe93b0083ed5c
|
de405694e31b4da5f8709f61bd57ab8d518f9318
|
refs/heads/master
| 2021-07-04T01:30:32.430821
| 2020-10-20T01:55:51
| 2020-10-20T01:55:51
| 190,797,935
| 1
| 0
| null | 2019-10-09T04:36:10
| 2019-06-07T19:27:29
|
R
|
UTF-8
|
R
| false
| false
| 32,176
|
r
|
UCSC_hg19_sequences.R
|
Full genome sequences for Homo sapiens (UCSC version hg38)
Description
Full genome sequences for Homo sapiens (Human) as provided by UCSC (hg38, Dec. 2013) and stored in Biostrings objects.
Note
This BSgenome data package was made from the following source data files:
hg38.2bit from http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/
See ?BSgenomeForge and the BSgenomeForge vignette (vignette("BSgenomeForge")) in the BSgenome software package for how to make a BSgenome data package.
Author(s)
The Bioconductor Dev Team
See Also
BSgenome objects and the available.genomes function in the BSgenome software package.
DNAString objects in the Biostrings package.
The BSgenomeForge vignette (vignette("BSgenomeForge")) in the BSgenome software package for how to make a BSgenome data package.
Examples
BSgenome.Hsapiens.UCSC.hg38
genome <- BSgenome.Hsapiens.UCSC.hg38
seqlengths(genome)
genome$chr1 # same as genome[["chr1"]]
## ---------------------------------------------------------------------
## Extract the upstream sequences
## ---------------------------------------------------------------------
## The upstream sequences located in
## http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/
## are based on RefSeq genes (RefSeq Genes track in the Genome Browser).
## Upstream sequences based on UCSC genes (UCSC Genes track in the
## Genome Browser) can easily be extracted from the full genome
## sequences with:
library(TxDb.Hsapiens.UCSC.hg38.knownGene)
knownGene_txdb <- TxDb.Hsapiens.UCSC.hg38.knownGene
knownGene_up1000seqs <- extractUpstreamSeqs(genome, knownGene_txdb)
## Or, to get upstream sequences based on RefSeq genes:
refGene_txdb <- makeTxDbFromUCSC("hg38", "refGene")
refGene_up1000seqs <- extractUpstreamSeqs(genome, refGene_txdb)
## Note that you can make a TxDb object from various annotation
## resources. See the makeTxDbFromUCSC(), makeTxDbFromBiomart(), and
## makeTxDbFromGFF() functions in the GenomicFeatures package for more
## information.
## IMPORTANT: Make sure you use a TxDb package (or TxDb object) that
## contains a gene model based on hg38 or on a compatible genome (i.e.
## a genome with sequences identical to the sequences in hg38). See
## ?extractUpstreamSeqs in the GenomicFeatures package for more
## information.
## ---------------------------------------------------------------------
## Genome-wide motif searching
## ---------------------------------------------------------------------
## See the GenomeSearching vignette in the BSgenome software
## package for some examples of genome-wide motif searching using
## Biostrings and the BSgenome data packages:
if (interactive())
vignette("GenomeSearching", package="BSgenome")
Results
R version 3.3.1 (2016-06-21) -- "Bug in Your Hair"
Copyright (C) 2016 The R Foundation for Statistical Computing
Platform: x86_64-pc-linux-gnu (64-bit)
R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.
R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.
Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.
> library(BSgenome.Hsapiens.UCSC.hg38)
Loading required package: BSgenome
Loading required package: BiocGenerics
Loading required package: parallel
Attaching package: 'BiocGenerics'
The following objects are masked from 'package:parallel':
clusterApply, clusterApplyLB, clusterCall, clusterEvalQ,
clusterExport, clusterMap, parApply, parCapply, parLapply,
parLapplyLB, parRapply, parSapply, parSapplyLB
The following objects are masked from 'package:stats':
IQR, mad, xtabs
The following objects are masked from 'package:base':
Filter, Find, Map, Position, Reduce, anyDuplicated, append,
as.data.frame, cbind, colnames, do.call, duplicated, eval, evalq,
get, grep, grepl, intersect, is.unsorted, lapply, lengths, mapply,
match, mget, order, paste, pmax, pmax.int, pmin, pmin.int, rank,
rbind, rownames, sapply, setdiff, sort, table, tapply, union,
unique, unsplit
Loading required package: S4Vectors
Loading required package: stats4
Attaching package: 'S4Vectors'
The following objects are masked from 'package:base':
colMeans, colSums, expand.grid, rowMeans, rowSums
Loading required package: IRanges
Loading required package: GenomeInfoDb
Loading required package: GenomicRanges
Loading required package: Biostrings
Loading required package: XVector
Loading required package: rtracklayer
> png(filename="/home/ddbj/snapshot/RGM3/R_BC/result/BSgenome.Hsapiens.UCSC.hg38/package.Rd_%03d_medium.png", width=480, height=480)
> ### Name: BSgenome.Hsapiens.UCSC.hg38
> ### Title: Full genome sequences for Homo sapiens (UCSC version hg38)
> ### Aliases: BSgenome.Hsapiens.UCSC.hg38-package
> ### BSgenome.Hsapiens.UCSC.hg38 Hsapiens
> ### Keywords: package data
>
> ### ** Examples
>
> BSgenome.Hsapiens.UCSC.hg38
Human genome:
# organism: Homo sapiens (Human)
# provider: UCSC
# provider version: hg38
# release date: Dec. 2013
# release name: Genome Reference Consortium GRCh38
# 455 sequences:
# chr1 chr2 chr3
# chr4 chr5 chr6
# chr7 chr8 chr9
# chr10 chr11 chr12
# chr13 chr14 chr15
# ... ... ...
# chrUn_KI270744v1 chrUn_KI270745v1 chrUn_KI270746v1
# chrUn_KI270747v1 chrUn_KI270748v1 chrUn_KI270749v1
# chrUn_KI270750v1 chrUn_KI270751v1 chrUn_KI270752v1
# chrUn_KI270753v1 chrUn_KI270754v1 chrUn_KI270755v1
# chrUn_KI270756v1 chrUn_KI270757v1
# (use 'seqnames()' to see all the sequence names, use the '$' or '[[' operator
# to access a given sequence)
> genome <- BSgenome.Hsapiens.UCSC.hg38
> seqlengths(genome)
chr1 chr2 chr3
248956422 242193529 198295559
chr4 chr5 chr6
190214555 181538259 170805979
chr7 chr8 chr9
159345973 145138636 138394717
chr10 chr11 chr12
133797422 135086622 133275309
chr13 chr14 chr15
114364328 107043718 101991189
chr16 chr17 chr18
90338345 83257441 80373285
chr19 chr20 chr21
58617616 64444167 46709983
chr22 chrX chrY
50818468 156040895 57227415
chrM chr1_GL383518v1_alt chr1_GL383519v1_alt
16569 182439 110268
chr1_GL383520v2_alt chr1_KI270759v1_alt chr1_KI270760v1_alt
366580 425601 109528
chr1_KI270761v1_alt chr1_KI270762v1_alt chr1_KI270763v1_alt
165834 354444 911658
chr1_KI270764v1_alt chr1_KI270765v1_alt chr1_KI270766v1_alt
50258 185285 256271
chr1_KI270892v1_alt chr2_GL383521v1_alt chr2_GL383522v1_alt
162212 143390 123821
chr2_GL582966v2_alt chr2_KI270767v1_alt chr2_KI270768v1_alt
96131 161578 110099
chr2_KI270769v1_alt chr2_KI270770v1_alt chr2_KI270771v1_alt
120616 136240 110395
chr2_KI270772v1_alt chr2_KI270773v1_alt chr2_KI270774v1_alt
133041 70887 223625
chr2_KI270775v1_alt chr2_KI270776v1_alt chr2_KI270893v1_alt
138019 174166 161218
chr2_KI270894v1_alt chr3_GL383526v1_alt chr3_JH636055v2_alt
214158 180671 173151
chr3_KI270777v1_alt chr3_KI270778v1_alt chr3_KI270779v1_alt
173649 248252 205312
chr3_KI270780v1_alt chr3_KI270781v1_alt chr3_KI270782v1_alt
224108 113034 162429
chr3_KI270783v1_alt chr3_KI270784v1_alt chr3_KI270895v1_alt
109187 184404 162896
chr3_KI270924v1_alt chr3_KI270934v1_alt chr3_KI270935v1_alt
166540 163458 197351
chr3_KI270936v1_alt chr3_KI270937v1_alt chr4_GL000257v2_alt
164170 165607 586476
chr4_GL383527v1_alt chr4_GL383528v1_alt chr4_KI270785v1_alt
164536 376187 119912
chr4_KI270786v1_alt chr4_KI270787v1_alt chr4_KI270788v1_alt
244096 111943 158965
chr4_KI270789v1_alt chr4_KI270790v1_alt chr4_KI270896v1_alt
205944 220246 378547
chr4_KI270925v1_alt chr5_GL339449v2_alt chr5_GL383530v1_alt
555799 1612928 101241
chr5_GL383531v1_alt chr5_GL383532v1_alt chr5_GL949742v1_alt
173459 82728 226852
chr5_KI270791v1_alt chr5_KI270792v1_alt chr5_KI270793v1_alt
195710 179043 126136
chr5_KI270794v1_alt chr5_KI270795v1_alt chr5_KI270796v1_alt
164558 131892 172708
chr5_KI270897v1_alt chr5_KI270898v1_alt chr6_GL000250v2_alt
1144418 130957 4672374
chr6_GL000251v2_alt chr6_GL000252v2_alt chr6_GL000253v2_alt
4795265 4604811 4677643
chr6_GL000254v2_alt chr6_GL000255v2_alt chr6_GL000256v2_alt
4827813 4606388 4929269
chr6_GL383533v1_alt chr6_KB021644v2_alt chr6_KI270758v1_alt
124736 185823 76752
chr6_KI270797v1_alt chr6_KI270798v1_alt chr6_KI270799v1_alt
197536 271782 152148
chr6_KI270800v1_alt chr6_KI270801v1_alt chr6_KI270802v1_alt
175808 870480 75005
chr7_GL383534v2_alt chr7_KI270803v1_alt chr7_KI270804v1_alt
119183 1111570 157952
chr7_KI270805v1_alt chr7_KI270806v1_alt chr7_KI270807v1_alt
209988 158166 126434
chr7_KI270808v1_alt chr7_KI270809v1_alt chr7_KI270899v1_alt
271455 209586 190869
chr8_KI270810v1_alt chr8_KI270811v1_alt chr8_KI270812v1_alt
374415 292436 282736
chr8_KI270813v1_alt chr8_KI270814v1_alt chr8_KI270815v1_alt
300230 141812 132244
chr8_KI270816v1_alt chr8_KI270817v1_alt chr8_KI270818v1_alt
305841 158983 145606
chr8_KI270819v1_alt chr8_KI270820v1_alt chr8_KI270821v1_alt
133535 36640 985506
chr8_KI270822v1_alt chr8_KI270900v1_alt chr8_KI270901v1_alt
624492 318687 136959
chr8_KI270926v1_alt chr9_GL383539v1_alt chr9_GL383540v1_alt
229282 162988 71551
chr9_GL383541v1_alt chr9_GL383542v1_alt chr9_KI270823v1_alt
171286 60032 439082
chr10_GL383545v1_alt chr10_GL383546v1_alt chr10_KI270824v1_alt
179254 309802 181496
chr10_KI270825v1_alt chr11_GL383547v1_alt chr11_JH159136v1_alt
188315 154407 200998
chr11_JH159137v1_alt chr11_KI270826v1_alt chr11_KI270827v1_alt
191409 186169 67707
chr11_KI270829v1_alt chr11_KI270830v1_alt chr11_KI270831v1_alt
204059 177092 296895
chr11_KI270832v1_alt chr11_KI270902v1_alt chr11_KI270903v1_alt
210133 106711 214625
chr11_KI270927v1_alt chr12_GL383549v1_alt chr12_GL383550v2_alt
218612 120804 169178
chr12_GL383551v1_alt chr12_GL383552v1_alt chr12_GL383553v2_alt
184319 138655 152874
chr12_GL877875v1_alt chr12_GL877876v1_alt chr12_KI270833v1_alt
167313 408271 76061
chr12_KI270834v1_alt chr12_KI270835v1_alt chr12_KI270836v1_alt
119498 238139 56134
chr12_KI270837v1_alt chr12_KI270904v1_alt chr13_KI270838v1_alt
40090 572349 306913
chr13_KI270839v1_alt chr13_KI270840v1_alt chr13_KI270841v1_alt
180306 191684 169134
chr13_KI270842v1_alt chr13_KI270843v1_alt chr14_KI270844v1_alt
37287 103832 322166
chr14_KI270845v1_alt chr14_KI270846v1_alt chr14_KI270847v1_alt
180703 1351393 1511111
chr15_GL383554v1_alt chr15_GL383555v2_alt chr15_KI270848v1_alt
296527 388773 327382
chr15_KI270849v1_alt chr15_KI270850v1_alt chr15_KI270851v1_alt
244917 430880 263054
chr15_KI270852v1_alt chr15_KI270905v1_alt chr15_KI270906v1_alt
478999 5161414 196384
chr16_GL383556v1_alt chr16_GL383557v1_alt chr16_KI270853v1_alt
192462 89672 2659700
chr16_KI270854v1_alt chr16_KI270855v1_alt chr16_KI270856v1_alt
134193 232857 63982
chr17_GL000258v2_alt chr17_GL383563v3_alt chr17_GL383564v2_alt
1821992 375691 133151
chr17_GL383565v1_alt chr17_GL383566v1_alt chr17_JH159146v1_alt
223995 90219 278131
chr17_JH159147v1_alt chr17_JH159148v1_alt chr17_KI270857v1_alt
70345 88070 2877074
chr17_KI270858v1_alt chr17_KI270859v1_alt chr17_KI270860v1_alt
235827 108763 178921
chr17_KI270861v1_alt chr17_KI270862v1_alt chr17_KI270907v1_alt
196688 391357 137721
chr17_KI270908v1_alt chr17_KI270909v1_alt chr17_KI270910v1_alt
1423190 325800 157099
chr18_GL383567v1_alt chr18_GL383568v1_alt chr18_GL383569v1_alt
289831 104552 167950
chr18_GL383570v1_alt chr18_GL383571v1_alt chr18_GL383572v1_alt
164789 198278 159547
chr18_KI270863v1_alt chr18_KI270864v1_alt chr18_KI270911v1_alt
167999 111737 157710
chr18_KI270912v1_alt chr19_GL000209v2_alt chr19_GL383573v1_alt
174061 177381 385657
chr19_GL383574v1_alt chr19_GL383575v2_alt chr19_GL383576v1_alt
155864 170222 188024
chr19_GL949746v1_alt chr19_GL949747v2_alt chr19_GL949748v2_alt
987716 729520 1064304
chr19_GL949749v2_alt chr19_GL949750v2_alt chr19_GL949751v2_alt
1091841 1066390 1002683
chr19_GL949752v1_alt chr19_GL949753v2_alt chr19_KI270865v1_alt
987100 796479 52969
chr19_KI270866v1_alt chr19_KI270867v1_alt chr19_KI270868v1_alt
43156 233762 61734
chr19_KI270882v1_alt chr19_KI270883v1_alt chr19_KI270884v1_alt
248807 170399 157053
chr19_KI270885v1_alt chr19_KI270886v1_alt chr19_KI270887v1_alt
171027 204239 209512
chr19_KI270888v1_alt chr19_KI270889v1_alt chr19_KI270890v1_alt
155532 170698 184499
chr19_KI270891v1_alt chr19_KI270914v1_alt chr19_KI270915v1_alt
170680 205194 170665
chr19_KI270916v1_alt chr19_KI270917v1_alt chr19_KI270918v1_alt
184516 190932 123111
chr19_KI270919v1_alt chr19_KI270920v1_alt chr19_KI270921v1_alt
170701 198005 282224
chr19_KI270922v1_alt chr19_KI270923v1_alt chr19_KI270929v1_alt
187935 189352 186203
chr19_KI270930v1_alt chr19_KI270931v1_alt chr19_KI270932v1_alt
200773 170148 215732
chr19_KI270933v1_alt chr19_KI270938v1_alt chr20_GL383577v2_alt
170537 1066800 128386
chr20_KI270869v1_alt chr20_KI270870v1_alt chr20_KI270871v1_alt
118774 183433 58661
chr21_GL383578v2_alt chr21_GL383579v2_alt chr21_GL383580v2_alt
63917 201197 74653
chr21_GL383581v2_alt chr21_KI270872v1_alt chr21_KI270873v1_alt
116689 82692 143900
chr21_KI270874v1_alt chr22_GL383582v2_alt chr22_GL383583v2_alt
166743 162811 96924
chr22_KB663609v1_alt chr22_KI270875v1_alt chr22_KI270876v1_alt
74013 259914 263666
chr22_KI270877v1_alt chr22_KI270878v1_alt chr22_KI270879v1_alt
101331 186262 304135
chr22_KI270928v1_alt chrX_KI270880v1_alt chrX_KI270881v1_alt
176103 284869 144206
chrX_KI270913v1_alt chr1_KI270706v1_random chr1_KI270707v1_random
274009 175055 32032
chr1_KI270708v1_random chr1_KI270709v1_random chr1_KI270710v1_random
127682 66860 40176
chr1_KI270711v1_random chr1_KI270712v1_random chr1_KI270713v1_random
42210 176043 40745
chr1_KI270714v1_random chr2_KI270715v1_random chr2_KI270716v1_random
41717 161471 153799
chr3_GL000221v1_random chr4_GL000008v2_random chr5_GL000208v1_random
155397 209709 92689
chr9_KI270717v1_random chr9_KI270718v1_random chr9_KI270719v1_random
40062 38054 176845
chr9_KI270720v1_random chr11_KI270721v1_random chr14_GL000009v2_random
39050 100316 201709
chr14_GL000194v1_random chr14_GL000225v1_random chr14_KI270722v1_random
191469 211173 194050
chr14_KI270723v1_random chr14_KI270724v1_random chr14_KI270725v1_random
38115 39555 172810
chr14_KI270726v1_random chr15_KI270727v1_random chr16_KI270728v1_random
43739 448248 1872759
chr17_GL000205v2_random chr17_KI270729v1_random chr17_KI270730v1_random
185591 280839 112551
chr22_KI270731v1_random chr22_KI270732v1_random chr22_KI270733v1_random
150754 41543 179772
chr22_KI270734v1_random chr22_KI270735v1_random chr22_KI270736v1_random
165050 42811 181920
chr22_KI270737v1_random chr22_KI270738v1_random chr22_KI270739v1_random
103838 99375 73985
chrY_KI270740v1_random chrUn_GL000195v1 chrUn_GL000213v1
37240 182896 164239
chrUn_GL000214v1 chrUn_GL000216v2 chrUn_GL000218v1
137718 176608 161147
chrUn_GL000219v1 chrUn_GL000220v1 chrUn_GL000224v1
179198 161802 179693
chrUn_GL000226v1 chrUn_KI270302v1 chrUn_KI270303v1
15008 2274 1942
chrUn_KI270304v1 chrUn_KI270305v1 chrUn_KI270310v1
2165 1472 1201
chrUn_KI270311v1 chrUn_KI270312v1 chrUn_KI270315v1
12399 998 2276
chrUn_KI270316v1 chrUn_KI270317v1 chrUn_KI270320v1
1444 37690 4416
chrUn_KI270322v1 chrUn_KI270329v1 chrUn_KI270330v1
21476 1040 1652
chrUn_KI270333v1 chrUn_KI270334v1 chrUn_KI270335v1
2699 1368 1048
chrUn_KI270336v1 chrUn_KI270337v1 chrUn_KI270338v1
1026 1121 1428
chrUn_KI270340v1 chrUn_KI270362v1 chrUn_KI270363v1
1428 3530 1803
chrUn_KI270364v1 chrUn_KI270366v1 chrUn_KI270371v1
2855 8320 2805
chrUn_KI270372v1 chrUn_KI270373v1 chrUn_KI270374v1
1650 1451 2656
chrUn_KI270375v1 chrUn_KI270376v1 chrUn_KI270378v1
2378 1136 1048
chrUn_KI270379v1 chrUn_KI270381v1 chrUn_KI270382v1
1045 1930 4215
chrUn_KI270383v1 chrUn_KI270384v1 chrUn_KI270385v1
1750 1658 990
chrUn_KI270386v1 chrUn_KI270387v1 chrUn_KI270388v1
1788 1537 1216
chrUn_KI270389v1 chrUn_KI270390v1 chrUn_KI270391v1
1298 2387 1484
chrUn_KI270392v1 chrUn_KI270393v1 chrUn_KI270394v1
971 1308 970
chrUn_KI270395v1 chrUn_KI270396v1 chrUn_KI270411v1
1143 1880 2646
chrUn_KI270412v1 chrUn_KI270414v1 chrUn_KI270417v1
1179 2489 2043
chrUn_KI270418v1 chrUn_KI270419v1 chrUn_KI270420v1
2145 1029 2321
chrUn_KI270422v1 chrUn_KI270423v1 chrUn_KI270424v1
1445 981 2140
chrUn_KI270425v1 chrUn_KI270429v1 chrUn_KI270435v1
1884 1361 92983
chrUn_KI270438v1 chrUn_KI270442v1 chrUn_KI270448v1
112505 392061 7992
chrUn_KI270465v1 chrUn_KI270466v1 chrUn_KI270467v1
1774 1233 3920
chrUn_KI270468v1 chrUn_KI270507v1 chrUn_KI270508v1
4055 5353 1951
chrUn_KI270509v1 chrUn_KI270510v1 chrUn_KI270511v1
2318 2415 8127
chrUn_KI270512v1 chrUn_KI270515v1 chrUn_KI270516v1
22689 6361 1300
chrUn_KI270517v1 chrUn_KI270518v1 chrUn_KI270519v1
3253 2186 138126
chrUn_KI270521v1 chrUn_KI270522v1 chrUn_KI270528v1
7642 5674 2983
chrUn_KI270529v1 chrUn_KI270530v1 chrUn_KI270538v1
1899 2168 91309
chrUn_KI270539v1 chrUn_KI270544v1 chrUn_KI270548v1
993 1202 1599
chrUn_KI270579v1 chrUn_KI270580v1 chrUn_KI270581v1
31033 1553 7046
chrUn_KI270582v1 chrUn_KI270583v1 chrUn_KI270584v1
6504 1400 4513
chrUn_KI270587v1 chrUn_KI270588v1 chrUn_KI270589v1
2969 6158 44474
chrUn_KI270590v1 chrUn_KI270591v1 chrUn_KI270593v1
4685 5796 3041
chrUn_KI270741v1 chrUn_KI270742v1 chrUn_KI270743v1
157432 186739 210658
chrUn_KI270744v1 chrUn_KI270745v1 chrUn_KI270746v1
168472 41891 66486
chrUn_KI270747v1 chrUn_KI270748v1 chrUn_KI270749v1
198735 93321 158759
chrUn_KI270750v1 chrUn_KI270751v1 chrUn_KI270752v1
148850 150742 27745
chrUn_KI270753v1 chrUn_KI270754v1 chrUn_KI270755v1
62944 40191 36723
chrUn_KI270756v1 chrUn_KI270757v1
79590 71251
> genome$chr1 # same as genome[["chr1"]]
248956422-letter "DNAString" instance
seq: NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN...NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
>
> ## ---------------------------------------------------------------------
> ## Extract the upstream sequences
> ## ---------------------------------------------------------------------
> ## The upstream sequences located in
> ## http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/
> ## are based on RefSeq genes (RefSeq Genes track in the Genome Browser).
> ## Upstream sequences based on UCSC genes (UCSC Genes track in the
> ## Genome Browser) can easily be extracted from the full genome
> ## sequences with:
>
> library(TxDb.Hsapiens.UCSC.hg38.knownGene)
Loading required package: GenomicFeatures
Loading required package: AnnotationDbi
Loading required package: Biobase
Welcome to Bioconductor
Vignettes contain introductory material; view with
'browseVignettes()'. To cite Bioconductor, see
'citation("Biobase")', and for packages 'citation("pkgname")'.
> knownGene_txdb <- TxDb.Hsapiens.UCSC.hg38.knownGene
> knownGene_up1000seqs <- extractUpstreamSeqs(genome, knownGene_txdb)
>
> ## Or, to get upstream sequences based on RefSeq genes:
>
> refGene_txdb <- makeTxDbFromUCSC("hg38", "refGene")
Download the refGene table ... OK
Download the refLink table ... OK
Extract the 'transcripts' data frame ... OK
Extract the 'splicings' data frame ... OK
Download and preprocess the 'chrominfo' data frame ... OK
Prepare the 'metadata' data frame ... OK
Make the TxDb object ... OK
Warning message:
In .extractCdsLocsFromUCSCTxTable(ucsc_txtable, exon_locs) :
UCSC data anomaly in 545 transcript(s): the cds cumulative length is
not a multiple of 3 for transcripts 'NM_001305275' 'NM_017940'
'NM_001289974' 'NM_001291281' 'NM_001134939' 'NM_001301371'
'NM_016178' 'NM_001145051' 'NM_001128929' 'NM_001075' 'NM_001144767'
'NM_001322371' 'NM_032470' 'NM_004197' 'NM_032454' 'NM_016098'
'NM_001788' 'NM_001172437' 'NM_001184961' 'NM_015068' 'NM_001159995'
'NM_001159999' 'NM_001160001' 'NM_001005336' 'NM_001288737'
'NM_001288738' 'NM_001288739' 'NM_004408' 'NM_020469' 'NM_001001676'
'NM_033380' 'NM_053005' 'NM_001013356' 'NM_173600' 'NM_006400'
'NM_001130048' 'NM_001318849' 'NM_015296' 'NM_006220' 'NM_001282494'
'NM_001282490' 'NM_001301302' 'NM_002537' 'NM_001278425' 'NM_052892'
'NM_130464' 'NM_001277332' 'NM_182705' 'NM_001291471' 'NM_001291472'
'NM_001291473' 'NM_001291474' 'NM_001291475' 'NM_001123392'
'NM_001291462' 'NM_001291463' 'NM_001291465' 'NM_000068'
'NM_001174080' 'NM_023035' 'NM_001736' 'NM_001301020' 'NM_00415 [... truncated]
> refGene_up1000seqs <- extractUpstreamSeqs(genome, refGene_txdb)
>
> ## Note that you can make a TxDb object from various annotation
> ## resources. See the makeTxDbFromUCSC(), makeTxDbFromBiomart(), and
> ## makeTxDbFromGFF() functions in the GenomicFeatures package for more
> ## information.
> ## IMPORTANT: Make sure you use a TxDb package (or TxDb object) that
> ## contains a gene model based on hg38 or on a compatible genome (i.e.
> ## a genome with sequences identical to the sequences in hg38). See
> ## ?extractUpstreamSeqs in the GenomicFeatures package for more
> ## information.
>
> ## ---------------------------------------------------------------------
> ## Genome-wide motif searching
> ## ---------------------------------------------------------------------
> ## See the GenomeSearching vignette in the BSgenome software
> ## package for some examples of genome-wide motif searching using
> ## Biostrings and the BSgenome data packages:
> #if (interactive())
> vignette("GenomeSearching", package="BSgenome")
|
04f31a603a0ddd9cf4198ec5f99d021a05f5f910
|
fdfc22afa8f51ac83096fc8dbb145909d9c61edc
|
/man/refresh_covidregionaldata_canada.Rd
|
fdf667b8f0f34af730c374c6011ccd46e1eaa74c
|
[
"MIT"
] |
permissive
|
GuilhermeShinobe/covidregionaldata
|
1581cc8fbc92123646f934854a25e5452710bfe0
|
c1f0b5c6ab5284ac6cf9608b64e002c757bd1da7
|
refs/heads/master
| 2022-12-05T20:36:09.681272
| 2020-09-02T12:41:17
| 2020-09-02T12:41:17
| 292,276,446
| 0
| 0
|
NOASSERTION
| 2020-09-02T12:27:20
| 2020-09-02T12:27:19
| null |
UTF-8
|
R
| false
| true
| 510
|
rd
|
refresh_covidregionaldata_canada.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covid19R_wrappers.R
\name{refresh_covidregionaldata_canada}
\alias{refresh_covidregionaldata_canada}
\title{Get daily Canada COVID-19 count data by Province/Territory}
\usage{
refresh_covidregionaldata_canada()
}
\value{
A tibble of COVID cases by province in Canada.
}
\description{
Fetches COVID-19 count data, stratified by date and province.
Data sourced from https://health-infobase.canada.ca/src/data/covidLive/covid19.csv.
}
|
46288819c26c216d860b65ece688d787b6f16171
|
248f9e3cb1784c975b34229f9aa48a01dfadc0bc
|
/project1/plot3.R
|
6800cf5a9814aadccf04e142dff095977ecc635b
|
[] |
no_license
|
shengbing/ExData_Plotting1
|
bc9d9da56fb30be8bd1e70fb3f89d4eb3dfd5bcb
|
32557019b4ae7c6065be4564b728d886420ca9c5
|
refs/heads/master
| 2021-01-19T06:55:35.236228
| 2014-09-04T04:10:32
| 2014-09-04T04:10:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,318
|
r
|
plot3.R
|
# Exploratory Data Analysis - Project 1, plot 3
# The dataset was downloaded manually beforehand into the working directory.
setwd("C:/Users/Shengbing/Documents/R/expl_data_ana")
# Read the raw data; the file is semicolon-separated.
power <- read.csv(file = 'household_power_consumption.txt', sep = ';')
# Convert the Date column from character/factor to Date objects.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
# Keep only the two days of interest.
two_days <- subset(power, power$Date %in% as.Date(c('2007-02-01', '2007-02-02')))
# Combine date and time into a single timestamp (POSIXlt) for the x-axis.
two_days$Time <- paste(two_days$Date , two_days$Time)
two_days$Time <- strptime(two_days$Time, '%Y-%m-%d %H:%M:%S')
# Render plot 3: the three sub-metering series over time.
# type = 'n' sets up the axes; each series is then drawn with lines().
png('plot3.png', 480, 480)
plot(two_days$Time, as.numeric(as.character(two_days$Sub_metering_1)), type = 'n', xlab = '', ylab = 'Energy sub metering')
lines(two_days$Time, as.numeric(as.character(two_days$Sub_metering_1)), col = 'black')
lines(two_days$Time, as.numeric(as.character(two_days$Sub_metering_2)), col = 'red')
lines(two_days$Time, as.numeric(as.character(two_days$Sub_metering_3)), col = 'blue')
legend('topright', lty= c(1, 1, 1), col = c('black', 'red', 'blue'), legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
dev.off()
|
2875824faf0f7ac71e6ab54c1ed17aa8686dc230
|
c6a6b77f3b71ea68f1281b043dd60f17dd85381c
|
/R/methods-SnpSet.R
|
67f373cbed55d030ce10688d650de77b1d13bd82
|
[] |
no_license
|
benilton/oligoClasses
|
df76a4ee4d755342ae32b07c9acb5355153e3f4f
|
be0e1088c52ee8827c86f061e80ffe9b44982a88
|
refs/heads/master
| 2021-01-10T21:40:35.903511
| 2019-11-23T12:22:08
| 2019-11-23T12:22:08
| 1,779,156
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,595
|
r
|
methods-SnpSet.R
|
##
## Directly from Biobase
##
## S4 initialize method for SnpSet2 (adapted from Biobase's SnpSet).
## Builds assayData from the 'call' and 'callProbability' matrices and,
## when 'featureData' is not supplied, derives a genome-aware annotated
## data frame for the requested build ("hg19" by default, or "hg18").
setMethod("initialize", "SnpSet2",
          function(.Object,
                   assayData = assayDataNew(call = call,
                     callProbability = callProbability, ...),
                   phenoData = annotatedDataFrameFrom(assayData, byrow=FALSE),
                   featureData,## = annotatedDataFrameFrom(assayData, byrow=TRUE),
                   experimentData = new("MIAME"),
                   annotation = character(),
                   protocolData = phenoData[,integer(0)],
                   call = new("matrix"),
                   callProbability = matrix(numeric(),
                     nrow=nrow(call), ncol=ncol(call),
                     dimnames=dimnames(call)),
                   genome=c("hg19", "hg18"),
                   ...) {
                  ## Restrict 'genome' to one of the supported builds.
                  genome <- match.arg(genome)
                  ## Feature annotation is genome-aware here, so the plain
                  ## annotatedDataFrameFrom() default (commented above) is
                  ## replaced by GenomeAnnotatedDataFrameFrom().
                  if(missing(featureData))
                          featureData <- GenomeAnnotatedDataFrameFrom(assayData, annotation, genome=genome)
                  ## Delegate the remaining slot assignments to the parent
                  ## class initializer.
                  callNextMethod(.Object,
                                 assayData = assayData,
                                 phenoData = phenoData,
                                 featureData = featureData,
                                 experimentData = experimentData,
                                 annotation = annotation,
                                 protocolData = protocolData, ...)
          })
## Accessor: genotype call matrix stored in the "call" assayData element.
setMethod(snpCall, "SnpSet2", function(object, ...) {
	assayDataElement(object, "call")
})
## Accessor: confidence scores stored in the "callProbability" element.
setMethod(snpCallProbability, "SnpSet2", function(object, ...) {
	assayDataElement(object, "callProbability")
})
## Replacement method for the genotype call matrix.
setReplaceMethod("snpCall", c("SnpSet2", "matrix"),
		 function(object, ..., value){
			 assayDataElementReplace(object, "call", value)
	 })
## Replacement method for the confidence-score matrix (stored as-is;
## see confs<- for the probability/integer conversion).
setReplaceMethod("snpCallProbability", c("SnpSet2", "matrix"),
		 function(object, ..., value){
			 assayDataElementReplace(object, "callProbability", value)
	 })
##-----------------------
## new methods for SnpSet2
##
## calls()/calls<-: shorter aliases for the genotype call matrix, defined
## for both SnpSet2 and the original Biobase SnpSet class.
setMethod("calls", "SnpSet2", function(object) assayData(object)$call)
setReplaceMethod("calls", signature(object="SnpSet2", value="matrix"),
		 function(object, value)
		 assayDataElementReplace(object, "call", value))
setMethod("calls", "SnpSet", function(object) assayData(object)$call)
setReplaceMethod("calls", signature(object="SnpSet", value="matrix"),
		 function(object, value)
		 assayDataElementReplace(object, "call", value))
## Encode a call-confidence probability (in [0, 1)) as a compact integer:
## -1000 * log(1 - p), truncated toward zero by as.integer().
p2i <- function(p) {
	scaled <- -1000 * log(1 - p)
	as.integer(scaled)
}
## Decode an integer confidence score back into a probability; this is
## the inverse of p2i() up to the precision lost by integer truncation.
i2p <- function(i) {
	1 - exp(-i / 1000)
}
## Warn the user that the callProbability slot is a disk-backed object
## (e.g. ff_matrix / ffdf) and print step-by-step instructions for pulling
## the scores into memory and converting them to probabilities with i2p().
##
## X: the object held in the callProbability assayData element.
## Called for its side effects (one warning plus console output).
warningMsg <- function(X){
	.class <- class(X)
	warning("callProbability slot is of class ", .class, ".\n")
	cat("\nTo obtain the confidence scores, the data needs to be extracted from disk and represented as a matrix. The '[' method does both. For example,\n", fill=TRUE)
	message("> x <- confs(object)[,] ## 'x' is a matrix\n")
	cat("* Note however that 'x' may be very large and swamp the available RAM. A better approach would be to specify which rows (i) and columns (j) are read only those rows and columns from disk.\n", fill=TRUE)
	## Fixed typo: the example previously showed '<' instead of the
	## assignment operator '<-'.
	message("> x <- confs(object)[i, j] \n")
	## 'fill' is not an argument of message(); passing it appended the
	## literal text "TRUE" to the printed message, so it was dropped.
	message("Finally, 'x' still needs to be translated to a probability. This can be done by")
	message("> p <- i2p(x)")
}
## Accessor for the confidence scores of a SnpSet2.
## Scores are stored as integers (see p2i/i2p). When transform=TRUE the
## integers are converted back to probabilities via i2p(). Disk-backed
## objects (ff_matrix/ffdf) are returned untouched after warningMsg()
## explains how to extract and convert them manually.
setMethod("confs", "SnpSet2", function(object, transform=TRUE) {
	X <- snpCallProbability(object)
	if(is(X, "ff_matrix") | is(X, "ffdf")){
		warningMsg(X)
		return(X)
	}
	if (transform){
		X <- i2p(X)
	}
	return(X)
})
## Replacement method for the confidence scores of a SnpSet2.
## Scores are stored internally as integers (see p2i/i2p): a matrix of
## probabilities (all values <= 1) is converted before storage, whereas a
## matrix that already holds integer-coded scores (values > 1) is stored
## as-is.
setReplaceMethod("confs", signature(object="SnpSet2", value="matrix"),
		 function(object, value){
			 ##convert probability to integer
			 ## BUG FIXES vs. the original:
			 ##  - the converted matrix was dimensioned with nrow(X)/ncol(X)
			 ##    before 'X' existed (a run-time error); use 'value' instead.
			 ##  - the test was inverted: values <= 1 are probabilities that
			 ##    need conversion; values > 1 are already integer-coded
			 ##    (p2i() on values > 1 takes log of a negative -> NaN).
			 if(max(value) <= 1){
				 X <- matrix(p2i(value), nrow(value), ncol(value),
					     dimnames=dimnames(value))
			 } else {
				 X <- value
			 }
			 assayDataElementReplace(object, "callProbability", X)
		 })
## Accessor for the confidence scores of a plain Biobase SnpSet; behaves
## exactly like the SnpSet2 method above (integer -> probability via i2p,
## with disk-backed objects passed through after a warning).
setMethod("confs", "SnpSet", function(object, transform=TRUE) {
	X <- snpCallProbability(object)
	if(is(X, "ff_matrix") | is(X, "ffdf")){
		warningMsg(X)
		return(X)
	}
	if (transform){
		X <- i2p(X)
	}
	return(X)
})
## Replacement method for the confidence scores of a plain Biobase SnpSet;
## mirrors the SnpSet2 method: probabilities (values <= 1) are converted
## to the internal integer encoding, integer-coded input is stored as-is.
setReplaceMethod("confs", signature(object="SnpSet", value="matrix"),
		 function(object, value){
			 ##convert probability to integer
			 ## BUG FIXES vs. the original (same as the SnpSet2 method):
			 ##  - nrow(X)/ncol(X) referenced 'X' before it was defined;
			 ##    use 'value' instead.
			 ##  - the condition was inverted (p2i() on values > 1 -> NaN).
			 if(max(value) <= 1){
				 X <- matrix(p2i(value), nrow(value), ncol(value),
					     dimnames=dimnames(value))
			 } else {
				 X <- value
			 }
			 assayDataElementReplace(object, "callProbability", X)
		 })
## Combine two SnpSet2 objects by row-binding their assayData matrices and
## merging feature/pheno/experiment metadata. Both objects must be of the
## same class and use the same assayData storage mode; their annotation
## strings are merged into a sorted, comma-separated string.
setMethod("combine", signature=signature(x="SnpSet2", y="SnpSet2"),
	  function(x, y, ...){
		  ##Check that both x and y are valid objects
		  if(!validObject(x)) stop("x is not a valid object")
		  if(!validObject(y)) stop("y is not a valid object")
		  ## Harmonize the annotation slot before combining metadata.
		  annot <- paste(sort(c(annotation(x), annotation(y))), collapse=",")
		  annotation(x) <- annotation(y) <- annot
		  if(class(x) != class(y)){
			  stop("objects must have the same class")
		  }
		  if(storageMode(assayData(x)) != storageMode(assayData(y))){
			  stop("objects must have same storage mode for assayData")
		  }
		  fd <- combine(featureData(x), featureData(y))
		  pd <- combine(phenoData(x), phenoData(y))
		  ## Row-bind each assayData element (call, callProbability, ...)
		  ## of y underneath the corresponding element of x.
		  ad.x <- as.list(assayData(x))
		  ad.y <- as.list(assayData(y))
		  ad.xy <- mapply(rbind, ad.x, ad.y, SIMPLIFY=FALSE)
		  ## NOTE(review): id.x is computed but never used below —
		  ## presumably intended to reorder rows to match featureData.
		  id.x <- match(rownames(ad.xy[[1]]), featureNames(fd))
		  ee <- combine(experimentData(x), experimentData(y))
		  ## Write the merged pieces back into x and return it.
		  assayData(x) <- ad.xy
		  storageMode(assayData(x)) <- storageMode(assayData(y))
		  experimentData(x) <- ee
		  featureData(x) <- fd
		  phenoData(x) <- pd
		  x
	  })
## Defunct stub kept only so that old code calling featuresInRange() gets
## a clear migration message pointing to findOverlaps().
setMethod("featuresInRange", signature(object="SnpSet2", range="RangedDataCNV"),
	  function(object, range, FRAME=0, FRAME.LEFT, FRAME.RIGHT, ...){
		  .Defunct("featuresInRange has been deprecated.  Use findOverlaps.")
	  })
|
923acdb1feb491c001234061ee2a6c7bbdb53f68
|
0f75e8fa0c7c8d700245f66e8434d67f52e1ae54
|
/man/bvarpost.Rd
|
0ee6a5581d47086c156d7f5f5d0707cecc095119
|
[] |
no_license
|
franzmohr/bvartools
|
a7ff34088268911d00e398afc24b054ed85d4c5d
|
ea828293eaabe2895357bb4302842427ff3e95de
|
refs/heads/master
| 2023-08-31T01:48:14.192410
| 2023-08-30T18:54:10
| 2023-08-30T18:54:10
| 155,453,026
| 23
| 14
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,613
|
rd
|
bvarpost.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bvarpost.R
\name{bvarpost}
\alias{bvarpost}
\title{Posterior Simulation for BVAR Models}
\usage{
bvarpost(object)
}
\arguments{
\item{object}{an object of class \code{"bvarmodel"}, usually, a result of a call to \code{\link{gen_var}}
in combination with \code{\link{add_priors}}.}
}
\value{
An object of class \code{"bvar"}.
}
\description{
Produces draws from the posterior distributions of Bayesian VAR models.
}
\details{
The function implements commonly used posterior simulation algorithms for Bayesian VAR models with
both constant and time varying parameters (TVP) as well as stochastic volatility. It can produce posterior
draws for standard BVAR models with independent normal-Wishart priors, which can be augmented by stochastic
search variable selection (SSVS) as proposed by George et al. (2008) or Bayesian variable selection (BVS)
as proposed in Korobilis (2013). Both SSVS or BVS can also be applied to the covariances of the error term.
The implementation follows the descriptions in Chan et al. (2019), George et al. (2008) and Korobilis (2013).
For all approaches the SUR form of a VAR model is used to obtain posterior draws. The algorithm is implemented
in C++ to reduce calculation time.
The function also supports structural BVAR models, where the structural coefficients are estimated from
contemporary endogenous variables, which corresponds to the so-called (A-model). Currently, only
specifications are supported, where the structural matrix contains ones on its diagonal and all lower
triangular elements are freely estimated. Since posterior draws are obtained based on the SUR form of
the VAR model, the structural coefficients are drawn jointly with the other coefficients.
}
\examples{
# Get data
data("e1")
e1 <- diff(log(e1)) * 100
# Create model
model <- gen_var(e1, p = 2, deterministic = "const",
iterations = 50, burnin = 10)
# Number of iterations and burnin should be much higher.
# Add priors
model <- add_priors(model)
# Obtain posterior draws
object <- bvarpost(model)
}
\references{
Chan, J., Koop, G., Poirier, D. J., & Tobias J. L. (2019). \emph{Bayesian econometric methods}
(2nd ed.). Cambridge: Cambridge University Press.
George, E. I., Sun, D., & Ni, S. (2008). Bayesian stochastic search for VAR model
restrictions. \emph{Journal of Econometrics, 142}(1), 553--580.
\doi{10.1016/j.jeconom.2007.08.017}
Korobilis, D. (2013). VAR forecasting using Bayesian variable selection.
\emph{Journal of Applied Econometrics, 28}(2), 204--230. \doi{10.1002/jae.1271}
}
|
a17d5a40974e568a938a19f167a10a5d861952ab
|
62b54d124457474124c5fe950e66619a068c33e9
|
/scripts/run_archr.R
|
1876fd5fe1684ca823757e41d5332e43cf2c8cbb
|
[] |
no_license
|
juliabelk/brioschi_2023
|
aeb90f3114fa6a6bf4c7689c2287627f77564711
|
9a10d8d4ba95d0d73892288c28607c7f6089de1b
|
refs/heads/main
| 2023-04-15T17:50:31.514310
| 2022-11-26T03:38:40
| 2022-11-26T03:38:40
| 565,594,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,216
|
r
|
run_archr.R
|
suppressMessages({
library(ArchR)
library(dplyr)
library(parallel)
library(hexbin)
library(BSgenome.Hsapiens.UCSC.hg19)
library(BSgenome.Hsapiens.UCSC.hg38)
library(BSgenome.Mmusculus.UCSC.mm10)
})
## Look up the ArchR gene/genome annotation objects bundled for a given
## reference build. Supported builds: "mm10", "hg38", "hg19"; any other
## value aborts with "genome not found".
## Returns list(geneAnno = ..., genomeAnno = ...).
getGenomeInfo <- function(genome_id) {
  suffix <- switch(genome_id,
    mm10 = "Mm10",
    hg38 = "Hg38",
    hg19 = "Hg19",
    stop("genome not found")
  )
  ## data() loads the lazy-data objects (e.g. geneAnnoMm10 / genomeAnnoMm10)
  ## shipped with ArchR into scope; get() then fetches them by name.
  data(list = paste0("geneAnno", suffix))
  data(list = paste0("genomeAnno", suffix))
  list(
    geneAnno = get(paste0("geneAnno", suffix)),
    genomeAnno = get(paste0("genomeAnno", suffix))
  )
}
## Create (or load from cache) a subset of an ArchR project.
##   origProj   - the parent ArchRProject.
##   origClus   - NOTE(review): currently unused in the body.
##   sel        - predicate taking the cellColData data.frame and returning
##                a logical vector of cells to keep.
##   newPrefix  - name prefix for the cached RDS and output directory.
##   clustering - clustering spec string passed to addClustering(), or NA.
## The subset project is cached at r_objects/<newPrefix>_proj.rds; if the
## cache exists it is reused, and clustering is (re)computed only when the
## requested clustering column is missing from cellColData.
subsetProj <- function(origProj, origClus, sel, newPrefix, clustering=NA) {
    proj_pth <- paste0("r_objects/", newPrefix, "_proj.rds")
    if (file.exists(proj_pth)) {
        proj <- readRDS(proj_pth)
    } else {
        ## Select the cell names that satisfy the predicate.
        df <- data.frame(origProj@cellColData)
        cellNames <- rownames(df[which(sel(df)),])
        print(length(cellNames))
        proj <- subsetArchRProject(origProj, cells=cellNames, outputDirectory=paste0("ArchRSubset_",newPrefix),dropCells=FALSE)
        proj <- addClustering(proj, newPrefix, clustering=clustering)
        saveRDS(proj, proj_pth)
    }
    ## Refresh clustering if a spec was given but its column is absent
    ## (e.g. the cached project predates this clustering request).
    if (!is.na(clustering) & !(clustering %in% colnames(proj@cellColData))) {
        print("updating clustering")
        proj <- addClustering(proj, newPrefix, clustering=clustering)
        saveRDS(proj, proj_pth)
    }
    return(proj)
}
## Build (or load from cache) the top-level ArchR project from the Arrow
## files named in 'names'. The project is cached at r_objects/All_proj.rds.
##   root      - project root directory (used by callers; not read here).
##   genome_id - reference build passed to getGenomeInfo().
##   names     - sample names; Arrow files are expected at Arrow/<name>.arrow.
##   clusters  - clustering spec forwarded to addClustering(), or NA.
createProj <- function(root, genome_id, names, clusters=NA) {
    proj_pth <- "r_objects/All_proj.rds"
    if (file.exists(proj_pth)) {
        proj <- readRDS(proj_pth)
    } else {
        g <- getGenomeInfo(genome_id)
        ArrowFiles <- paste0("Arrow/",names,".arrow")
        print(ArrowFiles)
        proj <- ArchRProject(
            ArrowFiles = ArrowFiles, geneAnnotation = g$geneAnno,
            genomeAnnotation = g$genomeAnno, outputDirectory = "ArchRProject"
        )
        ## Doublet filtering intentionally disabled (kept for reference).
        #proj <- filterDoublets(proj)
        proj <- addClustering(proj, "All",clustering=clusters)
        ## Per-sample coverages and bigWig tracks for browser visualization.
        proj <- addGroupCoverages(ArchRProj = proj, groupBy = "Sample")#,threads=1)
        getGroupBW(proj,groupBy="Sample",threads=1)
        saveRDS(proj, proj_pth)
    }
    return(proj)
}
## Run LSI + UMAP + Louvain clustering for the parameter combination encoded
## in the 'clustering' string, which has the form "<prefix>_<dims>_<res*1000>"
## (e.g. "All_30_800" -> 30 LSI dimensions, resolution 0.8). Embeddings and
## cluster columns are only computed when absent, so the function is
## idempotent on a cached project.
addClustering <- function(proj, prefix, clustering) {
    ## Decode dims (2nd token) and resolution (3rd token, stored *1000).
    tmp <- strsplit(clustering,"_")[[1]]
    dims <- c(as.numeric(tmp[2]))
    res_opt <- c(as.numeric(tmp[3])/1000)
    print(res_opt)
    print(dims)
    for (d in dims) {
        nm <- paste0(prefix,"_",d)
        ## Compute the LSI reduction and its UMAP only once per dim choice.
        if (!(paste0(nm,"_LSI_UMAP") %in% names(proj@embeddings))) {
            proj <- addIterativeLSI(proj,name=paste0(nm,"_LSI"),dimsToUse=1:d)
            proj <- addUMAP(proj,reducedDims=paste0(nm,"_LSI"),name=paste0(nm,"_LSI_UMAP"))
        }
        for (res in res_opt) {
            nm2 <- paste0(nm,"_",res*1000)
            nm3 <- paste0(nm2,"_LSI")
            ## Cluster only if this (dims, resolution) column is missing.
            if (!(nm3 %in% colnames(proj@cellColData))) { #is.na(clustering)) {
                print(paste0("clustering ",nm2))
                proj <- addClusters(proj,reducedDims=paste0(nm,"_LSI"),name=paste0(nm2,"_LSI"),resolution=res)
            }
        }
    }
    return(proj)
}
## Compute the downstream per-cluster matrices for an ArchR project: group
## coverages, reproducible peak set, peak matrix, motif annotations, chromVAR
## deviations, and co-accessibility. Results are cached at
## r_objects/<clustering>_proj.rds and reused on subsequent calls.
##   proj       - ArchRProject to annotate.
##   dr         - name of the reduced-dimensions embedding to use.
##   clustering - cellColData column holding cluster labels.
addMatrices <- function(proj, dr, clustering) {
    proj_pth <- paste0("r_objects/", clustering, "_proj.rds")
    if (file.exists(proj_pth)) {
        proj <- readRDS(proj_pth)
    } else {
        ## Per-cluster bigWig tracks (side effect; writes to the project dir).
        getGroupBW(proj,groupBy=clustering)
        proj <- addImputeWeights(proj, reducedDims=dr)
        proj <- addGroupCoverages(proj, groupBy=clustering)#,threads=1)
        proj <- addReproduciblePeakSet(proj, groupBy=clustering)
        proj <- addPeakMatrix(proj)
        proj <- addMotifAnnotations(proj,force=TRUE)
        proj <- addDeviationsMatrix(proj,force=TRUE)
        ## Co-accessibility links within 500 kb.
        proj <- addCoAccessibility(ArchRProj=proj,reducedDims=dr,maxDist=500000)
        saveRDS(proj,proj_pth)
    }
    return(proj)
}
## One-time initialization: create the Arrow files (and doublet scores) from
## the raw fragment files. Expected to run with the Arrow directory as the
## working directory (see the driver script below).
##   input_files - list with $files (fragment file paths) and $names
##                 (matching sample names).
##   genome_id   - reference build passed to getGenomeInfo().
## Called for its side effects; the doublet scores are computed but the
## return value is not used by callers.
initProj <- function(input_files, genome_id) {
    g <- getGenomeInfo(genome_id)
    print(input_files$files)
    print(input_files$names)
    ArrowFiles <- createArrowFiles(
        inputFiles = input_files$files,
        sampleNames = input_files$names,
        geneAnno = g$geneAnno,
        genomeAnno = g$genomeAnno,
        maxFrags = 1000000
    )
    doubScores <- addDoubletScores(ArrowFiles)
}
## ---------------------------------------------------------------------
## Driver script. The first command-line argument names a config script in
## scripts/ that must define: root, genome_id, input_files, main_clusters,
## main_dr, and optionally sub_prefix / sub_sel / sub_clusters / sub_dr.
## ---------------------------------------------------------------------
addArchRThreads(threads = 1)
args = commandArgs(trailingOnly=TRUE)
source(paste0("scripts/",args[1],".R"))
## NOTE(review): addArchRThreads is called a second time in case the sourced
## config changed the thread setting.
addArchRThreads(threads = 1)
print(root)
if (!dir.exists(root)) {
    dir.create(root)
}
## First run only: create the Arrow files inside <root>/Arrow, then return.
if (!dir.exists(paste0(root,"Arrow"))) {
    print("creating arrow files...")
    dir.create(paste0(root,"Arrow"))
    wd <- paste0(root, "Arrow")
    print(wd)
    setwd(wd)
    initProj(input_files,genome_id)
    setwd("..")
} else {
    setwd(root)
}
if (!dir.exists("r_objects")) { dir.create("r_objects") }
## Build/load the full project, then optionally compute downstream matrices
## and a clustered subset, as requested by the config.
main_proj <- createProj(root, genome_id, input_files$names, clusters=main_clusters)
print(colnames(main_proj@cellColData))
if (!is.na(main_clusters)) {
    proj_dir <- paste0(root,"ArchRProject/")
    main_proj <- addMatrices(main_proj, main_dr, main_clusters)
}
if (!is.na(sub_prefix)) {
    sub_proj <- subsetProj(main_proj, main_clusters, sub_sel, sub_prefix, sub_clusters)
    if (!is.na(sub_clusters)) {
        proj_dir <- paste0(root,"ArchRSubset_",sub_prefix,"/")
        sub_proj <- addMatrices(sub_proj, sub_dr, sub_clusters)
    }
}
|
4d24dd6cc36cdfa7d4485d8ffcdf59e0be78b5a5
|
43590cd9ec1bfa7d9b5fa1b066c0b17c7811de06
|
/man/getACHO.Rd
|
927014be18041a062ed3921a5eb3b440b7496f5c
|
[] |
no_license
|
Tr1n0m/acho
|
bc713f145e5c74ded22b9d04449ccef07b7916f0
|
8359b2ecd28019776afe1052f9990fbe040e0431
|
refs/heads/main
| 2022-12-25T20:07:44.898144
| 2020-10-01T02:16:04
| 2020-10-01T02:16:04
| 300,114,680
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 832
|
rd
|
getACHO.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAcho.R
\name{getACHO}
\alias{getACHO}
\title{The base ACHO function}
\usage{
getACHO(data1, data2, acho_plot = FALSE)
}
\arguments{
\item{data1}{first dataset (1D/2D).}
\item{data2}{second dataset (1D/2D).}
\item{acho_plot}{Determines if the convex hull overlap behaviour plot is shown. Default FALSE.}
}
\value{
The "ACHO" value of the datasets.
}
\description{
This function calculates the "Averaged Convex Hull Overlap" for the input and can also show a plot for the behaviour of the reducing overlaps.
}
\examples{
n <- 1000
x1 <- rnorm(n, 0, 1)
x2 <- rnorm(n, 1, 1.5)
plot(density(x1), main="", xlab="", ylab="", type="n", bty="n")
lines(density(x1), col="blue")
lines(density(x2), col="red")
getACHO(x1, x2)
getACHO(x1, x2, acho_plot=TRUE)
}
|
7a27279d9dc4e072ade56b7d80a9e8ca7019c4f1
|
e7118b8251f67f440b8ba4634ba6c40440dd43e8
|
/instagram_get.R
|
d89aee043fadd3a4d053cf82f513d9505856b58f
|
[] |
no_license
|
furukama/instagram
|
f27c3de6471ccbec5d445ae45e358526263cccc7
|
bfb8147a9d3f49c41f65a5f82bcf2a200e559906
|
refs/heads/master
| 2021-01-19T14:10:07.359934
| 2014-01-02T11:39:49
| 2014-01-02T11:39:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,322
|
r
|
instagram_get.R
|
#-------------------------------------------------------------------
# Downloading Instagram pictures for a hashtag or location
# Benedikt Koehler, 2013
# @furukama
#-------------------------------------------------------------------
library(RCurl)
library(RJSONIO)
# Login credentials
token <- "" # API key for Instagram API goes here
# Search data: either hashtag or location
# If 'hashtag' is non-empty it takes precedence; otherwise lat/lng is used.
hashtag <- "winter"
lat <- "48.14384"
lng <- "11.578259"
# Search parameters
# p = number of result pages to fetch; 'limit' is the current time as a
# Unix timestamp, used as max_timestamp for the location search.
p <- 10
limit <- as.numeric(Sys.time())
today <- format(Sys.time(), "%Y-%m-%d")
# Images are saved under <cwd>/<YYYY-MM-DD>-<hashtag>/
imgdir <- paste(getwd(), "/", today, "-", hashtag, sep="")
dir.create(imgdir)
# Build the first API request URL (v1 endpoints; long since retired by
# Instagram — kept as-is for historical reference).
if (hashtag != "") {
	api <- paste("https://api.instagram.com/v1/tags/", hashtag, "/media/recent?access_token=", token, sep="")
} else if (lat != "") {
	api <- paste("https://api.instagram.com/v1/media/search?lat=", lat, "&lng=", lng, "&access_token=", token, "&max_timestamp=", limit, sep="")
}
# Fetch p pages, following the pagination$next_url returned by each page,
# and download every thumbnail to imgdir as <media id>.jpg.
for (i in 1:p) {
	print(api)
	raw_data <- getURL(api, ssl.verifypeer = FALSE)
	data <- fromJSON(raw_data)
	url <- lapply(data$data, function(x) c(x$images$thumbnail$url))
	api <- data$pagination["next_url"]
	for (u in 1:length(url)) {
		id <- data$data[u][[1]]$id
		temp <- paste(imgdir, "/", id, ".jpg", sep="")
		download.file(url[[u]], temp, mode="wb")
	}
}
|
3c219a5e172c4160b9fb9aff350698988e3b739c
|
a65a5ed2eef4df4551df848938bc375198e2054c
|
/wnt_pathway/align_network.R
|
db748755f3d71c2de37d3fb1884b309dcea79999
|
[] |
no_license
|
wenrurumon/directed_network
|
b7e103f71c14e9f2c253f02547428dd871c1f733
|
1ea8489bb8587be2cf007964abfe68fc469289f4
|
refs/heads/master
| 2020-07-16T20:03:46.237202
| 2019-11-27T09:31:41
| 2019-11-27T09:31:41
| 73,941,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 417
|
r
|
align_network.R
|
# Validate inferred network edges ('sem', assumed to be a 2-column
# from/to table defined earlier in the session — TODO confirm) against a
# reference edge list pasted into the clipboard.
ref <- read.table('clipboard',header=T)
g.ref <- graph_from_data_frame(ref)
# Scratch values from interactive exploration; the real work is the
# lapply over all rows of 'sem' below.
from <- sem[1,1]
to <- sem[1,2]
# Return the igraph shortest-path result between two named vertices of
# the reference graph (empty vpath when no path exists).
valii <- function(from,to,g.ref){
	ifrom <- which(names(V(g.ref))==from)
	ito <- which(names(V(g.ref))==to)
	shortest_paths(g.ref,V(g.ref)[ifrom],V(g.ref)[ito])
}
# For each inferred edge, look up the shortest path in the reference graph.
test <- lapply(1:nrow(sem),function(i){
	from <- sem[i,1]
	to <- sem[i,2]
	valii(from,to,g.ref)$vpath[[1]]
})
# Fraction of inferred edges whose endpoints are connected in the reference.
mean(sapply(test,length)>0)
|
a8a757fdf2ee26587058096ff18b5eb1ee91de4b
|
5e9de5406a07f31bac45f88dd7335b37051cb780
|
/scripts/paper1/drought experiment models.R
|
c2275b04c498a2c959758b1a33451e431df80074
|
[] |
no_license
|
alanaroseo/fogdata
|
1887323215c6a5ccaced85279299ee4e560aea54
|
3d961adab6a1d2da2983f599a8964ebf9b132059
|
refs/heads/master
| 2021-07-02T21:19:29.623811
| 2020-08-25T02:04:08
| 2020-08-25T02:04:08
| 144,756,073
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,221
|
r
|
drought experiment models.R
|
# Model selection for the drought experiment (paper 1).
# Candidate lm / lmer / nls fits are compared by AICc and likelihood-ratio
# anova(); the author's inline comments mark the preferred model per section.
# NOTE(review): `TT` and `TTp` are loaded elsewhere — presumably the full and
# paired datasets respectively; confirm against the data-prep script.
#write.csv(data.frame(summary(mod)$coefficients), file="model_table.csv")
library("stargazer")
library(gridExtra)
library(grid)
library(investr)
library(AICcmodavg)
library(nlme)
library(lme4)
library(lmerTest)
library(lattice)
# Handy extractor reference (kept from the original author):
#coefficients(mod) # model coefficients
#confint(mod, level=0.95) # CIs for model parameters
#fitted(mod) # predicted values
#residuals(mod) # residuals
#anova(mod) # anova table
#vcov(mod) # covariance matrix for model parameters
#influence(mod) # regression diagnostics
# K-fold cross-validation:
library(DAAG)
#cv.lm(df=mydata, mod, m=3) # 3 fold cross-validation
#####
# Response: TTv_per_m2 ----
#TTv_per_m2
mod1 <- lm(TTv_per_m2 ~ ind_pred_gs, TT)
summary(mod1)
AICc(mod1)
mod6 <- lm(TTv_per_m2 ~ height, TT)
summary(mod6)
AICc(mod6)
anova(mod1,mod6)
mod7 <- lm(TTv_per_m2 ~ height, TTp)
summary(mod7)
AICc(mod7)
mod2 <- lm(TTv_per_m2 ~ ind_pred_gs+position, TT)
summary(mod2)
AICc(mod2)
# Random intercept for canopy position instead of a fixed effect.
lmm1 <- lmer(TTv_per_m2 ~ ind_pred_gs+(1|position), TT)
summary(lmm1)
AICc(lmm1)
lmm2 <- lmer(TTv_per_m2 ~ ind_pred_gs+(1|position), TTp)
summary(lmm2)
AICc(lmm2)
########################################################################
lmm9 <- lmer(TTv_per_m2 ~ ind_pred_gs+height+(1|position), TTp)#best, lowest AICc and most likely
summary(lmm9)
AICc(lmm9)
summary(lmm9)$sigma^2
summary(lmm9)$varcor$position[1]
#####################################################################
# NOTE(review): `mod3` is not defined until the mol_rel.m2 section below, so
# this call errors if the script is run top-to-bottom; also check whether this
# DAAG version's cv.lm() data argument is spelled `data=` or `df=` (the
# commented example above uses `df=`).
cv.lm(data=TTp, mod3, m=4)
anova(lmm9,lmm2)
lmm10 <- lmer(TTv_per_m2 ~ height+(1|position), TTp)
summary(lmm10)
AICc(lmm10)
lmm6 <- lmer(TTv_per_m2 ~ ind_pred_gs+(1|tree), TTp)
summary(lmm6)
AICc(lmm6)
anova(lmm2,lmm6)
# NOTE(review): lmm5 refits the same formula as lmm9 above.
lmm5 <- lmer(TTv_per_m2 ~ ind_pred_gs+height+(1|position), TTp)
summary(lmm5)
AICc(lmm5)
anova(lmm2,lmm5)
#######
# Response: mol released per m2 ----
#mol released per m2
mod3 <- lm(mol_rel.m2 ~ ind_pred_gs, TTp)
summary(mod3)
AICc(mod3)
summary(mod3)$sigma^2
lmm3 <- lmer(mol_rel.m2 ~ ind_pred_gs+(1|position), TT)
summary(lmm3)
AICc(lmm3)
lmm8 <- lmer(mol_rel.m2 ~ ind_pred_gs+(1|position), TTp)
summary(lmm8)
AICc(lmm8)
summary(lmm8)$sigma^2
summary(lmm8)$varcor$position[1]
# NOTE(review): lmm7 is defined a few lines below; this comparison must be
# run after that fit (reorder before sourcing the script).
anova(lmm8,lmm7)
lmm4 <- lmer(mol_rel.m2 ~ ind_pred_gs+(1|tree:position), TT)#best model structure with all data
summary(lmm4)
AICc(lmm4)
summary(lmm4)$sigma^2
summary(lmm4)$varcor$tree[1]
#############################################################################
lmm7 <- lmer(mol_rel.m2 ~ ind_pred_gs+(1|tree:position), TTp)#even better with paired dataset (varying intercepts only) top model for mol_rel.m2 with linear fit
summary(lmm7)
AICc(lmm7)
summary(lmm7)$sigma^2#residual variance should be low
summary(lmm7)$varcor$tree[1]#how var much is explained by tree
############################################################################
# Downward-opening quadratic in height: y ~ -c*x^2 + b*x - a
#y~(-c*x^2)+b*x-a polynominal curve
polymod1 <- nls((mol_rel.m2 ~ I((-c*height^2))+(b*height)-a),start = #this is the very best model for mol_rel.m2 based on AICc with either data set but best with full
list(a=1, b=1, c=.001), TT)
summary(polymod1)
AICc(polymod1)
summary(polymod1)$sigma^2
# NOTE(review): nls summaries have no $varcor component — this returns NULL.
summary(polymod1)$varcor$position[1]
##########################################################################################
polymod2 <- nls((mol_rel.m2 ~ I((-c*ind_pred_gs^2))+(b*ind_pred_gs)-a),start = #this is also quite good, but not as good
list(a=1, b=1, c=.001), TT)
summary(polymod2)
AICc(polymod2)
summary(polymod2)$sigma^2
# NOTE(review): as above, $varcor does not exist for nls fits.
summary(polymod2)$varcor$position[1]
# NOTE(review): this silently overwrites the lmm10 fit from the TTv_per_m2
# section with a different response and random-effects structure.
lmm10 <- lmer(mol_rel.m2 ~ ind_pred_gs+(1+tree|position), TTp)#more likely but higher AIC
summary(lmm10)
AICc(lmm10)
summary(lmm10)$sigma^2
summary(lmm10)$varcor$position[1]
# NOTE(review): lmm11 is never defined anywhere in this script.
anova(lmm7,lmm11)
mod4 <- lm(mol_rel.m2 ~ height, TT)
summary(mod4)
AICc(mod4)
mod5 <- lm(mol_rel.m2 ~ height, TTp)#neither related to height but paired data even less so
summary(mod5)
AICc(mod5)
lmm12 <- lmer(mol_rel.m2 ~ height+(1|tree:position), TTp)#decent fit but high AICc
summary(lmm12)
AICc(lmm12)
lmm14 <- lmer(mol_rel.m2 ~ ind_pred_gs+height+(1|tree:position), TTp)#the most var explained for mol rel per m2, looks like the best with AIC, but AICc is higher, BIC however is a tiny bit lower but by less than 2
summary(lmm14)
AICc(lmm14)
summary(lmm14)$sigma^2
summary(lmm14)$varcor$tree[1]
anova(lmm14, lmm7)
######################
# Response: seconds open ----
#seconds open
mod8 <- lm(ind_sec_open ~ height, TTp)#best
summary(mod8)
AICc(mod8)
summary(mod8)$sigma^2
mod9 <- lm(ind_sec_open ~ TSF, TTp)
summary(mod9)
AICc(mod9)
summary(mod9)$sigma^2
lmm15 <- lmer(ind_sec_open ~ height+(1|tree:position), TTp)
AICc(lmm15)
summary(lmm15)$sigma^2
summary(lmm15)$varcor$tree[1]
##################
# Response: volume released per xylem volume ----
#volume released per xylem volume
mod10 <- lm(vol_rel_per_xy_vol ~ height, TTp)
summary(mod10)
AICc(mod10)
summary(mod10)$sigma^2
lmm16 <- lmer(vol_rel_per_xy_vol ~ height+(1|tree:position), TTp)#nice
summary(lmm16)
AICc(lmm16)
summary(lmm16)$sigma^2
summary(lmm16)$varcor$tree[1]
# Random slope in height by tree, no random intercept.
lmm17 <- lmer(vol_rel_per_xy_vol ~ height+(0+height|tree), TTp)#better
summary(lmm17)
AICc(lmm17)
summary(lmm17)$sigma^2
summary(lmm17)$varcor$tree[1]
##################
mod11 <- lm(vol_rel_per_xy_vol ~ tree, TTp)#best model is tree alone, effect of height is small
summary(mod11)
AICc(mod11)
summary(mod11)$sigma^2
###############
# NOTE(review): `(1|position)` is lmer random-effect syntax — inside lm() it is
# evaluated as arithmetic/logical on `position`, which is almost certainly not
# what was intended; use lmer() or drop the term.
mod12 <- lm(vol_rel_per_xy_vol ~ tree+(1|position), TTp)#very close
summary(mod12)
AICc(mod12)
summary(mod12)$sigma^2
####################################################
# Response: vol_rel ----
#vol_rel
mod13 <- lm(vol_rel ~ xy_vol , TTp)#positive relationship to xy_vol, xy_vol is better related to height, but not by much
summary(mod13)
AICc(mod13)
summary(mod13)$sigma^2
mod14 <- lm(vol_rel ~ height , TTp)#better (negative) relationship to height
summary(mod14)
AICc(mod14)
summary(mod14)$sigma^2
###################################################################
mod15 <- lm(vol_rel ~ xy_vol+height , TTp)#best
summary(mod15)
AICc(mod15)
summary(mod15)$sigma^2
###################################################################
# NOTE(review): this overwrites the mod14 fit above with a different response
# (xy_vol ~ height); rename one of the two if both results are needed.
mod14 <- lm(xy_vol ~ height , TTp)#better (negative) relationship to height
summary(mod14)
AICc(mod14)
summary(mod14)$sigma^2
lmm18 <- lmer(vol_rel ~ xy_vol +height+(1|tree), TTp)#positive relationship to xy_vol, xy_vol is better related to height, but not by much
summary(lmm18)
AICc(lmm18)
summary(lmm18)$sigma^2
|
01cb62de8aea90dd1aac97581696243bd84a3118
|
eae783ecfcdeb9a969ec6b2087ecb1772f682f38
|
/UserInterface_InputData/ui.R
|
76f253f265cb919da8d641b253f643524ad80e22
|
[] |
no_license
|
Assemi/MAGMA
|
2963fe28190318176180c4855e23566db99e4894
|
35e50cf8e9cb57f4524fcac68fb908a43fe85955
|
refs/heads/master
| 2023-03-18T18:16:44.653048
| 2020-01-14T17:48:09
| 2020-01-14T17:48:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,826
|
r
|
ui.R
|
library(shiny)
shinyUI(
fluidPage(
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Heading Text ----
# Create and display heading text
fluidRow( column( 8, headerPanel('MAGMA Input CSV Creator'), offset = 2 )
),
fluidRow( column(4, h5("Example HTML Report:", a("example_html", href="https://github.com/NREL/MAGMA/blob/master/Examples/RTS-2016/reports/HTML_output.html")) ),
column(4, h3("Sections to Run:", position = 'center') )
),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Sections to Run Checkboxes ----
# Creates the 3 parts of the input object "sectionsToRun1, sectionsToRun2, sectionsToRun3" which tell the rest of the interface what input optinos to show.
fluidRow(
column(4, checkboxGroupInput('sectionsToRun1', "", sectionList[1:ceiling(length(sectionList)/3)]) ),
column(4, checkboxGroupInput('sectionsToRun2', '', sectionList[(ceiling(length(sectionList)/3)+1):(ceiling(length(sectionList)/3)+ceiling(length(sectionList)/3))]) ),
column(4, checkboxGroupInput('sectionsToRun3', "", sectionList[(ceiling(length(sectionList)/3)+ceiling(length(sectionList)/3)+1):length(sectionList)]) )
),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# PLEXOS Gen Category Mapping ----
# This whole section is the list that allows you to reassign generation type from PLEXOS categories to whatever type is desired.
# It is only displayed if assigning gen type by plexos category, AND reassign plexos gen category is selected.
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory1',
label='PLEXOS Category:') ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType1',
label='Generation Type:') ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory2', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType2', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory3', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType3', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory4', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType4', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory5', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType5', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory6', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType6', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory7', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType7', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory8', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType8', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory9', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType9', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory10', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType10', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory11', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType11', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory12', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType12', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory13', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType13', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory14', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType14', label=NULL) ) ) ),
fluidRow( column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('plexosCategory15', label=NULL) ) ),
column(4, conditionalPanel( condition = "input.reassignPlexosGenTypes == 1 && input.genTypeMapping == 2 && output.genType", textInput('genType15', label=NULL) ) ) ),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Order and Color of Generation for Stacks ----
# This section sets the order of generation type for the dispatch stacks as well as the plot color for the stacks.
# It also asks if each generation type should be considered for curtailment calculations, and if selected, asks if each type should be shown in the DA-RT plots.
# Only show genStackPlot if the sections that produce a generation dispatch stack are selected.
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", h4('Generation Stack Order (Top to Bottom):'))),
column(3, conditionalPanel( condition = "output.genStackPlot", h4('Generation Type Plot Color:'))),
column(3, conditionalPanel( condition = "output.genStackPlot", h4('Consider as RE for curtailment calculation?'))),
column(3, conditionalPanel( condition = "output.DA_RT", h4('Show in DA-RT committment/dispatch plots?'))) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder1', label=NULL, value='Curtailment') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor1', choices=plotColors, label=NULL, selected='red') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType1', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type1', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder2', label=NULL, value='PV') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor2', choices=plotColors, label=NULL, selected='goldenrod1') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType2', label=NULL, value=TRUE) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type2', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder3', label=NULL, value='CSP') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor3', choices=plotColors, label=NULL, selected='darkorange2') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType3', label=NULL, value=TRUE) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type3', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder4', label=NULL, value='Wind') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor4', choices=plotColors, label=NULL, selected='steelblue3') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType4', label=NULL, value=TRUE) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type4', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder5', label=NULL, value='Storage') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor5', choices=plotColors, label=NULL, selected='gray45') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType5', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type5', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder6', label=NULL, value='Other') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor6', choices=plotColors, label=NULL, selected='mediumpurple3') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType6', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type6', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder7', label=NULL, value='Geothermal') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor7', choices=plotColors, label=NULL, selected='khaki1') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType7', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type7', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder8', label=NULL, value='Gas CT') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor8', choices=plotColors, label=NULL, selected='lightpink') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType8', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type8', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder9', label=NULL, value='Gas CC') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor9', choices=plotColors, label=NULL, selected='darkolivegreen4') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType9', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type9', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder10', label=NULL, value='Hydro') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor10', choices=plotColors, label=NULL, selected='lightblue') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType10', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type10', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder11', label=NULL, value='Coal') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor11', choices=plotColors, label=NULL, selected='gray20') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType11', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type11', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder12', label=NULL, value='Nuclear') ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor12', choices=plotColors, label=NULL, selected='firebrick') ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType12', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type12', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder13', label=NULL) ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor13', choices=plotColors, label=NULL) ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType13', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type13', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder14', label=NULL) ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor14', choices=plotColors, label=NULL) ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType14', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type14', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder15', label=NULL) ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor15', choices=plotColors, label=NULL) ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType15', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type15', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder16', label=NULL) ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor16', choices=plotColors, label=NULL) ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType16', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type6', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder17', label=NULL) ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor17', choices=plotColors, label=NULL) ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType17', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type17', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder18', label=NULL) ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor18', choices=plotColors, label=NULL) ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType18', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type18', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder19', label=NULL) ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor19', choices=plotColors, label=NULL) ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType19', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type19', label=NULL) ), offset=2 ) ),
fluidRow( column(3, conditionalPanel( condition = "output.genStackPlot", textInput('genOrder20', label=NULL) ) ),
column(3, conditionalPanel( condition = "output.genStackPlot", selectInput('genTypeColor20', choices=plotColors, label=NULL) ) ),
column(1, conditionalPanel( condition = "output.genStackPlot", checkboxInput('reType20', label=NULL) ), offset=1 ),
column(1, conditionalPanel( condition = "output.DA_RT", checkboxInput('DA_RT_Type20', label=NULL) ), offset=2 ) ),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Key Period Sections ----
# If any sections involving key periods are selected, this list allows input of key period names and start/end times.
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", h4('Key Period Name:')) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", h4('Period Date Range:')) ) ),
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", textInput('keyPeriodName1', label=NULL, value='Winter Week')) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", dateRangeInput('keyPeriodRange1', label=NULL, startview='decade', start='2030-01-01', end='2030-01-07')) ) ),
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", textInput('keyPeriodName2', label=NULL, value='Spring Week')) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", dateRangeInput('keyPeriodRange2', label=NULL, startview='decade', start='2030-04-01', end='2030-04-07')) ) ),
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", textInput('keyPeriodName3', label=NULL, value='Summer Week')) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", dateRangeInput('keyPeriodRange3', label=NULL, startview='decade', start='2030-07-01', end='2030-07-07')) ) ),
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", textInput('keyPeriodName4', label=NULL, value='Fall Week')) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", dateRangeInput('keyPeriodRange4', label=NULL, startview='decade', start='2030-10-01', end='2030-10-07')) ) ),
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", textInput('keyPeriodName5', label=NULL)) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", dateRangeInput('keyPeriodRange5', label=NULL, startview='decade')) ) ),
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", textInput('keyPeriodName6', label=NULL)) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", dateRangeInput('keyPeriodRange6', label=NULL, startview='decade')) ) ),
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", textInput('keyPeriodName7', label=NULL)) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", dateRangeInput('keyPeriodRange7', label=NULL, startview='decade')) ) ),
fluidRow( column(3, conditionalPanel( condition = "output.keyPeriodPlots", textInput('keyPeriodName8', label=NULL)) ),
column(5, conditionalPanel( condition = "output.keyPeriodPlots", dateRangeInput('keyPeriodRange8', label=NULL, startview='decade')) ) ),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Exclude regions or zones from plots ----
# If generation stacks are going to be created, these check boxes allow the user to select regions or zones to be exluded from the plots.
fluidRow( column(4, conditionalPanel( condition = "output.genStackPlot", checkboxInput('ignoreZones', label="Exclude any zones from generation stack plots?")) ),
column(4, conditionalPanel( condition = "output.genStackPlot", checkboxInput('ignoreRegions', label="Exclude any regions from generation stack plots?")) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreZones == 1", h5('List zone names to exclude:')) ),
column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreRegions == 1", h5('List region names to exclude:')) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreZones == 1", textInput('ignoreZone1', label=NULL)) ),
column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreRegions == 1", textInput('ignoreRegion1', label=NULL)) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreZones == 1", textInput('ignoreZone2', label=NULL)) ),
column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreRegions == 1", textInput('ignoreRegion2', label=NULL)) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreZones == 1", textInput('ignoreZone3', label=NULL)) ),
column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreRegions == 1", textInput('ignoreRegion3', label=NULL)) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreZones == 1", textInput('ignoreZone4', label=NULL)) ),
column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreRegions == 1", textInput('ignoreRegion4', label=NULL)) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreZones == 1", textInput('ignoreZone5', label=NULL)) ),
column(4, conditionalPanel( condition = "output.genStackPlot && input.ignoreRegions == 1", textInput('ignoreRegion5', label=NULL)) ) ),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Interfaces to query ----
# If sections involving interface data are selected, this list allows the user to input the interface names they want data for.
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface1', label='Interfaces to create results for:')) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface2', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface3', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface4', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface5', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface6', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface7', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface8', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface9', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.interfacePlots", textInput('interface10', label=NULL)) ) ),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Lines to query ----
# If sections involving line data are selected, this list allows the user to input the line names they want data for.
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line1', label='Lines to create results for:')) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line2', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line3', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line4', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line5', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line6', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line7', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line8', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line9', label=NULL)) ) ),
fluidRow( column(5, conditionalPanel( condition = "output.linePlots", textInput('line10', label=NULL)) ) ),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Generation to consider curtailment ----
# If sections involving curtailment are selected but none that produce generation stacks, this list asks what type of generation should be considered as curtailment.
fluidRow( column(4, conditionalPanel( condition = "output.curtailmentCalcs && output.genStackPlot == false",
textInput('reType1', label='Renewable types for curtailment calculations:', value='PV')) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.curtailmentCalcs && output.genStackPlot == false", textInput('reType2', label=NULL, value='CSP')) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.curtailmentCalcs && output.genStackPlot == false", textInput('reType3', label=NULL, value='Wind')) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.curtailmentCalcs && output.genStackPlot == false", textInput('reType4', label=NULL)) ) ),
fluidRow( column(4, conditionalPanel( condition = "output.curtailmentCalcs && output.genStackPlot == false", textInput('reType5', label=NULL)) ) ),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Reference Scenario ----
# Option to enter reference scenario name
fluidRow( column(3, checkboxInput('referenceBox', label = 'Comparing Multiple Scenarios?' ) ),
column(3, conditionalPanel( condition = "input.referenceBox == 1", textInput('referenceName', label = 'Reference Scenario Name' ) ) ),
column(3, conditionalPanel( condition = "input.referenceBox == 1", numericInput('numScenarios', label = 'Number of Scenarios', value=1) ) )
),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Scenario Name(s) ----
# Names for scenarios in HTML Report
fluidRow( column(2, textInput('scenario1', label = 'Scenario Name' ) ),
column(2, conditionalPanel( condition = "input.numScenarios > 1", textInput('scenario2', label = "Scenario 2 Name" ) ) ),
column(2, conditionalPanel( condition = "input.numScenarios > 2", textInput('scenario3', label = "Scenario 3 Name" ) ) ),
column(2, conditionalPanel( condition = "input.numScenarios > 3", textInput('scenario4', label = "Scenario 4 Name" ) ) ),
column(2, conditionalPanel( condition = "input.numScenarios > 4", textInput('scenario5', label = "Scenario 5 Name" ) ) ),
column(2, conditionalPanel( condition = "input.numScenarios > 5", textInput('scenario6', label = "Scenario 6 Name" ) ) )
),
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create CSV file button ----
# Button to create the input CSV file.
fluidRow( column(4, textInput('csvLocation', label='Directory to save input CSV:') ) ),
fluidRow( column(4, actionButton('createCSV', label='Create input CSV file.') ) )
))
|
ed528d24c3107a7651acfd2b0401853794e1e2f9
|
acb0fffc554ae76533ba600f04e4628315b1cd95
|
/R/pCO2_computation_script.R
|
9af23f896b7044b8127272e8361ada78d6642b08
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
lukeloken/USBRDelta
|
83826e12a5b5a2e81adeb2119e9c2599a5f8b870
|
fd6569385776d4579748b6422b5153e64606e0ba
|
refs/heads/master
| 2021-06-09T19:08:01.976985
| 2020-05-28T21:51:10
| 2020-05-28T21:51:10
| 145,152,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,852
|
r
|
pCO2_computation_script.R
|
## Functions and script to compute pCO2 concentration from headspace gas samples
#Adapted by A. Smits from Matlab code
# Adapted by S. Sadro from R code
# created by J. Coloso based on what J. Cole used at the Carry Institute
# and an excel program created by S. Hamilton 2003
# computations assume negligible salinity of water samples
#Calculations for determining concentration (in umoles/L) of gas in the
#original liquid (i.e. the lake) of a headspace equilibration
#Need to have the gas concentration of the headspace in ppm
#Need to have the liquid volume in the sampling syringe (WaterVol)
#Need to have the headspace volume (HSVol) (in L) for each equilibrium
#Note: the default source gas ppm (sgmixing) is set to compute in air (must be measured).
#Set value to zero if the gas in the headspace is pure (e.g. N2 or He).
#NOTE: this returns the concentration of the gas in the lake water by
#default.
#References for solubility:
#Yamamoto, S., J.B. Alcauskas, and T.E. Crozier. 1976. Solubility of methane in distilled water and seawater. J. Chem. Eng. Data 21: 78-80.
#Weiss, R.F. 1970. The solubility of nitrogen, oxygen and argon in water and seawater. Deep-Sea Res. 17: 721-735.
#Weiss, R.F. 1974. Carbon dioxide in water and seawater: The solubility of a non-ideal gas. Mar. Chem. 2: 203-215.
#Benson, B.B. and D. Krause, Jr. 1984. The concentration and isotopic fractionation of gases dissolved in freshwater in equilibrium with the atmosphere. 1. Oxygen. Limnol. Oceanogr. 25: 662-671.
#Weiss, R.F. and B.A. Price. 1980. Nitrous oxide solubility in water and seawater. Mar. Chem. 8: 347-359
#Validated by S. Sadro 12/2/2013
#Last updated 10/23/2017 by A. Smits
# List of variables to be input
#Organize variables in an excel file in the order they are shown below for loading:
#SampleCode = unique identification code
#WaterVol = volume of water used in mixing (L)
#HSVol = volume of air or gas used in mixing (L)
#temp = temperature at which equilibrium performed(C)
#Note: for samples taken at depth, temp can either be surface water temp, temp at depth, or some weighted average of the two
#bp = barometric pressure during equilibrium, assumed to be atmospheric (atm)
#sgmixing = source gas mixing ratios (ppmv, of CO2 in source gas; computed from GC or IRGA data as air sample)
#HSmixing = final mixing ratio (ppmv, of CO2 at equilibrium in source gas; computed from GC or IRGA data as water sample)
##
# clear all
##Load pCO2 data:
## Compute dissolved CO2 (umol/L) and partial-pressure units from headspace
## equilibration samples, then export a results CSV.
## NOTE(review): input/output file names are hard-coded relative paths, so the
## script must be run with the working directory set to the data folder.
data <-read.csv(file="02_pCO2_data_cleaned_2017_2018.csv",header=TRUE)
names(data)
# Pull the input columns into plain vectors; every calculation below is vectorized.
Water_Temp <- data$Water_Temp  # equilibration water temperature (C)
SGmixing <- data$SGmixing      # source-gas CO2 mixing ratio (ppmv)
HSmixing <- data$HSmixing      # headspace CO2 mixing ratio at equilibrium (ppmv)
BP_atm <- data$BP_atm          # barometric pressure (atm)
WaterVol <- data$WaterVol      # water volume in syringe (L)
HSVol <- data$HSVol            # headspace volume (L)
AirT <- 20 #Assumed air temperature in headspace
##Calculate CO2 concentration
tempK = Water_Temp +273.15
#temp at which equilibrium carried out in K
tempairK = AirT + 273.15
# assumed headspace air temperature in K (used for gas-phase concentrations)
co2bunsen = (exp(-58.0931+(90.5069*(100/tempK))+(22.294*log(tempK/100))))*((0.0821*tempK)+((-1636.75+(12.0408*tempK)-(3.27957*0.01*tempK*tempK)+(3.16528*0.00001*tempK*tempK*tempK))/1000))
#Bunsen solubility coefficients for headspace equilibration (L/L*atm)
# (temperature-dependent CO2 solubility; see Weiss 1974 reference in header)
sourcegas = SGmixing/(0.0821*tempairK)
#source gas conc. umol/L, 0.0821 is R, the ideal gas constant L*atm/T*mol (T is temp in K)
finalHSconc = HSmixing/(0.0821*tempairK)
#final headspace conc (umol/L)
finalWaterconc = HSmixing*co2bunsen*BP_atm*(1/(0.0821*tempK))
#final concentration in water used for equilibrium(umol/L)
totalgas = (finalHSconc*HSVol)+(finalWaterconc*WaterVol)
#total gas in system (umoles)
CO2uM = (totalgas-(sourcegas*HSVol))/WaterVol
#concentration of gas in lake water (umol/L); mass balance after subtracting
#the CO2 that the source gas itself contributed to the headspace
# Calculate the solubility potential for CO2 at field pressure and temp
# based on Henry's law adjusted for temp, field pressure, and atmospheric
# concentration of gas
#KH_t= Henry's law adjusted for temp Units= mol/L*atm
KH_t=0.034*exp(2400*((1/tempK)-1/(298.15)))
#Saturation concentration of CO2 at ambient temp and pressure
#units: umol/L
CO2sat= SGmixing* BP_atm * KH_t
#Departure from saturation of CO2 as uM
CO2dep=CO2uM - CO2sat
#Saturation concentration of CO2 represented as a percent
# NOTE(review): despite the name, this is the percent *departure* from
# saturation (CO2dep/CO2sat*100), i.e. 0 means exactly at saturation.
CO2sat_pct= CO2dep/ CO2sat*100
#Express CO2 concentration in alternate units
pCO2uatm=tempK*0.082057*CO2uM
# partial pressure (uatm) via ideal gas law; 0.082057 = R in L*atm/(mol*K)
pCO2ppmv=pCO2uatm/BP_atm
# mixing ratio (ppmv) at the ambient barometric pressure
##Export csv with pCO2 values in different units:
pCO2.export <- data.frame(Lake_Name=data$Lake_Name,Coll_Date=data$Coll_Date, Time=data$Time, Depth_m=data$Depth_m, CO2uM=CO2uM, CO2sat_pct=CO2sat_pct, pCO2uatm=pCO2uatm, pCO2ppmv=pCO2ppmv)
write.csv(pCO2.export, file='03_computed_pCO2_2017_2018.csv')
|
be06ec49782181b79b9cced1f6d67df8b1a48a84
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/metafor/examples/dat.nielweise2008.Rd.R
|
a2f3d59375b810310f5654a8b86425b8b1f63de5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 723
|
r
|
dat.nielweise2008.Rd.R
|
# Worked example for the dat.nielweise2008 dataset (metafor package):
# studies on anti-infective-treated central venous catheters for the
# prevention of catheter-related bloodstream infections.
library(metafor)

## Load the example dataset shipped with metafor.
dat <- get(data(dat.nielweise2008))

## Standard (inverse-variance) random-effects model on incidence rate ratios,
## followed by a summary and back-transformed (exponentiated) predictions.
res <- rma(measure = "IRR", x1i = x1i, t1i = t1i, x2i = x2i, t2i = t2i, data = dat)
print(res, digits = 3)
predict(res, transf = exp, digits = 2)

## Random-effects conditional Poisson model (left unevaluated; slower to fit):
# res <- rma.glmm(measure = "IRR", x1i = x1i, t1i = t1i, x2i = x2i, t2i = t2i, data = dat, model = "CM.EL")
# print(res, digits = 3)
# predict(res, transf = exp, digits = 2)
|
5ad58e24237f56d6481cae90ee3074d63a7cc440
|
f9762620931c83c67a5d82a4245b6da355e909a3
|
/R/BiocNeighborParam-class.R
|
6a4c6ac1121594c6a374ed1e733c7c849767991f
|
[] |
no_license
|
LTLA/BiocNeighbors
|
0a5bbdb50a7c6283c04677bbdd79d0b6a8940fba
|
8a58137641e9413393553f8f03f0ac3207ba738f
|
refs/heads/master
| 2022-11-24T12:17:57.745775
| 2022-11-08T19:21:31
| 2022-11-08T19:21:31
| 137,922,813
| 5
| 10
| null | 2020-11-14T06:55:51
| 2018-06-19T17:19:14
|
R
|
UTF-8
|
R
| false
| false
| 2,887
|
r
|
BiocNeighborParam-class.R
|
#' The BiocNeighborParam class
#'
#' A virtual class for specifying the type of nearest-neighbor search algorithm and associated parameters.
#'
#' @details
#' The BiocNeighborParam class is a virtual base class on which other parameter objects are built.
#' There are currently 4 concrete subclasses:
#' \describe{
#' \item{}{\code{\link{KmknnParam}}: exact nearest-neighbor search with the KMKNN algorithm.}
#' \item{}{\code{\link{VptreeParam}}: exact nearest-neighbor search with the VP tree algorithm.}
#' \item{}{\code{\link{AnnoyParam}}: approximate nearest-neighbor search with the Annoy algorithm.}
#' \item{}{\code{\link{HnswParam}}: approximate nearest-neighbor search with the HNSW algorithm.}
#' }
#'
#' These objects hold parameters specifying how each algorithm should be run on an arbitrary data set.
#' See the associated documentation pages for more details.
#'
#' @section Methods:
#' In the following code snippets, \code{x} and \code{object} are BiocNeighborParam objects.
#' \describe{
#' \item{\code{show(object)}:}{Display the class and arguments of \code{object}.}
#' \item{\code{bndistance(object)}:}{Return a string specifying the distance metric to be used for searching.
#' This should be one of \code{"Euclidean"}, \code{"Manhattan"} or \code{"Cosine"}.}
#' \item{\code{x[[i]]}:}{Return the value of slot \code{i}, as used in the constructor for \code{x}.}
#' \item{\code{x[[i]] <- value}:}{Set slot \code{i} to the specified \code{value}.}
#' }
#'
#' @seealso
#' \code{\link{KmknnParam}},
#' \code{\link{VptreeParam}},
#' \code{\link{AnnoyParam}},
#' and \code{\link{HnswParam}} for constructors.
#'
#' \code{\link{buildIndex}}, \code{\link{findKNN}} and \code{\link{queryKNN}} for dispatch.
#'
#' @author
#' Aaron Lun
#'
#' @aliases
#' BiocNeighborParam-class
#' show,BiocNeighborParam-method
#' bndistance,BiocNeighborParam-method
#' [[,BiocNeighborParam-method
#' [[<-,BiocNeighborParam-method
#'
#' @name BiocNeighborParam
NULL
#' @export
#' @importFrom methods show
setMethod("show", "BiocNeighborParam", function(object) {
    # Print one "key: value" line for the concrete class name and one for
    # the configured distance metric.
    cat(paste0("class: ", class(object), "\n"))
    cat(paste0("distance: ", bndistance(object), "\n"))
})
# Accessor: return the distance metric string stored in the 'distance' slot
# ("Euclidean", "Manhattan" or "Cosine" per the class documentation above).
#' @export
setMethod("bndistance", "BiocNeighborParam", function(x) x@distance)
# Validity check shared by all BiocNeighborParam subclasses: the 'distance'
# slot must contain exactly one string. Per the setValidity2 contract, this
# returns TRUE when the object is valid and a character vector of problem
# descriptions otherwise.
#' @importFrom S4Vectors setValidity2
setValidity2("BiocNeighborParam", function(object) {
    msg <- character(0)
    if (length(bndistance(object)) != 1L) {
        msg <- c(msg, "'distance' must be a string")
    }
    # Explicit length comparison (the original relied on numeric truthiness).
    if (length(msg) > 0L) {
        return(msg)
    }
    TRUE
})
# Getter: x[[i]] retrieves slot i by name, mirroring the constructor arguments.
#' @export
setMethod("[[", "BiocNeighborParam", function(x, i, j, ...) {
    # Provides a layer of protection that we can use to update
    # the object or intercept slot queries if the class changes.
    slot(x, i)
})
# Setter: x[[i]] <- value replaces slot i with `value` and returns the
# modified object (S4 replacement-method convention).
#' @export
setReplaceMethod("[[", "BiocNeighborParam", function(x, i, j, ..., value) {
    slot(x, i) <- value
    x
})
|
925619c2463e1cfabbfa83fb38857042460287c3
|
46816a1aca9538d4ec1344a40c16253378bd77e7
|
/Scraper.R
|
c9166373ef26ec9562a183d7d8b709e0912ee53f
|
[] |
no_license
|
srepho/US-Sports-Scraping
|
1fe304ceb796a440c680a16680e495fb75330636
|
d717dca8657db07ab592cf1e5630de7a1fba0cf6
|
refs/heads/master
| 2016-09-07T18:59:52.154065
| 2014-05-03T12:51:17
| 2014-05-03T12:51:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
Scraper.R
|
############################################
# Web Scraper
# by Stephen Oates
############################################
# We will start off using the XML library (even though it's HTML!)
library(XML)
library(stringr)

# Play-by-play pages to scrape.
# NOTE(review): both URLs currently point at the same gameId; update the
# second gameId when a second game is actually wanted.
url1 <- "http://scores.espn.go.com/ncf/playbyplay?gameId=333650245&period=0"
html1 <- htmlTreeParse(url1, useInternalNodes = TRUE)
url2 <- "http://scores.espn.go.com/ncf/playbyplay?gameId=333650245&period=0"
# Bug fix: this previously re-parsed url1, so html2 silently duplicated html1.
html2 <- htmlTreeParse(url2, useInternalNodes = TRUE)

# We will use XPath to traverse the HTML structure.
# Bug fix: `html` was undefined (the parsed documents are html1/html2), and
# "//t d" selected <d> nodes inside <t> nodes, which do not exist in the page;
# "//td" extracts the table cells that hold the play-by-play text.
xpathSApply(html1, "//td", xmlValue)

# The other thing we need to work on is to automate the search and loading of
# all the data you want - might leave this for part two
|
3db47e0f475cfe6cfb8c097105d7e5afb490a41f
|
dde0bdc929870e5a049b9f81589f405b2b2a1280
|
/modelCode/SI_simulations/plot_richness_variability_8species.R
|
01f328974cdff56529e8bf847d072ee35b71bc55
|
[
"MIT"
] |
permissive
|
atredennick/Coexistence-Stability
|
9ee87d09f11269188f321b977576685a9dac2eb1
|
55ea5912f28655d82b4098e3cf950fc75908ddcc
|
refs/heads/master
| 2020-04-03T20:04:41.255744
| 2017-09-26T13:07:01
| 2017-09-26T13:07:01
| 24,862,907
| 2
| 0
| null | 2017-04-18T19:36:32
| 2014-10-06T20:11:43
|
R
|
UTF-8
|
R
| false
| false
| 4,600
|
r
|
plot_richness_variability_8species.R
|
## plot_richness_variability.R
####
#### LOAD LIBRARIES
####
library(ggplot2)
library(ggthemes)
library(gridExtra)
library(plyr)
library(reshape2)
library(synchrony)
library(RColorBrewer)
library(viridis)
####
#### INITIALIZATIONS
####
# Select path to the results and figures
path2results <- "../../simulationResults/SI_results/"
path2figs <- "../../manuscript/components/"
seasons_to_exclude <- 500  # rows before this index are treated as burn-in below
mycols <- brewer.pal(3, "Set2")  # shared color palette for both panels
# Shared ggplot theme applied to every panel in this script.
my_theme <- theme_few()+
  theme(axis.text = element_text(size=12, color="grey35"),
        axis.title = element_text(size=14),
        strip.text = element_text(size=12, color="grey35"),
        legend.title = element_text(size=12),
        legend.text = element_text(size=10, color="grey35"),
        legend.key.size = unit(0.3, "cm"))
####
#### SPECIES RICHNESS - ENVIRONMENTAL VARIABILITY RELATIONSHIP; STORAGE EFFECT
####
## Read in simulation results
# Each list element is one simulation run (state matrix with columns D1-D8,
# N1-N8 -- the "live" states summarized below -- and R) at one level of
# environmental cue variance.
rho0_storage_effect <- readRDS(paste0(path2results,"storage_effect_8species_regional.RDS"))
n_sig_e <- 100 # Number of cue variance levels
sig_e_vec <- pretty(seq(0, 10, length.out=n_sig_e), n_sig_e) # Make a pretty vector
save_multispp_rho0 <- list() # empty storage list
# For each cue-variance level: drop the burn-in seasons, compute the CV of
# total live biomass and the realized richness (species whose mean abundance
# exceeds 1), and collect one summary row per run.
# NOTE(review): growing save_multispp_rho0 with rbind inside the loop is
# O(n^2); harmless at n = 100 but preallocate if this is scaled up.
for(i in 1:length(rho0_storage_effect)){
  tmp <- as.data.frame(rho0_storage_effect[[i]])
  names(tmp) <- c("D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8",
                  "N1", "N2", "N3", "N4", "N5", "N6", "N7", "N8","R")
  livestates <- grep("N", colnames(tmp))  # column indices of live states
  tmp_totbiomass <- rowSums(tmp[seasons_to_exclude:nrow(tmp),livestates])
  tmp_cv <- sd(tmp_totbiomass) / mean(tmp_totbiomass)
  tmp_sppavg <- colMeans(tmp[seasons_to_exclude:nrow(tmp),livestates])
  tmp_spprich <- length(which(tmp_sppavg > 1))
  tmp_out <- data.frame(rho=(-1/8),
                        sigE=sig_e_vec[i],
                        cv=tmp_cv,
                        spprich=tmp_spprich,
                        sdev=sd(tmp_totbiomass),
                        avg=mean(tmp_totbiomass))
  save_multispp_rho0 <- rbind(save_multispp_rho0, tmp_out)
}
# write.csv(save_multispp_rho0, "../derivedSimulationStats/storage_effect_8species_regional_div-stab.csv")
avg_cv_per_rich <- ddply(save_multispp_rho0, .(spprich), summarise,
                         avg_cv = mean(cv))
# "Regional" panel: jittered CV vs. realized richness with a loess trend.
regional <- ggplot()+
  geom_jitter(data=save_multispp_rho0, aes(x=spprich, y=cv), shape=21, color="grey40", fill=mycols[1], size=2, width=0.05, alpha=0.5)+
  # geom_point(data = avg_cv_per_rich, aes(x=spprich, y=avg_cv), shape=19, color="grey40", size=2)+
  # geom_line(data = avg_cv_per_rich, aes(x=spprich, y=avg_cv), color="grey40")+
  geom_smooth(data=save_multispp_rho0, aes(x=spprich, y=cv), method="loess", se=FALSE, color=mycols[1])+
  xlab("Number of Species")+
  ylab("Variability of Total\nCommunity Biomass (CV)")+
  scale_x_continuous(breaks=c(1:8), labels = c(1:8))+
  theme_bw()+
  my_theme
# ggsave(paste0(path2figs,"regional_diversity_stability_storage_effect_8species.png"), width = fig.width, height = 60, units = "mm", dpi = 200)
############## #################
############## SECOND PART -- FROM HPC SAVED FILES #################
############## #################
# Same richness/CV summary as above, but computed from individual .RDS run
# files saved by the HPC ("local" simulations).
sim_files <- list.files(paste0(path2results,"eightspp_local/"))
# NOTE(review): grep treats "*.RDS" as a regular expression, not a glob; it
# still matches names containing ".RDS", but "\\.RDS$" would be the precise
# pattern.
sim_files <- sim_files[grep("*.RDS", sim_files)]
eight_spp_local <- list()
for(i in 1:length(sim_files)){
  tmp <- as.data.frame(readRDS(paste0(path2results,"eightspp_local/",sim_files[i])))
  names(tmp) <- c("D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8",
                  "N1", "N2", "N3", "N4", "N5", "N6", "N7", "N8","R")
  livestates <- grep("N", colnames(tmp))  # column indices of live states
  tmp_totbiomass <- rowSums(tmp[seasons_to_exclude:nrow(tmp),livestates])
  tmp_cv <- sd(tmp_totbiomass) / mean(tmp_totbiomass)
  tmp_sppavg <- colMeans(tmp[seasons_to_exclude:nrow(tmp),livestates])
  tmp_spprich <- length(which(tmp_sppavg > 1))
  tmp_out <- data.frame(cv=tmp_cv,
                        spprich=tmp_spprich)
  eight_spp_local <- rbind(eight_spp_local, tmp_out)
}
# "Local" panel, mirroring the `regional` plot built above.
local <- ggplot(eight_spp_local, aes(x=spprich, y=cv))+
  geom_jitter(shape=21, color="grey40", fill=mycols[1], size=2, width=0.05, alpha=0.5) +
  geom_smooth(method="loess", se=FALSE, color=mycols[1])+
  xlab("Number of Species")+
  ylab("Variability of Total\nCommunity Biomass (CV)")+
  scale_x_continuous(breaks=c(1:8), labels = c(1:8))+
  theme_bw()+
  my_theme
# Write the two-panel SI figure (regional | local).
png(filename = paste0(path2figs,"SI_storage_effect_eightspp_local_regional.png"), width = 8, height=3, units = "in", res=100)
grid.arrange(regional,local,ncol=2)
dev.off()
|
4d406fb15e1e6e7e434dec4274375a08f0f1a718
|
1a8b647c530b69766f5a715891c8c1fe7dec3d04
|
/man/CloudProvider-class.Rd
|
1996449a0321b58150554a393161c090659eb1d2
|
[] |
no_license
|
cran/DockerParallel
|
0b15d2fbb833182f1870484a0266d148d58f372c
|
e6278f65ba3b1c2633b69d97d34c0373eff02a92
|
refs/heads/master
| 2023-06-09T00:10:57.303177
| 2021-06-23T12:00:02
| 2021-06-23T12:00:02
| 364,306,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 277
|
rd
|
CloudProvider-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AAA.R
\docType{class}
\name{CloudProvider-class}
\alias{CloudProvider-class}
\alias{.CloudProvider}
\title{The root class of the cloud provider}
\description{
The root class of the cloud provider
}
|
ec2712a3e7bf0912e1316ede121ea3dcca86cf7f
|
57aa23b02213a3216e3cb1d153673b7e12160757
|
/SHA/YT data/01 - YT_demographic_setup.R
|
3be4a4815343e686585d7ff52f92ec96fd0ec7cb
|
[
"MIT"
] |
permissive
|
PHSKC-APDE/Housing
|
38c96d8502dc50518601cf2012631a49ad159d79
|
2a3951fa16e79d8d83a7cfe32880d115a7c30213
|
refs/heads/main
| 2023-09-02T19:48:15.033921
| 2023-08-11T20:30:13
| 2023-08-11T20:30:13
| 68,620,327
| 5
| 6
|
MIT
| 2023-09-12T22:44:01
| 2016-09-19T15:42:58
|
R
|
UTF-8
|
R
| false
| false
| 7,061
|
r
|
01 - YT_demographic_setup.R
|
###############################################################################
# OVERVIEW:
# Code to examine Yesler Terrace and Scattered sites data (housing and health)
#
# STEPS:
# 01 - Set up YT parameters in combined PHA/Medicaid data ### (THIS CODE) ###
# 02 - Conduct demographic analyses and produce visualizations
# 03 - Analyze movement patterns and geographic elements (optional)
# 03 - Bring in health conditions and join to demographic data
# 04 - Conduct health condition analyses (multiple files)
#
# Alastair Matheson (PHSKC-APDE)
# alastair.matheson@kingcounty.gov
# 2017-06-30
#
###############################################################################
#### Set up global parameter and call in libraries ####
# Turn scientific notation off and other settings
options(max.print = 700, scipen = 100, digits = 5)
library(housing) # contains many useful functions for analyses
library(openxlsx) # Used to import/export Excel files
library(tidyverse) # Used to manipulate data
library(odbc) # Connect to SQL
# Shared network path to housing data and the APDE SQL connection (DSN name).
housing_path <- "//phdata01/DROF_DATA/DOH DATA/Housing"
db_apde51 <- dbConnect(odbc(), "PH_APDEStore51")
#### Bring in combined PHA/Medicaid data with some demographics already run ####
# Currently using stage schema but eventually switch to final
# One row per person/coverage period, with demographics, program/subsidy
# details, address fields, and precomputed age/length-of-stay/person-time
# columns for each year 2012-2018 (suffixes 12-18).
pha_mcaid_final <- dbGetQuery(
  db_apde51,
  "SELECT pid2, hh_id_new_h, startdate_c, enddate_c, enroll_type, dual_elig_m,
full_benefit_m, id_mcaid,
dob_c, race_c, hisp_c, ethn_c, gender_c, lang_m,
agency_new, major_prog, prog_type, subsidy_type, operator_type, vouch_type_final,
unit_add_h, property_id, property_name, portfolio_final, zip_c,
start_housing, start_pha, hh_inc,
age12, age13, age14, age15, age16, age17, age18,
length12, length13, length14, length15, length16, length17, length18,
pt12, pt13, pt14, pt15, pt16, pt17, pt18
FROM stage.mcaid_pha")
# Bring in timevar table to find people at YT or SS addresses
pha_timevar <- dbGetQuery(
  db_apde51,
  "SELECT id_apde, from_date, to_date,
pha_subsidy, pha_voucher, pha_operator, pha_portfolio, geo_add1, geo_city
FROM final.mcaid_mcare_pha_elig_timevar
WHERE pha = 1")
# Bring in property IDs etc
pha_property <- dbGetQuery(
  db_apde51,
  "SELECT DISTINCT unit_add_new, unit_city_new, property_id, property_name
FROM stage.pha
WHERE property_id IS NOT NULL OR property_name IS NOT NULL"
)
# Can use pre-calculated calendar year table and join to YT/SS IDs
pha_claims_calyear <- dbGetQuery(
  db_apde51,
  "SELECT year, id_apde, age_yr, gender_me, race_me, race_eth_me,
enroll_type, full_criteria,
pha_agency, pha_subsidy, pha_voucher, pha_operator, pha_portfolio,
geo_zip, pt
FROM stage.mcaid_mcare_pha_elig_calyear
WHERE pha = 1 AND pop_ever = 1")
#### JOIN DATA ####
#### Set up key variables ####
### Yesler Terrace and scattered sites indicators
# yt_flag() (housing package) adds YT/scattered-site flags (yt, yt_old,
# yt_new, ss) used in the movement coding below.
yt_mcaid_final <- yt_flag(pha_mcaid_final, unit = pid2, prop_id = property_id,
                          prop_name = property_name, address = unit_add_h)
### Movements within data
# Use simplified system for describing movement
# First letter of start_type describes previous address,
# Second letter of start_type describes current address
# First letter of end_type describes current address,
# Second letter of end_type describes next address
# K = KCHA
# N = YT address (new unit)
# O = non-YT, non-scattered site SHA unit
# S = SHA scattered site
# U = unknown (i.e., new into SHA system, mostly people who only had Medicaid but not PHA coverage)
# Y = YT address (old unit)
# The lag()/lead() comparisons below rely on rows being sorted by person and
# coverage period, hence the arrange() first.
yt_mcaid_final <- yt_mcaid_final %>%
  arrange(pid2, startdate_c, enddate_c) %>%
  mutate(
    # First ID the place for that row
    place = case_when(
      is.na(agency_new) | agency_new == "Non-PHA" ~ "U",
      agency_new == "KCHA" & !is.na(agency_new) ~ "K",
      agency_new == "SHA" & !is.na(agency_new) & yt == 0 & ss == 0 ~ "O",
      agency_new == "SHA" & !is.na(agency_new) & yt_old == 1 ~ "Y",
      agency_new == "SHA" & !is.na(agency_new) & yt_new == 1 ~ "N",
      agency_new == "SHA" & !is.na(agency_new) & yt == 0 & ss == 1 ~ "S"
    ),
    # Pair previous/current place; a person's first row gets "U" as previous.
    start_type = case_when(
      pid2 != lag(pid2, 1) | is.na(lag(pid2, 1)) ~ paste0("U", place),
      pid2 == lag(pid2, 1) & !is.na(lag(pid2, 1)) ~ paste0(lag(place, 1), place)
    ),
    # Pair current/next place; a person's last row gets "U" as next.
    end_type = case_when(
      pid2 != lead(pid2, 1) | is.na(lead(pid2, 1)) ~ paste0(place, "U"),
      pid2 == lead(pid2, 1) & !is.na(lead(pid2, 1)) ~ paste0(place, lead(place, 1))
    )
  )
### Age
# Make groups of ages: adds age12_grp ... age18_grp alongside the numeric
# age12 ... age18 columns.
# NOTE(review): the upper bounds (24.99 etc.) leave fractional ages in, e.g.,
# (24.99, 25) unmatched, which would yield NA rather than "Unknown"; confirm
# the age columns cannot take such values.
yt_mcaid_final <- yt_mcaid_final %>%
  mutate_at(
    vars(age12, age13, age14, age15, age16, age17, age18),
    list(grp = ~ case_when(
      . < 18 ~ "<18",
      between(., 18, 24.99) ~ "18-24",
      between(., 25, 44.99) ~ "25-44",
      between(., 45, 61.99) ~ "45-61",
      between(., 62, 64.99) ~ "62-64",
      . >= 65 ~ "65+",
      is.na(.) ~ "Unknown"
    )
    )
  )
### Time in housing
# Make groups of time in housing: adds length12_grp ... length18_grp.
# Uses the same list(~ ...) lambda style as the age-group block above; the
# previous funs() call was deprecated in dplyr 0.8 and errors in current
# dplyr, so it is replaced with the equivalent list(~ ...) form.
yt_mcaid_final <- yt_mcaid_final %>%
  mutate_at(
    vars(length12, length13, length14, length15, length16, length17, length18),
    list(grp = ~ case_when(
      . < 3 ~ "<3 years",
      between(., 3, 5.99) ~ "3-<6 years",
      . >= 6 ~ "6+ years",
      is.na(.) ~ "Unknown")
    )
  )
### Household income
# Add in latest income for each calendar year
# Kludgy workaround for now, look upstream to better track annual income
# Also slow function, look to optimize

# For a given two-digit year YY, find each person's most recent household
# income among rows with person-time in that year (ptYY > 0; latest
# startdate_c wins) and attach it as a new hh_inc_YY column via a join on
# person + coverage start date. Rows without person-time in that year get NA.
#
# Args:
#   df    data frame with pid2, startdate_c, hh_inc and ptYY columns
#   year  two-digit year (e.g. 12 for 2012), used to build the column names
# Returns: df with an added hh_inc_YY column.
hh_inc_f <- function(df, year) {
  # Build the column symbols ("pt12", "hh_inc_12") for tidy evaluation below.
  pt <- rlang::sym(paste0("pt", quo_name(year)))
  hh_inc_yr <- rlang::sym(paste0("hh_inc_", quo_name(year)))
  df_inc <- df %>%
    filter((!!pt) > 0) %>%
    arrange(pid2, desc(startdate_c)) %>%
    group_by(pid2) %>%
    # first() after the descending sort = the most recent income per person
    mutate((!!hh_inc_yr) := first(hh_inc)) %>%
    ungroup() %>%
    arrange(pid2, startdate_c) %>%
    select(pid2, startdate_c, (!!hh_inc_yr))
  df <- left_join(df, df_inc, by = c("pid2", "startdate_c"))
  return(df)
}
# Add hh_inc_12 ... hh_inc_18, one year at a time.
yt_mcaid_final <- hh_inc_f(yt_mcaid_final, 12)
yt_mcaid_final <- hh_inc_f(yt_mcaid_final, 13)
yt_mcaid_final <- hh_inc_f(yt_mcaid_final, 14)
yt_mcaid_final <- hh_inc_f(yt_mcaid_final, 15)
yt_mcaid_final <- hh_inc_f(yt_mcaid_final, 16)
yt_mcaid_final <- hh_inc_f(yt_mcaid_final, 17)
yt_mcaid_final <- hh_inc_f(yt_mcaid_final, 18)
### Set up income per capita
# Household size = distinct people sharing a household ID and coverage start
# date; each hh_inc_1x column is then divided by that size, producing
# per-capita columns named hh_inc_1x_cap.
yt_mcaid_final <- yt_mcaid_final %>%
  group_by(hh_id_new_h, startdate_c) %>%
  mutate(hh_size_new = n_distinct(pid2)) %>%
  ungroup() %>%
  mutate_at(vars(starts_with("hh_inc_1")), list(cap = ~ . / hh_size_new))
### Save point
#### Write to SQL for joining with claims ####
# Drop any existing copy of stage.mcaid_pha_yt, then write the assembled data
# frame back to SQL with explicit date column types (wrapped in system.time()
# to report how long the write takes).
# NOTE(review): dbRemoveTable() errors if the table does not yet exist;
# overwrite = TRUE already replaces the table, so the drop is belt-and-braces.
dbRemoveTable(db_apde51, name = DBI::Id(schema = "stage", table = "mcaid_pha_yt"))
system.time(dbWriteTable(db_apde51, name = DBI::Id(schema = "stage", table = "mcaid_pha_yt"),
                         value = as.data.frame(yt_mcaid_final), overwrite = TRUE,
                         field.types = c(
                           startdate_c = "date", enddate_c = "date",
                           dob_c = "date", start_housing = "date", start_pha = "date"))
)
# Free memory now that the combined table has been persisted.
rm(pha_mcaid_final, hh_inc_f)
gc()
|
061de232d25b03635e42e0f349cbfc9b25f8abf4
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/modules/assim.sequential/man/assess.params.Rd
|
24c873b53b26cb1e75794255f0c2cec216ede627
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| true
| 617
|
rd
|
assess.params.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assess.params.R
\name{assess.params}
\alias{assess.params}
\alias{assessParams}
\title{assess.params}
\usage{
assessParams(dat, Xt, wts = NULL, mu_f_TRUE = NULL, P_f_TRUE = NULL)
}
\arguments{
\item{dat}{MCMC output}
\item{Xt}{ensemble output matrix}
\item{wts}{ensemble weights}
\item{mu_f_TRUE}{muf before tobit2space}
\item{P_f_TRUE}{Pf before tobit2space}
}
\value{
make plots
}
\description{
Assessing parameter estimations after mapping model output to tobit space
}
\author{
Michael Dietze and Ann Raiho \email{dietze@bu.edu}
}
|
6d3aaaa1e088e840924db7a989b176dc0666f6e9
|
b56ad2e238af61a08368b52a06b86649724d57e7
|
/CODE_CHUNKS/make_net2.R
|
7dce6d203906866afdeede2397782ad96fe68396
|
[
"BSD-3-Clause"
] |
permissive
|
ryscott5/eparTextTools
|
113b835df4df2f97be55a32a41f8d7778ad304c6
|
7849d9bcaabb8001a3b04d35aea48369014f265c
|
refs/heads/master
| 2021-05-01T04:44:54.727927
| 2017-10-02T19:13:43
| 2017-10-02T19:13:43
| 63,177,507
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,142
|
r
|
make_net2.R
|
#start with first noun
#build tree to all verbs, all nouns
library(devtools)
library(roxygen2)
library(stringr)
library(data.table)
library(igraph)
library(networkD3)
library(RSQLite)
# Load the precomputed sentence match table (subject/verb/object keyword
# list-columns) produced upstream; path is relative -- TODO confirm when
# running from a different working directory.
matchtable<-readRDS("../Research.Grants/matchtable.rds")
# Build a subject -> verb -> object igraph for sentences whose subject (or,
# when inputWord = FALSE, object) keywords contain WORD.
#
# Args:
#   WORD        keyword to match (case-insensitive substring match)
#   mtable      match table with sentence plus subject.keywords,
#               object.keywords, action.verb.text, action.lemmatized list-columns
#   W           weight threshold: edges with weight <= W are dropped
#   inputWord   TRUE = match WORD against subjects; FALSE = against objects
#   sankey      currently unused -- NOTE(review): confirm intent or remove
#   verbfilter  optional character vector restricting which verbs are kept
#   strictlimit if TRUE, re-filter the exploded rows so each individual
#               keyword row (not just the whole sentence) matches WORD
# Returns: simplified igraph with summed edge weights (also copied to width)
#          and isolated vertices removed.
igraphob_object<-function(WORD,mtable,W,inputWord=TRUE,sankey=FALSE,verbfilter=c(),strictlimit=FALSE){
  # WORD='production'
  #mtable<-matchtable
  #W<-10
  # Keep only sentences whose subject (or object) keywords mention WORD.
  if(inputWord==TRUE){
    ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
  }
  else {
    ntr<-dplyr::filter(mtable,str_detect(tolower(object.keywords),tolower(WORD)))
  }
  dt <- data.table(ntr)
  # Collapse each list-column into one ';'-separated string per sentence...
  dt$object.keywords<-sapply(dt$object.keywords,function(X) paste(unlist(X),collapse=";"))
  dt$subject.keywords<-sapply(dt$subject.keywords,function(X) paste(unlist(X),collapse=";"))
  dt$action.lemmatized<-sapply(dt$action.lemmatized,function(X) paste(unlist(X),collapse=";"))
  dt$action.verb.text<-sapply(dt$action.verb.text,function(X) paste(unlist(X),collapse=";"))
  # ...then explode back so each row is one verb/object/subject combination per sentence.
  dt<-dt[,list(action.verb.text = unlist(strsplit(action.verb.text,";")),object.keywords=unlist(strsplit(object.keywords,";")),subject.keywords=unlist(strsplit(subject.keywords,";"))),by = sentence]
  dt<-na.omit(dt)
  if(length(verbfilter)>0){
    dt<-dplyr::filter(dt,action.verb.text%in%verbfilter)
  }
  if(strictlimit==TRUE){
    if(inputWord==TRUE){
      dt<-dplyr::filter(dt,str_detect(tolower(subject.keywords),tolower(WORD)))} else {dt<-dplyr::filter(dt,str_detect(tolower(object.keywords),tolower(WORD)))}
  }
  # Two edge sets stacked together: subject -> verb and verb -> object.
  net1<-graph_from_data_frame(rbind(data.frame("In"=dt$subject.keywords,"Out"=dt$action.verb.text),data.frame("In"=dt$action.verb.text,"Out"=dt$object.keywords)))
  E(net1)$weight <- 1
  # Merge duplicate edges, summing weights to get co-occurrence counts.
  netsimp<-simplify(net1,edge.attr.comb=list(weight="sum","ignore"))
  E(netsimp)$width <- E(netsimp)$weight
  # Prune weak edges (weight <= W), re-simplify, then drop isolated vertices.
  netsimp <- delete_edges(netsimp, E(netsimp)[weight<=W])
  netsimp<-simplify(netsimp,edge.attr.comb=list(weight="sum","ignore"))
  bad.vs<-V(netsimp)[degree(netsimp) == 0]
  netsimp <-delete.vertices(netsimp, bad.vs)
  netsimp}
# Variant of igraphob_object: lower-cases all tokens and builds edges directly
# from subject -> "verb object" (verb and object pasted into a single node),
# rather than the two-hop subject -> verb -> object structure.
#
# Args and pruning behavior are otherwise identical to igraphob_object
# (sankey is likewise unused -- NOTE(review): confirm intent or remove).
# Returns: simplified igraph with summed edge weights and no isolated vertices.
igraphob_object2<-function(WORD,mtable,W,inputWord=TRUE,sankey=FALSE,verbfilter=c(),strictlimit=FALSE){
  # WORD='production'
  #mtable<-matchtable
  #W<-10
  # Keep only sentences whose subject (or object) keywords mention WORD.
  if(inputWord==TRUE){
    ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
  }
  else {
    ntr<-dplyr::filter(mtable,str_detect(tolower(object.keywords),tolower(WORD)))
  }
  dt <- data.table(ntr)
  # Collapse each list-column into one ';'-separated string per sentence...
  dt$object.keywords<-sapply(dt$object.keywords,function(X) paste(unlist(X),collapse=";"))
  dt$subject.keywords<-sapply(dt$subject.keywords,function(X) paste(unlist(X),collapse=";"))
  dt$action.lemmatized<-sapply(dt$action.lemmatized,function(X) paste(unlist(X),collapse=";"))
  dt$action.verb.text<-sapply(dt$action.verb.text,function(X) paste(unlist(X),collapse=";"))
  # ...then explode back, lower-casing every token this time.
  dt<-dt[,list(action.verb.text = tolower(unlist(strsplit(action.verb.text,";"))),object.keywords=tolower(unlist(strsplit(object.keywords,";"))),subject.keywords=tolower(unlist(strsplit(subject.keywords,";")))),by = sentence]
  dt<-na.omit(dt)
  if(length(verbfilter)>0){
    dt<-dplyr::filter(dt,action.verb.text%in%verbfilter)
  }
  if(strictlimit==TRUE){
    if(inputWord==TRUE){
      dt<-dplyr::filter(dt,str_detect(tolower(subject.keywords),tolower(WORD)))} else {dt<-dplyr::filter(dt,str_detect(tolower(object.keywords),tolower(WORD)))}
  }
  # Single edge set: subject -> "verb object" composite node.
  net1<-graph_from_data_frame(data.frame("In"=dt$subject.keywords,"Out"=paste(dt$action.verb.text,dt$object.keywords)))
  E(net1)$weight <- 1
  # Merge duplicate edges, summing weights to get co-occurrence counts.
  netsimp<-simplify(net1,edge.attr.comb=list(weight="sum","ignore"))
  E(netsimp)$width <- E(netsimp)$weight
  # Prune weak edges (weight <= W), re-simplify, then drop isolated vertices.
  netsimp <- delete_edges(netsimp, E(netsimp)[weight<=W])
  netsimp<-simplify(netsimp,edge.attr.comb=list(weight="sum","ignore"))
  bad.vs<-V(netsimp)[degree(netsimp) == 0]
  netsimp <-delete.vertices(netsimp, bad.vs)
  netsimp}
# Scratch: adjacency matrix of the "nutri*" subject graph (interactive check).
temp<-igraphob_object2("nutri",matchtable,W=0,strictlimit=T)
temp<-as_adjacency_matrix(temp)
# Build one igraph per (TopTopics, Orig) stratum of mtable -- keeping only rows
# whose subject (or object, when inputWord = FALSE) keywords match WORD -- and
# merge every stratum into a single graph with igraph::union().
# Edges run subject-keyword -> "<verb> <object-keyword>"; each stratum's
# vertices carry Topic/Orig attributes.  A stratum that fails (e.g. no matching
# rows) contributes graph.empty().
# NOTE: W and sankey are unused; kept for signature compatibility with the
# other igraphob_object* variants.
igraphob_object3<-function(WORD,mtable,W,inputWord=TRUE,sankey=FALSE,verbfilter=c(),strictlimit=FALSE){
do.call(union,lapply(unique(mtable$TopTopics),function(TP){
do.call(union,lapply(unique(dplyr::filter(mtable, TopTopics==TP)$Orig),function(OP){
tryCatch({
mtable<-dplyr::filter(mtable, TopTopics==TP,Orig==OP)
if(inputWord==TRUE){
ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
}
else {
ntr<-dplyr::filter(mtable,str_detect(tolower(object.keywords),tolower(WORD)))
}
dt <- data.table(ntr)
# Flatten list-columns, then explode to one lower-cased row per keyword/verb.
dt$object.keywords<-sapply(dt$object.keywords,function(X) paste(unlist(X),collapse=";"))
dt$subject.keywords<-sapply(dt$subject.keywords,function(X) paste(unlist(X),collapse=";"))
dt$action.lemmatized<-sapply(dt$action.lemmatized,function(X) paste(unlist(X),collapse=";"))
dt$action.verb.text<-sapply(dt$action.verb.text,function(X) paste(unlist(X),collapse=";"))
dt<-dt[,list(action.verb.text = tolower(unlist(strsplit(action.verb.text,";"))),object.keywords=tolower(unlist(strsplit(object.keywords,";"))),subject.keywords=tolower(unlist(strsplit(subject.keywords,";")))),by = sentence]
dt<-na.omit(dt)
if(length(verbfilter)>0){
dt<-dplyr::filter(dt,action.verb.text%in%verbfilter)
}
if(strictlimit==TRUE){
if(inputWord==TRUE){
dt<-dplyr::filter(dt,str_detect(tolower(subject.keywords),tolower(WORD)))} else {dt<-dplyr::filter(dt,str_detect(tolower(object.keywords),tolower(WORD)))}
}
# FIX: `vertices=` was previously nested INSIDE the edge data.frame() call, so
# it became extra edge columns ("vertices.Name", ...) instead of vertex
# metadata.  Pass a proper vertex data frame (covering every edge endpoint,
# as graph_from_data_frame() requires) with the stratum's Topic/Orig.
edges<-data.frame("In"=dt$subject.keywords,"Out"=paste(dt$action.verb.text,dt$object.keywords))
verts<-data.frame("Name"=unique(c(as.character(edges$In),as.character(edges$Out))),"Topic"=TP,"Orig"=OP)
net1<-graph_from_data_frame(edges,vertices=verts)
E(net1)$weight <- 1
net1},error=function(e){graph.empty()})}))}))}
# Like igraphob_object3, but instead of merging strata it returns a nested list:
# for each TopTopics value, a list over Orig values of
#   list(Topic = ..., Orig = ..., Net = <igraph or graph.empty() on failure>).
# Edges run subject-keyword -> "<verb> <object-keyword>".
# NOTE: W and sankey are unused; kept for signature compatibility.
igraphob_object4<-function(WORD,mtable,W,inputWord=TRUE,sankey=FALSE,verbfilter=c(),strictlimit=FALSE){
#WORD='potato'
#mtable<-matchtable
#W<-0
lapply(unique(mtable$TopTopics),function(TP){
#TP<-unique(mtable$TopTopics)[3]
lapply(unique(dplyr::filter(mtable, TopTopics==TP)$Orig),function(OP){
#OP<-unique(dplyr::filter(mtable, TopTopics==TP)$Orig)[1]
list("Topic"=TP,"Orig"=OP,"Net"=tryCatch({
mtable<-dplyr::filter(mtable, TopTopics==TP,Orig==OP)
if(inputWord==TRUE){
ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
}
else {
ntr<-dplyr::filter(mtable,str_detect(tolower(object.keywords),tolower(WORD)))
}
dt <- data.table(ntr)
# Flatten list-columns, then explode to one lower-cased row per keyword/verb.
dt$object.keywords<-sapply(dt$object.keywords,function(X) paste(unlist(X),collapse=";"))
dt$subject.keywords<-sapply(dt$subject.keywords,function(X) paste(unlist(X),collapse=";"))
dt$action.lemmatized<-sapply(dt$action.lemmatized,function(X) paste(unlist(X),collapse=";"))
dt$action.verb.text<-sapply(dt$action.verb.text,function(X) paste(unlist(X),collapse=";"))
dt<-dt[,list(action.verb.text = tolower(unlist(strsplit(action.verb.text,";"))),object.keywords=tolower(unlist(strsplit(object.keywords,";"))),subject.keywords=tolower(unlist(strsplit(subject.keywords,";"))),Topic=TopTopics,Orig=Orig),by = sentence]
dt<-na.omit(dt)
if(length(verbfilter)>0){
dt<-dplyr::filter(dt,action.verb.text%in%verbfilter)
}
if(strictlimit==TRUE){
if(inputWord==TRUE){
dt<-dplyr::filter(dt,str_detect(tolower(subject.keywords),tolower(WORD)))} else {dt<-dplyr::filter(dt,str_detect(tolower(object.keywords),tolower(WORD)))}
}
net1<-graph_from_data_frame(data.frame("In"=dt$subject.keywords,"Out"=paste(dt$action.verb.text,dt$object.keywords)))
E(net1)$weight <- 1
net1},error=function(e){graph.empty()}))})
})}
# Like igraphob_object4, but each stratum's "Net" entry is the graph's EDGE
# LIST (two-column character matrix) rather than the igraph object.
# FIX: previously the igraph::as_edgelist(net1) result was computed and then
# discarded (the igraph object was returned instead), while the error handler
# returned as_edgelist(graph.empty()) -- an inconsistent return type between
# the success and error branches.  Both branches now return an edge list.
# NOTE: W and sankey are unused; kept for signature compatibility.
igraphob_object5<-function(WORD,mtable,W,inputWord=TRUE,sankey=FALSE,verbfilter=c(),strictlimit=FALSE){
lapply(unique(mtable$TopTopics),function(TP){
lapply(unique(dplyr::filter(mtable, TopTopics==TP)$Orig),function(OP){
list("Topic"=TP,"Orig"=OP,"Net"=tryCatch({
mtable<-dplyr::filter(mtable, TopTopics==TP,Orig==OP)
if(inputWord==TRUE){
ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
}
else {
ntr<-dplyr::filter(mtable,str_detect(tolower(object.keywords),tolower(WORD)))
}
dt <- data.table(ntr)
# Flatten list-columns, then explode to one lower-cased row per keyword/verb.
dt$object.keywords<-sapply(dt$object.keywords,function(X) paste(unlist(X),collapse=";"))
dt$subject.keywords<-sapply(dt$subject.keywords,function(X) paste(unlist(X),collapse=";"))
dt$action.lemmatized<-sapply(dt$action.lemmatized,function(X) paste(unlist(X),collapse=";"))
dt$action.verb.text<-sapply(dt$action.verb.text,function(X) paste(unlist(X),collapse=";"))
dt<-dt[,list(action.verb.text = tolower(unlist(strsplit(action.verb.text,";"))),object.keywords=tolower(unlist(strsplit(object.keywords,";"))),subject.keywords=tolower(unlist(strsplit(subject.keywords,";"))),Topic=TopTopics,Orig=Orig),by = sentence]
dt<-na.omit(dt)
if(length(verbfilter)>0){
dt<-dplyr::filter(dt,action.verb.text%in%verbfilter)
}
if(strictlimit==TRUE){
if(inputWord==TRUE){
dt<-dplyr::filter(dt,str_detect(tolower(subject.keywords),tolower(WORD)))} else {dt<-dplyr::filter(dt,str_detect(tolower(object.keywords),tolower(WORD)))}
}
net1<-graph_from_data_frame(data.frame("In"=dt$subject.keywords,"Out"=paste(dt$action.verb.text,dt$object.keywords)))
E(net1)$weight <- 1
igraph::as_edgelist(net1)},error=function(e){as_edgelist(graph.empty())}))})
})}
# Like igraphob_object4, but edges run subject-keyword -> object-keyword
# directly, with the verb stored in the 'edgename' edge attribute.
# FIX: the edge data frame referenced dt$object.keyword (missing "s"), which
# only worked through data.frame `$` partial matching -- corrected to the real
# column name object.keywords.
# NOTE: W and sankey are unused; kept for signature compatibility.
igraphob_object6<-function(WORD,mtable,W,inputWord=TRUE,sankey=FALSE,verbfilter=c(),strictlimit=FALSE){
lapply(unique(mtable$TopTopics),function(TP){
lapply(unique(dplyr::filter(mtable, TopTopics==TP)$Orig),function(OP){
list("Topic"=TP,"Orig"=OP,"Net"=tryCatch({
mtable<-dplyr::filter(mtable, TopTopics==TP,Orig==OP)
if(inputWord==TRUE){
ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
}
else {
ntr<-dplyr::filter(mtable,str_detect(tolower(object.keywords),tolower(WORD)))
}
dt <- data.table(ntr)
# Flatten list-columns, then explode to one lower-cased row per keyword/verb.
dt$object.keywords<-sapply(dt$object.keywords,function(X) paste(unlist(X),collapse=";"))
dt$subject.keywords<-sapply(dt$subject.keywords,function(X) paste(unlist(X),collapse=";"))
dt$action.lemmatized<-sapply(dt$action.lemmatized,function(X) paste(unlist(X),collapse=";"))
dt$action.verb.text<-sapply(dt$action.verb.text,function(X) paste(unlist(X),collapse=";"))
dt<-dt[,list(action.verb.text = tolower(unlist(strsplit(action.verb.text,";"))),object.keywords=tolower(unlist(strsplit(object.keywords,";"))),subject.keywords=tolower(unlist(strsplit(subject.keywords,";"))),Topic=TopTopics,Orig=Orig),by = sentence]
dt<-na.omit(dt)
if(length(verbfilter)>0){
dt<-dplyr::filter(dt,action.verb.text%in%verbfilter)
}
if(strictlimit==TRUE){
if(inputWord==TRUE){
dt<-dplyr::filter(dt,str_detect(tolower(subject.keywords),tolower(WORD)))} else {dt<-dplyr::filter(dt,str_detect(tolower(object.keywords),tolower(WORD)))}
}
net1<-graph_from_data_frame(data.frame("In"=dt$subject.keywords,"Out"=dt$object.keywords,'edgename'=dt$action.verb.text))
net1},error=function(e){graph.empty()}))})
})}
# Scratch: build dt2, a fully-exploded (one row per keyword/verb) version of the
# whole match table, keeping Topic/Orig.
dt2 <- data.table(matchtable)
dt2$object.keywords<-sapply(dt2$object.keywords,function(X) paste(unlist(X),collapse=";"))
dt2$subject.keywords<-sapply(dt2$subject.keywords,function(X) paste(unlist(X),collapse=";"))
dt2$action.lemmatized<-sapply(dt2$action.lemmatized,function(X) paste(unlist(X),collapse=";"))
dt2$action.verb.text<-sapply(dt2$action.verb.text,function(X) paste(unlist(X),collapse=";"))
dt2<-dt2[,list(action.verb.text = tolower(unlist(strsplit(action.verb.text,";"))),object.keywords=tolower(unlist(strsplit(object.keywords,";"))),subject.keywords=tolower(unlist(strsplit(subject.keywords,";"))),Topic=TopTopics,Orig=Orig),by = sentence]
# NOTE(review): the next line references action.lemmatized and TopTopics, which
# are no longer columns of dt2 after the previous step -- running both lines in
# sequence would error.  Presumably only one of the two was meant to be run
# (interactive-session residue); confirm which variant is intended.
dt2<-dt2[,list(action.lemmatized = tolower(unlist(strsplit(action.lemmatized,";"))),object.keywords=tolower(unlist(strsplit(object.keywords,";"))),subject.keywords=tolower(unlist(strsplit(subject.keywords,";"))),Topic=TopTopics,Orig=Orig),by = sentence]
dt2<-na.omit(dt2)
# NOTE(review): dt3 is created and immediately removed -- dead scratch code.
dt3<-unique(dt2)
rm(dt3)
dt2<-dplyr::select(dt2, -sentence)
library("wordnet")
library(pbapply)
head(dt2)
# For every unique verb, look up its WordNet index term and collect synonyms;
# verbs with no WordNet entry yield an empty list.
# NOTE(review): `%>%` is used here but dplyr/magrittr is only attached a few
# lines below -- works in an interactive session where dplyr was already
# loaded, but would fail in a fresh run of this script top-to-bottom.
callwnetverbs<-pblapply(unique(dt2$action.verb.text),function(X){
#X<-unique(dt2$action.verb.text)[3]
X2<-getTermFilter("WildcardFilter", X, TRUE) %>% getIndexTerms("VERB", 1, .)
if(length(X2)>=1){
getSynonyms(X2[[1]])} else {list()}
})
library(dplyr)
# NOTE(review): this bodyless `function()` header captures the NEXT expression
# as its body, so `my_db` is never assigned at top level and the subsequent
# tbl(my_db, ...) calls would fail in a clean session.  Either delete this
# stray line or give the function a proper braced body.
install_wordNetsql<-function()
# Open the WordNet/VerbNet SQLite database (read-only).
my_db <- dplyr::src_sqlite("../sqlite-31.db", create = F)
wordtab<-tbl(my_db, sql("SELECT * from words"))
wordtab<-dplyr::filter(wordtab, lemma%in%dt2$action.verb.text)
# Keep only the last token of each lemmatized phrase (e.g. "be doing" -> "doing").
dt2$action.lemmatized<-sapply(str_split(dt2$action.lemmatized," "), function(X) X[length(X)])
wordtab<-filter(wordtab, lemma%in%dt2$action.lemmatized)
# Pull VerbNet role classes for the matched word ids.
vtab<-select(tbl(my_db, sql("SELECT * from verbnetroles")) %>% filter(., wordid%in%collect(select(wordtab,wordid))$wordid),wordid,class)
vtab<-unique(collect(vtab))
vtab<-left_join(collect(wordtab),collect(vtab))
head(vtab)
# Scratch: map VerbNet classes to a hand-curated guideline sheet (Google Sheets
# CSV), derive per-class representative words, and join them back onto dt2.
str_extract(vtab$class, "[0-9]+")
base_guideline<-read.csv(textConnection(RCurl::getURL("https://docs.google.com/spreadsheets/d/1JBkTjHTW7YTRfHuJ-lZqjotK0XkvtdyI5tld-PUaQkw/pub?gid=0&single=true&output=csv")))
head(base_guideline)
head(vtab)
colnames(base_guideline)[3]<-"class"
vtab<-left_join(vtab,base_guideline)
head(vtab)
# NOTE(review): the first cldf assignment is immediately overwritten by the
# second (looser regex) -- interactive experimentation residue.
cldf<-filter(collect(tbl(my_db,sql("SELECT * from vnclasses"))),str_detect(class,"\\b([0-9][0-9]$|[0-9][0-9]\\.1)\\b$"))
cldf<-filter(collect(tbl(my_db,sql("SELECT * from vnclasses"))),str_detect(class,"[0-9]+$"))
head(cldf)
#cldf<-filter(cldf,nchar(str_extract(class,"[\\d\\.]+"))<=4)
base_guideline<-read.csv(textConnection(RCurl::getURL("https://docs.google.com/spreadsheets/d/1JBkTjHTW7YTRfHuJ-lZqjotK0XkvtdyI5tld-PUaQkw/pub?gid=0&single=true&output=csv")))
head(base_guideline)
head(cldf)
# Split class labels like "put-9.1" into a numeric id and a representative word.
cldf$classid<-str_extract(cldf$class,"[0-9]+")
cldf$word<-str_extract(cldf$class,"[a-z]+")
head(cldf)
vtab$Verb.Class<-vtab$class
nrow(vtab)
vtab$classid<-str_extract(vtab$class,"[0-9]+")
head(vtab)
vtab<-dplyr::left_join(vtab,select(cldf,c(classid,word)))
head(vtab)
vtab<-unique(vtab)
head(vtab)
vtab<-na.omit(vtab)
# NOTE(review): self-assignment is a no-op -- presumably this was meant to copy
# action.lemmatized into `lemma` so the plyr::join below has a key; confirm.
dt2$lemma<-dt2$lemma
head(vtab)
dt2<-plyr::join(dt2,select(vtab,c(lemma,word)),match="first")
# Special-case the copula: map "be" to the class word "exist".
dt2$word[which(dt2$lemma=="be")]<-"exist"
# One subject->object graph per verb-class word, parallel edges summed.
callwn2L<-lapply(names(table(dt2$word)),function(X){
callwn2<-graph_from_data_frame(data.frame(select(filter(dt2,word==X),c(subject.keywords,object.keywords))),directed=TRUE)
E(callwn2)$weight <- 1
callwn2<-simplify(callwn2, edge.attr.comb=list(weight="sum"))
callwn2
})
names(callwn2L)<-names(table(dt2$word))
callwn2L$acquiesce
# Build an undirected verb-synonym graph: one node per verb plus its WordNet
# synonyms, parallel edges collapsed into a summed 'weight', then export the
# weighted adjacency matrix for clustering.
callwn2<-lapply(callwnetverbs,function(X) c(X))
names(callwn2)<-unique(dt2$action.verb.text)
callwn2<-callwn2[sapply(callwn2,length)>0]
callwn2<-lapply(names(callwn2),function(X) data.frame("word"=X,"match"=c(callwn2[[X]])))
callwn2<-do.call(rbind,callwn2)
callwn2<-graph_from_data_frame(callwn2,directed=FALSE)
E(callwn2)$weight <- 1
# FIX: this simplify() call was missing its closing parenthesis, which made the
# rest of the file unparseable.
callwn2<-simplify(callwn2, edge.attr.comb=list(weight="sum"))
edge_attr(callwn2)
get.edge.attribute(callwn2,"weight")
ig<-as.matrix(get.adjacency(callwn2, type="both",attr="weight",edges=TRUE,names=TRUE))
# Scratch: cluster verbs into 500 groups (k-means on the adjacency matrix,
# hierarchical clustering on shortest-path distances) and pick a representative
# word per cluster.  Several lines here reference objects never defined in this
# file (mydata, fit, synsets) -- interactive-session residue.
stp = shortest.paths(callwn2)
stp[which(is.infinite(stp))]<-100
kverbs<-kmeans(ig,500)
kverbs$cluster
library(matrixStats)
# Representative word per cluster = member with the largest center coordinate.
clustrename<-data.frame("clust"=1:500,"newword"=unlist(sapply(1:500,function(k) names(kverbs$cluster[kverbs$cluster==k])[which.max(kverbs$centers[k,c(which(kverbs$cluster==k))])])))
clnams<-data.frame("oldword"=names(kverbs$cluster),"newword"=clustrename$newword[as.numeric(kverbs$cluster)])
clnams[4,]
getSynonyms
# Shell out to the WordNet CLI for hypernym lookup (requires `wn` on PATH).
hypeval<-system2("wn",args=c(dt2$action.verb.text[1],"-hypev"))
kverbs$cluster[kverbs$cluster==6]
?getWord
# NOTE(review): the next five lines are a pasted anonymous-function literal
# (looks like the source of wordnet's hypernym helper); it is evaluated and
# discarded -- dead code.
function (indexterm)
{
hypers <- .jcall(indexterm, "[Lcom/nexagis/jawbone/Synset;",
"getHypernym")
sort(unique(unlist(lapply(hypers, getWord))))
}
src_sqlite("my_db.sqlite3", create = T)
tenverbs<-cutree(hclust(as.dist(stp^2)),k=500)
?cutree
# NOTE(review): cutree() returns a plain vector, so tenverbs$cluster is NULL.
tenverbs$cluster[tenverbs$cluster==5]
# NOTE(review): mydata and fit are not defined anywhere in this file.
clusplot(mydata, fit$cluster, color=TRUE, shade=TRUE,
labels=2, lines=0)
# Rescale the adjacency matrix to integer 0..100 for MCL clustering.
bc1<-max(ig)-min(ig)
ig<-ig-min(ig)
ig<-ig/bc1
diag(ig)<-1
ig<-round(ig*100)
tmcl<-mcl(as.matrix(ig),addLoops=F,allow1=F,max.iter=10)
?kmeans
fit <- kmeans(mydata, 5)
callwn2<-intergraph::asNetwork(callwn2)
library("latentnet")
callwn2<-network::as.matrix.network(callwn2,"adjacency")
install.packages("MCL")
library(MCL)
test1<-ergmm(callwn2~bilinear(d = 2), tofit = c("mle"))
igraph::get.edge.attribute(callwn2,"weight")
related <- getRelatedSynsets(synsets[[1]], "!")
# NOTE(review): `object.keyword` relies on `$` partial matching (column is
# object.keywords), and "Orig" is populated from dt2$Topic -- likely meant
# dt2$Orig; confirm before reusing this graph.
net1<-graph_from_data_frame(data.frame("In"=dt2$subject.keywords,"Out"=dt2$object.keyword,'edgename'=dt2$action.verb.text,"topic"=dt2$Topic,"Orig"=dt2$Topic))
E(net1)$weight <- 1
simplify(net1, edge.attr.comb=list(weight="sum"))
install.packages("intergraph")
net1net<-intergraph::asNetwork(net1)
summary(net1net)[[1]]
drs<-net1net %v% "Decision.Rank.Score"
list.edge.attributes(net1net)
ideg <- degree(net1net, cmode="indegree") # Indegree for MIDs
odeg <- degree(net1net, cmode="outdegree")
clo <- closeness(net1net)
list.edge.attributes(net1net)
get.edge.attribute(net1net,"Orig")
?summary
summary(net1net~edgecov(net1net,"topic"))
degrees<-igraph::degree(net1)
summary(net1)
sort(degrees,decreasing=T)[1:10]
# Smoke-test igraphob_object6 and plot the union of the first few stratum graphs.
test<-igraphob_object6("potato",matchtable,0)
tempsmall<-lapply(test,function(K) K[sapply(K, function(X) "name"%in%vertex_attr_names(X[[3]]))])
tempsmall<-tempsmall[sapply(tempsmall,length)>0]
tempsmall<-unlist(tempsmall, recursive=F)
plot(tempsmall[[1]]$Net %u% tempsmall[[2]]$Net %u% tempsmall[[3]]$Net %u% tempsmall[[4]]$Net)
plot(tempsmall[[4]]$Net)
# Scratch: for every frequent subject keyword (>50 occurrences), build stratum
# graphs, tag edges with Topic/Orig, pool all edges, and regroup them into one
# network object per covariate level.
mtl1full<-tolower(unlist(matchtable$subject.keywords))
mtl1<-unique(tolower(unlist(matchtable$subject.keywords)))
topnames<-names(table(mtl1full)[table(mtl1full)>50])
library(pbapply)
eachlist<-pblapply(topnames, function(WORD){
temp<-igraphob_object4(WORD,matchtable,W=0,strictlimit=T)
#temp<-igraphob_object5("potato",matchtable,W=0,strictlimit=T)
# Keep only strata whose graph has named vertices (i.e. non-empty graphs).
tempsmall<-lapply(temp,function(K) K[sapply(K, function(X) "name"%in%vertex_attr_names(X[[3]]))])
tempsmall<-tempsmall[sapply(tempsmall,length)>0]
tempsmall<-unlist(tempsmall, recursive=F)
tempnets<-lapply(tempsmall,function(X) {
Net1<-X$Net %>% set_edge_attr("topic",value=X$Topic) %>% set_edge_attr("Orig",value=X$Orig)
#network(as_adjacency_matrix(Net1),matrix.type="adjacency",directed=TRUE,edges=T)
Net1
})
tempnets})
eachlist<-unlist(eachlist,recursive=FALSE)
# Pool every graph's edge list plus its (topic, Orig) edge covariates.
templist<-pblapply(eachlist,function(X) list("net"=as_edgelist(X),"covs"=as.data.frame(igraph::get.edge.attribute(X)[c('topic','Orig')])))
outlist<-list("elist"=do.call(rbind, lapply(templist, function(X) X$net)),
"covslist"=do.call(rbind, lapply(templist, function(X) X$covs)))
# Split the pooled edge list by a covariate ("topic" or "Orig") and return one
# network::network per level, named by level.
join_net_by<-function(covar="topic",outlist){
alllist<-unique(outlist$covslist[covar][[1]])
tlist<-lapply(alllist, function(X) {network::network(outlist$elist[which(outlist$covslist[covar]==X),],matrix.type="edgelist")})
names(tlist)<-alllist
tlist
}
fullnet<-join_net_by("topic",outlist)
degree(fullnet$`47`)
bonpow(fullnet$`39`)
?bonpow
# NOTE(review): the next three lines (ending in a stray `newnet}`) look like
# leftovers of an earlier version of join_net_by that was edited in place;
# `newnet` is undefined and the dangling `}` is a parse error -- delete them.
network::set.edge.attribute(newnet,"topic",value=outlist$covslist$topic)
network::set.edge.attribute(newnet,"orig",value=outlist$covslist$Orig)
newnet}
sna::degree(fullnet$`39`)
?degree
newnet
# Scratch: merge the per-stratum graphs by vertex name and experiment with
# collapsing their per-graph edge attributes into a single "TOPIC" attribute.
tempnets2<-do.call(union,c(tempnets,byname=TRUE))
# NOTE(review): set_edge_attr returns a modified copy; this result is discarded,
# so tempnets2 is NOT actually tagged here.
igraph::set_edge_attr(tempnets2,"TOPIC",value=edatfun('topic',tempnets2))
igraph::get.edge.attribute(tempnets2)
tempnet3<-network(as_adjacency_matrix(tempnets2),matrix.type="adjacency",directed=TRUE)
?network
tempnet3
get.edge.attribute(tempnet3)
get.edge.id(tempnet3)
edatfun('topic',tempnets2)
rowsum
# Collapse an igraph's edge attributes whose names match `string_entry` (after a
# union(), attributes come suffixed, e.g. topic_1, topic_2) into one "_"-joined
# string of their numeric parts, one entry per edge.
# FIX: the body previously read edge_attr(tempnets2) -- a global -- instead of
# the `netdat` argument, so calling edatfun on any other graph silently used
# tempnets2's attributes.
edatfun<-function(string_entry,netdat){
newcol<-as.data.frame(edge_attr(netdat)[str_detect(edge_attr_names(netdat),string_entry)])
newcol[is.na(newcol)]<-""
as.character(interaction(newcol,sep="_")) %>% str_extract_all(.,"[0-9]+") %>% sapply(.,function(X) paste(X,collapse="_"))
}
edatfun('topic',tempnets2)
# NOTE(review): the next two lines are an unfinished call -- `set.edge.attribute(`
# is never closed, which leaves the parser inside the call and breaks everything
# that follows.  Delete or complete them.
set.edge.attribute(
newcol
?str_extract_all
?interaction
?rowsum
?tidyr::unite
?network
network
?as_adjacency_matrix
library(sna)
# Attach a network-package adjacency representation to each stratum entry,
# carrying Topic/Orig as vertex attributes.
tempsmall<-lapply(tempsmall,function(X){
X$Adj<-network(as_adjacency_matrix(X$Net),matrix.type="adjacency",directed=TRUE,vertex.attr=list('Topic'=X$Topic,'Orig'=X$Orig))
X
})
library(network)
tempsmall[[1]]$Net
# NOTE(review): `network::matr` is an incomplete symbol (typo/abandoned line).
network::matr
tempsmall[[2]]$Adj
igraph::edge.attributes(temp)
# NOTE(review): `attr=` is left empty -- runtime error if executed.
temp<-as_adjacency_matrix(temp,edges=T,attr=)
?as_adjacency_matrix
library("statnet")
# Merge the per-keyword graphs of the first 100 unique subject keywords into a
# single graph and render it as a D3 sankey diagram.
# FIX: this line previously began with a stray '=' ("=t1<-..."), a syntax error.
t1<-lapply(unique(unlist(matchtable$subject.keywords))[1:100], function(X) igraphob_object(X,matchtable,0))
# Fold the remaining graphs into the union of the first two.
t12<-union(t1[[1]],t1[[2]])
for(i in 3:length(t1)){
t12<-union(t12,t1[[i]])
}
t12d3<-t12 %>% igraph_to_networkD3()
sankeyNetwork(t12d3$links,t12d3$nodes,"source","target",NodeID="name")
t12d3$nodes$name
|
1809bc44d4bc6462c8ba8baa4a4d4a94ac9e5c56
|
bbffe8845045c6e4a9e121bef78c8a4d8b2b51c2
|
/man/empty.Rd
|
36bf7b775c2a8fe441b0b2388b128982e9363e85
|
[] |
no_license
|
vsbuffalo/rivr
|
43d799b9af22332b648f15d70191439b3224c75b
|
0ffe216939d45333a2b4688f68712ec86ea823f1
|
refs/heads/master
| 2016-09-11T04:53:15.119943
| 2015-04-02T00:49:14
| 2015-04-02T00:49:14
| 32,969,361
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
rd
|
empty.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/exception_handler.R
\name{empty}
\alias{empty}
\title{Determine if stream/iterator is empty}
\usage{
empty(x)
}
\arguments{
\item{x}{Iterator}
}
\description{
Determine if stream/iterator is empty
}
|
540d1bc4cc8013dae1c85ac5d0857ba40b41d6d3
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/JSM/R/InitValMultGeneric.R
|
37e6eaee2cddcebc4122da02562d7f8b17d38894
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,337
|
r
|
InitValMultGeneric.R
|
#=============== Initial Value Calculation for Model II with NMRE ===============#
# Compute starting values for a joint longitudinal/survival model fit.
# Steps: (1) method-of-moments estimates of the random-effect and measurement
# variances (Bsigma2, Ysigma2); (2) BLUPs of the subject random effects;
# (3) a Cox fit for initial phi/alpha and a Breslow-type baseline hazard;
# (4) when rho != 0, Newton-Raphson refinement of (phi, alpha) and lambda.
# Returns list(phi, alpha, lamb, Ysigma, Bsigma) with sigmas on the SD scale.
# NOTE(review): relies on external helpers calc_VY, calc_muBMult,
# calc_tapply_vect_sum and on survival::coxph -- semantics assumed from usage.
InitValMultGeneric <- function (gamma, B.st, n, Y.st, ni, model, ID, Index, B, Btime, Btime2, start, stop, event, Z, ncz, Ztime2, Index2, Index1, rho, iter, nk, d, Ztime22, Ztime, tol.P) {
# Per-subject fitted fixed-effect trajectories B_i %*% gamma.
BTg <- lapply(B.st, function(x) as.vector(x %*% gamma))
# Stack residual outer products and regress them on the two variance designs
# to get moment estimates of Bsigma2 (random effect) and Ysigma2 (error).
G <- unlist(lapply(1:n, function(i) as.vector( tcrossprod(Y.st[[i]] - BTg[[i]]))))
tempCov1 <- unlist(lapply(BTg, function(x) tcrossprod(x) ))
tempCov2 <- unlist(lapply(1:n, function(i) c(diag(1, ni[i]))))
tempCov <- cbind(tempCov1, tempCov2)
sigmas <- solve(t(tempCov) %*% tempCov) %*% t(tempCov) %*% G
Bsigma2 <- sigmas[1]
Ysigma2 <- sigmas[2]
# Guard against negative moment estimates.
if(Bsigma2 < 0) Bsigma2 <- 0.1
if(Ysigma2 < 0) Ysigma2 <- 0.1
M <- length(Index)
# Marginal covariance of Y_i, then BLUPs of the (shifted) random effects.
VY <- lapply(1:n, function(i) calc_VY( M = BTg[[i]], A = Bsigma2, b = Ysigma2))
bBLUP <-unlist(lapply(1:n, function(i) calc_muBMult( Bsigma2,VY[[i]],BTg[[i]],Y.st[[i]] )+1 ))
# Longitudinal term entering the hazard: model 2 = random effect only,
# model 1 = random effect times the spline trajectory.
if (model == 2){
fixedOrRand <- bBLUP[ID]
fixedOrRand.time2 <- bBLUP[Index]
} else if( model == 1) {
fixedOrRand <- bBLUP[ID] * as.vector(B %*% gamma)
fixedOrRand.time <- bBLUP * as.vector(Btime %*% gamma)
fixedOrRand.time2 <- bBLUP[Index] * as.vector(Btime2 %*% gamma)
} else {
stop("Invalid model type")
}
#========== first fit the Cox model ==========#
data.init <- data.frame(start = start, stop = stop, event = event, Z = Z, fixedOrRand = fixedOrRand)
fit <- if (ncz > 0) coxph(Surv(start, stop, event) ~ Z + fixedOrRand, data = data.init) else coxph(Surv(start, stop, event) ~ fixedOrRand, data = data.init)
phi.old <- if (ncz > 0) fit$coefficients[1:ncz] else numeric(0)
alpha.old <- fit$coefficients[ncz + 1]
Ztime2_phi.old <- if (ncz > 0) Ztime2 %*% phi.old else rep(0, M)
# Breslow-type baseline hazard increments.
temp <- as.vector(exp(Ztime2_phi.old + alpha.old * fixedOrRand.time2)) # M*1 vector #
lamb.old <- Index2 / calc_tapply_vect_sum( temp, as.integer(Index1-1))
if (rho == 0) {
# No frailty transformation: the Cox estimates are the initial values.
phi.new <- phi.old
alpha.new <- alpha.old
lamb.new <- lamb.old
} else {
# Newton-Raphson refinement under the transformation model (rho != 0).
for (it in 1:iter) {
exp.es <- exp(as.vector(Ztime2_phi.old + alpha.old * fixedOrRand.time2))
temp0a <- exp.es * lamb.old[Index1];
temp0b <- fixedOrRand.time2 * temp0a;
const <- rep(0, n)
const[nk != 0] <- calc_tapply_vect_sum(v1 = temp0a, v2 = as.integer(Index - 1)) # vector of length n #
CondExp <- (1 + d * rho) / (1 + rho * const) # conditional expectation E(xi|Oi), vector of length n #
CondExp2 <- CondExp[nk != 0]
if (ncz > 0) {
# Score and information contributions for phi.
temp1 <- lapply(1:ncz, function(i) CondExp2 * calc_tapply_vect_sum(v1 = Ztime2[, i] * temp0a, v2 = as.integer(Index - 1)))
temp1 <- sapply(temp1, sum)  # vector of length ncz #
temp3 <- lapply(1:(ncz ^ 2), function(i) CondExp2 * calc_tapply_vect_sum(v1 = Ztime22[, i] * temp0a, v2 = as.integer(Index - 1)))
temp3 <- sapply(temp3, sum)  # vector of length ncz^2 #
temp5 <- lapply(1:ncz, function(i) CondExp2 * calc_tapply_vect_sum(v1 = Ztime2[, i] * temp0b, v2 = as.integer(Index - 1)))
temp5 <- sapply(temp5, sum) # vector of length ncz #
phiScore <- colSums(d * Ztime) - temp1  # vector of length ncz #
}
temp2 <- sum(CondExp2 * calc_tapply_vect_sum(v1 = temp0b, v2 = as.integer(Index - 1)))
temp4 <- sum(CondExp2 * calc_tapply_vect_sum(v1 = fixedOrRand.time2 * temp0b, v2 = as.integer(Index - 1)))
if (model==2) {
alphaScore <- sum(d * bBLUP) - temp2
} else {
alphaScore <- sum(d * fixedOrRand.time) - temp2
}
if(ncz > 0) {
pa.score <- c(phiScore, alphaScore)
pa.info <- matrix(0, (ncz + 1), (ncz + 1)) # (ncz+1)*(ncz+1) matrix #
pa.info[1:ncz, 1:ncz] <- - temp3
pa.info[(ncz + 1), (ncz + 1)] <- - temp4
pa.info[(ncz + 1), 1:ncz] <- - temp5
pa.info[1:ncz, (ncz + 1)] <- - temp5
#=============== Update phi and alpha ===============#
pa.old <- c(phi.old, alpha.old) # vector of length (ncz+1) #
# Invert the information matrix via SVD for numerical stability.
paSVD <- svd(pa.info)
pa.info.inv <- paSVD$v %*% diag(1 / paSVD$d) %*% t(paSVD$u)
pa.new <- pa.old - pa.info.inv %*% pa.score # vector of length (ncz+1) #
phi.new <- pa.new[1 : ncz]
alpha.new <- pa.new[ncz + 1]
} else {
# Scalar Newton step when there are no Z covariates.
alpha.new <- alpha.old - alphaScore / (-temp4)
phi.new <- phi.old
pa.new <- alpha.new
pa.old <- alpha.old
}
Ztime2_phi.new <- if (ncz > 0) Ztime2 %*% phi.new else rep(0, M)
#========== Calculate the new lambda with new parameters ==========#
exp.esn <- exp(as.vector(Ztime2_phi.new + alpha.new * fixedOrRand.time2))
tempLamb <- calc_tapply_vect_sum(v1 = CondExp[Index] * exp.esn, v2 = as.integer(Index1 - 1))
lamb.new <- Index2 / tempLamb
#========== Check Convergence ==========#
# Relative change in (phi, alpha); tol.P both scales and thresholds it.
err <- max(abs(pa.new - pa.old) / (abs(pa.old) + tol.P))
if (err <= tol.P) break
else {
phi.old <- phi.new
alpha.old <- alpha.new
lamb.old <- lamb.new
Ztime2_phi.old <- if (ncz > 0) Ztime2 %*% phi.old else rep(0, M) # Added by Pantelis
}
}
}
result <- list(phi = phi.new, alpha = alpha.new, lamb = lamb.new, Ysigma = sqrt(Ysigma2), Bsigma = sqrt(Bsigma2))
return(result)
}
|
a5bd4aaabbba7a219571161a68f4404ea4f474ed
|
d63505181503615a8d4cbe39a1c0ace9eeb1c36e
|
/real-data/real-data-univariate/Stomach-data-univariate-wo-cohort.R
|
8be486a600adb75d748beb416f97b1a0ba364c01
|
[] |
no_license
|
papezneuroO/Project-thesis_Helenerb
|
d1eebd76e2d067aad949c2cc8e9da79a18aaaecb
|
7de5aeee10dfcf6ee6cb904477bc0d446869ffee
|
refs/heads/main
| 2023-06-01T10:22:09.799608
| 2021-06-16T12:01:55
| 2021-06-16T12:01:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,385
|
r
|
Stomach-data-univariate-wo-cohort.R
|
# Replicate of Real-data-univariate-v1, using the stomach-cancer data instead of
# the lung cancer data.
library(INLA)
library(inlabru)
library(ggplot2)
library(patchwork)
library(tidyverse)
library(lubridate)
library(readxl)
# ---- read data from excel files ----
# read population data
# German federal statistics export: skip the 6-row header; each year block has
# 88 single-year age rows.  "Insgesamt" rows (column totals) are dropped and
# ages are binned into 5-year groups, topping out at "85".
population <- read_excel("population-germany.xlsx", sheet=1,
col_names = c("age", "male", "female", "total"),
skip = 6)
# Rows whose "age" cell parses as a date are actually the year separators.
years <- population %>% select(age) %>%
filter(!is.na(as.Date(age, format="%d.%m.%Y", optional = TRUE)))
population <- population %>% slice(1:1848) %>%
mutate(year = rep(as.vector(years$age), each=88)) %>%
filter(year != age) %>% filter(age != "Insgesamt") %>%
mutate(age.number = parse_number(age)) %>%
mutate(age.number = replace(age.number, age == "unter 1 Jahr", 0)) %>%
mutate(age.int = 5*(age.number%/%5)) %>%
mutate(age.int = paste(age.int, "-", age.int + 4)) %>%
mutate(age.int = replace(age.int, age.int == "85 - 89", "85")) %>%
group_by(age.int, year) %>%
summarize(total = sum(total), male = sum(male), female = sum(female)) %>%
mutate(year = format(as.POSIXct(year, format="%d.%m.%Y"), format="%Y")) %>%
filter(year < 2017)
# read stomach cancer data
# Reshape to one row per (age group, year); "männlich"/"weiblich" are German
# for male/female.  t = year index from 1999, x = age-group lower bound,
# xt = a unique age-by-period index used for the iid interaction term.
stomach.cancer <- read_excel("stomachCancer-germany.xls") %>%
rename(sex = "...1") %>% rename(age = "...2") %>%
pivot_longer(!c(sex,age), names_to="year", values_to="deaths") %>%
mutate(sex = replace(sex, sex == "männlich", "male")) %>%
mutate(sex = replace(sex, sex == "weiblich", "female")) %>%
pivot_wider(names_from = sex, values_from = deaths) %>%
mutate(total = male + female) %>%
mutate(t = as.integer(year)-1999) %>% mutate(t.1 = t) %>%
mutate(x = parse_number(age)) %>% mutate(x.1 = x) %>%
mutate(xt = ((x%/%5)*(2016-1998) +t))
# ---- Start defining the inlabru model components ----
# this first attempt is based on the L-C-cohort-v2
# Lee-Carter-style model without a cohort term:
#   eta = Int + alpha_x + phi*t + beta_x*kappa_t + epsilon_xt
# beta is sum-to-one constrained via extraconstr (A.mat, e.vec).
# helper values for constraining of beta:
A.mat = matrix(1, nrow = 1, ncol = length(unique(stomach.cancer$age))) # not sure if you did you this correctly
e.vec = 1
# keeping the "less informative priors" that seemed to work well
# PC priors on the precisions of the random effects.
pc.prior.alpha <- list(prec = list(prior = "pc.prec", param = c(0.1, 0.4)))
pc.prior.kappa <- list(prec = list(prior = "pc.prec", param = c(0.3, 0.6)))
pc.prior.epsilon <- list(prec = list(prior = "pc.prec", param = c(0.05, 0.5)))
pc.prior.gamma <- list(prec = list(prior = "pc.prec", param = c(0.3, 0.5)))
# this is just how we define our model
comp = ~ -1 +
Int(1) +
alpha(x, model = "rw1", values = unique(stomach.cancer$x), constr = TRUE, hyper = pc.prior.alpha) +
phi(t, model = "linear", prec.linear = 1) +
beta(x.1, model = "iid", extraconstr = list(A = A.mat, e = e.vec)) +
kappa(t.1, model = "rw1", values = unique(stomach.cancer$t), constr = TRUE, hyper = pc.prior.kappa) +
epsilon(xt, model = "iid", hyper = pc.prior.epsilon)
# here total will refer to the total deaths for each age-year
#form.1 = total ~ -1 + Int + alpha + beta*phi + beta*kappa + gamma + epsilon
# first: without cohort
form.1 = total ~ -1 + Int + alpha + beta*phi + beta*kappa + epsilon
# add data as stomach.cancer and offset as population$total, which is the number of people at-risk
# NOTE(review): E = population$total relies on row-order alignment between
# stomach.cancer and population -- confirm both are sorted identically.
likelihood.1 = like(formula = form.1, family = "poisson", data = stomach.cancer, E = population$total)
# the same control compute as in Sara's first example
c.c <- list(cpo = TRUE, dic = TRUE, waic = TRUE, config = TRUE)
#initial.values = list(alpha.c = alpha, beta.c = beta, kappa.c = kappa, phi.t = phi*(1:nt))
res = bru(components = comp,
likelihood.1,
options = list(verbose = F,
bru_verbose = 1,
num.threads = "1:1",
control.compute = c.c
))
# Re-run from the previous fit's mode to refine convergence, then inspect.
res = bru_rerun(res)
res$summary.fixed
res$summary.hyperpar
# Posterior summaries of each latent component, plotted with 95% intervals.
data.alpha = res$summary.random$alpha %>%
mutate(id.order = factor(ID, levels=ID))
ggplot(data.alpha, aes(x = id.order)) +
#geom_ribbon(aes(ymin = X0.025quant, ymax = X0.975quant), fill = "lightskyblue1") +
geom_errorbar(aes(id.order, min = `0.025quant`, ymax =`0.975quant`), position=position_dodge(width=0.5)) +
geom_point(aes(y = mean, color = "Estimated")) +
ggtitle("Alpha - real data for stomach cancer")
data.beta = res$summary.random$beta
ggplot(data.beta, aes(x = ID)) +
#geom_ribbon(aes(ymin = X0.025quant, ymax = X0.975quant), fill = "lightskyblue1") +
geom_errorbar(aes(ID, min = `0.025quant`, ymax =`0.975quant`), position=position_dodge(width=0.5)) +
geom_point(aes(y = mean, color = "Estimated")) +
ggtitle("Beta - real data for stomach cancer")
data.kappa = res$summary.random$kappa
ggplot(data.kappa, aes(x = ID)) +
#geom_ribbon(aes(ymin = X0.025quant, ymax = X0.975quant), fill = "lightskyblue1") +
geom_errorbar(aes(ID, min = `0.025quant`, ymax =`0.975quant`), position=position_dodge(width=0.5)) +
geom_point(aes(y = mean, color = "Estimated")) +
ggtitle("Kappa - real data for stomach cancer")
# Linear drift phi: reconstruct its contribution phi*t and interval bounds.
data.phi = data.frame(cbind(ID = unique(stomach.cancer$t),
mean = res$summary.fixed$mean[2]*unique(stomach.cancer$t),
X0.025quant = res$summary.fixed$`0.025quant`[2]*unique(stomach.cancer$t),
X0.975quant = res$summary.fixed$`0.975quant`[2]*unique(stomach.cancer$t)))
ggplot(data = data.phi, aes(x = ID)) +
geom_ribbon(aes(ymin = X0.025quant, ymax = X0.975quant), fill = "lightskyblue1") +
geom_point(aes(y = mean, color = "Estimated")) +
ggtitle("Phi - real data for stomach cancer")
# Linear predictor eta over the 18x18 age-period grid (324 cells), as a heat
# map and as marginal means over period (t) and age (x).
data.eta <- data.frame(eta = res$summary.linear.predictor$mean[1:324]) %>%
mutate(t = stomach.cancer$t, x = stomach.cancer$x)
ggplot(data = data.eta, aes(x=t, y=x, fill = eta)) + geom_tile() +
xlab("Time: 1999 - 2016") +
ylab("Age: 0 - 85+") +
ggtitle("")
data.eta.t <- data.eta %>%
group_by(t) %>% summarize(eta.t = mean(eta))
ggplot(data = data.eta.t, aes(x = t)) +
geom_point(aes(y = eta.t, color = "Estimated")) +
ggtitle("Eta for t - real data for stomach cancer") + xlab("t") + ylab("Predictor")
data.eta.x <- data.eta %>%
group_by(x) %>% summarize(eta.x = mean(eta))
ggplot(data = data.eta.x, aes(x = x)) +
geom_point(aes(y = eta.x, color = "Estimated")) +
ggtitle("Eta for x - real data for stomach cancer") + xlab("t") + ylab("Predictor")
|
dc7022a45a3a77dd121d750b803991b7fe3c0dee
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diceR/inst/testfiles/connectivity_matrix/libFuzzer_connectivity_matrix/connectivity_matrix_valgrind_files/1609958624-test.R
|
743c45c6e39d0e9c9c239135fbc837dc983d0317
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 111
|
r
|
1609958624-test.R
|
# Auto-generated fuzzing/valgrind regression harness: calls the internal
# diceR:::connectivity_matrix with a single denormalized double and prints
# the structure of the result (the run itself is the test — no assertion).
testlist <- list(x = 2.16443570677964e-312)
result <- do.call(diceR:::connectivity_matrix,testlist)
str(result)
|
b322e315ce08ee613fec79f743abc0785e40dff6
|
8f9fea74327fb383b19bdc95b1b1cf703136f433
|
/R/global_vars_funs.R
|
8d3b2f3888d6aefa588921537a3c4fd4480e859c
|
[
"Apache-2.0"
] |
permissive
|
rtlemos/rcsurplus1d
|
a4b98c7ca5e7e16c74506f954de8e4a5059260a3
|
69ef6212b0df416f2ab15ffb147dcd5cc7e93e56
|
refs/heads/master
| 2021-04-30T16:20:27.719003
| 2020-06-19T01:19:07
| 2020-06-19T01:19:07
| 56,413,138
| 1
| 0
| null | 2016-04-17T02:24:12
| 2016-04-17T01:16:24
| null |
UTF-8
|
R
| false
| false
| 803
|
r
|
global_vars_funs.R
|
#' Global specs for rcsurplus
#'
#' \code{rcsurplus1d.default} is a list that contains default
#' parameters for this package
#' @param priorK prior for carrying capacity
#' @param priorr prior for intrinsic growth rate
#' @param priorPHI prior for the PHI parameter (range of the uniform prior)
#' @param priorq prior for catchability
#' @param priors prior for observational variance
#' @param mcmc_n number of Markov Chain Monte Carlo iterations
#' @param mcmc_t MCMC thinning
#' @param mcmc_b MCMC burn-in fraction
#' @param mcmc_c number of MCMC chains
#' @export rcsurplus1d.default
#' @export myglobal
myglobal <- rcsurplus1d.default <- list(
  priorK = c(100, 15000),
  priorr = c(0,1),
  priorPHI = c(0,1),
  priorq = c(-20,-1),
  priors = c(-20,20),
  mcmc_n = c(1000, 10000),
  mcmc_t = c(1,100),
  mcmc_b = c(0.05, 0.5),
  mcmc_c = c(1,3)
)
|
8b8679a3a00ed23e0db2d3e367b25601c65bcfd5
|
9463fd30587b1c2608bc53c0e442bab2cd899dd6
|
/Modules/downmodule_ui.R
|
d22c5f42ebd46850768e2ce7fce306c303a328a2
|
[] |
no_license
|
microgenomics/HumanMicrobiomeAnalysis
|
f72c9b3f217a4ea92843ccd8b707e023f80440d9
|
a83cd8f4b3ee2c1511e38c45ba8ba37dedaf9a18
|
refs/heads/master
| 2022-04-30T18:58:25.008184
| 2022-03-08T10:34:41
| 2022-03-08T10:34:41
| 198,847,636
| 2
| 2
| null | 2019-08-01T16:15:45
| 2019-07-25T14:32:45
|
R
|
UTF-8
|
R
| false
| false
| 663
|
r
|
downmodule_ui.R
|
DownloadUI <- function(id){
  # Shiny module UI for the "Download Data" tab.
  #
  # 'id': module namespace id; all input/output ids are wrapped with NS(id)
  #       so the module can be instantiated multiple times.
  #
  # Returns a tabPanel with a sidebar of selection controls (database choice
  # plus dynamically rendered filters) and a main panel listing file links.
  ns <- NS(id)
  # Sidebar: database selector plus server-rendered filter widgets.
  barra_lateral <- sidebarPanel(
    selectInput(inputId = ns("DataBase"), label = "Data Base",
                choices = c("Complete", "OneMore", "Depths"),
                selected = c("Complete")),
    uiOutput(ns("SpeciesQdown")),
    uiOutput(ns("DeepSdown")),
    uiOutput(ns("Dominancedown")),
    uiOutput(ns("ReadLdown"))
  )
  # Main panel: server-rendered download links for the current selection.
  painel_principal <- mainPanel(
    h4("Links to files selected"),
    htmlOutput(ns("files")),
    h5("Please note that different files might have the same file name and thus might be overwritten")
  )
  tabPanel("Download Data", barra_lateral, painel_principal)
}
|
48e1575841b0f0f8605e61ec2b5a854802e951b1
|
d9396697675606d97824a787a7b0f8c19619b3f8
|
/man/ExpDesigns.Rd
|
bf6f7c5aa337c9f65851dddf9b34cf438298ee14
|
[] |
no_license
|
qchengray/sommer
|
45859dbd6550a6c7069ab4a2d1904da6ed41f6b1
|
aca3863a7e1df8b462212c40468e44083c76812f
|
refs/heads/master
| 2021-01-23T04:38:57.350625
| 2017-08-24T07:25:49
| 2017-08-24T07:25:49
| 102,450,483
| 1
| 0
| null | 2017-09-05T07:45:38
| 2017-09-05T07:45:37
| null |
UTF-8
|
R
| false
| false
| 7,557
|
rd
|
ExpDesigns.Rd
|
\name{ExpDesigns}
\alias{ExpDesigns}
\docType{data}
\title{
Data for different experimental designs
}
\description{
The following data is a list containing data frames for different types of experimental designs relevant in plant breeding:
1) Augmented designs (2 examples)
2) Incomplete block designs (1 example)
3) Split plot design (2 examples)
4) Latin square designs (1 example)
5) North Carolina designs I,II and III
How to fit each is shown in the Examples section. This may help you get acquainted with experimental designs relevant to plant breeding. Good luck.
}
\format{
Different based on the design.
}
\source{
Datasets and more detail about them can be found in the agricolae package. Here we just show the datasets and how to analyze them using the \code{\link{sommer}} package.
}
\references{
Covarrubias-Pazaran G (2016) Genome assisted prediction of quantitative traits using the R package sommer. PLoS ONE 11(6): doi:10.1371/journal.pone.0156744
}
\examples{
#### =================================== ####
#### ===== Augmented Block Design 1 ==== ####
#### =================================== ####
data(ExpDesigns)
data1 <- ExpDesigns$au1
head(data1)
## response variable: "yield"
## check indicator: "entryc" ('nc' for all unreplicated, but personal.name for checks)
## blocking factor: "block"
## treatments, personal names for replicated and non-replicated: "trt"
## check no check indicator: "new"
mix1 <- mmer2(yield~entryc, random=~block+trt, data=data1)
summary(mix1)
# ## compare raw unreplicated measure with adjusted blup
# library(plyr)
# avers <- ddply(data1, .(trt), summarize,mean = round(mean(yield), 2))
# plot(avers$mean[-c(1:4)], mix1$u.hat$trt[-c(1:4),1],ylab="BLUP",xlab="Raw",
# pch=20, cex=2, col="blue")
# ## if you have row and column information you can see the design
# library(agridat)
# data1$row <- c(sample(1:7),sample(1:6),sample(1:7))
# data1$col <- c(1:3)[data1$block]
# d1 <- desplot(yield ~ row*col, data1, main="ABD1",
# text=trt,strip.cex=3, cex=1)
# print(d1)
#### =================================== ####
#### ===== Augmented Block Design 2 ==== ####
#### =================================== ####
data(ExpDesigns)
data2 <- ExpDesigns$au2
head(data2)
## response variable: "TSW"
## check indicator: "entryc"
## blocking factor: "Block"
## treatments, replicated and non-replicated: "Entry"
## check no check indicator: "new"
## this is also known as Federer's unreplicated design
mix2<- mmer2(TSW ~ entryc, random=~Block+Entry, data=data2)
summary(mix2)
#### =================================== ####
#### ===== Incomplete block design ==== ####
#### =================================== ####
data(ExpDesigns)
data.ibd <- ExpDesigns$ibd$book
head(data.ibd)
ExpDesigns$ibd$sketch
## response variable: "yield"
## 2 replications (r)
## 30 genotypes (trt)
## 10 incomplete blocks (s) with 3 trts each (k)
## design was an alpha design
## agricolae::design.alpha(trt=paste("gen",1:30,sep=""),k=3,r=2,seed=5)$sketch
mix.ibd <- mmer2(yield~Genotype,random=~replication+replication:block,
data=data.ibd)
summary(mix.ibd)
# rownames(a)[1] <-"geno1"
#a[-1] <- a[-1]+a[1]
#plot(density(mix.ibd$beta.hat))
## map of the field
# library(agridat)
# data.ibd$block <- as.numeric(as.character(data.ibd$block))
# data.ibd$cols <- as.numeric(as.character(data.ibd$cols))
# d1 <- desplot(yield ~ block*cols, data.ibd, main="IBD",
# text=Genotype,strip.cex=3, cex=1)
# print(d1)
#### =================================== ####
#### ======= Split Plot Design ======== ####
#### =================================== ####
data(ExpDesigns)
data.spd <- ExpDesigns$spd
head(data.spd)
## response variable: "yield"
## 3 blocks or reps (r)
## 2 whole plot treatment (A)
## 3 small plot treatments (B)
##
## i.e BLOCK 1
##[]======================[]
##[] A1(B1) A1(B2) A1(B3) []
##[] A2(B1) A2(B2) A2(B3) []
##[]======================[]
##
## more replication in whole plot treatments (A)
## less replication in sub plot treatments (B)
mix.split <- mmer2(yield ~block + A + B ,random=~ A:B, data=data.spd)
summary(mix.split)
#### =================================== ####
#### ==== Split-Split Plot Design ===== ####
#### =================================== ####
data(ExpDesigns)
data.sspd <- ExpDesigns$sspd
head(data.sspd)
## response variable: "yield"
## 5 levels of nitrogen (N) main plot
## 3 levels of management (M) sub-plot
## 3 varieties (B) sub-sub-plot
##
## i.e BLOCK 1
##[]==================================[]
##[] N1(M1(V1)) N1(M2(V1)) N1(M3(V1)) []
##[] N2(M1(V1)) N2(M2(V1)) N2(M3(V1)) []
##[] N3(M1(V1)) N3(M2(V1)) N3(M3(V1)) []
##[] N4(M1(V1)) N4(M2(V1)) N4(M3(V1)) []
##[] N5(M1(V1)) N5(M2(V1)) N5(M3(V1)) []
##[]==================================[]
##
head(data.sspd)
mix.sspd <- mmer2(yield ~1,random=~ block + nitrogen + management +
variety + nitrogen:management + variety:nitrogen +
variety:management + variety:nitrogen:management,
data=data.sspd)
summary(mix.sspd)
#### =================================== ####
#### ======= Latin Square Design ====== ####
#### =================================== ####
data(ExpDesigns)
data.lsd <- ExpDesigns$lsd
head(data.lsd)
## response variable: "yield"
## 4 columns (c)
## 4 rows (r)
## 4 varieties (V)
##
## c1 c2 c3 c4
##[]=============[]
##[] V1 V4 V2 V3 [] row 1
##[] V2 V3 V4 V1 [] row 2
##[] V3 V2 V4 V1 [] row 3
##[] V4 V1 V3 V2 [] row 4
##[]=============[]
## c1 c2 c3 c4
##
mix.lsd <- mmer2(yield ~ variety ,random=~ row + col, data=data.lsd)
summary(mix.lsd)
# library(agridat)
# desplot(yield ~ row*col, data.lsd, main="LSD",
# strip.cex=3, cex=1, text=variety)
#### =================================== ####
#### ===== North Carolina Design I ==== ####
#### =================================== ####
data(ExpDesigns)
data.car1 <- ExpDesigns$car1
head(data.car1)
## response variable: "yield"
## male indicator: "male"
## female indicator: "female"
## replication: "rep"
## set of males: "set"
mix.car1 <- mmer2(yield~set,random=~ set:rep + set:male
+set:male:female + set:male:female:rep, data=data.car1)
(suma <- summary(mix.car1))
(Var.A <- 4*suma$var.comp.table[2,1])
(Var.D <- 4*suma$var.comp.table[3,1] - 4*suma$var.comp.table[2,1])
#### =================================== ####
#### ===== North Carolina Design II ==== ####
#### =================================== ####
data(ExpDesigns)
data.car2 <- ExpDesigns$car2
head(data.car2)
## response variable: "yield"
## male indicator: "male"
## female indicator: "female"
## replication: "rep"
## set of males: "set"
mix.car2 <- mmer2(yield ~ 1, random=~ set + set:rep + set:male
+ set:female + set:male:female, data=data.car2)
(suma <- summary(mix.car2))
(Var.Am <- 4*suma$var.comp.table[3,1])
(Var.Af <- 4*suma$var.comp.table[4,1])
(Var.D <- 4*suma$var.comp.table[5,1])
#### =================================== ####
#### ==== North Carolina Design III ==== ####
#### =================================== ####
data(ExpDesigns)
data.car3 <- ExpDesigns$car3
head(data.car3)
## response variable: "yield"
## male indicator: "male"
## female indicator: "female"
## replication: "rep"
## set of males: "set"
mix.car3 <- mmer2(yield ~ set + set:rep, random=~ set:male
+ set:female + set:male:female, data=data.car3)
(suma <- summary(mix.car3))
(Var.A <- 4*suma$var.comp.table[1,1]) # var males
(Var.D <- 2*suma$var.comp.table[3,1]) # var females in males
}
|
4187a9285decbdc2c39115a254f515a48202b02e
|
5e65f58f231b331ba0cddb512398e39cda3a9a67
|
/kew_imperial_grasses_project/Support/handler.R
|
4ccf788d281c01b954d1790355c7d1a415694692
|
[] |
no_license
|
cwkprojects/myprojects
|
719644297fbf8c9269f9e3e440be9988a859df57
|
0bed4cd790cf4e4fa18d4683afadfee400ab7b33
|
refs/heads/master
| 2021-06-20T20:57:39.324961
| 2017-08-02T18:56:32
| 2017-08-02T18:56:32
| 98,444,604
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,873
|
r
|
handler.R
|
# This script can be used to replicate the analysis undertaken in the summer
# project "Where are the missing grasses?" based at RBG Kew in summer 2016.
#
# For a full explanation of the script and the methods it employs, as well as
# a guide, please see the readme in this repository.
#
# Antonio Remiro, Jonathan Williams, 2016
# remiroantonio@gmail.com
# jonvw28@gmail.com
#
################################################################################
#
# Install any dependencies and load the basic helper functions used throughout
#
source("./kew_grasses/Support/packages.R")
source("./kew_grasses/Support/functions.R")
#
# Load all option files that define the settings for the models
#
source("./kew_grasses/Options_Files/indices.R")
source("./kew_grasses/Options_Files/output_options.R")
source("./kew_grasses/Options_Files/search_parameters.R")
source("./kew_grasses/Options_Files/name_formatting.R")
#
# Load the geographical settings if requested; otherwise set the
# geography-related parameters to NULL to signal that the geographic model
# should not be applied
# NOTE(review): geo.model is expected to be defined by the option files
# sourced above — confirm.
#
if(geo.model) {
        source("./kew_grasses/Options_Files/geographical_model.R")
} else {
        levels <- NULL
        loc.ind <- NULL
        filt.ind <- NULL
        filt.mk <- NULL
        n.spec <- NULL
}
#
# Load the gradient descent parameters if either fitting mode requests them
#
if(gradient.descent || geo.gradient.descent){
        source("./kew_grasses/Options_Files/gradient_descent.R")
}
#
# Call scripts which set parameters to ensure the correct method is applied.
#
source("./kew_grasses/Support/Data_Processing/species_method.R")
source("./kew_grasses/Support/Data_Processing/author_method.R")
#
# Call the correct script to run the analysis (with or without subsetting)
#
if(subsetting) {
        source("./kew_grasses/Support/complete_pipeline_filter.R")
} else {
        # No filtering
        source("./kew_grasses/Support/complete_pipeline_whole_dataset.R")
}
|
ca271d03b95a36afbc8a07d4c0f517ceda43e8f2
|
580be5feec96aee48f98f02683409e373d024783
|
/man/c_layout.Rd
|
8e7fe7e97d97f62ed014845f2dfbee657062a6e9
|
[
"MIT"
] |
permissive
|
han-tun/charter
|
86a8ea8b0024785fa7b63b6ee1fa12250008b737
|
6b77bdac72fe27629c90045e6c0b77bca12030f3
|
refs/heads/master
| 2022-12-30T06:31:57.701194
| 2020-10-13T19:09:40
| 2020-10-13T19:09:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 573
|
rd
|
c_layout.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layout.R
\name{c_layout}
\alias{c_layout}
\title{Layout}
\usage{
c_layout(c, left = NULL, right = NULL, top = NULL, bottom = NULL)
}
\arguments{
\item{c}{An object of class \code{charter} as returned by \code{\link{c_hart}}.}
\item{left, right, top, bottom}{Values, in pixels, defining the margin around the chart.}
}
\description{
Adjust the layout of the graph.
}
\examples{
c_hart(cars, caes(speed, dist)) \%>\%
c_line() \%>\%
c_layout(
top = 20, right = 20,
left = 20, bottom = 20
)
}
|
7ed0527d0be8ba821429619fb4db2c43e1209acd
|
231c176babe88ed186d9edc92799224fac38a75b
|
/Código/universal 3.0.r
|
3e244e3bc47558bde0b0091df8d562c3c058f6ec
|
[
"Apache-2.0"
] |
permissive
|
ronyrst/analise_temperaturas
|
b8ca877ea9561f4d389a6e41e46e8d407e76c316
|
2df83e1a9b777436bad3e8e377e4fb316e39cbbf
|
refs/heads/master
| 2020-03-31T00:33:52.713797
| 2019-05-07T22:21:36
| 2019-05-07T22:21:36
| 151,744,099
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 89,470
|
r
|
universal 3.0.r
|
#### FUNCIONALIDADES (a partir da linha 33):
# -'save_data' salva matriz, data.frame ou lista em um arquivo .csv.
# -'ctrab_na' para uso interno dos outros programas.
# -'ctrab_cons' para uso interno dos outros programas.
# -'confere_na' mostra quantos NAs existem em um data.frame já na estrutura de convert_universal, os salvando em um arquivo .csv.
# -'confere_cons' mostra quantas inconsistências há em um data.frame já na estrutura de convert_universal, os salvando em um arquivo .csv.
# -'plot_temps' gera gráfico das temperaturas máximas e mínimas.
# -'graficaliza' gera gráfico(s) de barras das matrizes já calculadas.
# -'checa_data' verifica se a data usada é do tipo YYYY-MM-DD ou DD/MM/YYYY.
# -'def_dia' recebe um string de data e o transforma em uma lista que separa dia, mês e ano em valores numéricos.
# -'compara_data' faz comparação lógica entre duas datas.
# -'separa_estacao' coloca a estação em cada uma das datas de um data.frame.
#### CONVERTER DADOS (a partir da linha 1013):
# -'convert_universal' função que converte as estruturas do IAG e de Mirante para a estrutura aceita pelas funções aqui descritas.
#### MATRIZES (a partir da linha 1171):
# -'define_minima' para uso interno de separa_temp.
# -'define_maxima' para uso interno de separa_temp.
# -'separa_temp' calcula a matriz de máximas e mínimas de um data.frame na estrutura de convert_universal.
# -'quinquenal' calcula matrizes de 5 em 5 anos, ou sazonalmente em cada período de 5 anos, com o data.frame na estrutura de convert_universal.
# -'sazonal' calcula a matriz para cada uma das quatro estações de um data.frame na estrutura de convert_universal.
# -'decenio' calcula a matriz de 10 em 10 anos, ou sazonalmente em cada período de 10 anos, com o data.frame na estrutura de convert_universal.
# -'trinta' calcula a matriz em períodos de 30 anos, ou sazonalmente dentro de cada período de 30 anos, com o data.frame na estrutura de convert_universal.
# -'anual'calcula a matriz ano a ano, ou sazonalmente em cada ano, com o data.frame já na estrutura de convert_universal.
#### TUTORIAL (a partir da linha 1865):
# apresenta uma forma rápida de uso dos programas abaixo.
########################################################
# F U N C I O N A L I D A D E S
########################################################
save_data <- function(dado, titulo = ""){
  #####
  # Saves a matrix, data.frame, or list (of matrices and/or data.frames) as
  # .csv file(s) in the working directory, using write.csv2 (";"-separated,
  # "," as decimal mark).
  #
  # 'dado':   object to save. Anything other than a matrix, data.frame or
  #           list is rejected with a message and NULL is returned.
  # 'titulo': optional string used as a filename prefix, e.g. titulo = "x"
  #           produces "x matriz.csv" for a matrix.
  #
  # For lists, each element is written to its own numbered file named after
  # the element, e.g. "1. x Verao.csv"; non-matrix/non-data.frame elements
  # are skipped with a message.
  #####
  if(is.matrix(dado)){ # matrices are saved here.
    op <- paste(titulo, " matriz.csv", sep = "")
    write.csv2(dado, file = op)
  } else if(is.data.frame(dado)){ # data.frames are saved here (unquoted cells).
    op <- paste(titulo, " data_frame.csv", sep = "")
    write.csv2(dado, file = op, quote = FALSE)
  } else if(is.list(dado)){ # lists: one file per element.
    guard <- names(dado)
    for(i in seq_along(dado)){ # seq_along is safe for zero-length lists.
      # Filename pattern: "<index>. <titulo> <element name>.csv".
      op <- paste(i, ". ", titulo, " ", guard[i], ".csv", sep = "")
      if(is.matrix(dado[[i]])){
        write.csv2(dado[[i]], file = op)
      } else if(is.data.frame(dado[[i]])){
        write.csv2(dado[[i]], file = op, quote = FALSE)
      } else {
        print(paste("A entrada de número", i, "não é nem uma matriz nem um data.frame. Portanto, não foi salva.", sep = " "))
      }
    }
  } else {
    print("O dado entrado não é uma matriz, data.frame ou lista.")
    return(NULL)
  }
}
########################################################
ctrab_na <- function(dado, salva = FALSE, altera = FALSE){
  #####
  # Checks a data.frame (convert_universal layout, with columns 'tmin' and
  # 'tmax') for NA entries in either temperature column. Always returns a
  # data.frame:
  #   - no NAs found ............... input returned unchanged;
  #   - NAs found, altera = FALSE .. input returned unchanged;
  #   - NAs found, altera = TRUE ... rows containing NAs are removed.
  #
  # 'salva':  logical. If TRUE, the rows containing NAs are written to a
  #           separate .csv file ("NAs encontrados ...") via save_data before
  #           any filtering happens.
  # 'altera': logical. If TRUE, the rows containing NAs are dropped from the
  #           returned data.frame.
  #####
  # Vectorized mask: TRUE for every row whose minimum or maximum is NA.
  # (Replaces the previous row-by-row loop; also behaves correctly for a
  # zero-row data.frame, where 1:nrow(dado) would have iterated over c(1, 0).)
  faltantes <- is.na(dado$tmin) | is.na(dado$tmax)
  if(!any(faltantes)){
    # Nothing to report: hand the original data back untouched.
    return(dado)
  }
  if(salva){
    # Persist the offending rows so the user can inspect them later.
    save_data(dado[faltantes, , drop = FALSE], "NAs encontrados")
  }
  if(altera){
    # Keep only the complete rows.
    dado <- dado[!faltantes, , drop = FALSE]
  }
  return(dado)
}
########################################################
ctrab_cons <- function(dado, salva = FALSE, altera = FALSE){
  #####
  # Checks a data.frame (convert_universal layout, with columns 'tmin' and
  # 'tmax') for inconsistent rows, i.e. rows where tmin >= tmax. Rows with NA
  # in either temperature column are ignored (use ctrab_na for those).
  # Always returns a data.frame:
  #   - no inconsistencies found ............ input returned unchanged;
  #   - inconsistencies, altera = FALSE ..... input returned unchanged;
  #   - inconsistencies, altera = TRUE ...... inconsistent rows are removed.
  #
  # 'salva':  logical. If TRUE, the inconsistent rows are written to a
  #           separate .csv file ("Dados inconsistentes ...") via save_data
  #           before any filtering happens.
  # 'altera': logical. If TRUE, the inconsistent rows are dropped from the
  #           returned data.frame.
  #####
  # Vectorized mask: a row is inconsistent only when both temperatures are
  # present AND the minimum is not strictly below the maximum. The !is.na
  # guards keep the mask free of NAs (FALSE & NA evaluates to FALSE in R).
  ruins <- !is.na(dado$tmin) & !is.na(dado$tmax) & dado$tmin >= dado$tmax
  if(!any(ruins)){
    # Nothing to report: hand the original data back untouched.
    return(dado)
  }
  if(salva){
    # Persist the offending rows so the user can inspect them later.
    save_data(dado[ruins, , drop = FALSE], "Dados inconsistentes")
  }
  if(altera){
    # Keep only the consistent rows.
    dado <- dado[!ruins, , drop = FALSE]
  }
  return(dado)
}
########################################################
confere_na <- function(dado){
  #####
  # User-facing wrapper around ctrab_na: runs the NA check with saving and
  # filtering enabled, so any rows with NA are written to a .csv file, and
  # prints a message when the data.frame turns out to be NA-free.
  # Returns nothing; the input data.frame is never modified for the caller.
  #####
  limpo <- ctrab_na(dado, salva = TRUE, altera = TRUE)
  # If filtering removed nothing, no NAs were present.
  if(nrow(limpo) == nrow(dado)){
    print("O data.frame entrado não possui linhas com NA.")
  }
  rm(limpo)
}
########################################################
confere_cons <- function(dado){
  #####
  # User-facing wrapper around ctrab_cons: runs the consistency check with
  # saving and filtering enabled, so inconsistent rows (tmin >= tmax) are
  # written to a .csv file, and prints a message when no inconsistencies
  # are found. Returns nothing; the input data.frame is never modified for
  # the caller.
  #####
  limpo <- ctrab_cons(dado, salva = TRUE, altera = TRUE)
  # If filtering removed nothing, the data is fully consistent.
  if(nrow(limpo) == nrow(dado)){
    print("O data.frame entrado não possui inconsistências.")
  }
  rm(limpo)
}
########################################################
plot_temps <- function(dados, titulo = " "){
  #####
  # Plots the daily temperature series: maxima (column 3) as a red line and
  # minima (column 2) as a blue line, on a shared y-range. Rows containing NA
  # are dropped first; when any were dropped, the subtitle reports how many.
  #
  # 'dados':  data.frame in the convert_universal layout (Data, tmin, tmax).
  # 'titulo': string used as the plot subtitle.
  #####
  sem_na <- ctrab_na(dados, altera = TRUE)    # copy with NA rows removed
  omitidos <- nrow(dados) - nrow(sem_na)      # how many rows were dropped
  # Shared y-axis range: smallest minimum up to largest maximum.
  faixa <- c(min(sem_na[[2]]), max(sem_na[[3]]))
  legenda <- if(omitidos == 0){
    titulo
  } else {
    paste(titulo, " (o data.frame original tinha ", omitidos,
          " linhas com valores NA, que foram omitidos)", sep = "")
  }
  plot(sem_na[[3]], col = "red", type = "l", ylim = faixa,
       xlab = "Dias do período", ylab = "Temperatura", sub = legenda)
  points(sem_na[[2]], col = "blue", type = "l")
}
########################################################
graficaliza <- function(dado, sazonal = F){
#####
# Entra com matriz ou lista de matrizes, plota gráfico(s) segundo o modelo de Estévez, indicando a quantidade de dias em cada classificação de temperatura
# do período presente na matriz. A classificação dos dias se dá por: "Muito frio", "frio", "fresco", "ameno", "quente", "muito quente" e "sufocante".
#
# 'sazonal': se TRUE, plota gráficos separando as estações, contando que a lista de matrizes já possua as estações separadas. Se a lista original não possuir
# as estações já separadas, o resultado não será correto (e possivelmente, nenhum erro será mostrado). Caso entre TRUE, mas 'dado' for apenas uma matriz,
# nada é feito, e só um gráfico é gerado.
# Se FALSE (default), só plota um gráfico.
#####
###
if( is.matrix(dado) ){ # se o dado entrado for só uma matriz.
m <- matrix(rep(0, len = 14), nrow = 7, dimnames = (list(c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"))))
# gera uma matriz de zeros, com 7 linhas, e nesse caso, 2 colunas.
muitofrio <- as.numeric(dado[6,1]) + sum(dado[7,1:2]) # nesse conjunto de linhas, os valores de cada classificação são somados, dos presentes na matriz.
frio <- sum(dado[4:5,1]) + sum(dado[5:6,2]) + sum(dado[6:7,3]) # essa estrutura se repete no código mais vezes.
fresco <- sum(dado[3:4,2]) + sum(dado[4:5,3]) + sum(dado[5:6,4])
ameno <- sum(dado[2:3,3]) + sum(dado[3:4,4]) + sum(dado[4:5,5])
quente <- sum(dado[1:2,4]) + sum(dado[2:3,5]) + sum(dado[3:4,6]) + as.numeric(dado[4,7])
muitoquente <- as.numeric(dado[1,5]) + sum(dado[1:2,6]) + sum(dado[2:3,7])
sufocante <- as.numeric(dado[1,7])
m[,1] <- c(muitofrio, frio, fresco, ameno, quente, muitoquente, sufocante) # a matriz 'm' recebe os valores de cada classificação.
barplot(m, col = c("darkblue", "blue", "cadetblue1", "white", "yellow", "red", "darkred"), ylab = "Quantidade de dias", # a matriz é plotada.
legend = c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"))
###
} else if( is.list(dado) & sazonal == F ){ # caso de que os dados entrados estão em uma lista, e sazonal é FALSE (só um gráfico é plotado).
ops <- ceiling(length(dado)/0.7) - length(dado) # ceiling arredonda um possível valor quebrado para o menor inteiro maior que o número.
# ou seja: 7.2 > 8,
# 7.8 > 8.
# length(dado)/0.7 é feito para se gerar, no total, uma matriz com 30% de colunas de zeros, o suficiente
# para que a legenda não tampe valor algum.
contornos <- c(as.vector(names(dado)), rep(" ", ops)) # contornos é feito para criar "títulos" vazios para os 30% de colunas de zeros.
m <- matrix(rep(0, len = (7*ceiling(length(dado)/0.7))), nrow = 7, # matriz é gerada.
dimnames = (list(c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), contornos)))
coluna <- 1
for( i in dado ){ # itera em cada matriz presente em 'dado'.
muitofrio <- as.numeric(i[6,1]) + sum(i[7,1:2])
frio <- sum(i[4:5,1]) + sum(i[5:6,2]) + sum(i[6:7,3])
fresco <- sum(i[3:4,2]) + sum(i[4:5,3]) + sum(i[5:6,4])
ameno <- sum(i[2:3,3]) + sum(i[3:4,4]) + sum(i[4:5,5])
quente <- sum(i[1:2,4]) + sum(i[2:3,5]) + sum(i[3:4,6]) + as.numeric(i[4,7])
muitoquente <- as.numeric(i[1,5]) + sum(i[1:2,6]) + sum(i[2:3,7])
sufocante <- as.numeric(i[1,7])
m[,coluna] <- c(muitofrio, frio, fresco, ameno, quente, muitoquente, sufocante)
coluna <- coluna + 1
}
barplot(m, col = c("darkblue", "blue", "cadetblue1", "white", "yellow", "red", "darkred"), ylab = "Quantidade de dias" , # plota a matriz criada.
legend = c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"))
###
} else if( is.list(dado) & sazonal == T ){ # caso em que os dados entrados estão em uma lista, e a sazonal é TRUE.
o <- 1
cont_ver <- c() # cria as "labels" para cada barra, no gráfico final. As "labels" são separadas por estação.
cont_out <- c()
cont_inv <- c()
cont_pri <- c()
for( i in as.vector(names(dado)) ){ # itera nos nomes das matrizes, presentes na lista.
if( o == 1){
o <- 2
cont_ver <- c(cont_ver, i)
} else if( o == 2 ){
o <- 3
cont_out <- c(cont_out, i)
} else if( o == 3 ){
o <- 4
cont_inv <- c(cont_inv, i)
} else if( o == 4 ){
o <- 1
cont_pri <- c(cont_pri, i)
}
}
total <- length(dado)/4 # o "tamanho" de cada estação, do total das presentes na lista.
if( total != round(total) ){ # se total for um valor quebrado, implica que não foi entrada uma lista com estações separadas.
# é o único erro que pode ser acusado, caso se entre com dados incorretamente.
# convém notar que: se os dados não forem de estações separadas, mas forem divisíveis por 4, nada ocorre aqui.
print("Os dados entrados não condizem com o esperado.")
return(NULL)
}
ops <- ceiling(total/0.7) - total # ceiling( total/0.7 ) possui a mesma argumentação de acima. Com os 30% de colunas vazias.
cont_ver <- c(cont_ver, rep(" ", ops)) # adiciona as "labels" vazias, para os 30% de barras vazias geradas.
cont_out <- c(cont_out, rep(" ", ops))
cont_inv <- c(cont_inv, rep(" ", ops))
cont_pri <- c(cont_pri, rep(" ", ops))
m_verao <- matrix(rep(0, len = (7*ceiling(total/0.7))), nrow = 7, # 4 matrizes são geradas, uma para cada estação.
dimnames = (list(c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), cont_ver)))
m_outono <- matrix(rep(0, len = (7*ceiling(total/0.7))), nrow = 7,
dimnames = (list(c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), cont_out)))
m_inverno <- matrix(rep(0, len = (7*ceiling(total/0.7))), nrow = 7,
dimnames = (list(c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), cont_inv)))
m_primavera <- matrix(rep(0, len = (7*ceiling(total/0.7))), nrow = 7,
dimnames = (list(c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), cont_pri)))
qual <- 1 # 'qual' indica qual estação é a atual.
coluna <- 1 # 'coluna' itera na... coluna das matrizes. É a coluna onde os dados devem ser entrados.
for( i in dado ){ # itera nas matrizes presentes na lista 'dado'.
muitofrio <- as.numeric(i[6,1]) + sum(i[7,1:2])
frio <- sum(i[4:5,1]) + sum(i[5:6,2]) + sum(i[6:7,3])
fresco <- sum(i[3:4,2]) + sum(i[4:5,3]) + sum(i[5:6,4])
ameno <- sum(i[2:3,3]) + sum(i[3:4,4]) + sum(i[4:5,5])
quente <- sum(i[1:2,4]) + sum(i[2:3,5]) + sum(i[3:4,6]) + as.numeric(i[4,7])
muitoquente <- as.numeric(i[1,5]) + sum(i[1:2,6]) + sum(i[2:3,7])
sufocante <- as.numeric(i[1,7])
if( qual == 1){ # 'qual' = 1: verão.
qual <- 2
m_verao[,coluna] <- c(muitofrio, frio, fresco, ameno, quente, muitoquente, sufocante) # os valores são adicionados na coluna da matriz de verão.
} else if( qual == 2 ){ # 'qual' = 2: outono.
qual <- 3
m_outono[,coluna] <- c(muitofrio, frio, fresco, ameno, quente, muitoquente, sufocante)
} else if( qual == 3 ){ # 'qual' = 3: inverno.
qual <- 4
m_inverno[,coluna] <- c(muitofrio, frio, fresco, ameno, quente, muitoquente, sufocante)
} else if( qual == 4 ){ # 'qual' = 4: primavera.
qual <- 1
m_primavera[,coluna] <- c(muitofrio, frio, fresco, ameno, quente, muitoquente, sufocante)
coluna <- coluna + 1 # coluna só itera quando chega-se na estação de primavera, fazendo com que verão receba valores na próxima coluna, and so on.
}
}
barplot(m_verao, col = c("darkblue", "blue", "cadetblue1", "white", "yellow", "red", "darkred"), ylab = "Quantidade de dias" , # os gráficos são plotados.
legend = c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), sub = "Verão")
barplot(m_outono, col = c("darkblue", "blue", "cadetblue1", "white", "yellow", "red", "darkred"), ylab = "Quantidade de dias" ,
legend = c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), sub = "Outono")
barplot(m_inverno, col = c("darkblue", "blue", "cadetblue1", "white", "yellow", "red", "darkred"), ylab = "Quantidade de dias" ,
legend = c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), sub = "Inverno")
barplot(m_primavera, col = c("darkblue", "blue", "cadetblue1", "white", "yellow", "red", "darkred"), ylab = "Quantidade de dias" ,
legend = c("muito frio", "frio", "fresco", "ameno", "quente", "muito quente", "sufocante"), sub = "Primavera")
###
} else { # se o entrado não foi nem matriz nem uma lista: retorna erro.
print("O dado entrado não condiz com o esperado.")
return(NULL)
}
}
########################################################
checa_data <- function(string){
  #####
  # Takes a date string, returns a boolean flagging which date layout it uses.
  #
  # TRUE:  the date is of the form YYYY-MM-DD.
  # FALSE: the date is of the form DD/MM/YYYY.
  # (Falls off the end — invisible NULL — when neither separator is present.)
  #####
  x <- as.character(string)
  a <- strsplit(x, " ")[[1]][1]             # keep only the date part, dropping any trailing hour.
  for(caractere in strsplit(a, "")[[1]]){   # scan character by character for the first separator.
    if(caractere == "-"){                   # "-" found: YYYY-MM-DD layout.
      return(TRUE)
    } else if(caractere == "/"){            # "/" found: DD/MM/YYYY layout.
      return(FALSE)
    }
  }
}
########################################################
def_dia <- function(data){
  #####
  # Takes a date string, returns a list with the day, the month and the year separated.
  #
  # Handles both layouts recognized by 'checa_data' (YYYY-MM-DD and DD/MM/YYYY).
  # Any trailing hour after a space is discarded before parsing (the original
  # character-by-character version folded the hour into the last field, yielding NA).
  #####
  sabe <- checa_data(data)                  # layout flag: TRUE = YYYY-MM-DD, FALSE = DD/MM/YYYY.
  resp <- vector("list", 3)                 # the returned list: day, month and year values.
  names(resp) <- c("dia", "mês", "ano")
  somente <- strsplit(as.character(data), " ")[[1]][1]  # keep only the date part.
  if(sabe == TRUE){                         # YYYY-MM-DD: fields come year-first.
    partes <- as.numeric(strsplit(somente, "-")[[1]])
    resp[["dia"]] <- partes[3]
    resp[["mês"]] <- partes[2]
    resp[["ano"]] <- partes[1]
    return(resp)
  } else if(sabe == FALSE){                 # DD/MM/YYYY: fields come day-first.
    partes <- as.numeric(strsplit(somente, "/")[[1]])
    resp[["dia"]] <- partes[1]
    resp[["mês"]] <- partes[2]
    resp[["ano"]] <- partes[3]
    return(resp)
  }
}
########################################################
compara_data <- function(data1, data2, string){
  #####
  # Takes two dates and compares them. Returns a boolean matching the truth of the comparison,
  # or NULL (with a printed message) when a date is malformed.
  # Dates may be given either as strings or as lists, in the layout produced by 'def_dia'.
  #
  # 'string': accepts "=", "!=", ">", "<", ">=", "<=".
  #
  # The first date is compared against the second, as in: data1 *comparison_string* data2.
  #####
  if(is.character(data1)){                       # convert string dates into day/month/year lists.
    data1 <- def_dia(data1)
  }
  if(is.character(data2)){
    data2 <- def_dia(data2)
  }
  if((!is.list(data1)) & (!is.list(data2))){     # neither input could be turned into a date list.
    print("Os valores entrados em compara_data não puderam ser comparados.")
    return(NULL)
  }
  # Helper: reports a malformed day/month combination and yields NULL.
  erro_datas <- function(){
    print(paste("As datas ", data1[[1]], "/", data1[[2]], "/", data1[[3]], " e ", data2[[1]], "/", data2[[2]], "/", data2[[3]],
                " possuem algum erro.", sep = ""))
    NULL
  }
  iguais <- (data1[[1]] == data2[[1]]) & (data1[[2]] == data2[[2]]) & (data1[[3]] == data2[[3]])
  if(iguais){                                    # identical dates settle every operator at once.
    if(string == "=" | string == ">=" | string == "<="){
      return(TRUE)
    } else if(string == "!=" | string == ">" | string == "<"){
      return(FALSE)
    }
  } else {                                       # different dates settle the equality operators at once.
    if(string == "="){
      return(FALSE)
    } else if(string == "!="){
      return(TRUE)
    }
  }
  if(string == ">" | string == ">=" | string == "<" | string == "<="){
    maior <- (string == ">" | string == ">=")    # TRUE: testing data1 > data2; FALSE: data1 < data2.
    # The dates are known to differ here, so a strict field-by-field comparison
    # (year, then month, then day) fully decides ">=" and "<=" as well.
    if(data1[[3]] != data2[[3]]){                # years differ: they decide the comparison.
      if(maior) return(data1[[3]] > data2[[3]]) else return(data1[[3]] < data2[[3]])
    }
    if(data1[[2]] != data2[[2]]){                # same year, months differ: they decide.
      if(maior) return(data1[[2]] > data2[[2]]) else return(data1[[2]] < data2[[2]])
    }
    mes <- data1[[2]]                            # same year and month: compare days after validating them.
    if(!(mes %in% 1:12)){                        # invalid month: reported for ">" too (the original only
                                                 # reported it in the "<" branch and silently fell
                                                 # through to NULL in the ">" branch).
      print(paste("Os meses ", data1[[2]], "/", data1[[3]], " e ", data2[[2]], "/", data2[[3]], " possuem algum erro.", sep = ""))
      return(NULL)
    }
    ultimo <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)[mes]  # last valid day of this month.
    if(mes == 2){                                # February: leap years get 29 days. The original lookup
                                                 # loop only covered 1964-2016: later years are rejected
                                                 # as errors, earlier ones treated as common years.
      if(data1[[3]] > 2016){
        return(erro_datas())
      }
      if((data1[[3]] >= 1964) & (data1[[3]] %% 4 == 0)){
        ultimo <- 29
      }
    }
    if( (data1[[1]] >= 1) & (data1[[1]] <= ultimo) & (data2[[1]] >= 1) & (data2[[1]] <= ultimo) ){
      if(maior) return(data1[[1]] > data2[[1]]) else return(data1[[1]] < data2[[1]])
    }
    return(erro_datas())                         # at least one day is out of range for the month.
  }
}
########################################################
separa_estacao <- function(dado){
  #####
  # Takes a data.frame of maximum and minimum temperatures and returns the original data.frame with
  # one extra column indicating the season of the year each date belongs to.
  #
  # Only works with the layout 'dia' 'tmin' 'tmax', appending the 'season' column after 'tmax'.
  #
  # Season-change dates collected from:
  # https://www.timeanddate.com/calendar/seasons.html?year=1950&n=233
  #####
  seasons <- c("1961-03-20", "1961-06-21", "1961-09-23", "1961-12-21", "1962-03-20", "1962-06-21", "1962-09-23", "1962-12-22",
               "1963-03-21", "1963-06-22", "1963-09-23", "1963-12-22", "1964-03-20", "1964-06-21", "1964-09-22", "1964-12-21",
               "1965-03-20", "1965-06-21", "1965-09-23", "1965-12-21", "1966-03-20", "1966-06-21", "1966-09-23", "1966-12-22",
               "1967-03-21", "1967-06-21", "1967-09-23", "1967-12-22", "1968-03-20", "1968-06-21", "1968-09-22", "1968-12-21",
               "1969-03-20", "1969-06-21", "1969-09-23", "1969-12-21", "1970-03-20", "1970-06-21", "1970-09-23", "1970-12-22",
               "1971-03-21", "1971-06-21", "1971-09-23", "1971-12-22", "1972-03-20", "1972-06-21", "1972-09-22", "1972-12-21",
               "1973-03-20", "1973-06-21", "1973-09-23", "1973-12-21", "1974-03-20", "1974-06-21", "1974-09-23", "1974-12-22",
               "1975-03-21", "1975-06-21", "1975-09-23", "1975-12-22", "1976-03-20", "1976-06-21", "1976-09-22", "1976-12-21",
               "1977-03-20", "1977-06-21", "1977-09-23", "1977-12-21", "1978-03-20", "1978-06-21", "1978-09-23", "1978-12-22",
               "1979-03-21", "1979-06-21", "1979-09-23", "1979-12-22", "1980-03-20", "1980-06-21", "1980-09-22", "1980-12-21",
               "1981-03-20", "1981-06-21", "1981-09-23", "1981-12-21", "1982-03-20", "1982-06-21", "1982-09-23", "1982-12-22",
               "1983-03-21", "1983-06-21", "1983-09-23", "1983-12-22", "1984-03-20", "1984-06-21", "1984-09-22", "1984-12-21",
               "1985-03-20", "1985-06-21", "1985-09-22", "1985-12-21", "1986-03-20", "1986-06-21", "1986-09-23", "1986-12-22",
               "1987-03-21", "1987-06-21", "1987-09-23", "1987-12-22", "1988-03-20", "1988-06-21", "1988-09-22", "1988-12-21",
               "1989-03-20", "1989-06-21", "1989-09-22", "1989-12-21", "1990-03-20", "1990-06-21", "1990-09-23", "1990-12-22",
               "1991-03-21", "1991-06-21", "1991-09-23", "1991-12-22", "1992-03-20", "1992-06-21", "1992-09-22", "1992-12-21",
               "1993-03-20", "1993-06-21", "1993-09-22", "1993-12-21", "1994-03-20", "1994-06-21", "1994-09-23", "1994-12-22",
               "1995-03-20", "1995-06-21", "1995-09-23", "1995-12-22", "1996-03-20", "1996-06-20", "1996-09-22", "1996-12-21",
               "1997-03-20", "1997-06-21", "1997-09-22", "1997-12-21", "1998-03-20", "1998-06-21", "1998-09-23", "1998-12-21",
               "1999-03-20", "1999-06-21", "1999-09-23", "1999-12-22", "2000-03-20", "2000-06-20", "2000-09-22", "2000-12-21",
               "2001-03-20", "2001-06-21", "2001-09-22", "2001-12-21", "2002-03-20", "2002-06-21", "2002-09-23", "2002-12-21",
               "2003-03-20", "2003-06-21", "2003-09-23", "2003-12-22", "2004-03-20", "2004-06-20", "2004-09-22", "2004-12-21",
               "2005-03-20", "2005-06-21", "2005-09-22", "2005-12-21", "2006-03-20", "2006-06-21", "2006-09-23", "2006-12-21",
               "2007-03-20", "2007-06-21", "2007-09-23", "2007-12-22", "2008-03-20", "2008-06-20", "2008-09-22", "2008-12-21",
               "2009-03-20", "2009-06-21", "2009-09-22", "2009-12-21", "2010-03-20", "2010-06-21", "2010-09-23", "2010-12-21",
               "2011-03-20", "2011-06-21", "2011-09-23", "2011-12-22", "2012-03-20", "2012-06-20", "2012-09-22", "2012-12-21",
               "2013-03-20", "2013-06-21", "2013-09-22", "2013-12-21", "2014-03-20", "2014-06-21", "2014-09-22", "2014-12-21",
               "2015-03-20", "2015-06-21", "2015-09-23", "2015-12-22", "final") # equinox and solstice dates marking the season changes.
                                                 # The "final" string only prevents an out-of-bounds check
                                                 # after the last season change is consumed; otherwise unused.
  if(ncol(dado) != 3){
    print("O número de colunas não é o esperado no data.frame de entrada.")
    print("O código não foi rodado.")
    return(NULL)
  }
  estacao <- c("verao", "outono", "inverno", "primavera") # the first date is taken to fall in summer.
  dado$season <- rep(NA_character_, nrow(dado)) # new column, created directly as character (the original
                                                # seeded it with integers that were all overwritten anyway).
  conta <- 1                                    # cursor into the 'seasons' vector.
  guarda_est <- 1                               # index of the current season in 'estacao'.
  for(i in seq_len(nrow(dado))){
    if(dado[i, 1] == seasons[conta]){           # reached a season-change date: advance to the next season.
      conta <- conta + 1
      guarda_est <- (guarda_est %% 4) + 1       # cycles 1 -> 2 -> 3 -> 4 -> 1.
    }
    dado$season[i] <- estacao[guarda_est]       # record the season this date belongs to.
  }
  return(dado)
}
########################################################
# C O N V E R T E R D A D O S
########################################################
convert_universal <- function(dado1, dado2 = NA, Mirante = T){
  #####
  # Takes one or two data.frames of maximum/minimum temperature records (two data.frames: one with
  # maxima and one with minima, IAG format; a single data.frame: maxima and minima in the same file,
  # Mirante format) and returns a single data.frame holding maximum and minimum temperatures for every
  # day from 01/01/1961 to 31/12/2015.
  # Days with no record are filled with NA.
  # Expected IAG structure: [Data, tmax], [Data, tmin].
  # Expected Mirante structure: [__, Data, Hora, __, TempMaxima, TempMinima]
  #
  # 'Mirante': whether the Mirante-format data takes the following day or the same day when reading
  # temperatures. The default is the following day, with 'Mirante' = T. Hours are taken to be UTC, so
  # the following day's reading at 00:00 UTC actually corresponds to the previous day at 21:00 local
  # time. This mode therefore uses the 12:00 UTC reading of the same day as the minimum temperature,
  # and the following day's 00:00 UTC reading as that day's maximum temperature.
  # With 'Mirante' = F, the data are arranged so that, for day i, the minimum is the 12:00 UTC reading
  # and the maximum is the 00:00 UTC reading of the same day i in the original data.frame.
  #####
  op <- ncol(dado1) # 'op' checks whether data.frame 'dado1' follows the IAG or the Mirante format.
  if( (op == 2) & (is.data.frame(dado2)) ){ # a data.frame in 'dado2' and 'op' equal to 2: IAG format.
    trabalho <- T
  } else if( (op > 2) & (is.na(dado2)) ){ # no data.frame in 'dado2' and more than 2 columns: Mirante format.
    # NOTE(review): if neither branch matches (e.g. 2 columns but no second data.frame), 'trabalho' is
    # never assigned and the test below stops with an "object not found" error. Also, is.na() applied
    # to a data.frame returns a matrix rather than a single logical — confirm intended inputs.
    trabalho <- F
  }
  if(trabalho == T){ # second re-check: confirms the IAG data.frames carry IAG-formatted (YYYY-MM-DD) dates.
    ea <- checa_data(dado1[1,1])
    eb <- checa_data(dado2[1,1])
    if( (ea == eb) & (ea == trabalho) ){
      ea <- "a"
    } else {
      print("Há um erro na entrada dos data.frames. Certifique-se de que ambos são dados do IAG (temperaturas máximas e mínimas).")
      return(NULL) # returns NULL when either re-check fails.
    }
    rm(ea, eb)
  } else if(trabalho == F){ # same re-check, but validating the Mirante date layout (DD/MM/YYYY).
    ea <- checa_data(dado1[1,2])
    if(ea == trabalho){
      ea <- "b"
    } else {
      print("Há um erro na entrada do data.frame. Certifique-se de é o tipo de data.frame de Mirante.")
      return(NULL)
    }
    rm(ea)
  }
  dia <- read.table("dias.txt") # the calendar of dates is kept in a separate file with one value per date;
                                # that data.frame is therefore read every time this function runs.
  dia$tmax <- c(1:nrow(dia)) # the calendar data.frame gets one column for tmax and another for tmin.
  dia$tmin <- c(1:nrow(dia))
  names(dia) <- c("Data", "tmin", "tmax") # column order is Data, tmin, tmax.
  ######
  if(trabalho == T){ # case: working with the IAG structure.
    opera_dado1 <- 1 # 'opera_dado1' and 'opera_dado2' are cursors advanced independently of the for index.
    opera_dado2 <- 1
    for(i in 1:nrow(dia)){
      maxa <- as.vector(dado1[opera_dado1,1])
      mina <- as.vector(dado2[opera_dado2,1])
      maxa_n <- strsplit(maxa, " ")[[1]][1] # extracts only the date part of the IAG "date hour" field,
      mina_n <- strsplit(mina, " ")[[1]][1] # dropping the hour so the comparisons below can proceed.
      if(compara_data(as.character(dia[i,1]), maxa_n, "=")){ # compares the calendar date against the maxima
                                                             # data.frame's date and stores the value on a match.
        dia[i,3] <- dado1[opera_dado1, 2]
        opera_dado1 <- opera_dado1 + 1 # the cursors advance only on a match: gaps in the collected data
                                       # would be skipped over silently if everything ran off the for index.
      } else { # no match: that day has no collected temperature and receives NA.
        dia[i,3] <- NA
      }
      if(compara_data(as.character(dia[i,1]), mina_n, "=")){ # same process as above, for the minima data.frame.
        dia[i,2] <- dado2[opera_dado2, 2]
        opera_dado2 <- opera_dado2 + 1
      } else {
        dia[i,2] <- NA
      }
    }
    ######
  } else if(trabalho == F){ # case: working with the Mirante structure.
    ## ----
    if(Mirante == T){ # Mirante = T: the following day supplies each day's maximum reading.
      opera_dado1 <- 2
      for(i in 1:nrow(dia)){
        if(as.character(dado1$Data[opera_dado1]) != "01/01/1961"){ # the first Mirante record is discarded,
                                                                   # since it corresponds to 31/12/1960.
          if( compara_data(as.character(dia[i,1]), as.character(dado1$Data[opera_dado1]), "=") & (dado1$Hora[opera_dado1] == 0000) ){ # hour 0000: maximum,
                                                                                                                                     # assigned to the
                                                                                                                                     # previous date.
            dia[i-1,3] <- dado1$TempMaxima[opera_dado1]
            opera_dado1 <- opera_dado1 + 1
          } else { # otherwise the Mirante data.frame lacks the record, and NA goes in.
            dia[i-1,3] <- NA
          }
        }
        if( compara_data(as.character(dia[i,1]), as.character(dado1$Data[opera_dado1]), "=") & (dado1$Hora[opera_dado1] == 1200) ){ # hour 1200: minimum,
                                                                                                                                    # assigned to the
                                                                                                                                    # current date.
          dia[i, 2] <- dado1$TempMinima[opera_dado1]
          opera_dado1 <- opera_dado1 + 1
        } else { # absent from the Mirante data.frame: NA goes in.
          dia[i, 2] <- NA
        }
      }
      dia[i,3] <- dado1$TempMaxima[opera_dado1] # stores the maximum of 31/12/2015.
                                                # NOTE(review): relies on the loop variable 'i' keeping its
                                                # last value after the for loop — valid R, but fragile.
      ## ----
    } else if(Mirante == F){ # Mirante = F: the same day supplies both readings.
      opera_dado1 <- 1
      for(i in 1:nrow(dia)){
        if( compara_data(as.character(dia[i,1]), as.character(dado1$Data[opera_dado1]), "=") & (dado1$Hora[opera_dado1] == 0000) ){ # hour 0000: enters as the
                                                                                                                                   # current day's maximum.
          dia[i,3] <- dado1$TempMaxima[opera_dado1]
          opera_dado1 <- opera_dado1 + 1
        } else { # no maximum available for this day: NA.
          dia[i,3] <- NA
        }
        if( compara_data(as.character(dia[i,1]), as.character(dado1$Data[opera_dado1]), "=") & (dado1$Hora[opera_dado1] == 1200) ){ # hour 1200: enters as the
                                                                                                                                    # current day's minimum.
          dia[i, 2] <- dado1$TempMinima[opera_dado1]
          opera_dado1 <- opera_dado1 + 1
        } else { # otherwise, NA.
          dia[i, 2] <- NA
        }
      }
    }
  }
  return(dia)
}
########################################################
# M A T R I Z E S
########################################################
define_minima <- function(mima){
  #####
  # Takes a minimum-temperature value and returns the matrix row label it belongs to.
  # Rows run from "A" (above 20) down to "G" (at or below -5), in 5-degree bands.
  #####
  if(mima > 20){
    return("A")
  }
  if(mima > 15){
    return("B")
  }
  if(mima > 10){
    return("C")
  }
  if(mima > 5){
    return("D")
  }
  if(mima > 0){
    return("E")
  }
  if(mima > -5){
    return("F")
  }
  return("G")
}
########################################################
define_maxima <- function(maxa){
  #####
  # Takes a maximum-temperature value and returns the matrix column it belongs to.
  # Columns run from 1 (at or below 10) up to 7 (above 35), in 5-degree bands.
  #####
  if(maxa > 35){
    return(7)
  }
  if(maxa > 30){
    return(6)
  }
  if(maxa > 25){
    return(5)
  }
  if(maxa > 20){
    return(4)
  }
  if(maxa > 15){
    return(3)
  }
  if(maxa > 10){
    return(2)
  }
  return(1)
}
########################################################
separa_temp <- function(dado, save = FALSE){
  #####
  # Takes a data.frame in the structure produced by convert_universal, returns the computed 7x7
  # temperature matrix (rows: minimum-temperature bands "A".."G"; columns: maximum-temperature bands).
  #
  # 'save': when TRUE, the resulting matrix is written out via save_data().
  #####
  resultado <- matrix(rep(0L, length.out = 49), nrow = 7, dimnames = list(c("A", "B", "C", "D", "E", "F", "G"))) # the answer matrix.
  # `||` (short-circuit) instead of `|`: with `|`, a NULL 'dado' makes `nrow(dado) == 0` evaluate to
  # logical(0), the whole condition becomes logical(0), and the `if` errors out instead of returning
  # the empty matrix as intended.
  if( is.null(dado) || (nrow(dado) == 0) ){ # guards so ctrab_na/ctrab_cons never see empty input.
    return(resultado)
  }
  dado_na <- ctrab_na(dado, salva = FALSE, altera = TRUE)     # drop rows with NAs.
  if( is.null(dado_na) || (nrow(dado_na) == 0) ){
    return(resultado)
  }
  dado_fim <- ctrab_cons(dado_na, salva = FALSE, altera = TRUE) # drop inconsistent rows.
  if( is.null(dado_fim) || (nrow(dado_fim) == 0) ){
    return(resultado)
  }
  for( i in seq_len(nrow(dado_fim)) ){
    mbm <- define_minima( dado_fim$tmin[i] )
    mam <- define_maxima( dado_fim$tmax[i] )
    resultado[mbm, mam] <- resultado[mbm, mam] + 1 # 'mbm' and 'mam' index the cell to increment.
  }
  if( isTRUE(save) ){
    save_data(resultado)
  }
  return(resultado)
}
########################################################
quinquenal <- function(dado, sazonal = F, save = F, plota = F){
  #####
  # Takes a data.frame of max/min temperatures, returns the count matrices
  # computed quinquennium by quinquennium (5-year blocks, 1961-2015).
  #
  # 'sazonal': if TRUE, computes one matrix per season of each quinquennium
  #            (4 matrices per block). If FALSE (default), one matrix per block.
  #
  # 'save': if TRUE, saves every computed matrix to its own, properly named file.
  #
  # 'plota': if TRUE, each quinquennium (or each season of it, depending on
  #          'sazonal') gets a plot of its values.
  #
  # NOTE(review): assumes 'dado' is sorted by date, starts at 1961-01-01 and has
  # a 'Data' column formatted "YYYY-MM-DD" -- confirm with convert_universal.
  #####
  ####
  if(sazonal == T){ # this structure recurs below: the seasonal option needs extra set-up.
    dado <- separa_estacao(dado) # adds the column with the season of each date.
  }
  ####
  if(sazonal == T){ # declares the list that will hold the results.
    resultado <- vector("list", 44)
    prog <- c("Verao 1961-1965", "Outono 1961-1965", "Inverno 1961-1965", "Primavera 1961-1965",
              "Verao 1966-1970", "Outono 1966-1970", "Inverno 1966-1970", "Primavera 1966-1970",
              "Verao 1971-1975", "Outono 1971-1975", "Inverno 1971-1975", "Primavera 1971-1975",
              "Verao 1976-1980", "Outono 1976-1980", "Inverno 1976-1980", "Primavera 1976-1980",
              "Verao 1981-1985", "Outono 1981-1985", "Inverno 1981-1985", "Primavera 1981-1985",
              "Verao 1986-1990", "Outono 1986-1990", "Inverno 1986-1990", "Primavera 1986-1990",
              "Verao 1991-1995", "Outono 1991-1995", "Inverno 1991-1995", "Primavera 1991-1995",
              "Verao 1996-2000", "Outono 1996-2000", "Inverno 1996-2000", "Primavera 1996-2000",
              "Verao 2001-2005", "Outono 2001-2005", "Inverno 2001-2005", "Primavera 2001-2005",
              "Verao 2006-2010", "Outono 2006-2010", "Inverno 2006-2010", "Primavera 2006-2010",
              "Verao 2011-2015", "Outono 2011-2015", "Inverno 2011-2015", "Primavera 2011-2015")
    names(resultado) <- prog
  } else {
    resultado <- vector("list", 11)
    prog <- c("1961-1965", "1966-1970", "1971-1975", "1976-1980", "1981-1985", "1986-1990", "1991-1995",
              "1996-2000", "2001-2005", "2006-2010", "2011-2015")
    names(resultado) <- prog
  }
  ####
  datas <- c("1961-01-01", "1966-01-01", "1971-01-01", "1976-01-01", "1981-01-01", "1986-01-01", "1991-01-01",
             "1996-01-01", "2001-01-01", "2006-01-01", "2011-01-01") # dates at which the data is cut into quinquennia.
  data_count <- 2 # iterates over 'datas'.
  wee <- 1 # iterates over the slot of the result list being filled.
  o <- 0 # drives the slicing of the data.frame.
  abs <- nrow(dado) # NOTE(review): shadows base::abs inside this function.
  dado_atual <- dado
  ####
  for(i in 1:abs){
    o <- o + 1
    const <- as.character(dado_atual$Data[i])
    ###
    if( const == datas[data_count] ){
      dado_novo <- dado[1:(o-1),] # slices off the quinquennium just "found".
      dado <- dado[-c(1:(o-1)),]
      if(sazonal == T){ # computes each season of the quinquennium just obtained.
        ##
        ver_op <- subset(dado_novo, season == "verao")
        if(plota == T){
          plot_temps(ver_op, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(ver_op)
        wee <- wee + 1
        ##
        out_op <- subset(dado_novo, season == "outono")
        if(plota == T){
          plot_temps(out_op, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(out_op)
        wee <- wee + 1
        ##
        inv_op <- subset(dado_novo, season == "inverno")
        if(plota == T){
          plot_temps(inv_op, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(inv_op)
        wee <- wee + 1
        ##
        pri_op <- subset(dado_novo, season == "primavera")
        if(plota == T){
          plot_temps(pri_op, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(pri_op)
        wee <- wee + 1
      } else { # case where only the quinquennium is computed, ignoring seasons.
        if(plota == T){
          plot_temps(dado_novo, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(dado_novo)
        wee <- wee + 1
      }
      data_count <- data_count + 1
      o <- 1
    }
    ###
    if( (const == "2011-01-01") & (sazonal == F) ){ # the last slice of the last quinquennium is saved here.
      if(plota == T){
        plot_temps(dado, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(dado)
      break
    } else if( (const == "2011-01-01") & (sazonal == T)){ # last slice of the last quinquennium, per season.
      ##
      ver_op <- subset(dado, season == "verao")
      if(plota == T){
        plot_temps(ver_op, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(ver_op)
      wee <- wee + 1
      ##
      out_op <- subset(dado, season == "outono")
      if(plota == T){
        plot_temps(out_op, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(out_op)
      wee <- wee + 1
      ##
      inv_op <- subset(dado, season == "inverno")
      if(plota == T){
        plot_temps(inv_op, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(inv_op)
      wee <- wee + 1
      ##
      pri_op <- subset(dado, season == "primavera")
      if(plota == T){
        plot_temps(pri_op, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(pri_op)
      break
    }
  }
  ####
  if(save == T){
    save_data(resultado)
  }
  return(resultado)
}
########################################################
sazonal <- function(dado, save = F, plota = F){
  #####
  # Takes a data.frame of temperatures, returns 4 computed matrices: one per
  # season, over the whole data.frame.
  #
  # 'save': if TRUE, saves every computed matrix to its own, properly named file.
  #
  # 'plota': if TRUE, each season gets a plot of its values.
  #####
  dado <- separa_estacao(dado) # tags every row with the season it belongs to.
  season_keys <- c("verao", "outono", "inverno", "primavera")
  season_labels <- c("Verão", "Outono", "Inverno", "Primavera")
  resultado <- vector("list", 4)
  names(resultado) <- season_labels # each result matrix is titled with its season.
  # With the season column in place, each season is a simple subset of the
  # data, fed straight into separa_temp.
  for (k in seq_along(season_keys)) {
    season_data <- subset(dado, season == season_keys[k])
    if(plota == T){
      plot_temps(season_data, titulo = season_labels[k])
    }
    resultado[[k]] <- separa_temp(season_data)
  }
  if(save == T){
    save_data(resultado)
  }
  return(resultado)
}
########################################################
decenio <- function(dado, sazonal = F, save = F, plota = F){
  #####
  # Takes a data.frame of temperatures, returns the count matrices computed
  # decade by decade (10-year blocks, 1961-2010).
  #
  # 'sazonal': if TRUE, computes one matrix per season of each decade
  #            (4 matrices per decade). If FALSE (default), one matrix per decade.
  #
  # 'save': if TRUE, saves every computed matrix to its own, properly named file.
  #
  # 'plota': if TRUE, each decade (or each season of it, depending on 'sazonal')
  #          gets a plot of its values.
  #####
  if(sazonal == T){ # seasonal option needs the season column added first.
    dado <- separa_estacao(dado) # adds the column with the season of each date.
  }
  if(sazonal == T){ # declares the list that will hold the results.
    resultado <- vector("list", 20)
    prog <- c("Verao 1961-1970", "Outono 1961-1970", "Inverno 1961-1970", "Primavera 1961-1970",
              "Verao 1971-1980", "Outono 1971-1980", "Inverno 1971-1980", "Primavera 1971-1980",
              "Verao 1981-1990", "Outono 1981-1990", "Inverno 1981-1990", "Primavera 1981-1990",
              "Verao 1991-2000", "Outono 1991-2000", "Inverno 1991-2000", "Primavera 1991-2000",
              "Verao 2001-2010", "Outono 2001-2010", "Inverno 2001-2010", "Primavera 2001-2010")
    names(resultado) <- prog
  } else {
    resultado <- vector("list", 5)
    prog <- c("1961-1970", "1971-1980", "1981-1990", "1991-2000", "2001-2010")
    names(resultado) <- prog
  }
  datas <- c("1961-01-01", "1971-01-01", "1981-01-01", "1991-01-01", "2001-01-01", "2011-01-01") # dates at which the data is cut into decades.
  data_count <- 2 # iterates over 'datas'.
  wee <- 1 # iterates over the slot of the result list being filled.
  o <- 0 # drives the slicing of the data.frame.
  abs <- nrow(dado) # NOTE(review): shadows base::abs inside this function.
  dado_atual <- dado
  for(i in 1:abs){
    o <- o + 1
    const <- as.character(dado_atual$Data[i])
    if( const == datas[data_count] ){
      dado_novo <- dado[1:(o-1),] # slices off the decade just "found".
      dado <- dado[-c(1:(o-1)),]
      if(sazonal == T){ # computes each season of the decade just obtained.
        ver_op <- subset(dado_novo, season == "verao")
        resultado[[wee]] <- separa_temp(ver_op)
        wee <- wee + 1
        out_op <- subset(dado_novo, season == "outono")
        resultado[[wee]] <- separa_temp(out_op)
        wee <- wee + 1
        inv_op <- subset(dado_novo, season == "inverno")
        resultado[[wee]] <- separa_temp(inv_op)
        wee <- wee + 1
        pri_op <- subset(dado_novo, season == "primavera")
        resultado[[wee]] <- separa_temp(pri_op)
        wee <- wee + 1
        if(plota == T){ # 'wee' has advanced by 4, hence the wee-4..wee-1 offsets.
          plot_temps(ver_op, titulo = prog[wee-4])
          plot_temps(out_op, titulo = prog[wee-3])
          plot_temps(inv_op, titulo = prog[wee-2])
          plot_temps(pri_op, titulo = prog[wee-1])
        }
      } else { # case where only the decade is computed, ignoring seasons.
        if(plota == T){
          plot_temps(dado_novo, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(dado_novo)
        wee <- wee + 1
      }
      data_count <- data_count + 1
      o <- 1
    }
    if( const == "2011-01-01" ){ # the final 5 years do not complete a decade, so stop here.
      break
    }
  }
  if(save == T){
    save_data(resultado)
  }
  return(resultado)
}
########################################################
trinta <- function(dado, sazonal = F, save = F, plota = F){
  #####
  # Takes a data.frame of temperatures, returns the count matrices computed for
  # the three overlapping 30-year climatological normals (1961-1990, 1971-2000,
  # 1981-2010).
  #
  # 'sazonal': if TRUE, computes one matrix per season of each 30-year period
  #            (4 matrices per period). If FALSE (default), one matrix per period.
  #
  # 'save': if TRUE, saves every computed matrix to its own, properly named file.
  #
  # 'plota': if TRUE, each 30-year period (or each season of it, depending on
  #          'sazonal') gets a plot of its values.
  #####
  if(sazonal == T){ # seasonal option needs the season column added first.
    dado <- separa_estacao(dado) # adds the column with the season of each date.
  }
  if(sazonal == T){ # declares the list that will hold the results.
    resultado <- vector("list", 12)
    prog <- c("Verao 1961-1990", "Outono 1961-1990", "Inverno 1961-1990", "Primavera 1961-1990",
              "Verao 1971-2000", "Outono 1971-2000", "Inverno 1971-2000", "Primavera 1971-2000",
              "Verao 1981-2010", "Outono 1981-2010", "Inverno 1981-2010", "Primavera 1981-2010")
    names(resultado) <- prog
  } else {
    resultado <- vector("list", 3)
    prog <- c("1961-1990", "1971-2000", "1981-2010")
    names(resultado) <- prog
  }
  dataA <- c("1961-01-01", "1971-01-01", "1981-01-01", "fim") # period start dates; the "fim" sentinel
  # keeps the index from running past the end of the vector on the last iteration.
  dataZ <- c("1991-01-01", "2001-01-01", "2011-01-01") # period end dates (exclusive).
  a1 <- 0 # row indices where each 30-year period starts.
  a2 <- 0
  a3 <- 0
  a <- 1
  operaA <- 1 # iterates over dataA.
  z1 <- 0 # row indices where each 30-year period ends.
  z2 <- 0
  z3 <- 0
  z <- 1
  operaZ <- 1 # iterates over dataZ.
  wee <- 1 # iterates over the slot of the result list being filled.
  abs <- nrow(dado) # number of rows of the original data. NOTE(review): shadows base::abs.
  dado_atual <- dado # plain copy of the original data.
  for(i in 1:abs){
    const <- as.character(dado_atual$Data[i])
    if( const == dataA[operaA] ){ # both ifs record the row positions that delimit a 30-year period.
      if(a == 1){ # marks the start of each period.
        a1 <- i
      } else if (a == 2){
        a2 <- i
      } else if (a == 3){
        a3 <- i
      }
      a <- a + 1
      operaA <- operaA + 1
    }
    if( const == dataZ[operaZ] ){ # marks the end of each period (row before the boundary date).
      if(z == 1){
        z1 <- i - 1
      } else if (z == 2){
        z2 <- i - 1
      } else if (z == 3){
        z3 <- i - 1
        break # past the last date there is nothing left to scan, hence the break.
      }
      z <- z + 1
      operaZ <- operaZ + 1
    }
  }
  for(i in 1:3){ # here the three delimited slices are turned into matrices.
    if(i == 1){
      a <- a1
      z <- z1
    } else if(i == 2){
      a <- a2
      z <- z2
    } else if(i == 3){
      a <- a3
      z <- z3
    }
    dado_novo <- dado[a:z,]
    if(sazonal == T){ # per-season case within the 30 years.
      ver_op <- subset(dado_novo, season == "verao")
      resultado[[wee]] <- separa_temp(ver_op)
      wee <- wee + 1
      out_op <- subset(dado_novo, season == "outono")
      resultado[[wee]] <- separa_temp(out_op)
      wee <- wee + 1
      inv_op <- subset(dado_novo, season == "inverno")
      resultado[[wee]] <- separa_temp(inv_op)
      wee <- wee + 1
      pri_op <- subset(dado_novo, season == "primavera")
      resultado[[wee]] <- separa_temp(pri_op)
      wee <- wee + 1
      if(plota == T){ # if requested, plots the data of each season; 'wee' has advanced by 4.
        plot_temps(ver_op, titulo = prog[wee-4])
        plot_temps(out_op, titulo = prog[wee-3])
        plot_temps(inv_op, titulo = prog[wee-2])
        plot_temps(pri_op, titulo = prog[wee-1])
      }
    } else { # case ignoring the seasons.
      if(plota == T){
        plot_temps(dado_novo, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(dado_novo)
      wee <- wee + 1
    }
  }
  if(save == T){
    save_data(resultado) # if requested at the call, the analyzed data is saved.
  }
  return(resultado) # returns the data already analyzed, as matrices.
}
########################################################
anual <- function(dado, sazonal = F, save = F, plota = F){
  #####
  # Takes a data.frame of temperatures, returns the count matrices computed
  # year by year (1961-2015).
  #
  # 'sazonal': if TRUE, computes one matrix per season of each year
  #            (4 matrices per year). If FALSE (default), one matrix per year.
  #
  # 'save': if TRUE, saves every computed matrix to its own, properly named file.
  #
  # 'plota': if TRUE, each year (or each season of it, depending on 'sazonal')
  #          gets a plot of its values.
  #####
  if(sazonal == T){ # seasonal option needs the season column added first.
    dado <- separa_estacao(dado) # adds the column with the season of each date.
  }
  if(sazonal == T){ # declares the list that will hold the results.
    resultado <- vector("list", 220)
    prog <- c("Verao 1961", "Outono 1961", "Inverno 1961", "Primavera 1961", "Verao 1962", "Outono 1962", "Inverno 1962", "Primavera 1962",
              "Verao 1963", "Outono 1963", "Inverno 1963", "Primavera 1963", "Verao 1964", "Outono 1964", "Inverno 1964", "Primavera 1964",
              "Verao 1965", "Outono 1965", "Inverno 1965", "Primavera 1965", "Verao 1966", "Outono 1966", "Inverno 1966", "Primavera 1966",
              "Verao 1967", "Outono 1967", "Inverno 1967", "Primavera 1967", "Verao 1968", "Outono 1968", "Inverno 1968", "Primavera 1968",
              "Verao 1969", "Outono 1969", "Inverno 1969", "Primavera 1969", "Verao 1970", "Outono 1970", "Inverno 1970", "Primavera 1970",
              "Verao 1971", "Outono 1971", "Inverno 1971", "Primavera 1971", "Verao 1972", "Outono 1972", "Inverno 1972", "Primavera 1972",
              "Verao 1973", "Outono 1973", "Inverno 1973", "Primavera 1973", "Verao 1974", "Outono 1974", "Inverno 1974", "Primavera 1974",
              "Verao 1975", "Outono 1975", "Inverno 1975", "Primavera 1975", "Verao 1976", "Outono 1976", "Inverno 1976", "Primavera 1976",
              "Verao 1977", "Outono 1977", "Inverno 1977", "Primavera 1977", "Verao 1978", "Outono 1978", "Inverno 1978", "Primavera 1978",
              "Verao 1979", "Outono 1979", "Inverno 1979", "Primavera 1979", "Verao 1980", "Outono 1980", "Inverno 1980", "Primavera 1980",
              "Verao 1981", "Outono 1981", "Inverno 1981", "Primavera 1981", "Verao 1982", "Outono 1982", "Inverno 1982", "Primavera 1982",
              "Verao 1983", "Outono 1983", "Inverno 1983", "Primavera 1983", "Verao 1984", "Outono 1984", "Inverno 1984", "Primavera 1984",
              "Verao 1985", "Outono 1985", "Inverno 1985", "Primavera 1985", "Verao 1986", "Outono 1986", "Inverno 1986", "Primavera 1986",
              "Verao 1987", "Outono 1987", "Inverno 1987", "Primavera 1987", "Verao 1988", "Outono 1988", "Inverno 1988", "Primavera 1988",
              "Verao 1989", "Outono 1989", "Inverno 1989", "Primavera 1989", "Verao 1990", "Outono 1990", "Inverno 1990", "Primavera 1990",
              "Verao 1991", "Outono 1991", "Inverno 1991", "Primavera 1991", "Verao 1992", "Outono 1992", "Inverno 1992", "Primavera 1992",
              "Verao 1993", "Outono 1993", "Inverno 1993", "Primavera 1993", "Verao 1994", "Outono 1994", "Inverno 1994", "Primavera 1994",
              "Verao 1995", "Outono 1995", "Inverno 1995", "Primavera 1995", "Verao 1996", "Outono 1996", "Inverno 1996", "Primavera 1996",
              "Verao 1997", "Outono 1997", "Inverno 1997", "Primavera 1997", "Verao 1998", "Outono 1998", "Inverno 1998", "Primavera 1998",
              "Verao 1999", "Outono 1999", "Inverno 1999", "Primavera 1999", "Verao 2000", "Outono 2000", "Inverno 2000", "Primavera 2000",
              "Verao 2001", "Outono 2001", "Inverno 2001", "Primavera 2001", "Verao 2002", "Outono 2002", "Inverno 2002", "Primavera 2002",
              "Verao 2003", "Outono 2003", "Inverno 2003", "Primavera 2003", "Verao 2004", "Outono 2004", "Inverno 2004", "Primavera 2004",
              "Verao 2005", "Outono 2005", "Inverno 2005", "Primavera 2005", "Verao 2006", "Outono 2006", "Inverno 2006", "Primavera 2006",
              "Verao 2007", "Outono 2007", "Inverno 2007", "Primavera 2007", "Verao 2008", "Outono 2008", "Inverno 2008", "Primavera 2008",
              "Verao 2009", "Outono 2009", "Inverno 2009", "Primavera 2009", "Verao 2010", "Outono 2010", "Inverno 2010", "Primavera 2010",
              "Verao 2011", "Outono 2011", "Inverno 2011", "Primavera 2011", "Verao 2012", "Outono 2012", "Inverno 2012", "Primavera 2012",
              "Verao 2013", "Outono 2013", "Inverno 2013", "Primavera 2013", "Verao 2014", "Outono 2014", "Inverno 2014", "Primavera 2014",
              "Verao 2015", "Outono 2015", "Inverno 2015", "Primavera 2015")
    names(resultado) <- prog
  } else {
    resultado <- vector("list", 55)
    prog <- c("1961", "1962", "1963", "1964", "1965", "1966", "1967", "1968", "1969", "1970", "1971", "1972", "1973", "1974", "1975", "1976",
              "1977", "1978", "1979", "1980", "1981", "1982", "1983", "1984", "1985", "1986", "1987", "1988", "1989", "1990", "1991", "1992",
              "1993", "1994", "1995", "1996", "1997", "1998", "1999", "2000", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008",
              "2009", "2010", "2011", "2012", "2013", "2014", "2015")
    names(resultado) <- prog
  }
  datas <- c("1961-01-01", "1962-01-01", "1963-01-01", "1964-01-01", "1965-01-01", "1966-01-01", "1967-01-01", "1968-01-01", "1969-01-01", "1970-01-01",
             "1971-01-01", "1972-01-01", "1973-01-01", "1974-01-01", "1975-01-01", "1976-01-01", "1977-01-01", "1978-01-01", "1979-01-01", "1980-01-01",
             "1981-01-01", "1982-01-01", "1983-01-01", "1984-01-01", "1985-01-01", "1986-01-01", "1987-01-01", "1988-01-01", "1989-01-01", "1990-01-01",
             "1991-01-01", "1992-01-01", "1993-01-01", "1994-01-01", "1995-01-01", "1996-01-01", "1997-01-01", "1998-01-01", "1999-01-01", "2000-01-01",
             "2001-01-01", "2002-01-01", "2003-01-01", "2004-01-01", "2005-01-01", "2006-01-01", "2007-01-01", "2008-01-01", "2009-01-01", "2010-01-01",
             "2011-01-01", "2012-01-01", "2013-01-01", "2014-01-01", "2015-01-01") # dates at which the data is cut, one per year.
  data_count <- 2 # iterates over 'datas'.
  wee <- 1 # iterates over the slot of the result list being filled.
  o <- 0 # drives the slicing of the data.frame.
  abs <- nrow(dado) # NOTE(review): shadows base::abs inside this function.
  dado_atual <- dado
  for(i in 1:abs){
    const <- as.character(dado_atual$Data[i])
    o <- o + 1
    if( const == datas[data_count] ){
      dado_novo <- dado[1:(o-1),] # slices the working copy year by year.
      dado <- dado[-c(1:(o-1)),]
      if(sazonal == T){ # computes each season of the year just obtained.
        ver_op <- subset(dado_novo, season == "verao")
        if(plota == T){
          plot_temps(ver_op, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(ver_op)
        wee <- wee + 1
        out_op <- subset(dado_novo, season == "outono")
        if(plota == T){
          plot_temps(out_op, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(out_op)
        wee <- wee + 1
        inv_op <- subset(dado_novo, season == "inverno")
        if(plota == T){
          plot_temps(inv_op, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(inv_op)
        wee <- wee + 1
        pri_op <- subset(dado_novo, season == "primavera")
        if(plota == T){
          plot_temps(pri_op, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(pri_op)
        wee <- wee + 1
      } else { # case where only the year is computed, ignoring seasons.
        if(plota == T){
          plot_temps(dado_novo, titulo = prog[wee])
        }
        resultado[[wee]] <- separa_temp(dado_novo)
        wee <- wee + 1
      }
      data_count <- data_count + 1
      o <- 1
    }
    if( (const == "2015-01-01") & (sazonal == F) ){ # the last slice of the last year is saved here.
      if(plota == T){
        plot_temps(dado, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(dado)
      break
    } else if( (const == "2015-01-01") & (sazonal == T)){ # last slice of the last year, per season, is saved here.
      ver_op <- subset(dado, season == "verao")
      if(plota == T){
        plot_temps(ver_op, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(ver_op)
      wee <- wee + 1
      out_op <- subset(dado, season == "outono")
      if(plota == T){
        plot_temps(out_op, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(out_op)
      wee <- wee + 1
      inv_op <- subset(dado, season == "inverno")
      if(plota == T){
        plot_temps(inv_op, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(inv_op)
      wee <- wee + 1
      pri_op <- subset(dado, season == "primavera")
      if(plota == T){
        plot_temps(pri_op, titulo = prog[wee])
      }
      resultado[[wee]] <- separa_temp(pri_op)
      break
    }
  }
  if(save == T){
    save_data(resultado)
  }
  return(resultado)
}
########################################################
# T U T O R I A L
########################################################
# CONFIRME O QUE DESEJA FAZER, E PARA TAL, APAGUE O " # " À FRENTE DO CÓDIGO.
# APÓS ISSO, RODE NORMALMENTE.
########################################################
# 1.
# caso da estrutura do IAG.
# com duas tabelas, uma de máximas, uma de mínimas.
#max <- read.table("tmax_dia.txt", header = T, sep = ",")
#min <- read.table("tmin_dia.txt", header = T, sep = ",")
#op <- convert_universal(max, min)
# caso da estrutura de Mirante.
# com uma única tabela, onde há informações de hora UTC, precipitação, temperatura máxima e mínima.
#data <- read.table("Estação - 83781_Mirante_1961_2016.txt", header = T, sep = ";")
# caso trabalhe com o dia posterior na obtenção da tabela de dados, usa-se Mirante = T. Caso contrário, usando o mesmo dia, usa-se Mirante = F.
#op <- convert_universal(data, Mirante = T)
# 2.
# agora, para conferir a quantidade de entradas NA no data.frame gerado (as entradas com NA serão salvas em um arquivo .csv), usa-se:
#confere_na(op)
# para conferir inconsistência dos dados de entrada (será salvo em um arquivo .csv), usa-se:
#confere_cons(op)
# 3.
# recomenda-se retirar as matrizes já geradas e colocá-las em pasta à parte, descritas corretamente, para que não haja confusões.
# alguns programas retornam muitas matrizes, o que pode tornar as coisas confusas, sem o devido cuidado.
# para calcular a matriz dos dados todos (save = T salva em arquivo .csv a matriz gerada).
#resultado1 <- separa_temp(op, save = T)
# para calcular as matrizes em quinquênios (5 em 5 anos).
# aqui em diante: sazonal > se calcula a matriz em estações dentro do quinquênio (ou outros períodos, como 10 anos, anual, 30 anos).
# save > se salva em arquivo .csv as matrizes geradas.
# plota > se faz um gráfico dos dados separados.
#resultado2 <- quinquenal(op, sazonal = T, save = F, plota = F)
# para calcular as matrizes em estações (4 estações para o data.frame inteiro).
#resultado3 <- sazonal(op, save = F, plota = F)
# para calcular as matrizes em decênios (10 em 10 anos).
#resultado4 <- decenio(op, sazonal = T, save = F, plota = F)
# para calcular as matrizes de trinta em trinta anos.
#resultado5 <- trinta(op, sazonal = T, save = F, plota = F)
# para calcular as matrizes ano a ano.
#resultado6 <- anual(op, sazonal = T, save = T, plota = F)
# 4.
# por fim, é possível pôr em gráfico(s) o que foi gerado em resultado1, resultado2, ... , resultado6. Isso é feito pela função graficaliza.
# basta entrar com o resultadoi (i = 1, ... ,6), que foi calculado acima.
# usando sazonal = T, o programa separa as estações, contando que o dado de entrada já possua as estações diferenciadas.
# em outras palavras, usa-se sazonal = T aqui, quando para calcular o resultadoi (i = 1, ... , 6), a opção sazonal = T estava definida nele.
#graficaliza(resultado1, sazonal = F)
#graficaliza(resultado6, sazonal = T)
|
a09b90e419a748845097683c93fb78a351caba51
|
122976d6e18856ea2b2c3d876f4d4773209b1fbc
|
/plot5.R
|
5265ca78395e2b06a0d4fe699242423d27b3d0bc
|
[] |
no_license
|
ovijive/ExData_Plotting2
|
3378618c3ce289478e942acdc6d1a4511f90646e
|
fafa6ff0d0263362ad5bab0d06b6f34b6d13512a
|
refs/heads/master
| 2021-01-10T05:20:54.502836
| 2016-02-01T22:24:20
| 2016-02-01T22:24:20
| 50,853,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 625
|
r
|
plot5.R
|
# Plot 5: motor-vehicle PM2.5 emission trend in Baltimore City.
# Reads the pre-summarized NEI data, totals ON-ROAD emissions per year and
# writes the bar chart to plot5.png.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Keep only motor-vehicle (ON-ROAD) records for Baltimore City (fips: 24510).
baltEmiss <- subset(NEI, fips == 24510 & type == "ON-ROAD")
# Total emissions per year.
totalEmi <- aggregate(Emissions ~ year, data = baltEmiss, sum)
# Load ggplot2 for plotting.
library(ggplot2)
# Render to a PNG device.
png(filename = "plot5.png", width = 800, height = 800, units = "px")
emission_plot <- ggplot(totalEmi, aes(factor(year), Emissions)) +
  geom_bar(stat = "identity") +
  ylab(expression('PM'[2.5]*" Emissions")) +
  xlab("year") +
  ggtitle("Emission Trends in Baltimore City from 1999 to 2008")
print(emission_plot)
dev.off()
|
31048a5fb28063a27689b215d84cb7428e05116f
|
bd207458397914151d99414c01662054c31f8e74
|
/example.R
|
9495adaae22b750d7897017b8ad689a9f8d12e09
|
[] |
no_license
|
ineswilms/taglasso
|
99f262e2cae38f297db282554da37bf1668b88ee
|
6201ac5b4431e2b60bf4a7d99467e100b6863ab4
|
refs/heads/main
| 2023-03-01T18:35:37.718303
| 2021-01-29T10:00:56
| 2021-01-29T10:00:56
| 321,664,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,824
|
r
|
example.R
|
# Example script: fitting the tag-lasso to the bundled realized-variance data.
rm(list=ls()) # NOTE(review): wipes the whole workspace on every run; avoid in shared/reusable scripts.
#### Install package from GitHub ####
install.packages("devtools") # NOTE(review): re-installs on every execution; consider guarding with requireNamespace().
devtools::install_github("ineswilms/taglasso")
library(taglasso)
#### Pre-process the data ####
data('rv')          # example dataset shipped with the taglasso package.
rv_data <- rv$data  # realized variances, one series per column.
A <- rv$A           # aggregation matrix used by the tag-lasso penalty.
# Estimate HAR models
estimate.HAR <- function(y){
  # HAR model residuals: regresses each day's realized variance on its daily
  # lag, its weekly (5-day) average lag and its monthly (22-day) average lag,
  # returning the OLS residuals as a one-column matrix (length(y) - 22 rows).
  #
  # The fit uses lm.fit (QR decomposition) instead of the explicit normal
  # equations solve(t(X)%*%X)%*%t(X)%*%y, which is numerically unstable for
  # ill-conditioned regressors.
  HARdata <- embed(y, 22+1)       # col 1 = current value, cols 2:23 = lags 1..22.
  XHAR <- HARdata[, -1]
  YHAR <- HARdata[,1]
  X.D <- XHAR[, 1]                # daily lag.
  X.W <- rowMeans(XHAR[, 1:5])    # weekly component: mean of lags 1..5.
  X.M <- rowMeans(XHAR)           # monthly component: mean of lags 1..22.
  X.HAR <- cbind(1, X.D, X.W, X.M)
  fit <- lm.fit(X.HAR, YHAR)
  # Shape the residuals as a column matrix, matching the original return value.
  resid.HAR <- matrix(fit$residuals, ncol = 1)
  return(resid.HAR)
}
# HAR residuals for every series; these become the input of the tag-lasso.
resid_HAR <- apply(rv_data, 2, estimate.HAR)
data <- resid_HAR # NOTE(review): shadows utils::data loaded above; harmless here but confusing.
#### 5-fold cross-validation to select the regularization parameters ####
library(parallel)
ptm <- proc.time() # time the cross-validation.
rv_taglasso_cv <- taglasso_cv(X = data, A = A, seed = floor(abs(data[1]*1000)), fold = 5,
                              l1gran = 5, l2gran = 5, nc = detectCores()-1, do_parallel = TRUE)
proc.time() - ptm # elapsed CV time.
#### tag-lasso fit ####
# Final fit at the CV-selected lambda pair.
rv_taglasso <- taglasso(X = data, A = A, lambda1 = rv_taglasso_cv$l1opt, lambda2 = rv_taglasso_cv$l2opt, hc = TRUE, plot = TRUE)
#### networks ####
library(corrplot)
# Sparsity pattern (non-zero entries) of the aggregated precision matrix.
corrplot(rv_taglasso$omega_aggregated!=0, cl.pos = "n", tl.cex = 1.5,
         method = "color", main = "" ,
         mar = c(0,0,0.5,0), addgrid.col = "black", cex.main = 1.5, font.main = 1,
         is.corr = F, col=c("#F0F0F0", "White", "Black"),
         tl.col = "black")
# Sparsity pattern of the full (disaggregated) precision matrix.
corrplot(rv_taglasso$omega_full!=0, cl.pos = "n", tl.cex = 1.5,
         method = "color", main = "" ,
         mar = c(0,0,0.5,0), addgrid.col = "black", cex.main = 1.5, font.main = 1,
         is.corr = F, col=c("#F0F0F0", "White", "Black"),
         tl.col = "black")
|
e529484365a05598afb3560ed0e8082b8cd6e42e
|
4aa6996769d3096a62c87834daed4ce11b5a0ec3
|
/R/utils.r
|
8b0e89b3b6835775326606eee649c0acd702b917
|
[] |
no_license
|
selcukfidan47/testthat
|
a308a5f8a1637ad6956299def805ab7d4c96cdff
|
df4ca8c6975d0b45845be481cda89bebe3d8c09e
|
refs/heads/master
| 2021-01-18T08:50:19.832177
| 2016-02-21T10:56:26
| 2016-02-21T10:56:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 327
|
r
|
utils.r
|
`%||%` <- function(a, b) if (is.null(a)) b else a
# Vectorised prefix test: TRUE where the first nchar(prefix) characters of
# `string` equal `prefix` (FALSE when `string` is shorter than `prefix`).
starts_with <- function(string, prefix) {
  head_chars <- substr(string, 1, nchar(prefix))
  head_chars == prefix
}
# TRUE if `x` is an existing directory (NA when the path does not exist).
is_directory <- function(x) {
  file.info(x)$isdir
}

# TRUE if the current user has read permission on `x` (file.access mode 4).
is_readable <- function(x) {
  file.access(x, mode = 4) == 0
}

# No-op sink: swallows any arguments and returns NULL invisibly.
null <- function(...) {
  invisible()
}

# Collapses an object's class vector into one "/"-separated label.
klass <- function(x) {
  paste(class(x), collapse = "/")
}
|
724e83a4bf48865fe1f73b562cda42020a7bcaf8
|
136ecc91ee6a29dcd414aba36e19c709c8ce805e
|
/DataMining.R
|
15d28ee3391f6592375d7dc773bcb8915ed17616
|
[] |
no_license
|
PabloArmasM/Data-Stream-Mining-para-el-modelado-del-tra-fico-en-Nueva-York
|
1c1e05a39fc1b7f77d01eb13a1c194500241b77b
|
5d2a7e7cc26f50d67434bd4270f95950afb73119
|
refs/heads/master
| 2020-12-02T06:24:10.192824
| 2017-07-10T22:45:59
| 2017-07-10T22:45:59
| 96,827,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,364
|
r
|
DataMining.R
|
# Dependencies of the streaming-experiment script.
require('XML')        # NOTE(review): prefer library() for hard dependencies; require() only warns on failure.
library("neuralnet")  # neural-network regressor.
library("animation")  # animated plots of the error series.
require("ggplot2")
## Installation from github
##
library("devtools")
require(RMOA)         # MOA stream-mining wrappers (HoeffdingTree).
##
library("arules")
setwd("/home/loedded/Escritorio/XML") # NOTE(review): hard-coded machine-specific path; breaks portability.
# Model formula: predict 'Class' from the 50 lag features c1..c50.
f <- as.formula("Class ~ c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 + c10 + c11 + c12 + c13 + c14 + c15 + c16 + c17 + c18 + c19 + c20 + c21 + c22 + c23 + c24 + c25 + c26 + c27 + c28 + c29 + c30 + c31 + c32 + c33 + c34 + c35 + c36 + c37 + c38 + c39 + c40 + c41 + c42 + c43 + c44 + c45 + c46 + c47 + c48 + c49 + c50")
errorInit <- function(names, size = 50){
  # Builds the per-series error accumulator: one zero-filled numeric vector of
  # length `size` for each entry of `names`.
  #
  # `size` generalizes the previously hard-coded length of 50; the default
  # keeps existing callers unchanged.
  err <- list()
  for(name in names){
    err[[name]] <- rep(0, size)
  }
  return (err)
}
addElement <- function(err, error){
  # Appends each vector in `error` onto the vector of the same name in `err`,
  # accumulating the error history series by series. Names present only in
  # `error` are created in `err` (c(NULL, x) is x).
  #
  # Fix: removed the dead local `au <- err[[name]]`, which was assigned on every
  # iteration and never used.
  namesE <- names(error)
  for (name in namesE) {
    err[[name]] <- c(err[[name]], error[[name]])
  }
  return (err)
}
main <- function(){
  # Streaming-experiment driver: slides a 100-row window over the data one
  # step at a time (400 steps), refitting at every step a linear model (with
  # per-series ADWIN-style adaptive window sizes) and a neural network
  # (warm-started from the previous weights), then accumulating their
  # one-step-ahead errors on the next 50-row test slice.
  #
  # NOTE(review): depends on chargeData/prepareData/prepareDataTest/
  # linearRegresion/learning/onlyWeights/calculateError*/sizeRLInit/
  # proportionalAdwin/animate_plot defined elsewhere; their contracts are
  # assumed, not visible here.
  sizeRl <- list() # NOTE(review): `sizeRl`, `sizeRL` and `siezeHistory` differ only by case/typo;
  err <- list()    # `sizeRL` is never reassigned inside the loop (only `sizeRl`) -- likely a bug, verify.
  first = 0
  last = 0 + 99    # initial training window: rows [first, last], 100 samples.
  dataFrame <- chargeData(first, last)
  dataFrameAux <- prepareData(dataFrame)
  rm(dataFrame)
  err <- errorInit(names(dataFrameAux)) # zeroed error accumulators, one per column.
  hdt <- HoeffdingTree(numericEstimator = "GaussianNumericAttributeClassObserver")
  linear <- linearRegresion(dataFrameAux) # initial linear fit on the first window.
  NN <- learning(dataFrameAux)            # initial neural-network fit.
  NW <- onlyWeights(NN)                   # keep the weights to warm-start the next fit.
  #hdt <- learningHoefftingFirst(dataFrameAux, hdt)
  rm(dataFrameAux)
  test <- chargeData(first + 50, last + 1) # next slice used as the test set.
  test <- prepareDataTest(test)
  errorRLList <- calculateErrorRL(linear, test)
  cP<-errorRLList           # control point fed into the adaptive-window logic.
  err <- errorRLList        # NOTE(review): overwrites the errorInit() accumulators built above.
  errlist <- addElement(err, errorRLList)
  errorRLList <- as.data.frame(t(errorRLList))   # history of linear-model errors, one row per step.
  errorNNRList <- as.data.frame(t(calculateErrorNNR(NN, test))) # history of NN errors.
  #errorHdtList <- as.data.frame(t(calculateErrorHDT(hdt, test)))
  rm(test)
  sizeRL <- sizeRLInit(names(errorRLList)) # initial adaptive window size per series.
  returns <- proportionalAdwin(err, errlist, sizeRL, cP)
  cP <- returns$eControlPoint
  sizeRl <- returns$sizeRL
  siezeHistory <- as.data.frame(t(sizeRL)) # window-size history (sic: "sieze").
  rm(linear)
  #res <- manualRegresion(as.vector(as.matrix(result$net.result)) * maxF,(firstElement[-1]*maxF)-lastmaxF)
  for(i in c(1:400)){ # 400 sliding-window steps.
    first = first + 1
    last = last + 1
    dataFrame<-chargeData(first, last)
    dataFrameAux <- prepareData(dataFrame)
    rm(dataFrame)
    linear <- linearRegresionADWIN(dataFrameAux, sizeRL) # refit with adaptive window sizes.
    NN <- learningWithWeights(dataFrameAux, NW)          # refit NN warm-started from last weights.
    NW <- onlyWeights(NN)
    #hdt <- learningHoeffting(dataFrameAux, hdt)
    rm(dataFrameAux)
    test <- chargeData(first + 50, last + 1)
    test <- prepareDataTest(test)
    newErrorLR <- calculateErrorRL(linear, test)
    cP<-newErrorLR
    err <- newErrorLR
    errlist <- addElement(err, newErrorLR)
    newErrorLR <- as.data.frame(t(newErrorLR))
    returns <- proportionalAdwin(err, errlist, sizeRL, cP) # update the adaptive windows.
    cP <- returns$eControlPoint
    sizeRl <- returns$sizeRL # NOTE(review): lowercase `sizeRl` -- `sizeRL` used above is never updated.
    newErrorNNR <- calculateErrorNNR(NN, test)
    rm(test)
    rm(linear)
    print(last) # progress: index of the last row of the current window.
    print("ULTIMO asifja ipjai") # NOTE(review): leftover debug output; candidate for removal.
    siezeHistory <- rbind(siezeHistory, as.data.frame(t(sizeRL)))
    errorRLList <- rbind(errorRLList, newErrorLR)
    errorNNRList <- rbind(errorNNRList, as.matrix(t(newErrorNNR)))
    #errorhdtC <- calculateErrorHDT(hdt, test)
    #errorHdtList <- rbind(errorHdtList, as.matrix(t(errorhdtC)))
    rm(newErrorLR)
    rm(newErrorNNR)
  }
  animate_plot(errorRLList, errorNNRList, siezeHistory) # animated comparison of both error series.
  #plotter(errorRLList, errorNNRList)
}
#TODO: add mean/variance plots, the difference between the RL error and the NNR error, and the window size
|
238a6c17539d42e15fc6ce92ba36385231c38807
|
f0489c47853fc78a49bfbc28ca3cf39798b17431
|
/man/consensusmap-NMFfitX-method.Rd
|
1597423b90fd18755c52cfa548adbb0c97c196ce
|
[] |
no_license
|
pooranis/NMF
|
a7de482922ea433a4d4037d817886ac39032018e
|
c9db15c9f54df320635066779ad1fb466bf73217
|
refs/heads/master
| 2021-01-17T17:11:00.727502
| 2019-06-26T07:00:09
| 2019-06-26T07:00:09
| 53,220,016
| 0
| 0
| null | 2016-03-05T19:46:24
| 2016-03-05T19:46:24
| null |
UTF-8
|
R
| false
| true
| 574
|
rd
|
consensusmap-NMFfitX-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NMFSet-class.R
\docType{methods}
\name{consensusmap,NMFfitX-method}
\alias{consensusmap,NMFfitX-method}
\title{Plots a heatmap of the consensus matrix obtained when fitting an NMF model with multiple runs.}
\usage{
\S4method{consensusmap}{NMFfitX}(object, annRow = NA, annCol = NA,
tracks = c("basis:", "consensus:", "silhouette:"),
main = "Consensus matrix", info = FALSE, ...)
}
\description{
Plots a heatmap of the consensus matrix obtained when fitting an NMF model with multiple runs.
}
|
74f1e2cb00407640a7c8279d738e5450a626cb41
|
6927c39fa8f7762025a999f912f27da02fe88d26
|
/code/cats_vs_dogs_SMALL.R
|
2680a855e57c026abd9ccdab719ad7f7f0787350
|
[] |
no_license
|
uwpz/DL
|
af6eb9e915e43e81d08d15c6f3f2ebe0dc6ca5e3
|
23948db5715b52323756b783799998fc976df2a7
|
refs/heads/master
| 2020-07-12T00:19:46.486800
| 2019-08-28T17:48:54
| 2019-08-28T17:48:54
| 204,673,607
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,451
|
r
|
cats_vs_dogs_SMALL.R
|
#######################################################################################################################-
#|||| Initialize and ETL ||||----
#######################################################################################################################-
## Libraries
library(reticulate)
library(magick)
library(viridis)
## Backend
#use_python("C:\\ProgramData\\Python\\Python36\\python.exe", required = TRUE)
#use_condaenv(condaenv = "cntk-py35", conda = "C:/local/Anaconda3-4.1.1-Windows-x86_64/Scripts/conda.exe", required = TRUE)
library(keras)
k_backend()
#use_backend("tensorflow")
#use_backend("cntk")
#cmd: "nvidia-smi -l" to monitor gpu-usage
## Functions
source("code/0_init.R")
## Parameter
# "small" selects the reduced cats-vs-dogs dataset under ./data/cats_vs_dogs/small/.
type = "small"
dataloc = paste0("./data/cats_vs_dogs/",type,"/")
# Count the images in each split (outer parens print the value as a side effect).
(n.train = length(list.files(paste0(dataloc,"train"), recursive = TRUE)))
(n.test = length(list.files(paste0(dataloc,"test"), recursive = TRUE)))
(n.validate = length(list.files(paste0(dataloc,"validate"), recursive = TRUE)))
batchsize = 20
#######################################################################################################################-
#|||| Prepare ||||----
#######################################################################################################################-
## Validate and test
# Validation generator: rescales pixels to [0,1], resizes to 150x150,
# binary labels (cat/dog).
# Validate
generator.validate = flow_images_from_directory(
paste0(dataloc,"validate"),
image_data_generator(rescale = 1/255),
target_size = c(150, 150),
batch_size = batchsize,
class_mode = "binary")
# Plot
par(mfrow = c(2,2), mar = c(2,0,2,0))
for (i in 1:4) {
generator_next(generator.validate)[[1]][i,,,] %>% as.raster() %>% plot()
title(paste0("Image\n",i), cex.main = 1)
}
# Test generator: shuffle must stay FALSE so predictions line up with
# generator.test$classes and the `files` listing below.
# Test
generator.test = flow_images_from_directory(
paste0(dataloc,"test"),
image_data_generator(rescale = 1/255),
target_size = c(150, 150),
batch_size = batchsize,
class_mode = "binary",
shuffle = FALSE) #no shuffle !!!
files = list.files(paste0(dataloc,"test"), recursive = TRUE)
## Train
# Data augmentation: random rotations/shifts/shear/zoom/flips applied on the fly.
datagen.augment = image_data_generator(
rescale = 1/255,
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = TRUE,
fill_mode = "nearest"
)
# Check augmentation
img = image_load(paste0(dataloc,"train/cats/00003.jpg"), target_size = c(150,150)) %>%
image_to_array() %>%
array_reshape(c(1,150,150,3))
par(mfrow = c(2,2), mar = c(2,0,2,0))
plot(as.raster(img[1,,,]/255)); title("Orig")
for (i in 1:3) {
generator_next(flow_images_from_data(img, generator = datagen.augment, batch_size = 1))[1,,,] %>%
as.raster() %>%
plot()
}
# Generator
generator.train = flow_images_from_directory(
paste0(dataloc,"train"),
datagen.augment,
target_size = c(150, 150),
batch_size = batchsize,
class_mode = "binary")
#######################################################################################################################-
#|||| Small convnet ||||----
#######################################################################################################################-
# Fit -----------------------------------------------------------------------------------------------------------
# Model definition: 4 conv/pool stages + dropout + dense head, sigmoid output
# for binary cat/dog classification.
model.1 = keras_model_sequential() %>%
layer_conv_2d(filters = 32, kernel_size = c(3, 3), activation = "relu",
input_shape = c(150, 150, 3)) %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_conv_2d(filters = 64, kernel_size = c(3, 3), activation = "relu") %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_conv_2d(filters = 128, kernel_size = c(3, 3), activation = "relu") %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_conv_2d(filters = 128, kernel_size = c(3, 3), activation = "relu") %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_flatten() %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 512, activation = "relu") %>%
layer_dense(units = 1, activation = "sigmoid") %>%
compile(
loss = "binary_crossentropy",
optimizer = optimizer_rmsprop(lr = 1e-4),
metrics = c("acc")
)
model.1
# Fit
fit.1 = model.1 %>% fit_generator(
generator.train,
steps_per_epoch = n.train/batchsize,
#initial_epoch = 1, #must be less than epoch
epochs = 15,
validation_data = generator.validate,
validation_steps = n.validate/batchsize
)
plot(fit.1)
# Evaluate on the held-out test split; yhat are P(dog) -- TODO confirm class order.
model.1 %>% evaluate_generator(generator.test, steps = n.test/batchsize)
yhat = predict_generator(model.1, generator.test, steps = n.test/batchsize)
y_num = as.vector(generator.test$classes)
y = factor(ifelse(y_num == 0, "N", "Y"))
performance_summary(data.frame(yhat = data.frame("Y"=yhat, "N"=1-yhat), y = y))
plots = plot_all_performances(yhat = data.frame("Y"=yhat, "N"=1-yhat), y = y)
ggsave(paste0(plotloc, "model.1_performance.pdf"), marrangeGrob(plots, ncol = 4, nrow = 2, top = NULL),
w = 18, h = 12)
# Save
#model.1 %>% save_model_hdf5(paste(type,"_model1.h5"))
#model.1 = load_model_hdf5(paste(type,"_model1.h5"))
# Interpret -------------------------------------------------------------------------------------------------------
## Plot images with low and high residuals
# Get residuals (absolute difference between predicted probability and label).
res = abs(yhat - y_num)
order(res)
k = 9
i.img_low = order(res)[1:k]
res[i.img_low]
i.img_high = order(res, decreasing = TRUE)[1:k]
res[i.img_high]
# High Residuals: class-activation maps for the worst-predicted images.
i.img = i.img_high
dev.off()
pdf(paste0(plotloc,"residuals_high_model1.pdf"))
par(mfrow = c(4,4), mar = c(1,0,1,0))
for (i in 1:k) {
plot_cam(img_path = paste0(dataloc,"test/",files[i.img[i]]),
model = model.1,
layer_name = "conv2d_4",
titles = c(paste0("Class = ",y_num[i.img[i]]),
paste0("yhat = ",round(yhat[i.img[i]],3))),
target_class = 1 - y_num[i.img[i]])
}
dev.off()
# Low Residuals: CAMs for the best-predicted images.
i.img = i.img_low
pdf(paste0(plotloc,"residuals_low_model1.pdf"))
par(mfrow = c(4,4), mar = c(1,0,1,0))
for (i in 1:k) {
plot_cam(img_path = paste0(dataloc,"test/",files[i.img[i]]),
model = model.1,
layer_name = "conv2d_4",
titles = c(paste0("Class = ",y_num[i.img[i]]),
paste0("yhat = ",round(yhat[i.img[i]],3))),
target_class = y_num[i.img[i]])
}
dev.off()
#######################################################################################################################-
#|||| Feature extraction (with data augmentation) ||||----
#######################################################################################################################-
# Get pretrained vgg16 convbase (ImageNet weights, no classifier head).
conv_base <- application_vgg16(
weights = "imagenet",
include_top = FALSE,
input_shape = c(150, 150, 3)
)
conv_base
# Freeze conv_base so only the new dense head is trained.
freeze_weights(conv_base)
# Enlarge with dense layers
model.3 = keras_model_sequential() %>%
conv_base %>%
#a %>%
#layer_conv_2d(filters = 512, activation = "relu") %>%
layer_flatten() %>%
layer_dense(units = 256, activation = "relu") %>%
layer_dense(units = 1, activation = "sigmoid") %>%
compile(
loss = "binary_crossentropy",
#optimizer = optimizer_rmsprop(lr = 2e-5),
optimizer = optimizer_adam(lr = 2e-5),
metrics = c("acc")
)
model.3
# Fit
fit.3 = model.3 %>% fit_generator(
generator.train,
steps_per_epoch = n.train/batchsize,
#initial_epoch = 90,
epochs = 10,
validation_data = generator.validate,
validation_steps = n.validate/batchsize
)
# Evaluate
plot(fit.3)
model.3 %>% evaluate_generator(generator.test, steps = n.test/batchsize)
yhat = predict_generator(model.3, generator.test, steps = n.test/batchsize)
y_num = as.vector(generator.test$classes)
y = factor(ifelse(y_num == 0, "N", "Y"))
performance_summary(data.frame(yhat = data.frame("Y"=yhat, "N"=1-yhat), y = y))
plots = plot_all_performances(yhat = data.frame("Y"=yhat, "N"=1-yhat), y = y)
ggsave(paste0(plotloc, "model.3_performance.pdf"), marrangeGrob(plots, ncol = 4, nrow = 2, top = NULL),
w = 18, h = 12)
# Save
# NOTE(review): paste() with default sep inserts a space in the filename;
# paste0() was probably intended (lines are commented out anyway).
#model.3 %>% save_model_hdf5(paste(type,"_model3.h5"))
#model.3 = load_model_hdf5(paste(type,"_model3.h5"))
#######################################################################################################################-
#|||| Fine Tuning with interpretation ||||----
#######################################################################################################################-
#
# Remove last 2 conv_layer + maxpool_layer from conv_base and freeze: !!! RUN THIS after model.3 to just restimate last 2 layer
# DirectLy unfreeze_weights(conv_base, from = "block5_conv2") does not work together with CAM
# tmp = VGG16 truncated after layer 16; frozen base for fine-tuning.
tmp = keras_model(conv_base$inputs, conv_base$layers[[16]]$output)
tmp
freeze_weights(tmp)
# Enlarge again with last 2 layers + dense layers (these are trainable).
model.5 = keras_model_sequential() %>%
tmp %>%
layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = "relu") %>%
layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = "relu") %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_flatten() %>%
layer_dense(units = 256, activation = "relu") %>%
layer_dense(units = 1, activation = "sigmoid") %>%
compile(
loss = "binary_crossentropy",
optimizer = optimizer_adam(lr = 2e-5),
metrics = c("acc")
)
model.5
# Fit
fit.5 = model.5 %>% fit_generator(
generator.train,
steps_per_epoch = n.train/batchsize,
epochs = 20,
validation_data = generator.validate,
validation_steps = n.validate/batchsize
)
# Evaluate
plot(fit.5)
model.5 %>% evaluate_generator(generator.test, steps = n.test/batchsize)
yhat = predict_generator(model.5, generator.test, steps = n.test/batchsize)
y_num = as.vector(generator.test$classes)
y = factor(ifelse(y_num == 0, "N", "Y"))
performance_summary(data.frame(yhat = data.frame("Y"=yhat, "N"=1-yhat), y = y))
plots = plot_all_performances(yhat = data.frame("Y"=yhat, "N"=1-yhat), y = y)
ggsave(paste0(plotloc, "model.5_performance.pdf"), marrangeGrob(plots, ncol = 4, nrow = 2, top = NULL),
w = 18, h = 12)
# Save
#model.5 %>% save_model_hdf5(paste(type,"_model5.h5"))
#model.5 = load_model_hdf5(paste(type,"_model5.h5"))
## Plot images with low and high residuals
# Get residuals
res = abs(yhat - y_num)
order(res)
k = 16
(i.img_low = order(res)[10+(1:k)])
res[i.img_low]
(i.img_high = order(res, decreasing = TRUE)[0+(1:k)])
res[i.img_high]
# High residuals: CAMs for the worst-predicted test images.
i.img = i.img_high
dev.off()
pdf(paste0(plotloc,"residuals_high_model5.pdf"))
par(mfrow = c(4,4), mar = c(1,0,1,0))
for (i in 1:k) {
plot_cam(img_path = paste0(dataloc,"test/",files[i.img[i]]),
model = model.5,
layer_name = "conv2d_6",
titles = c(paste0("Class = ",y_num[i.img[i]]),
paste0("yhat = ",round(yhat[i.img[i]],3))),
target_class = 1 - y_num[i.img[i]]) #target_class of prediction for high residuals
}
dev.off()
# Low residuals: CAMs for the best-predicted test images.
i.img = i.img_low
dev.off()
pdf(paste0(plotloc,"residuals_low_model5.pdf"))
par(mfrow = c(4,4), mar = c(1,0,1,0))
for (i in 1:k) {
plot_cam(img_path = paste0(dataloc,"test/",files[i.img[i]]),
model = model.5,
layer_name = "conv2d_6",
titles = c(paste0("Class = ",y_num[i.img[i]]),
paste0("yhat = ",round(yhat[i.img[i]],3))),
target_class = y_num[i.img[i]])
}
dev.off()
|
6173f9bb7321284e7a471fc4a2e0f1d103cd36af
|
f32dbf645fa99d7348210951818da2275f9c3602
|
/R/MTMdisp.R
|
662401ac391c68e181897a37afabffdf487a26cc
|
[] |
no_license
|
cran/RSEIS
|
68f9b760cde47cb5dc40f52c71f302cf43c56286
|
877a512c8d450ab381de51bbb405da4507e19227
|
refs/heads/master
| 2023-08-25T02:13:28.165769
| 2023-08-19T12:32:32
| 2023-08-19T14:30:39
| 17,713,884
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
MTMdisp.R
|
# Compute (and optionally plot) a multi-taper displacement spectrum.
#
# a:    list with y (signal vector) and dt (sample interval in seconds).
# f1,f2: frequency band of interest in Hz (defaults 0.01 and 10 via missing()).
# len2: FFT length; recomputed from the signal length when missing or too short.
# PLOT: whether to draw the spectrum.
# Returns (invisibly) a list with frequencies, displacement spectrum,
# raw amplitude spectrum, and the in-band flag.
`MTMdisp` <-
function(a, f1=f1, f2=f2, len2=1024, PLOT=FALSE)
{
### calculate and plot an MTM spectrum
# a = list(y=ampv, dt=0.008)
# NOTE(review): PLOT defaults to FALSE in the signature but missing(PLOT)
# flips it to TRUE whenever the caller does not pass it -- so the effective
# default is TRUE. The signature default is misleading; confirm intent.
if(missing(PLOT)) { PLOT=TRUE }
# f1/f2 have self-referential defaults; these missing() checks supply the
# real defaults (0.01-10 Hz) before either is ever evaluated.
if(missing(f1)) { f1 = 0.01 }
if(missing(f2)) { f2 = 10 }
len = length(a$y)
# FFT length: next power of two above 2*len when unspecified or too small.
if(missing(len2))
{
len2 = 2*next2(len)
}
if(len2<len)
{
len2 = 2*next2(len)
}
# Multi-taper spectrum (5 tapers, time-bandwidth 3) via RSEIS::mtapspec.
Mspec = mtapspec(a$y,a$dt, klen=len2, MTP=list(kind=1,nwin=5, npi=3,inorm=0) )
f=Mspec$freq
amp = Mspec$spec[1:length(f)]
# sam = lowess(f,amp, f=10/length(f));
# sam$y[sam$y<=0] = amp[sam$y<=0];
# ma = cbind(amp, sam$y);
ma = amp;
flag = f>=f1 & f <= f2;
# Convert amplitude to displacement by dividing by angular frequency.
displ = ma/(2*pi*f);
if(PLOT==TRUE)
{
# matplot(f[flag],displ[flag,],type='l',log='xy',axes=FALSE, xlab="Hz")
plot(range(f[flag]),range(displ[flag]),type='n',log='xy',axes=FALSE, xlab="Hz", ylab="Disp Spec")
lines(f[flag], displ[flag], col=1, lty=1)
axis(2, las=2)
axis(1)
box()
}
invisible( list(len2=len2, f=f, f1=f1, f2=f2, displ=displ, ampsp=amp, flag=flag ) )
}
|
776769573609cea12ef77573d2c79953f64a560a
|
9cc8d14accb873157822c8790e480ba66e42cb81
|
/R/Challenge1_no_solutions.R
|
ed069790269fd310d26a470bd0e58af9d94c2173
|
[
"MIT"
] |
permissive
|
seedpcseed/R-Learning
|
bfa873fef46cc506c39936505ba294458bff7889
|
e30858c668509adfcc793c3b4b10491d1f86a4a6
|
refs/heads/master
| 2022-11-17T02:39:12.566115
| 2020-07-07T14:28:04
| 2020-07-07T14:28:04
| 268,669,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 775
|
r
|
Challenge1_no_solutions.R
|
# CHALLENGE: Plot the COVID-19 cases and deaths by
#
# You want to get public data about COVID-19
# infections and deaths but
# you want to focus on what happened in
# Midwestern states in April
# Consider what libraries you need to get the data,
# filter the data, and visualize the data
# Go for it!
# library loading
# what libraries might you use for getting the data?
# for reading and filtering the data?
# for plotting the data?
# you can find the data table here:
# https://covid19-lake.s3.us-east-2.amazonaws.com/enigma-nytimes-data-in-usa/csv/us_states/us_states.csv
# (for example, save a local copy as "Data/us_covid.csv")
# define the midwestern states
# read the data, filter, and plot
# how simple can you make the code to get it
# done?
# read in the data
# plot the data
|
4b4a19a52bf3b16e1de2efa6258d95a1145d4c94
|
da5d74f9895e2c00947a42805f2af73d209082a1
|
/ReadingInMinfiFile.R
|
e2103a5f2a61ce26d4bea94e6efeeb318aef5467
|
[
"MIT"
] |
permissive
|
xuefenfei712/sheepclock
|
b3321bdbee158f249af153d38599e75eeaf68ee2
|
b0eaec0b96afcc35f0d60982eb3d1215ea329d64
|
refs/heads/main
| 2023-06-03T08:50:48.750018
| 2021-06-24T00:49:17
| 2021-06-24T00:49:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,810
|
r
|
ReadingInMinfiFile.R
|
#read in minfi normalised beta values
# NOTE(review): hard-coded absolute user path -- not portable.
minfibetas <- read.csv("/Users/victoriasugrue/Documents/Data_Files/MASTERS_DoNotEdit/MASTER_Betas_Minfi_Normalised.csv", header=FALSE)
#transpose the dataframe and remove unneeded columns
tminfibetas <- as.data.frame(t(minfibetas[,-c(1:2)]))
#read in file for headers and transpose it, assign the headers to the minfi normalised data
headers <- read.csv("/Users/victoriasugrue/Documents/Data_Files/MASTERS_DoNotEdit/MASTER_Minfi_Headers_forR.csv", header=FALSE)
theaders <- as.list(t(headers))
names(tminfibetas) <- theaders
#this treats the data as factors, not numeric, so graphs don't work
#to fix this, first must convert the columns of interest to numeric data
#change the cg to whichever is of interest
# Factor-to-numeric conversion via levels(); rerunning after the first pass
# would produce NAs, hence "run once only".
tminfibetas$Age=as.numeric(levels(tminfibetas$Age))[tminfibetas$Age] #run once only
tminfibetas$cg21524116=as.numeric(levels(tminfibetas$cg21524116))[tminfibetas$cg21524116] #run for each cg you wish to plot
#plot:
#this is for a jittered scatterplot of Age x Probe methylation with a linear regression applied
#again, change cg
library(ggplot2)
p1 <- ggplot(tminfibetas, aes(x=Age, y=cg21524116, colour=Sex)) +
geom_jitter(width=0.1) +
geom_smooth(method=lm) +
ggtitle("MKLN1 cg21524116")
p1 + scale_x_continuous("Age") + scale_y_continuous(limits=c(0,1))
#subset into blood only, if desired
# NOTE(review): rows 1-168 assumed to be blood samples -- confirm against
# the master sheet before reusing.
blood <- tminfibetas[1:168,]
blood$Age=as.numeric(levels(blood$Age))[blood$Age]
blood$cg21524116=as.numeric(levels(blood$cg21524116))[blood$cg21524116]
p1 <- ggplot(blood, aes(x=Age, y=cg21524116, colour=BloodSex)) +
geom_jitter(width=0.1) +
geom_smooth(method=lm) +
ggtitle("MKLN1 sheep blood cg21524116")
p1 + scale_x_continuous("Age") + scale_y_continuous(limits=c(0,1))
#subset into ear only, if desired
ear <- tminfibetas[169:432,]
f634272110a7cf3bc1345c3f6bc056a77af44896
|
c7dd9f32f1b740b3f9b7f45917e0796aea6bbfd8
|
/Legacy/Keio_analysis_combined_old.R
|
3e3b7815892718c4e62977fdeb973875bae4a61c
|
[] |
no_license
|
PNorvaisas/5FU_knockouts
|
9488845fe17a7a01d52068aaddf56c006b686dd4
|
d6b65ee1d0a79129592d0c7e645d72ff14db117c
|
refs/heads/master
| 2021-01-24T00:02:56.536426
| 2018-10-02T16:34:20
| 2018-10-02T16:34:20
| 122,738,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,287
|
r
|
Keio_analysis_combined_old.R
|
library('ggplot2')
library('gplots')
library('plyr')
library('reshape2')
library(tidyr)
library(quantreg)
library(ellipse)
#Vennerable installation: install.packages("Vennerable", repos="http://R-Forge.R-project.org")
#library(quantreg)
# Compute confidence-ellipse outline points for an x/y pair of columns,
# optionally one ellipse per group (requires the `ellipse` package).
#
# df:     data frame containing the columns.
# xvar, yvar: column names (strings).
# scale:  multiplier applied to the per-axis standard deviations.
# groups: optional column name of a factor; '' means one overall ellipse.
# Returns a data frame of ellipse outline coordinates (plus a `group`
# column when `groups` is given).
#
# Fixes the grouped branch of the original, which was broken: it subset on a
# literal `df$groups` column, called cor()/sd() on the *names* xvar/yvar
# instead of the columns, and referenced undefined x/y in centre=.
elipsoid = function(df, xvar, yvar, scale = 1, groups = '') {
  # Drop rows where either coordinate is missing.
  df <- subset(df, !is.na(df[, xvar]) & !is.na(df[, yvar]))
  if (groups != '') {
    df_ell <- data.frame()
    for (g in levels(df[, groups])) {
      sub <- df[df[, groups] == g, ]
      ell <- as.data.frame(ellipse(
        cor(sub[, c(xvar, yvar)], use = 'complete.obs'),
        scale = c(sd(sub[, xvar], na.rm = TRUE) * scale,
                  sd(sub[, yvar], na.rm = TRUE) * scale),
        centre = c(mean(sub[, xvar], na.rm = TRUE),
                   mean(sub[, yvar], na.rm = TRUE))))
      df_ell <- rbind(df_ell, cbind(ell, group = g))
    }
  } else {
    df_ell <- as.data.frame(ellipse(
      cor(df[, c(xvar, yvar)], use = 'complete.obs'),
      scale = c(sd(df[, xvar], na.rm = TRUE) * scale,
                sd(df[, yvar], na.rm = TRUE) * scale),
      centre = c(mean(df[, xvar], na.rm = TRUE),
                 mean(df[, yvar], na.rm = TRUE))))
  }
  return(df_ell)
}
# Build a plotmath expression string summarising a fitted lm():
# "y = a +/- b*x, r^2 = ..., p = ..." for use with annotate(parse = TRUE).
#
# m: an lm fit with an intercept and one slope coefficient.
# Returns a character string containing the deparsed expression.
lm_eqn = function(m) {
fres<-summary(m)
l <- list(a = format(coef(m)[1], digits = 2),
b = format(abs(coef(m)[2]), digits = 2),
r2 = format(summary(m)$r.squared, digits = 3),
# Overall F-test p-value, computed from the stored F statistic.
p2 = format(pf(fres$fstatistic[1], fres$fstatistic[2], fres$fstatistic[3],lower.tail = FALSE)[[1]], digits = 3));
# The slope's sign picks the "+" or "-" form; b holds the absolute value.
if (coef(m)[2] >= 0) {
eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2*","~~italic(p)~"="~p2,l)
} else {
eq <- substitute(italic(y) == a - b %.% italic(x)*","~~italic(r)^2~"="~r2*","~~italic(p)~"="~p2,l)
}
as.character(as.expression(eq));
}
# Derive a MIC value from one row/vector of growth scores.
#
# m: a vector whose 3rd, 4th and 5th elements are the scores at successive
#    5FU concentrations (columns '1', '2.5', '5' in the callers' tables --
#    TODO confirm the mapping).
# Returns 2.5, 5 or 10 for the highest concentration with a positive score,
# or 1 when no concentration shows growth. NA scores count as "no growth".
# (Removed the original's leftover debug print(m[1][1]).)
evalmic = function(m) {
  m <- as.numeric(m)
  mic <- 1
  # Later checks override earlier ones, so the highest positive
  # concentration wins.
  if (!is.na(m[[3]]) && m[[3]] > 0) {
    mic <- 2.5
  }
  if (!is.na(m[[4]]) && m[[4]] > 0) {
    mic <- 5
  }
  if (!is.na(m[[5]]) && m[[5]] > 0) {
    mic <- 10
  }
  return(mic)
}
# Assign a MIC value to every row of a score table.
#
# all: data frame with numeric columns named '1', '2.5' and '5' (scores at
#      those 5FU concentrations). A missing column is an error, as before.
# Returns `all` with a new MIC column: 2.5/5/10 for the highest concentration
# whose score is positive (NA treated as no growth), else 1.
#
# Vectorised replacement for the original per-row loop; identical results.
# (For a zero-row input this still creates an empty MIC column, which the
# original loop did not.)
evalmic2 = function(all) {
  mic <- rep(1, nrow(all))
  v1  <- all[, '1']
  v25 <- all[, '2.5']
  v5  <- all[, '5']
  # Later assignments override earlier ones, so the highest positive
  # concentration wins -- same precedence as the original if-chain.
  mic[!is.na(v1)  & v1  > 0] <- 2.5
  mic[!is.na(v25) & v25 > 0] <- 5
  mic[!is.na(v5)  & v5  > 0] <- 10
  all$MIC <- mic
  return(all)
}
#Read Keio info table with identifiers
keioinfo<-read.table('../Keio_library/Keio_library_fully_annotated.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors=FALSE)
keioinfo$X<-NULL
# Drop plates 91/93/95 (excluded from analysis -- reason not stated here).
keioinfo<-subset(keioinfo,!Plate %in% c('91','93','95'))
#Get data that's already scored
scr1r<-read.table('Primary_screen_PN_clean_fixed.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors=FALSE)
#scr1r<-subset(scr1r, ! Gene %in% c('XXXXXXX','no bact','empty','',NA))
# Normalise column names and drop bookkeeping columns.
scr1r$Plate<-scr1r$Keio.Plate.no.
scr1r$Well<-scr1r$Position
scr1r$Keio.Plate.no.<-NULL
scr1r$Position<-NULL
scr1r$Faults<-NULL
scr1r$Details<-NULL
# Blank/placeholder plate and well codes become NA; gene-name aliases become
# 'WT', and empty/control wells become NA.
scr1r[scr1r$Plate %in% c('-','',' '),'Plate']<-NA
scr1r[scr1r$Well %in% c('-','',' '),'Well']<-NA
scr1r[scr1r$Gene %in% c('WT?','WT control', 'dodgy "WT"'),'Gene']<-'WT'
scr1r[scr1r$Gene %in% c('XXXXXXX','no bact','empty',''),'Gene']<-NA
#scr1r[scr1r$Gene=='WT',c('Keio.Plate.no.','Position')]<-NA
##Apply bandage
# Manual gene-name corrections keyed on "Plate-Well".
timfix<-read.csv('Tim_fixed_scr1.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors=FALSE)
timfix$Action<-NULL
timfix$Tim.s.comments<-NULL
timfix$Index<-apply(timfix[,c('Plate','Well')],1,paste,collapse='-')
scr1r$Index<-apply(scr1r[,c('Plate','Well')],1,paste,collapse='-')
scr1r[match(timfix$Index,scr1r$Index),'Gene']<-timfix$Gene_fix
scr1r$Index<-NULL
# Rename score columns to the 5FU concentrations they represent, then melt
# to long format (one row per gene/plate/well/concentration).
scr1<-rename(scr1r, c("X0"="0", "X1"="1", "X2.5"="2.5", "X5"="5"))
scr1m<-melt(scr1[,c('Gene','Plate','Well','0','1','2.5','5')],id=c('Gene','Plate','Well'),variable.name = 'Measure',value.name='Score')
scr1m$Measure<-as.character(scr1m$Measure)
scr1m$Score<-as.character(scr1m$Score)
#Secondary screen
scr2<-read.table('Secondary_screen_PN_new.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors = FALSE)
scr2$Starving<-as.factor(scr2$Starving)
#Fix Gene-location relationships. Well must come first!
# (Well is updated before Plate because the rows are selected by the old
# Plate value.)
scr2[scr2$Gene=='yedN','Plate']<-'31'
scr2[scr2$Gene=='yedN','Well']<-'G12'
scr2[scr2$Gene=='dcuC' & scr2$Plate=='4','Well']<-'H3'
scr2[scr2$Gene=='dcuC' & scr2$Plate=='4','Plate']<-'89'
scr2[scr2$Gene=='dcuC' & scr2$Plate=='2','Well']<-'A4'
scr2[scr2$Gene=='dcuC' & scr2$Plate=='2','Plate']<-'89'
#scr2<-scr2[,c('Gene','Plate','Well','MIC1','MIC2','MIC3')]
# Long format: one row per gene/plate/well/replicate.
scr2m<-melt(scr2[,c('Gene','Plate','Well','MIC1','MIC2','MIC3')],id=c('Gene','Plate','Well'),variable.name = 'Replicate',value.name='MIC')
scr2m$Replicate<-as.character(scr2m$Replicate)
scr2m$MIC<-as.character(scr2m$MIC)
#Supplemented screen 3
scr3fix<-read.table('3rd_screen_location_fix.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors = FALSE)
scr3Scores<-read.table('3rd_screen_Scores.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors = FALSE)
scr3Scores<-rename(scr3Scores, c("Keio.Plate.no."="Plate", "Position"="Well", "Well"="NWell"))
#Fix mistakes with gene-location missmatches
scr3Scores<-merge(scr3Scores,scr3fix[,c('Plate','Well','Right_gene')],by=c('Plate','Well'),all.x=TRUE)
scr3Scores[!is.na(scr3Scores$Right_gene),c('Gene')]<-scr3Scores[!is.na(scr3Scores$Right_gene),c('Right_gene')]
scr3Scores$Right_gene<-NULL
scr3Scores[scr3Scores$Gene=='upp cont','Plate']<-'61'
scr3Scores[scr3Scores$Gene=='upp cont','Well']<-'B1'
# Melt the 4 concentrations x 3 replicates into long format.
scr3Sm<-melt(scr3Scores[,c('Gene','Plate','Well',
"X0.1","X0.2","X0.3","X1.1","X1.2","X1.3",
"X2.5.1","X2.5.2","X2.5.3","X5.1","X5.2","X5.3")],
id=c('Gene','Plate','Well'),variable.name = 'Measure',value.name='Score')
scr3Sm$Measure<-as.character(scr3Sm$Measure)
scr3Sm$Score<-as.character(scr3Sm$Score)
# Collapse replicate suffixes into the bare concentration label.
scr3Sm[scr3Sm$Measure %in% c('X0.1','X0.2','X0.3'),'Measure']<-'0'
scr3Sm[scr3Sm$Measure %in% c('X1.1','X1.2','X1.3'),'Measure']<-'1'
scr3Sm[scr3Sm$Measure %in% c('X2.5.1','X2.5.2','X2.5.3'),'Measure']<-'2.5'
scr3Sm[scr3Sm$Measure %in% c('X5.1','X5.2','X5.3'),'Measure']<-'5'
# Rescale the 3rd-screen scoring (1/2/3) to the primary-screen scale (3/6/9).
scr3Sm[scr3Sm$Score %in% c('3'),'Score']<-'9'
scr3Sm[scr3Sm$Score %in% c('2'),'Score']<-'6'
scr3Sm[scr3Sm$Score %in% c('1'),'Score']<-'3'
#3rd screen MIC values all
scr3Mics<-read.table('3rd_screen_MICs.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors = FALSE)
scr3Mics<-rename(scr3Mics, c("Keio.Plate.no."="Plate", "Position"="Well", "Well"="NWell"))
scr3Mics<-merge(scr3Mics,scr3fix[,c('Plate','Well','Right_gene')],by=c('Plate','Well'),all.x=TRUE)
scr3Mics[!is.na(scr3Mics$Right_gene),c('Gene')]<-scr3Mics[!is.na(scr3Mics$Right_gene),c('Right_gene')]
scr3Mics$Right_gene<-NULL
#Fix mistakes with gene-location missmatches
scr3Micsm<-melt(scr3Mics[,c('Gene','Plate','Well','MIC.1','MIC.2','MIC.3')],id=c('Gene','Plate','Well'),variable.name = 'Replicate',value.name='MIC')
scr3Micsm$Replicate<-as.character(scr3Micsm$Replicate)
scr3Micsm$MIC<-as.character(scr3Micsm$MIC)
#Part not really used
# scr3<-subset(scr3Micsm,!(is.na(as.numeric(scr3Micsm$MIC))))
# scr3[scr3$Gene=='WT cont','Gene']<-'WT'
# scr3[scr3$Gene=='upp cont','Gene']<-'upp'
# scr3$MIC<-as.numeric(scr3$MIC)
# scr3avg<-ddply(scr3, .(Gene,Plate,Well), summarise, MIC_avg=mean(MIC,na.rm = TRUE),MIC_sd=sd(MIC,na.rm = TRUE))
# Gene inventories per screen (used interactively to compare coverage).
sc1g<-unique(scr1$Gene)
sc2g<-unique(scr2$Gene)
sc3Sg<-unique(scr3Scores$Gene)
sc3Mg<-unique(scr3Mics$Gene)
# Stack the long-format score and MIC tables across screens.
allscores<-merge(scr1m,scr3Sm,all.x=TRUE,all.y=TRUE)
allmics<-merge(scr2m,scr3Micsm,all.x=TRUE,all.y=TRUE)
allscores<-subset(allscores,! is.na(Gene) & Gene!='' & Score!='')
allmics<-subset(allmics,! is.na(Gene) & Gene!='' & MIC!='')
# Normalise control labels and blank out WT locations (WT has no Keio well).
allscores[allscores$Gene=='WT cont','Gene']<-'WT'
allscores[allscores$Gene=='upp cont','Gene']<-'upp'
allmics[allmics$Gene=='WT cont','Gene']<-'WT'
allmics[allmics$Gene=='upp cont','Gene']<-'upp'
allscores[allscores$Gene=='WT','Plate']<-''
allscores[allscores$Gene=='WT','Well']<-''
allmics[allmics$Gene=='WT','Plate']<-''
allmics[allmics$Gene=='WT','Well']<-''
#Real duplicates!
rdupl<-as.factor(unique(keioinfo[which(duplicated(keioinfo$Gene)),]$Gene))
# For non-duplicated genes, take Plate/Well from the Keio annotation when
# available (overrides the screen's recorded location).
allmics<-merge(allmics,subset(keioinfo,!Gene %in% rdupl)[,c('Gene','Plate','Well')],by='Gene',all.x=TRUE)
allmics[!is.na(allmics$Plate.y) &!is.na(allmics$Well.y),c('Plate.x','Well.x')]<-allmics[!is.na(allmics$Plate.y) &!is.na(allmics$Well.y),c('Plate.y','Well.y')]
allmics<-rename(allmics,c('Plate.x'='Plate','Well.x'='Well'))
allmics$Plate.y<-NULL
allmics$Well.y<-NULL
#Real duplicates in mics
rduplm<-c('dcuC','yedN','yhcE')
# Keep only rows whose Score/MIC parses as a number.
scores<-subset(allscores,!(is.na(as.numeric(allscores$Score))))
scores$Score<-as.numeric(scores$Score)
micss<-subset(allmics,!(is.na(as.numeric(allmics$MIC))))
micss$MIC<-as.numeric(micss$MIC)
#Calculate averages for Scores and MICs over replicates
scores_avg<-ddply(scores, .(Gene,Plate,Well,Measure), summarise, Score_avg=mean(Score,na.rm = TRUE),Score_sd=sd(Score,na.rm = TRUE))
#Averaging by gene names, until duplicates are sorted out
mics_avg<-ddply(micss, .(Gene,Plate,Well), summarise, MIC_avg=mean(MIC,na.rm = TRUE),MIC_sd=sd(MIC,na.rm = TRUE))
#duplm should be dcuC and yhcE
duplm<-as.factor(unique(mics_avg[which(duplicated(mics_avg$Gene)),]$Gene))
# Wide table: one row per gene/plate/well, columns '0','1','2.5','5'.
alls<-dcast(scores_avg,Gene+Plate+Well ~Measure,mean,value.var = c('Score_avg'))
#Evaluate MICS
alls$MIC<-evalmic2(alls)$MIC
#Merging by gene names, until duplicates are sorted out ,'Plate','Well'
allfull<-merge(alls,mics_avg,by=c('Gene','Plate','Well'),all.x=TRUE,all.y=TRUE)
# Measured MIC (where available) overrides the score-derived MIC.
allfull$MIC<-ifelse(!is.na(allfull$MIC_avg),allfull$MIC_avg,allfull$MIC)
allfull$MIC_avg<-NULL
#Manual fixes
allfull[allfull$Gene=='WT','MIC']<-1
allfull[allfull$Gene=='WT','MIC_sd']<-0
allfull[allfull$Gene=='upp','MIC']<-15
allfull[allfull$Gene=='upp','MIC_sd']<-0
#Find duplicates
dupl<-as.factor(unique(allfull[which(duplicated(allfull$Gene)),]$Gene))
#
# #Find duplication problems
#Check for duplicates
# dad<-table(allfull$Gene)
# ind<-table(keioinfo$Gene)
# match<-merge(data.frame(dad),data.frame(ind),by='Var1')
# match<-rename(match, c("Var1"="Gene", "Freq.x"="Data_freq", "Freq.y"="Info_freq"))
# strange<-match[match$Data_freq!=match$Info_freq & match$Gene!='WT',]$Gene
#
# problems<-merge(subset(allfull,Gene %in% strange),
# subset(keioinfo,Gene %in% strange),
# by=c('Gene'),all.x=TRUE,all.y=TRUE)
#
# ps<-subset(problems,Gene %in% strange)
# ps<-merge(ps,match,by='Gene',all.x=TRUE)
# ps<-merge(ps,keioinfo[,c('Gene','Plate','Well')],by.x=c('Plate.x','Well.x'),by.y=c('Plate','Well'),all.x=TRUE)
# write.csv(ps,'Data/Mismatch_check.csv')
# #All missmatches between data and Keio info must come from plates 91,93,95
# Attach the Keio annotation to the combined score/MIC table.
allinfr<-merge(allfull,keioinfo,by=c('Gene','Plate','Well'),all.x=TRUE)
allinf<-allinfr[,! colnames(allinfr) %in% c('Gene.y','Row','Column','Comment')]
#allinf<-rename(allinfr, c("Gene.x"="Gene"))
#Merge with Keio reference growth
keio<-read.table('Keio_growth.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors=FALSE)
#keio<-keio[,colnames(keio) %in% c('Gene','LB_22hr','MOPS_24hr','MOPS_48hr')]
keio[is.na(keio)]<-NA
#keio<-subset(keio,!LB_22hr=='N.A.' & !MOPS_24hr=='N.A.' & !MOPS_48hr=='N.A.')
# Coerce growth columns to numeric; 'N.A.' strings become NA with a warning.
keio$LB_22hr<-as.numeric(as.character(keio$LB_22hr))
keio$MOPS_24hr<-as.numeric(as.character(keio$MOPS_24hr))
keio$MOPS_48hr<-as.numeric(as.character(keio$MOPS_48hr))
kdupl<-as.factor(unique(keio[which(duplicated(keio$Gene)),]$Gene))
kduplicates<-subset(keio,Gene %in% kdupl & Gene!='none')
#Merge with Keio library data
# Primary join on JW id + ECK number + gene; rows that failed to match get a
# fallback gene-name lookup for the growth columns.
mics<-merge(allinf,keio,by.x=c('JW_id','ECK','Gene'),by.y=c('JW.id','ECK.number','Gene'),all.x=TRUE)
mics[is.na(mics$LB_22hr),c('LB_22hr','MOPS_24hr','MOPS_48hr')]<-keio[match(subset(mics,is.na(LB_22hr))$Gene,keio$Gene),c('LB_22hr','MOPS_24hr','MOPS_48hr')]
#Bacterial growth
#NGM media start OD=0.057392708
bac<-read.table('Bacteria.csv',sep=',',quote = '"',header = TRUE,stringsAsFactors=FALSE)
bac$Lookup<-NULL
bac$Row<-NULL
bac$Col<-NULL
bac[bac$Gene=='BW',]$Gene<-'WT'
bacm<-melt(bac,id=colnames(bac)[1:4],variable.name = 'Replicate',value.name='OD')
# Subtract the blank/starting OD of the NGM medium.
bacm$OD<-bacm$OD-0.057392708
bacavg<-ddply(bacm, .(Gene,Drug), summarise, NGM=mean(OD,na.rm=TRUE),NGM_sd=sd(OD,na.rm=TRUE)) #Plate,Well,Drug,
bacavg<-subset(bacavg,! Gene %in% c('XXXXXXX','no bacteria','empty'))
# Pair control (Drug==0) and treated (Drug==100) rows per gene:
# columns get _C (control) and _D (drug) suffixes.
bacall<-merge(subset(bacavg,Drug==0),subset(bacavg,Drug==100),by=c('Gene'),suffixes = c("_C","_D"))
bacall$Drug_C<-NULL
bacall$Drug_D<-NULL
bacall<-subset(bacall,! Gene %in% c('XXXXXXX','no bacteria','empty'))
bacmic<-merge(mics,bacall,id=c('Gene'),all.x = TRUE)
#Data fully merged!!
#Get only unique instances of knockouts
#How to deal with duplicated entries
#ubacmic<-bacmic[!rev(duplicated(rev(bacmic$Gene))),]
#ubacmic<-ubacmic[,colnames(ubacmic)[c(1:3,6:20)]]
#subset(ubacmic,!is.na(Gene))
#Output folder:
odir<-'Figures_v2'
ddir<-'Data_v2'
#qbacmicq - no outliers
qbacmicq<-subset(bacmic,! Gene %in% c('glnA','aceE','atpB','atpG','atpE','atpF','lpd'))
# NOTE(review): BUG -- the next two lines are incomplete assignments. The R
# parser continues across them (comments are skipped), so the first quantile()
# call below is parsed as `bacmic <- qbacmic <- bcq05 <- quantile(...)`,
# clobbering bacmic with a scalar; the following `bacmic$NGM_C` then errors.
# Probably leftover editing (e.g. `bacmic <- qbacmicq` was intended). Confirm
# and either complete or delete these two lines.
bacmic<-
qbacmic<-
#
# 5th/95th percentiles of control OD, drug OD and LB growth.
bcq05<-quantile(bacmic$NGM_C,0.05,na.rm=TRUE)[[1]]
bcq95<-quantile(bacmic$NGM_C,0.95,na.rm=TRUE)[[1]]
bdq05<-quantile(bacmic$NGM_D,0.05,na.rm=TRUE)[[1]]
bdq95<-quantile(bacmic$NGM_D,0.95,na.rm=TRUE)[[1]]
blq05<-quantile(bacmic$LB_22hr,0.05,na.rm=TRUE)[[1]]
blq95<-quantile(bacmic$LB_22hr,0.95,na.rm=TRUE)[[1]]
# OLS fit of treated vs control growth.
fitbac<-lm(NGM_D ~ NGM_C,data=bacmic)
#confint(fitbac,'(Intercept)',level=0.95)[[2]]
#coefficients(fitbac)[[2]]
# Quantile regression (5% and 95% lines) to flag outlier strains.
fitqr<-rq(NGM_D ~ NGM_C,data=bacmic,tau=c(0.05,0.95))
bgli<-coefficients(fitqr)[1,][[1]]
bgui<-coefficients(fitqr)[1,][[2]]
bgls<-coefficients(fitqr)[2,][[1]]
bgus<-coefficients(fitqr)[2,][[2]]
# Strains above the 95% line (resistant) / below the 5% line (sensitive).
bacres<-subset(bacmic,NGM_D>NGM_C*bgus+bgui)
bacsens<-subset(bacmic,NGM_D<NGM_C*bgls+bgli)
theme_set(theme_light())
baccor<-ggplot(bacmic,aes(x=NGM_C,y=NGM_D,color=MIC))+
geom_point(size=1)+ylim(0, .25)+
ylab(expression(paste('Knockout strain growth OD - 100',mu,'M 5FU')))+
xlab('Knockout strain growth OD - Control')+
ggtitle(expression(paste('Growth of knockout strains in control and 100',mu,'M 5FU treatment')))+
stat_smooth(aes(group = 1),method = "lm")+
geom_abline(intercept=0,slope=1,alpha=0.5,aes(color='grey'),linetype='longdash')+
geom_text(aes(label=ifelse(NGM_D>NGM_C*bgus+bgui | NGM_D < NGM_C*bgls+bgli | NGM_C<0.03 ,Gene,'')),
hjust=-0.1, vjust=-0.1,size=3)+
geom_errorbarh(aes(xmax=NGM_C+NGM_sd_C,xmin=NGM_C-NGM_sd_C),height=.001,alpha=0.2)+
geom_errorbar(aes(ymax=NGM_D+NGM_sd_D,ymin=NGM_D-NGM_sd_D),width=0.001,alpha=0.2)+
geom_abline(intercept=bgli,slope=bgls,alpha=0.5,color='red')+
geom_abline(intercept=bgui,slope=bgus,alpha=0.5,color='red')+
annotate("text", 0.25,0.25*bgls+bgli+0.005, label = "5%",color='red')+
annotate("text", 0.25,0.25*bgus+bgui+0.005, label = "95%",color='red')+
scale_x_continuous(breaks=seq(0,.3,by=.05))+
labs(color=expression(paste('MIC [5FU], ',mu,'M')))+
annotate('text',x = 0.125, y = 0.25, label = lm_eqn(fitbac), parse = TRUE)
baccor
dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/Control-Treatment_NGM_growth.pdf",sep = ''),width=9,height=9)
bacmed<-melt(bacmic[,colnames(bacmic) %in% c('Gene','NGM_C','NGM_D','LB_22hr','MOPS_24hr','MOPS_48hr')],
id=c('Gene'),variable.name = 'Media',value.name='OD')
bacmed$Media<-factor(bacmed$Media,levels = c('NGM_C','NGM_D','LB_22hr','MOPS_24hr','MOPS_48hr'),
labels=c('NGM - 24h','NGM + 100uM 5FU','LB - 22hr','MOPS - 24hr','MOPS - 48hr'))
bacmed<-subset(bacmed,!Media %in% c('MOPS - 48hr')) #, 'MOPS - 48hr'
bachist<-ggplot(bacmed,aes(x=OD,fill=Media))+
geom_histogram(aes(y=0.01*..density..),position='identity',alpha=0.5,binwidth = 0.01)+
labs(fill='Media')+xlab('OD')+ylab('')+
scale_y_continuous(limits=c(0,0.10), labels = scales::percent)+
ggtitle('Distribution of strain growth')
bachist
dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/Bac_growth_disribution.pdf",sep=''),width=9,height=9)
## --- MIC vs growth: treated NGM (mbcD), control NGM (mbcC), LB (mbcLB) -----
# 5th/95th percentiles on the outlier-cleaned data set
bdqq05<-quantile(qbacmicq$NGM_D,0.05,na.rm=TRUE)[[1]]
bdqq95<-quantile(qbacmicq$NGM_D,0.95,na.rm=TRUE)[[1]]
bcqq05<-quantile(qbacmicq$NGM_C,0.05,na.rm=TRUE)[[1]]
bcqq95<-quantile(qbacmicq$NGM_C,0.95,na.rm=TRUE)[[1]]
blqq05<-quantile(qbacmicq$LB_22hr,0.05,na.rm=TRUE)[[1]]
blqq95<-quantile(qbacmicq$LB_22hr,0.95,na.rm=TRUE)[[1]]
# Treated NGM growth vs MIC, with 5%/95% quantile-regression bands
fitD<-lm(NGM_D ~ MIC,qbacmicq)
fitNDqr<-rq(NGM_D ~ MIC,data=qbacmicq,tau=c(0.05,0.95))
# rq coefficients: rows = (intercept, slope), columns = tau (0.05, 0.95)
mndli<-coefficients(fitNDqr)[1,][[1]]
mndui<-coefficients(fitNDqr)[1,][[2]]
mndls<-coefficients(fitNDqr)[2,][[1]]
mndus<-coefficients(fitNDqr)[2,][[2]]
# elipsoid() is a helper defined elsewhere in this script; its output is only
# used by the commented-out geom_path below
df_el<-elipsoid(subset(qbacmicq,!is.na(MIC) & ! is.na(NGM_D)),'MIC','NGM_D')
mbcD<-ggplot(qbacmicq,aes(x=MIC,y=NGM_D))+geom_point(size=1)+
stat_smooth(aes(group = 1),method = "lm")+
geom_errorbarh(aes(xmax=MIC+MIC_sd,xmin=MIC-MIC_sd),height=.0005,alpha=0.2,color='black')+
geom_errorbar(aes(ymax=NGM_D+NGM_sd_D,ymin=NGM_D-NGM_sd_D),width=0.0005,alpha=0.2,color='black')+
geom_text(aes(label=ifelse((NGM_D>MIC*mndus+mndui | NGM_D < MIC*mndls+mndli) | MIC>50 ,Gene,'')),
hjust=-0.1, vjust=-0.1,size=2)+
geom_abline(intercept=mndli,slope=mndls,alpha=0.5,color='red')+
geom_abline(intercept=mndui,slope=mndus,alpha=0.5,color='red')+
annotate("text", 100, mndls*100+mndli+0.005, label = "5%",color='red')+
annotate("text", 100, mndus*100+mndui+0.005, label = "95%",color='red')+
ggtitle(expression(paste('Strain growth in NGM 24hr OD - 100',mu,'M 5FU')))+xlab(expression(paste('MIC [5FU], ',mu,'M')))+
ylab('OD')+xlim(0,100)+ylim(0,0.25)+
annotate('text',x = 50, y = 0.25, label = lm_eqn(fitD), parse = TRUE)#+
#geom_path(data=df_el, aes(x=MIC, y=NGM_D), size=1, linetype=1,color='grey',alpha=0.5)
#stat_density2d()
mbcD
#dev.copy2pdf(device=cairo_pdf,file=paste(odir,'/MIC-NGMTreatment_bac_growth_Starving-color.pdf',sep=''),width=9,height=9)
#dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/MIC-NGMTreatment_bac_growth.pdf",sep=''),width=9,height=9)
dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/MIC-NGMTreatment_bac_growth_NoLabels.pdf",sep=''),width=5,height=5)
# Control NGM growth vs MIC, same construction
fitC<-lm(NGM_C ~ MIC,qbacmicq)
fitNCqr<-rq(NGM_C ~ MIC,data=qbacmicq,tau=c(0.05,0.95))
mncli<-coefficients(fitNCqr)[1,][[1]]
mncui<-coefficients(fitNCqr)[1,][[2]]
mncls<-coefficients(fitNCqr)[2,][[1]]
mncus<-coefficients(fitNCqr)[2,][[2]]
mbcC<-ggplot(qbacmicq,aes(x=MIC,y=NGM_C))+geom_point(size=1)+stat_smooth(aes(group = 1),method = "lm")+
xlim(0,100)+ylim(0,0.3)+
geom_errorbarh(aes(xmax=MIC+MIC_sd,xmin=MIC-MIC_sd),height=.001,alpha=0.2,color='black')+
geom_errorbar(aes(ymax=NGM_C+NGM_sd_C,ymin=NGM_C-NGM_sd_C),width=0.001,alpha=0.2,color='black')+
geom_text(aes(label=ifelse((NGM_C>MIC*mncus+mncui | NGM_C < MIC*mncls+mncli) | MIC>45,Gene,'')),
hjust=-0.1, vjust=-0.1,size=2)+
geom_abline(intercept=mncli,slope=mncls,alpha=0.5,color='red')+
geom_abline(intercept=mncui,slope=mncus,alpha=0.5,color='red')+
annotate("text", 100, mncls*100+mncli+0.005, label = "5%",color='red')+
annotate("text", 100, mncus*100+mncui+0.005, label = "95%",color='red')+
ggtitle('Strain growth in NGM 24hr OD - Control')+xlab(expression(paste('MIC [5FU], ',mu,'M')))+ylab('OD')+
annotate('text',x = 50, y = 0.3, label = lm_eqn(fitC), parse = TRUE)
mbcC
#dev.copy2pdf(device=cairo_pdf,file="Figures/MIC-NGMControl_bac_growth_Starving-color.pdf",width=9,height=9)
#dev.copy2pdf(device=cairo_pdf,file="Figures/MIC-NGMControl_bac_growth.pdf",width=9,height=9)
dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/MIC-NGMControl_bac_growth_NoLabels.pdf",sep=''),width=6,height=6)
# LB growth vs MIC, same construction (no vertical error bars: no LB SD column)
fitLB <- lm(LB_22hr ~ MIC, data=qbacmicq)
fitLBqr<-rq(LB_22hr ~ MIC,data=qbacmicq,tau=c(0.05,0.95))
mlbli<-coefficients(fitLBqr)[1,][[1]]
mlbui<-coefficients(fitLBqr)[1,][[2]]
mlbls<-coefficients(fitLBqr)[2,][[1]]
mlbus<-coefficients(fitLBqr)[2,][[2]]
mbcLB<-ggplot(qbacmicq,aes(x=MIC,y=LB_22hr))+geom_point(size=1)+
stat_smooth(aes(group = 1),method = "lm")+xlim(0,100)+
geom_errorbarh(aes(xmax=MIC+MIC_sd,xmin=MIC-MIC_sd),height=.001,alpha=0.2,color='black')+
geom_text(aes(label=ifelse(((LB_22hr>MIC*mlbus+mlbui | LB_22hr < MIC*mlbls+mlbli) & MIC >1) | MIC>40,Gene,'')),
hjust=-0.1, vjust=-0.1,size=3)+
geom_abline(intercept=mlbli,slope=mlbls,alpha=0.5,color='red')+
geom_abline(intercept=mlbui,slope=mlbus,alpha=0.5,color='red')+
annotate("text", 100, mlbls*100+mlbli+0.02, label = "5%",color='red')+
annotate("text", 100, mlbus*100+mlbui+0.02, label = "95%",color='red')+
ggtitle('Strain growth in LB 22hr OD - Control')+xlab(expression(paste('MIC [5FU], ',mu,'M')))+ylab('OD')+
annotate('text',x = 50, y = 1.1, label = lm_eqn(fitLB), parse = TRUE)
mbcLB
dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/MIC-LB22hr_bac_growth_NoLabels.pdf",sep=''),width=9,height=9)
#All MICs
## Dot-plot of every knockout ordered by MIC, with SD error bars
alldist<-ggplot(qbacmicq,aes(x=MIC,y=reorder(Gene,MIC,max)))+
geom_point(color='red',size=1) + geom_errorbarh(aes(xmax=MIC+MIC_sd,xmin=MIC-MIC_sd))+
theme(axis.text.y = element_text(vjust = 0,size=4))+
scale_x_continuous(breaks=seq(0,100,by=10))+
ylab('Gene knockout')+
xlab(expression(paste('MIC [5FU], ',mu,'M')))+
ggtitle('Protective properties of gene knockouts for C. elegans in 5FU exposure')
alldist
dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/MIC_variation_SD_all.pdf",sep=''),width=8,height=150)
#MICs over 1
## Same plot restricted to knockouts with MIC > 1
scr2dist<-ggplot(subset(qbacmicq,MIC>1),aes(x=MIC,y=reorder(Gene,MIC,max)))+
geom_point(color='red',size=1) + geom_errorbarh(aes(xmax=MIC+MIC_sd,xmin=MIC-MIC_sd))+
theme(axis.text.y = element_text(vjust = 0,size=4))+
scale_x_continuous(breaks=seq(0,100,by=10))+
ylab('Gene knockout')+
xlab(expression(paste('MIC [5FU], ',mu,'M')))+
ggtitle('Protective properties of gene knockouts for C. elegans in 5FU exposure')
scr2dist
dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/MIC_variation_SD_MIC-over-1.pdf",sep=''),width=8,height=30)
######
## Empirical CDF of MIC with 90/95/99th percentile guide lines
q90<-quantile(qbacmicq$MIC,0.9,na.rm=TRUE)[[1]]
q95<-quantile(qbacmicq$MIC,0.95,na.rm=TRUE)[[1]]
q99<-quantile(qbacmicq$MIC,0.99,na.rm=TRUE)[[1]]
dist<-ggplot(qbacmicq,aes(x=MIC))+stat_ecdf()+ggtitle('Cumulative distribution of MIC values')+
geom_hline(yintercept=0.90,color='green',alpha=0.5,linetype='longdash')+
geom_vline(xintercept=q90,color='green',alpha=0.5,linetype='longdash')+
geom_hline(yintercept=0.95,color='blue',alpha=0.5,linetype='longdash')+
geom_vline(xintercept=q95,color='blue',alpha=0.5,linetype='longdash')+
geom_hline(yintercept=0.99,color='red',alpha=0.5,linetype='longdash')+
geom_vline(xintercept=q99,color='red',alpha=0.5,linetype='longdash')+
annotate("text", 1, 0.92, label = "90%",color='green')+
annotate("text", 1, 0.97, label = "95%",color='blue')+
annotate("text", 1, 1, label = "99%",color='red')+
scale_y_continuous(limits=c(0,1), labels = scales::percent,breaks=seq(0,1,by=0.1))+
scale_x_log10(breaks=c(0,2.5,5,10,20,30,40,50,75,100))+
ylab('')+xlab(expression(paste('MIC [5FU], ',mu,'M (log10 scaled)')))+theme(axis.text.x = element_text(angle = 90, hjust = 1))
dist
dev.copy2pdf(device=cairo_pdf,file=paste(odir,"/Cumulative_distribution_of_MIC_log10-x-scale.pdf",sep=''),width=9,height=9)
#dist<-dist+scale_x_continuous(breaks=seq(0,100,by=5))
## Export merged tables: all strains and the outlier-cleaned set
write.csv(bacmic,paste(ddir,'/MICs_and_bacterial_growth-All.csv',sep=''))
write.csv(qbacmicq,paste(ddir,'/MICs_and_bacterial_growth-Clean.csv',sep=''))
#
# #PT lists
# PT_clean<-subset(qbacmicq,MIC>2.5 & !is.na(Gene))[,c('Gene','MIC')]
# PT_all<-subset(qbacmicq, !is.na(Gene) & !is.na(MIC) )[,c('Gene','MIC')]
# write.table(PT_clean,file='Data/Gene_MIC_above2.5_ForPT.tab',sep='\t',row.names = FALSE,quote=FALSE)
# write.table(PT_all,file='Data_Gene_MIC_all_ForPT.tab',sep='\t',row.names = FALSE,quote=FALSE)
|
8342f3affff711157928c249549215b77ef136e4
|
5713e52ef679c619afaf05f1e1ba6d46ece336ec
|
/plot1.R
|
75a13d75a402cae3a6da407fefd2c53f597cd26a
|
[] |
no_license
|
threeboys/ExData_Plotting1
|
731932d7c14b046269b8cda135a4c6525770fa14
|
bab24ab5f3d0c92bec21703dc2cdf66e819f5af3
|
refs/heads/master
| 2021-01-15T20:23:15.250350
| 2014-09-07T19:20:09
| 2014-09-07T19:20:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 671
|
r
|
plot1.R
|
########################################
# plot1.R
# - Draws plot 1: histogram of Global Active Power
# - Writes ./plotImages/plot1.png at 480 x 480 pixels
########################################

# Force English locale so axis/date labels are not localized
Sys.setlocale("LC_TIME", "C")

source ("./readTidyData.R")
householdData <- readTidyData()

# Make sure the output directory exists before opening the device
if (!file.exists("plotImages")) {
  dir.create("plotImages")
}

png (filename = "./plotImages/plot1.png", width = 480, height = 480, units = "px")
hist (householdData$Global_active_power,
      main = "Global Active Power",
      xlab = "Global Active Power (kilowatts)",
      col = "red")
dev.off()
########### End of Document ############
|
006a6da5358eb0ca555424b1b2c71fb1149ff51e
|
63d97198709f3368d1c6d36739442efa699fe61d
|
/advanced algorithm/round3/k-server-analysis-master/data/tests/case054.rd
|
da217f5eed32f467a724ad08d2602ca8778b50bc
|
[] |
no_license
|
tawlas/master_2_school_projects
|
f6138d5ade91e924454b93dd8f4902ca5db6fd3c
|
03ce4847155432053d7883f3b5c2debe9fbe1f5f
|
refs/heads/master
| 2023-04-16T15:25:09.640859
| 2021-04-21T03:11:04
| 2021-04-21T03:11:04
| 360,009,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,443
|
rd
|
case054.rd
|
20
1 [18, 9, 17] 8 8 8 10 10
2 [18, 10, 17] 1 2 10 2 12
3 [18, 9, 17] 1 2 12 2 14
4 [18, 10, 17] 1 2 14 2 16
5 [18, 9, 17] 1 2 16 2 18
6 [18, 10, 17] 1 2 18 2 20
7 [18, 9, 17] 1 2 20 2 22
8 [18, 9, 10] 7 1 21 2 24
9 [3, 9, 10] 5 5 26 10 34
10 [4, 9, 10] 1 2 28 2 36
11 [3, 9, 10] 1 2 30 2 38
12 [4, 9, 10] 1 2 32 2 40
13 [3, 9, 10] 1 2 34 2 42
14 [4, 9, 10] 1 2 36 2 44
15 [3, 9, 10] 1 2 38 2 46
16 [4, 9, 10] 1 2 40 2 48
17 [3, 9, 10] 1 2 42 2 50
18 [4, 9, 10] 1 2 44 2 52
19 [3, 9, 10] 1 2 46 2 54
20 [3, 4, 10] 5 0 46 0 54
21 [3, 4, 16] 6 12 58 12 66
22 [3, 4, 17] 1 2 60 2 68
23 [3, 4, 16] 1 2 62 2 70
24 [3, 4, 17] 1 2 64 2 72
25 [3, 4, 16] 1 2 66 2 74
26 [3, 4, 17] 1 2 68 2 76
27 [3, 4, 16] 1 2 70 2 78
28 [3, 4, 17] 1 2 72 2 80
29 [3, 4, 16] 1 2 74 2 82
30 [3, 4, 17] 1 2 76 2 84
31 [3, 4, 16] 1 2 78 2 86
32 [17, 4, 16] 6 2 80 2 88
33 [17, 10, 16] 6 8 88 8 96
34 [17, 11, 16] 1 2 90 2 98
35 [17, 10, 16] 1 2 92 2 100
36 [17, 11, 16] 1 2 94 2 102
37 [17, 10, 16] 1 2 96 2 104
38 [17, 11, 16] 1 2 98 2 106
39 [17, 10, 16] 1 2 100 2 108
40 [17, 11, 16] 1 2 102 2 110
41 [17, 10, 16] 1 2 104 2 112
42 [17, 11, 16] 1 2 106 2 114
43 [17, 10, 16] 1 2 108 2 116
44 [17, 10, 11] 5 0 108 0 116
45 [3, 10, 11] 6 12 120 12 128
46 [4, 10, 11] 1 2 122 2 130
47 [3, 10, 11] 1 2 124 2 132
48 [4, 10, 11] 1 2 126 2 134
49 [3, 10, 11] 1 2 128 2 136
50 [4, 10, 11] 1 2 130 2 138
51 [3, 10, 11] 1 2 132 2 140
52 [4, 10, 11] 1 2 134 2 142
53 [3, 10, 11] 1 2 136 2 144
54 [4, 10, 11] 1 2 138 2 146
55 [3, 10, 11] 1 2 140 2 148
56 [4, 10, 11] 1 2 142 2 150
57 [3, 10, 11] 1 2 144 2 152
58 [3, 4, 11] 6 0 144 0 152
59 [3, 4, 17] 6 8 152 8 160
60 [3, 4, 18] 1 2 154 2 162
61 [3, 4, 17] 1 2 156 2 164
62 [3, 4, 18] 1 2 158 2 166
63 [3, 4, 17] 1 2 160 2 168
64 [3, 4, 18] 1 2 162 2 170
65 [3, 4, 17] 1 2 164 2 172
66 [3, 4, 18] 1 2 166 2 174
67 [3, 4, 17] 1 2 168 2 176
68 [18, 4, 17] 5 2 170 2 178
69 [18, 10, 17] 6 12 182 12 190
70 [18, 11, 17] 1 2 184 2 192
71 [18, 10, 17] 1 2 186 2 194
72 [18, 11, 17] 1 2 188 2 196
73 [18, 10, 17] 1 2 190 2 198
74 [18, 11, 17] 1 2 192 2 200
75 [18, 10, 17] 1 2 194 2 202
76 [18, 11, 17] 1 2 196 2 204
77 [18, 10, 17] 1 2 198 2 206
78 [18, 11, 17] 1 2 200 2 208
79 [18, 10, 17] 1 2 202 2 210
80 [18, 11, 17] 1 2 204 2 212
81 [18, 10, 17] 1 2 206 2 214
82 [18, 10, 11] 6 0 206 0 214
83 [4, 10, 11] 6 8 214 8 222
84 [5, 10, 11] 1 2 216 2 224
85 [4, 10, 11] 1 2 218 2 226
86 [5, 10, 11] 1 2 220 2 228
87 [4, 10, 11] 1 2 222 2 230
88 [5, 10, 11] 1 2 224 2 232
89 [4, 10, 11] 1 2 226 2 234
90 [5, 10, 11] 1 2 228 2 236
91 [4, 10, 11] 1 2 230 2 238
92 [5, 10, 11] 1 2 232 2 240
93 [4, 10, 11] 1 2 234 2 242
94 [4, 5, 11] 5 0 234 0 242
95 [4, 5, 17] 6 12 246 12 254
96 [4, 5, 18] 1 2 248 2 256
97 [4, 5, 17] 1 2 250 2 258
98 [4, 5, 18] 1 2 252 2 260
99 [4, 5, 17] 1 2 254 2 262
100 [4, 5, 18] 1 2 256 2 264
101 [4, 5, 17] 1 2 258 2 266
102 [4, 5, 18] 1 2 260 2 268
103 [4, 5, 17] 1 2 262 2 270
104 [4, 5, 18] 1 2 264 2 272
105 [4, 5, 17] 1 2 266 2 274
106 [18, 5, 17] 6 2 268 2 276
107 [18, 11, 17] 6 8 276 8 284
108 [18, 12, 17] 1 2 278 2 286
109 [18, 11, 17] 1 2 280 2 288
110 [18, 12, 17] 1 2 282 2 290
111 [18, 11, 17] 1 2 284 2 292
112 [18, 12, 17] 1 2 286 2 294
113 [18, 11, 17] 1 2 288 2 296
114 [18, 12, 17] 1 2 290 2 298
115 [18, 11, 17] 1 2 292 2 300
116 [18, 12, 17] 1 2 294 2 302
117 [18, 11, 17] 1 2 296 2 304
118 [18, 11, 12] 5 0 296 0 304
119 [4, 11, 12] 6 12 308 12 316
120 [5, 11, 12] 1 2 310 2 318
121 [4, 11, 12] 1 2 312 2 320
122 [5, 11, 12] 1 2 314 2 322
123 [4, 11, 12] 1 2 316 2 324
124 [5, 11, 12] 1 2 318 2 326
125 [4, 11, 12] 1 2 320 2 328
126 [5, 11, 12] 1 2 322 2 330
127 [4, 11, 12] 1 2 324 2 332
128 [5, 11, 12] 1 2 326 2 334
129 [4, 11, 12] 1 2 328 2 336
130 [5, 11, 12] 1 2 330 2 338
131 [4, 11, 12] 1 2 332 2 340
132 [4, 5, 12] 6 0 332 0 340
133 [4, 5, 18] 6 8 340 8 348
134 [4, 5, 19] 1 2 342 2 350
135 [4, 5, 18] 1 2 344 2 352
136 [4, 5, 19] 1 2 346 2 354
137 [4, 5, 18] 1 2 348 2 356
138 [4, 5, 19] 1 2 350 2 358
139 [4, 5, 18] 1 2 352 2 360
140 [4, 5, 19] 1 2 354 2 362
141 [4, 5, 18] 1 2 356 2 364
142 [19, 5, 18] 5 2 358 2 366
143 [19, 11, 18] 6 12 370 12 378
144 [19, 12, 18] 1 2 372 2 380
145 [19, 11, 18] 1 2 374 2 382
146 [19, 12, 18] 1 2 376 2 384
147 [19, 11, 18] 1 2 378 2 386
148 [19, 12, 18] 1 2 380 2 388
149 [19, 11, 18] 1 2 382 2 390
150 [19, 12, 18] 1 2 384 2 392
151 [19, 11, 18] 1 2 386 2 394
152 [19, 12, 18] 1 2 388 2 396
153 [19, 11, 18] 1 2 390 2 398
154 [19, 12, 18] 1 2 392 2 400
155 [19, 11, 18] 1 2 394 2 402
156 [19, 11, 12] 6 0 394 0 402
157 [5, 11, 12] 6 8 402 8 410
158 [6, 11, 12] 1 2 404 2 412
159 [5, 11, 12] 1 2 406 2 414
160 [6, 11, 12] 1 2 408 2 416
161 [5, 11, 12] 1 2 410 2 418
162 [6, 11, 12] 1 2 412 2 420
163 [5, 11, 12] 1 2 414 2 422
164 [6, 11, 12] 1 2 416 2 424
165 [5, 11, 12] 1 2 418 2 426
166 [6, 11, 12] 1 2 420 2 428
167 [5, 11, 12] 1 2 422 2 430
168 [5, 6, 12] 5 0 422 0 430
169 [5, 6, 18] 6 12 434 12 442
170 [5, 6, 19] 1 2 436 2 444
171 [5, 6, 18] 1 2 438 2 446
172 [5, 6, 19] 1 2 440 2 448
173 [5, 6, 18] 1 2 442 2 450
174 [5, 6, 19] 1 2 444 2 452
175 [5, 6, 18] 1 2 446 2 454
176 [5, 6, 19] 1 2 448 2 456
177 [5, 6, 18] 1 2 450 2 458
178 [5, 6, 19] 1 2 452 2 460
179 [5, 6, 18] 1 2 454 2 462
180 [19, 6, 18] 6 2 456 2 464
181 [19, 12, 18] 6 8 464 8 472
182 [19, 13, 18] 1 2 466 2 474
183 [19, 12, 18] 1 2 468 2 476
184 [19, 13, 18] 1 2 470 2 478
185 [19, 12, 18] 1 2 472 2 480
186 [19, 13, 18] 1 2 474 2 482
187 [19, 12, 18] 1 2 476 2 484
188 [19, 13, 18] 1 2 478 2 486
189 [19, 12, 18] 1 2 480 2 488
190 [19, 13, 18] 1 2 482 2 490
191 [19, 12, 18] 1 2 484 2 492
192 [19, 12, 13] 5 0 484 0 492
193 [5, 12, 13] 6 12 496 12 504
194 [6, 12, 13] 1 2 498 2 506
195 [5, 12, 13] 1 2 500 2 508
196 [6, 12, 13] 1 2 502 2 510
197 [5, 12, 13] 1 2 504 2 512
198 [6, 12, 13] 1 2 506 2 514
199 [5, 12, 13] 1 2 508 2 516
200 [6, 12, 13] 1 2 510 2 518
201 [5, 12, 13] 1 2 512 2 520
202 [6, 12, 13] 1 2 514 2 522
203 [5, 12, 13] 1 2 516 2 524
204 [6, 12, 13] 1 2 518 2 526
205 [5, 12, 13] 1 2 520 2 528
206 [5, 6, 13] 6 0 520 0 528
207 [5, 6, 19] 6 8 528 8 536
208 [5, 6, 0] 1 2 530 2 538
209 [5, 6, 19] 1 2 532 2 540
210 [5, 6, 0] 1 2 534 2 542
211 [5, 6, 19] 1 2 536 2 544
212 [5, 6, 0] 1 2 538 2 546
213 [5, 6, 19] 1 2 540 2 548
214 [5, 6, 0] 1 2 542 2 550
215 [5, 6, 19] 1 2 544 2 552
216 [0, 6, 19] 5 2 546 2 554
217 [0, 12, 19] 6 12 558 12 566
218 [0, 13, 19] 1 2 560 2 568
219 [0, 12, 19] 1 2 562 2 570
220 [0, 13, 19] 1 2 564 2 572
221 [0, 12, 19] 1 2 566 2 574
222 [0, 13, 19] 1 2 568 2 576
223 [0, 12, 19] 1 2 570 2 578
224 [0, 13, 19] 1 2 572 2 580
225 [0, 12, 19] 1 2 574 2 582
226 [0, 13, 19] 1 2 576 2 584
227 [0, 12, 19] 1 2 578 2 586
228 [0, 13, 19] 1 2 580 2 588
229 [0, 12, 19] 1 2 582 2 590
230 [0, 12, 13] 6 0 582 0 590
231 [6, 12, 13] 6 8 590 8 598
232 [7, 12, 13] 1 2 592 2 600
233 [6, 12, 13] 1 2 594 2 602
234 [7, 12, 13] 1 2 596 2 604
235 [6, 12, 13] 1 2 598 2 606
236 [7, 12, 13] 1 2 600 2 608
237 [6, 12, 13] 1 2 602 2 610
238 [7, 12, 13] 1 2 604 2 612
239 [6, 12, 13] 1 2 606 2 614
240 [7, 12, 13] 1 2 608 2 616
241 [6, 12, 13] 1 2 610 2 618
242 [6, 7, 13] 5 0 610 0 618
243 [6, 7, 19] 6 12 622 12 630
244 [6, 7, 0] 1 2 624 2 632
245 [6, 7, 19] 1 2 626 2 634
246 [6, 7, 0] 1 2 628 2 636
247 [6, 7, 19] 1 2 630 2 638
248 [6, 7, 0] 1 2 632 2 640
249 [6, 7, 19] 1 2 634 2 642
250 [6, 7, 0] 1 2 636 2 644
251 [6, 7, 19] 1 2 638 2 646
252 [6, 7, 0] 1 2 640 2 648
253 [6, 7, 19] 1 2 642 2 650
254 [0, 7, 19] 6 2 644 2 652
644 652 556
|
5c3896d7779e4f11ad84611e00772bae9fa6ee5f
|
b8d9444434a0ed37cce66230515b5e9436db53b7
|
/hw4/do-stepcv.R
|
d1d4ff34458ae2bea124e3273b15a03ccb7b673b
|
[] |
no_license
|
zhiminwu29/machine_learning_2021S
|
38170f9090bd6d394e4999d4a007f7a0a3e2e2fd
|
ad003ebd19704565a4f190332a206741b2b2a9bc
|
refs/heads/master
| 2023-04-13T18:48:48.661078
| 2021-04-30T20:10:28
| 2021-04-30T20:10:28
| 340,241,300
| 0
| 1
| null | 2021-04-29T19:38:45
| 2021-02-19T02:53:11
|
HTML
|
UTF-8
|
R
| false
| false
| 2,003
|
r
|
do-stepcv.R
|
library(MASS)
## function to do cv with stepAIC
## stepcv: cross-validate forward stepwise selection.
##   ddf      - data frame; yind = column index of the response,
##              xind = column indices of the candidate predictors
##   fullform - formula giving the full scope for stepAIC
##   folds    - vector of fold ids (one per row of ddf)
##   nstep    - maximum number of forward steps
## For each fold, fits forward stepAIC (BIC penalty, k=log(n)) on the training
## rows and records the held-out SSE after every step via keep=.
## Returns an (nstep+1) x nfolds matrix of held-out SSEs (row 1 = null model).
stepcv = function(ddf,yind,xind,fullform,folds,nstep) {
##function to extract sse using stepAIC
## Called by stepAIC after each step. Reads xpred/ypred lexically from the
## enclosing stepcv environment; they are (re)assigned inside the fold loop
## before stepAIC runs. `maic` (the step's AIC) is required by the keep=
## callback signature but unused here.
keepf = function(mod,maic) {
yhat = predict(mod,xpred)
return(sum((yhat-ypred)^2))
}
##null model
nullform = as.formula(paste(names(ddf)[yind],"~1"))
##loop over folds
nf = length(unique(folds)) #number of folds
ssemat = matrix(0,nstep+1,nf)
for(i in 1:nf) {
cat("in stepcv on fold: ",i,"\n")
# held-out response and predictors for this fold
ypred = ddf[(folds==i),yind]
xpred = ddf[(folds==i),xind,drop=FALSE]
# NOTE(review): n is the FULL data size, so the BIC penalty k=log(n) counts
# the held-out rows too -- possibly sum(folds!=i) was intended; confirm.
n = nrow(ddf)
nullmod=lm(nullform,ddf[!(folds==i),])
fwd = stepAIC(nullmod,scope=fullform,direction="forward",
k=log(n),trace=0,keep=keepf,steps = nstep)
ssemat[,i]=as.double(fwd$keep)
}
return(ssemat)
}
## set up fold id
## Assign each of n observations a fold id in 1..nfold.
## Each fold gets floor(n/nfold) slots; the n %% nfold leftover slots are
## labelled 1..leftover and prepended, so early folds are one larger.
## With dorand=TRUE the ids are randomly shuffled before being returned.
getfolds = function(nfold,n,dorand=TRUE) {
  base_size = floor(n/nfold)                       # slots per fold
  fold_ids = rep(seq_len(nfold), rep(base_size, nfold))
  leftover = n - length(fold_ids)                  # n %% nfold
  if (leftover > 0) {
    fold_ids = c(seq_len(leftover), fold_ids)
  }
  if (dorand) {
    fold_ids = sample(fold_ids, n)
  }
  return(fold_ids)
}
############################################################
# Demo 1: simulate regression data with 3 true predictors out of 20.
# Dead code: the if(0) guard disables it; change to if(1) to run.
if(0) {cat("### simulate data\n")
#simulate data
set.seed(66) # a good seed!!
nsim=1000
psim=20
xsim = matrix(rnorm(nsim*psim),ncol=psim)
bsim=rep(0,psim);bsim[1:3]=1:3
sigma=10.0
ftrue = xsim %*% matrix(bsim,ncol=1)
ysim = ftrue + sigma*rnorm(nsim)
ddfsim = data.frame(y=ysim,x=xsim)
lmall = lm(y~.,ddfsim)
print(summary(lmall))
}
############################################################
# Demo 2: 10-fold CV with stepcv on the simulated data; plots held-out RMSE
# per model size against the true sigma (blue) and full-model sigma-hat (red).
# Dead code under if(0); requires Demo 1 to have been run first (uses ddfsim).
if(0) {cat("### try stepcv on the simulate data\n")
lmall = lm(y~.,ddfsim)
nstep=20
set.seed(99)
fid = getfolds(10,nsim)
fcvsim = stepcv(ddfsim,1,2:21,formula(lmall),fid,nstep)
rmse = sqrt(apply(fcvsim,1,sum)/nsim)
shat = summary(lmall)$sigma
yrg = range(c(sigma,shat,rmse))
plot(1:(nrow(fcvsim)-1),rmse[-1], type="b",col="magenta",xlab="numvar",
ylab="rmse",cex.axis=1.5,cex.lab=1.5,ylim=yrg)
abline(h=sigma,col="blue",lwd=2)
abline(h=shat,col="red",lwd=2)
}
|
5fd52ca7eac35a20a617f2098613d9f8cd1a11d7
|
edde3e8b8427fa4802ff9462e2d92e8eedc5ce00
|
/man/create_lh_list.Rd
|
d61dc0208bd19c21505c5b7553cd8e129dcb1001
|
[
"MIT"
] |
permissive
|
Henning-Winker/LIME
|
b0b8933532dbbcf557620e3eabc070483264f94c
|
9dcfc7f7d5f56f280767c6900972de94dd1fea3b
|
refs/heads/master
| 2022-11-08T20:23:44.288265
| 2020-06-18T18:23:55
| 2020-06-18T18:23:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,444
|
rd
|
create_lh_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_lh_list.R
\name{create_lh_list}
\alias{create_lh_list}
\title{Create new life history list}
\usage{
create_lh_list(
vbk,
linf,
lwa,
lwb,
S50,
M50,
S95 = NULL,
M95 = NULL,
Sslope = NULL,
Mslope = NULL,
selex_input = "length",
maturity_input = "length",
selex_type = "logistic",
dome_sd = NULL,
binwidth = 1,
t0 = -0.01,
CVlen = 0.1,
SigmaC = 0.001,
SigmaI = 0.001,
SigmaR = 0.737,
SigmaF = 0.2,
R0 = 1,
h = 1,
qcoef = 1e-05,
M = NULL,
AgeMax = NULL,
Fequil = 0.5,
Frate = 0.2,
start_ages = 0,
rho = 0,
theta = 1,
nseasons = 1,
nfleets = 1
)
}
\arguments{
\item{vbk}{von Bertalanffy k Brody growth coefficient}
\item{linf}{von Bertalanffy Linf asymptotic length}
\item{lwa}{length-weight scaling parameter}
\item{lwb}{length-weight allometric parameter}
\item{S50}{starting value for age or length at 50 percent selectivity (will be estimated in LIME method) -- can be vector for multiple fleets}
\item{M50}{age or length at 50 percent maturity}
\item{S95}{default=NULL for one-parameter logistic model; starting value for age or length at 95 percent selectivity -- can be vector for multiple fleets}
\item{M95}{default=NULL for one-parameter logistic model; age or length at 50 percent maturity}
\item{Sslope}{default=NULL, option to specify slope of logistic curve for length-at-selectivity -- can be vector for multiple fleets}
\item{Mslope}{default=NULL option to specify slope of logistic curve for length-at-maturity}
\item{selex_input}{specify whether argument S50 is an age or a length (default length)}
\item{maturity_input}{specify whether argument M50 is an age or a length (default length)}
\item{selex_type}{default="logistic" for 1-parameter logistic selex, alternate="dome" for dome-shaped selectivity and must specify dome-params LV and RV. -- can be vector for multiple fleets}
\item{dome_sd}{standard deviation of normal distribution to the right side of the fully selected age/length -- can be vector for multiple fleets}
\item{binwidth}{width of length bins (default = 1)}
\item{t0}{theoretical age at length=0 (default = -0.01); avoid fixing to zero due to some issues with the first age/length bin}
\item{CVlen}{CV of the growth curve (default = 0.1)}
\item{SigmaC}{standard deviation - observation error of catch data (default = 0.001)}
\item{SigmaI}{standard deviation - observation error of index data (default = 0.001)}
\item{SigmaR}{standard deviation - process error for recruitment time series (default = 0.737 -- starting value, will be estimated)}
\item{SigmaF}{standard deviation - process error for fishing mortality time series (default = 0.2) -- can be vector for multiple fleets}
\item{R0}{equilibrium recruitment (default = 1); when no information on scale is available, will estimate relative deviations around equilibrium 1}
\item{h}{steepness parameter (default = 1)}
\item{qcoef}{starting value for catchability coefficient (when index data is available, default = 1e-5) -- can be vector for multiple fleets}
\item{M}{value for natural mortality if there has been a study (default = NULL, calculated internally from vbk)}
\item{AgeMax}{option to specify maximum age; default=NULL will calculate as the age at which 1 percent of individuals are left in the unfished condition}
\item{Fequil}{equilibrium fishing mortality rate (used for simulation; default=0.2) -- can be vector for multiple fleets}
\item{Frate}{parameter used to simulate fishing moratality time series (default=NULL) -- can be vector for multiple fleets}
\item{start_ages}{age to start (either 0 or 1; default = 0)}
\item{rho}{first-order autocorrelation in recruitment residuals parameter, default=0 (recruitment not autocorrelated)}
\item{theta}{dirichlet-multinomial parameter related to effective sample size. default to 1, will not be used if length frequency distribution LFdist is set to multinomial (0). Only used if distribution is dirichlet-multinomial (LFdist=1)}
\item{nseasons}{specify number of sub-time periods per year; default=1 (instantaneous sampling)}
\item{nfleets}{specify number of fleets - fleet-specific parameters can be length nfleets, or shared by specifying only one number}
}
\value{
List, a tagged list of life history traits
}
\description{
\code{create_lh_list} Creates list of life history information
}
\author{
M.B. Rudd
}
|
2dbc47b86e62c690959e76880991e2b9bb04cc1c
|
49679b97305617476aa1acd685ae31e0c7fadb87
|
/R functions/CStask.r
|
fe594222cfada1165e57d21e5b9b0b5590a1b5a6
|
[] |
no_license
|
mvegavillar/Accumbens-Rew-learning
|
2541e07dc6e93f7ea1b39516f783f75f97470a20
|
be221cf5777ec62365927213c613bc9dd6066664
|
refs/heads/master
| 2020-05-24T11:19:13.151823
| 2019-07-09T17:01:57
| 2019-07-09T17:01:57
| 187,246,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
CStask.r
|
# Repackage one element of the big output list into descriptively named
# event-time fields for the CS task, keeping the original info block first.
CStask.Rfunc=function(bigoutelement){
  result = list(info = bigoutelement$info)
  result$receptacleentries = bigoutelement$w
  result$receptacleexits = bigoutelement$x
  result$CSminuscue = bigoutelement$t
  result$CSpluscue = bigoutelement$s
  result$laseron = bigoutelement$y
  result$rewarddelivery = bigoutelement$u
  return(result)
}
save(CStask.Rfunc,file="C:/Users/Mercedes/Desktop/OneDrive - Cuny GradCenter/R functions/CStask.Rfunc")
|
ef9902d6ed0955511f4c7fd1736edef0d9d3c698
|
339f89e5a10c958859e2ffd369892d6673a38eb3
|
/Code/SVM/helper_radial.R
|
fc2ac624e25c6129cf9d2b34fe70f2796f8fa533
|
[] |
no_license
|
pvn25/Hamlet_Extension
|
4f7d0e718603a3d9ff638a897ba93fd7d92d562e
|
3f5f9fe4ef087385b637552b8ec60be805851648
|
refs/heads/master
| 2021-01-11T04:39:37.979467
| 2020-09-14T03:36:28
| 2020-09-14T03:36:28
| 71,120,697
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,606
|
r
|
helper_radial.R
|
#Copyright 2017 Vraj Shah, Arun Kumar
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
library(e1071)
# Global SVM hyper-parameters used by myfilter_radial() below
co = 10    # cost (C) passed to svm()
ga = 0.01  # gamma of the RBF kernel passed to svm()
# Greedy backward filter for an RBF-kernel SVM: given a feature list ranked
# most-disposable-first, evaluate every nested subset obtained by dropping
# features from the head of the list and return the subset with the best
# test-set score.
#
#   attributes    - character vector of candidate feature names (ranked)
#   targetcolname - name of the target column in traindata/testdata
#   traindata     - data frame used to fit each SVM
#   testdata      - data frame used to score each SVM
#   errormetric   - 'RMSE' or '01', passed through to geterr()
#   classes       - number of ordinal classes (used by geterr for 'RMSE')
#
# Relies on the globals `co` (cost) and `ga` (gamma) defined above.
myfilter_radial <- function(attributes, targetcolname, traindata, testdata, errormetric, classes=7) {
numfeats = length(attributes);
if(numfeats == 0)
stop("attributes not specified!");
#get all nested subsets of topK - drop features from the head
newfeatvecs = list(attributes)
# seq_len (not 1:(numfeats-1)) so a single-feature input produces no extra
# degenerate subsets
for (i in seq_len(numfeats - 1)) {
newfeatvecs = append(newfeatvecs, list(attributes[-(1:i)]))
}
print("number of nested subsets to evaluate:")
print(length(newfeatvecs))
# BUG FIX: the response used to be hard-coded as `weekly_sales`, silently
# ignoring the targetcolname argument; build the model formula from it instead.
modelform = as.formula(paste(targetcolname, "~ ."))
#evaluate each set and obtain accuracies
acclist = rep(0, length(newfeatvecs));
for (f in 1:length(newfeatvecs)) {
tpt = proc.time()
thisnb <- svm(modelform, data = traindata[,c(targetcolname,newfeatvecs[[f]])], cost = co, gamma = ga, kernel= "radial", cachesize = 40000)
svm.predicc <- predict(thisnb, testdata[,newfeatvecs[[f]], drop = FALSE])
newtab <- table(pred = svm.predicc, true = testdata[,targetcolname])
newacc = geterr(newtab, errormetric, nrow(testdata), classes);
acclist[f] = newacc; #R vector indices start from 1
print("runtime taken for this set")
print(proc.time() - tpt)
print("computed accuracy of nested subset:")
print(newacc)
print(newfeatvecs[[f]])
}
bestacc = max(acclist);
bestind = which.max(acclist);
bestvec = newfeatvecs[[bestind]];
print("best accuracy and feature set overall:")
print(bestacc)
print(bestvec)
return(bestvec)
}
# Score a confusion table under the given error metric.
#   'RMSE' : ordinal RMSE -- off-diagonal cells weighted by squared class
#            distance; returned NEGATED so that "larger is better" holds for
#            both metrics (callers pick the max).
#   '01'   : plain accuracy, fraction of correct predictions.
# Unknown metrics are reported and scored as -1000.
geterr <- function(fulltab, errormetric, nexamples, classes) {
  score <- -1000
  if (errormetric == 'RMSE') {
    sym <- fulltab + t(fulltab)              # pool (i,j) and (j,i) counts
    weighted_sum <- 0
    for (dist in 1:(classes - 1)) {
      penalty <- dist * dist                 # squared class distance
      for (row in 1:(classes - dist)) {
        weighted_sum <- weighted_sum + sym[row, (row + dist)] * penalty
      }
    }
    score <- -sqrt(weighted_sum / nexamples) # negate so max() picks lowest RMSE
  } else if (errormetric == '01') {
    score <- sum(diag(fulltab)) / nexamples
  } else {
    print ("Unrecognized error metric:")
    print(errormetric)
  }
  return (score)
}
|
8ab42d6807ad521c8dd14f82869e699f92a841eb
|
c457b4fa4fa50b1767f4766940f42f78e52087bd
|
/ecosystems/Thomas.R
|
5041ff01cdbabe4846e5dd6cb5c9c8b9d8466322
|
[] |
no_license
|
alanponce/ESEUR-code-data
|
f6efefbe63947103868d79c05149077b8353eedf
|
9573de006d4327f3ee1bfc823490809cc23e8db8
|
refs/heads/master
| 2020-04-08T10:11:35.567668
| 2018-11-24T22:49:02
| 2018-11-24T22:49:02
| 159,258,259
| 1
| 0
| null | 2018-11-27T01:38:36
| 2018-11-27T01:38:35
| null |
UTF-8
|
R
| false
| false
| 1,367
|
r
|
Thomas.R
|
#
# Thomas.R, 17 Mar 18
# Data from:
# Security metrics for computer systems
# Daniel R. Thomas
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library("diagram")
# Single panel, slightly wider/taller than the project defaults
plot_layout(1, 1, default_width=ESEUR_default_width+1,
default_height=ESEUR_default_height+3)
# Node coordinates for six rows holding 4, 2, 3, 1, 2 and 1 nodes
elpos=coordinates (c(4, 2, 3, 1, 2, 1))
# Nodes appear left to right, starting top left, finish bottom right
# Blank names (" ") are placeholder positions with no visible label; note the
# deliberate trailing/leading spaces in "OpenSSL " and " BouncyCastle" --
# they must match the M[] index strings below exactly.
names=c(" ", "OpenSSL ", " BouncyCastle\n(176)", " ",
"Other\nprojects", "Linux",
" ", "Google", "Hardware\ndeveloper",
"Device manufacturer\n(402)",
" ", "Network operator\n(1,650)",
"Device\n(24,600)")
# Adjacency matrix of arrow labels, indexed M[from, to]:
# "" draws an unlabelled arrow, 0 means no edge
M=matrix(data=0, nrow=length(names), ncol=length(names))
colnames(M)=names
rownames(M)=names
#
M["OpenSSL ", "Google"]="52"; M[" BouncyCastle\n(176)", "Google"]="6"
M["Other\nprojects", "Google"]=""; M["Linux", "Google"]="602"
M["Google", "Device manufacturer\n(402)"]="30"; M["Hardware\ndeveloper", "Device manufacturer\n(402)"]=""
M["Hardware\ndeveloper", "Linux"]=""
M["Device manufacturer\n(402)", "Network operator\n(1,650)"]=""
M["Device manufacturer\n(402)", "Device\n(24,600)"]=""
M["Network operator\n(1,650)", "Device\n(24,600)"]="1650"
# plotmat expects rows = destination, columns = origin, hence the transpose
plotmat(t(M), pos=elpos, lwd=1, arr.lcol="green", arr.pos=0.6, arr.length=0.15, cex=1.2,
box.lcol="white", box.prop=0.5, box.size=0.05, box.cex=1.2, shadow.size=0)
|
7c750290ec4c7075f3d0e0ff2384d567272857a5
|
2a2ca3b3c603e39af268a7964062199a8120b44c
|
/fxrPrototypes/picCorrelation.R
|
7a5a1e112c7384ea58e5440b2d434c0c89a06a1d
|
[] |
no_license
|
arborworkflows/arborCollections
|
0953ce7634b17adb696fc19c755553145bee2d31
|
2f301d6c3946d4992dfeb2ec436e7c9bdbfd9ff5
|
refs/heads/master
| 2021-01-11T10:03:47.664769
| 2018-09-25T19:33:00
| 2018-09-25T19:33:00
| 35,436,091
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 950
|
r
|
picCorrelation.R
|
library(geiger)
library(aRbor)
# Tip data: rows = species (first CSV column used as row names)
table <-
read.csv("/home/lukeh/Documents/arborCollectionsWeb/assets/anolis.csv", row.names=1)
# Independent / dependent trait columns for the contrasts regression
ind_variable<-"SVL"
dep_variable<-"awesomeness"
tree<-read.tree("/home/lukeh/Documents/arborCollectionsWeb/assets/anolis.phy")
plotPICs<-TRUE  # TRUE rather than T: T is a reassignable alias
# Match tree and tip data
# BUG FIX: this used to call make.treedata(phy, table), but `phy` is not
# defined until the next line -- the tree just read from file must be passed.
td<-make.treedata(tree, table)
phy<-td$phy
# get x and y data with names
# would be better to have an aRbor function that takes td directly?
x <- select_(td, ind_variable)$dat[[1]]
names(x)<-td$phy$tip.label
y <- select_(td, dep_variable)$dat[[1]]
names(y)<-td$phy$tip.label
# calculate independent contrasts
# BUG FIX: contrasts must be computed on the matched/pruned tree (phy), not on
# the raw input tree, whose tip set may not correspond to x and y.
picX <- pic(x, phy)
picY <- pic(y, phy)
# run regression forced through the origin
res <- lm(picY~picX-1)
output <- anova(res)
# coerce into table
tableResults<-cbind(c(dep_variable, "Residuals"), output[,1:5])
colnames(tableResults)[1]<-"Effect"
# tableResults are the final results
if(plotPICs) {
plot(picX, picY)
abline(res)
}
|
3c61f2c4fcfff9f264bd6f1661f3eee3e1f43fbc
|
3d287de4d79b321e5112ddff1d02fce33841bb5f
|
/cachematrix.R
|
27d32002f94e3b39429e263059dc8648fb596ca1
|
[] |
no_license
|
subrata143/ProgrammingAssignment2
|
f89c1cca9a8ce7af49efa950a5062c0c78b6fa59
|
922303bff04cd6c58ce380050cc97c0d5219393d
|
refs/heads/master
| 2021-01-15T23:28:27.576018
| 2014-08-24T01:25:44
| 2014-08-24T01:25:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,244
|
r
|
cachematrix.R
|
## This program contains two functions the 1st one chaches the inverse of a matrix and the second one inverses a matrix if not available in cache. Note that only square matrix can be used.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions closing over the stored matrix
## (`mymatrix`) and its cached inverse (`mycache`).
makeCacheMatrix <- function(x = numeric()) {
  # BUG FIX: the argument `x` used to be ignored, so getMatrix() raised
  # "object 'mymatrix' not found" unless setMatrix() had been called first.
  mymatrix <- x
  mycache <- NULL ## initially cache is set to NULL
  # store a new matrix and invalidate any cached inverse
  setMatrix <- function(newValue) {
    mymatrix <<- newValue
    mycache <<- NULL
  }
  # returns matrix
  getMatrix <- function() {
    mymatrix
  }
  # cache the argument (the inverse computed by cacheSolve)
  cacheInverse <- function(solve) {
    mycache <<- solve
  }
  # get the cached value (NULL when nothing has been cached yet)
  getInverse <- function() {
    mycache
  }
  # return the list of functions
  list(setMatrix = setMatrix, getMatrix = getMatrix, cacheInverse = cacheInverse, getInverse = getInverse)
}
## Compute the inverse of a matrix wrapped by makeCacheMatrix(), reusing
## the cached inverse when one is available. Only invertible square
## matrices are supported.
cacheSolve <- function(y, ...) {
  # Fast path: a previously computed inverse is returned directly.
  cached <- y$getInverse()
  if (!is.null(cached)) {
    message("Results from cache")
    return(cached)
  }
  # Slow path: invert the stored matrix and remember the result.
  freshly_solved <- solve(y$getMatrix())
  y$cacheInverse(freshly_solved)
  freshly_solved
}
|
3960195acf241b66a1e1e39eddc7fec950947d96
|
851dfbed249e9672f0f8588f6b75b2fe2743a576
|
/Wk11_Workshop_NotesAndCode.R
|
f7d67f25f8d7967f36bfc7709900055d9bc9863b
|
[] |
no_license
|
durfey/MSCA_BayesianMethods_class
|
9a3f57896ad5284dbd4c56c5df2726a15c3d8bf2
|
38329bced4af80a6da8694f9406e21ae119224f0
|
refs/heads/master
| 2021-01-19T01:02:08.809158
| 2016-07-01T23:39:16
| 2016-07-01T23:39:16
| 62,424,823
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,319
|
r
|
Wk11_Workshop_NotesAndCode.R
|
#################################
# Week 11 Workshop Notes & Code #
#################################
# NOTE(review): this script depends on the rstan package and on Kruschke's
# DBDA2E utilities (it sources gammaShRaFromModeSD from a local path), so it
# only runs as-is on the original author's machine.
library(rstan)
source('//Users/rdurfey/R_misc/BayesianMethods/DBDA2Eprograms/DBDA2E-utilities.R')
##############################
# 1. ANOVA in Bayesian Setup #
##############################
# Stan program for a hierarchical two-way ANOVA: y is standardized (zy),
# deflection parameters a1/a2/a1a2 get hierarchical normal priors whose
# scales have gamma hyperpriors, and the generated-quantities block converts
# the deflections to sum-to-zero b coefficients back on the original scale.
modelString<-"
data {
int<lower=1> Ntotal;
vector[Ntotal] y;
int<lower=2> Nx1Lvl;
int<lower=2> Nx2Lvl;
int<lower=1, upper=Nx1Lvl> x1[Ntotal];
int<lower=1, upper=Nx2Lvl> x2[Ntotal];
real<lower=0> agammaShRa[2];
}
transformed data {
real meanY;
real sdY;
vector[Ntotal] zy;
meanY <- mean(y);
sdY <- sd(y);
zy <- (y - mean(y)) / sdY; // center & normalize
}
parameters {
real a0;
real<lower=0> a1Sigma;
real<lower=0> a2Sigma;
real<lower=0> a1a2Sigma;
vector[Nx1Lvl] a1;
vector[Nx2Lvl] a2;
matrix[Nx1Lvl,Nx2Lvl] a1a2;
real<lower=0> zySigma;
}
model {
a0 ~ normal(0, 1);
a1Sigma ~ gamma(agammaShRa[1], agammaShRa[2]);
a1 ~ normal(0, a1Sigma);
a2Sigma ~ gamma(agammaShRa[1], agammaShRa[2]);
a2 ~ normal(0, a2Sigma);
a1a2Sigma ~ gamma(agammaShRa[1], agammaShRa[2]);
for (j1 in 1:Nx1Lvl) {
a1a2[j1,] ~ normal(0, a1a2Sigma);
}
zySigma ~ uniform(1.0/10, 10);
for ( i in 1:Ntotal ) {
zy[i] ~ normal(a0 + a1[x1[i]] + a2[x2[i]]+ a1a2[x1[i],x2[i]], zySigma);
}
}
generated quantities {
// Convert a to sum-to-zero b :
real b0;
vector[Nx1Lvl] b1;
vector[Nx2Lvl] b2;
matrix[Nx1Lvl,Nx2Lvl] b1b2;
matrix[Nx1Lvl,Nx2Lvl] m;
real<lower=0> b1Sigma;
real<lower=0> b2Sigma;
real<lower=0> b1b2Sigma;
real<lower=0> ySigma;
for ( j1 in 1:Nx1Lvl ) { for ( j2 in 1:Nx2Lvl ) {
m[j1,j2] <- a0 + a1[j1] + a2[j2] + a1a2[j1,j2]; // cell means
} }
b0 <- mean(m);
for ( j1 in 1:Nx1Lvl ) { b1[j1] <- mean( m[j1,] ) - b0; }
for ( j2 in 1:Nx2Lvl ) { b2[j2] <- mean( m[,j2] ) - b0; }
for ( j1 in 1:Nx1Lvl ) { for ( j2 in 1:Nx2Lvl ) {
b1b2[j1,j2] <- m[j1,j2] - ( b0 + b1[j1] + b2[j2] );
} }
// transform to original scale:
b0 <- meanY + sdY * b0;
b1 <- sdY * b1;
b2 <- sdY * b2;
b1b2 <- sdY * b1b2;
b1Sigma <- sdY * a1Sigma;
b2Sigma <- sdY * a2Sigma;
b1b2Sigma <- sdY * a1a2Sigma;
ySigma <- sdY * zySigma;
}"
# create DSO
# Compile the Stan program once; `sampling()` below reuses this object.
stanDsoANOVA2Way<-stan_model( model_code=modelString )
# 20_1: Metric Predicted Variable with Two Nominal Predictors
# load data from 'Salary.csv' (see Kruschke)
mydf = read.csv("//Users/rdurfey/R_misc/BayesianMethods/Week11/Salary.csv")
# Interactive inspection of the data set.
mean(mydf$Salary)
head(mydf)
dim(mydf)
colnames(mydf)
table(mydf$Pos)
table(mydf$Org)
length(table(mydf$Org))
# the output will be salary
y <- mydf$Salary;
x1 <- mydf$Pos;
x2 <- mydf$Org;
# Assemble the data list matching the Stan model's `data` block; factor
# levels are passed as integer codes, and the gamma shape/rate hyperprior
# comes from Kruschke's mode/sd parameterization helper.
dataListSalary<-list(Ntotal=length(y),
y=y,
x1=as.integer(x1),
x2=as.integer(x2),
Nx1Lvl=nlevels(x1),
Nx2Lvl=nlevels(x2),
agammaShRa=unlist( gammaShRaFromModeSD(mode=1/2, sd=2) ))
# Create names of variables and their interactions for further reference.
namesPos<-names(table(mydf$Pos))
namesOrg<-names(table(mydf$Org))
as.vector(outer(1:4,1:2,paste,sep="-"))
namesInter<-as.vector(outer(namesOrg,namesPos,paste,sep="-"))
varNames<-c("Intercept",namesPos,namesOrg,namesInter,rep("Var",5))
# Run MCMC
# fit model
fit <- sampling (stanDsoANOVA2Way,
data=dataListSalary,
pars=c('b0',
'b1',
'b2',
'b1b2',
'b1Sigma',
'b2Sigma',
'b1b2Sigma',
'ySigma'),
iter=5000, chains = 2, cores = 2
)
# Check the results in shinystan.
library(shinystan)
launch_shinystan(fit)
# Create results including mean value, 2.5%, 50% and 97.5% quantiles.
# Add variable names as row names.
SalaryResults<-summary(fit)$summary[,c(1,4,6,8)]
# The last five rows are the variance/lp parameters; keep their own names.
varNames[nrow(SalaryResults)-(4:0)]<-rownames(SalaryResults)[nrow(SalaryResults)-(4:0)]
rownames(SalaryResults)<-varNames
SalaryResults
plot(fit,pars=c("b1"))
plot(fit,pars=c('b2'))
plot(fit,pars=c("b1b2"))
# Extract chains for the position variables.
fit_ext <- rstan::extract(fit)
names(fit_ext)
fit_ext.b1<-fit_ext$b1
colnames(fit_ext.b1)<-namesPos
head(fit_ext.b1)
# Extract chains for the department variables.
fit_ext.b2<-fit_ext$b2
colnames(fit_ext.b2)<-namesOrg
head(fit_ext.b2)
# Extract chains for interaction variables.
# fit_ext.b1.b2 is iterations x position x department.
fit_ext.b1.b2<-fit_ext$b1b2
dim(fit_ext.b1.b2)
dimnames(fit_ext.b1.b2)[[2]]<-namesPos
dimnames(fit_ext.b1.b2)[[3]]<-namesOrg
dimnames(fit_ext.b1.b2)
fit_ext.b1.b2[1,,]
#############
# EXERCISES #
#############
# Contrasts below are posterior-sample differences of the interaction
# deflections extracted above (fit_ext.b1.b2: iterations x position x dept).
# 1. Use contrasts to compare salaries at Business and Finance with Physics and with Chemistry departments.
contrast_BFIN_PHYS <- fit_ext.b1.b2[,,"BFIN"] - fit_ext.b1.b2[,,"PHYS"]
plot(contrast_BFIN_PHYS)
hist(contrast_BFIN_PHYS)
contrast_BFIN_CHEM <- fit_ext.b1.b2[,,"BFIN"] - fit_ext.b1.b2[,,"CHEM"]
plot(contrast_BFIN_CHEM)
hist(contrast_BFIN_CHEM)
# 2. Use contrasts to compare salaries of Endowment full Professor and Distinguished Full professor.
contrast_NDW_DST <- fit_ext.b1.b2[,"NDW",] - fit_ext.b1.b2[,"DST",]
plot(contrast_NDW_DST)
hist(contrast_NDW_DST)
# conclusion: they make about the same salaries
# 3. Use contrasts to compare salaries spreads between Full Professor and Assistant Professor at Physics Department and at Chemistry Department.
contrast_FT1_FT3_PHYS <- fit_ext.b1.b2[,"FT1","PHYS"] - fit_ext.b1.b2[,"FT3","PHYS"]
plot(contrast_FT1_FT3_PHYS)
hist(contrast_FT1_FT3_PHYS)
contrast_FT1_FT3_CHEM <- fit_ext.b1.b2[,"FT1","CHEM"] - fit_ext.b1.b2[,"FT3","CHEM"]
plot(contrast_FT1_FT3_CHEM)
hist(contrast_FT1_FT3_CHEM)
# note: we think FT1 is full prof and FT3 is assist. prof
# conclusion: full prof makes more. duh.
# 4. Analyze contrasts for comparison of salary spreads between the departments of Physics and Chemistry.
# A contrast of contrasts: difference of the FT1-FT3 spreads across depts.
contrast_contrasts<-contrast_FT1_FT3_PHYS - contrast_FT1_FT3_CHEM
hist(contrast_contrasts)
# conclusion: CHEM has a higher spread of salaries
##############################################################################
# 2. Understanding the effect of scaling and transformations on interactions #
##############################################################################
# Nonlinear transformations may affect interactions very significantly.
# Illustrate it on a simple simulated example.
# Simulate a perfectly additive 2x2 design (cell means 1/3/4/6, small noise).
mean00<-1
mean10<-3
mean01<-4
mean11<-6
y00<-rnorm(5,mean00,.1)
y10<-rnorm(5,mean10,.1)
y01<-rnorm(5,mean01,.1)
y11<-rnorm(5,mean11,.1)
# Plot the effects. If the lines are parallel the effects are additive.
plot(c(0,1),c(mean(y00),mean(y10)),type="b",ylim=c(1,8),col="darkgreen",lwd=3,ylab="Response",xlab="Predictor 1")
lines(c(0,1),c(mean(y01),mean(y11)),type="b",col="lightblue",lwd=3)
legend("topleft",legend=c("Predictor2 at 0","Predictor2 at 1"),lty=1,lwd=3,col=c("darkgreen","lightblue"))
# plot shows no interaction
# Taking exponent of the same data introduces significant interaction.
plot(c(0,1),c(mean(exp(y00)),mean(exp(y10))),type="b",ylim=c(1,400),col="darkgreen",lwd=3,ylab="Response",xlab="Predictor 1")
lines(c(0,1),c(mean(exp(y01)),mean(exp(y11))),type="b",col="lightblue",lwd=3)
legend("topleft",legend=c("Predictor2 at 0","Predictor2 at 1"),lty=1,lwd=3,col=c("darkgreen","lightblue"))
# now, plot shows significant interaction. the slope of the response depends on the level of the 2nd predictor
|
69a21441a34c6bd0b5218a772600de89e72dade9
|
70a9be4f18fc0749ea4ddb27cfaad33d8734a3ea
|
/man/build_par.Rd
|
171caaa63514e5709f52c6ac0aa74f1c00b71635
|
[] |
no_license
|
fickse/RHEM
|
c40701bf8e16780437ef05476f737321dfa8865e
|
875232a26a962efd8ac5217b74f5f6dc2b8ae765
|
refs/heads/master
| 2020-03-10T11:02:54.882333
| 2018-12-28T23:15:14
| 2018-12-28T23:15:14
| 129,347,032
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,367
|
rd
|
build_par.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{build_par}
\alias{build_par}
\title{Write .par file for input to rhem}
\usage{
build_par(...)
}
\arguments{
\item{...}{named arguments. see \code{details}}
}
\value{
list of inputs for .par file
}
\description{
Write .par file for input to rhem
}
\details{
the complete list of input parameters may be found via the \code{\link{par_defaults}} function. Parameters include:
\describe{
\item{scenarioname}{ (defaults to \code{as.numeric(Sys.time())})}
\item{units}{ 'Metric' or 'English' }
\item{soiltexture}{ see \code{\link{texture_df}} }
\item{moisturecontent}{ Initial moisture content \% saturation ( default = 25)}
\item{bunchgrasscanopycover}{ integer (\%)}
\item{forbscanopycover}{ integer (\%)}
\item{shrubscanopycover}{ integer (\%)}
\item{sodgrasscanopycover}{ integer (\%)}
\item{rockcover}{ integer (\%)}
\item{basalcover}{ integer (\%)}
\item{littercover}{ integer (\%)}
\item{cryptogamscover}{ integer (\%)}
\item{slopelength }{ integer (m)}
\item{slopeshape }{ "uniform", "convex", "concave" or "s-shaped"}
\item{slopesteepness }{ integer (\%)}
\item{version }{ character (currently no effect)}
\item{OUTPUT_FOLDER}{place to save .par file. Defaults to '.'}
}
}
\examples{
a <- build_par() # defaults
a
unlink(a$handle)
}
|
6f4d02d9c31889afdbadf27bda2cfe700448af41
|
41d7c2ff4628f27213f90aca0be867c0f747b300
|
/man/hcsig.Rd
|
d135411175c6e585d752fafc0460e3ebcdb21103
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jlaffy/statistrics
|
69e17a043522d3bcad6127127e66eea9649c3a92
|
2de58328790ede712c3aa6bbeccda611d7eaa121
|
refs/heads/master
| 2020-03-14T03:03:24.759893
| 2018-08-23T12:37:04
| 2018-08-23T12:37:04
| 131,412,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,076
|
rd
|
hcsig.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-cluster-significance.R
\name{hcsig}
\alias{hcsig}
\title{hcsig: hcluster Significance}
\usage{
hcsig(k, mat, fc.value = 3, p.value = 10^(-4), p.value.2 = 10^(-5),
pval.adjust = NULL, reorder = TRUE, fc.sort = T, pval.sort = F,
returning = "all")
}
\arguments{
\item{k}{a list of character vectors; sets of cell members belonging to each cluster.}
\item{mat}{a matrix of gene expression data (cells by genes)}
\item{fc.value}{fold change value below which differential gene expression is deemed insignificant.}
\item{p.value}{p-value above which differential gene expression is deemed insignificant.}
\item{p.value.2}{p-value above which differential gene expression is deemed insignificant with higher cutoff (sig.2)}
\item{pval.adjust}{NULL or character string. If NULL, do not adjust p-values. If string, adjust p-values using the method specified.}
\item{reorder}{if TRUE, the list of clusters is reordered by most to least significant.}
\item{fc.sort}{if TRUE, significantly differentially expressed genes are sorted by fold change (highest first). Default is TRUE.}
\item{pval.sort}{if TRUE, significantly differentially expressed genes are sorted by p.value (highest first). \code{pval.sort=TRUE} overrides \code{fc.sort=TRUE}. Default is FALSE.}
\item{returning}{return one of p-values, fold changes, or both from call of \code{DEgenes()} to \code{sig()}.}
}
\value{
list of length 4. Each object in the list is also a list. Each list has the same length, which is the length of k arg (the number of clusters). The lists are list$k, same as input; list$sig.1, the significant genes' p-values for each cluster; list$sig.2, list$sig.1 filtered such that only genes with p value higher than p.value.2 are included. list$sig.3, list$sig.1 filtered such that each gene only appears once across the clusters, wherever it had the highest p-value.
}
\description{
hcluster Significance. For clusters derived from hierarchical clustering (with \code{hclust}), data is retrieved.
}
|
c2f3c356d1399633a8cd72e75320a3eb07914f30
|
f8b069d84fc20beb5300b26c0df4e31a4b924b4c
|
/R/zzz.R
|
634a8800932fd7a62b482c6d9d6c85572001d876
|
[
"MIT"
] |
permissive
|
shanmdphd/PKPDsim
|
df84dbf4f7ee3be9a081516c3f74a8ac7b85379e
|
5aa415da1047795c28091c47623dd0fc4f611c48
|
refs/heads/master
| 2020-03-26T21:23:52.752205
| 2018-08-20T08:05:04
| 2018-08-20T08:05:04
| 145,385,623
| 0
| 0
| null | 2018-08-20T08:00:51
| 2018-08-20T08:00:51
| null |
UTF-8
|
R
| false
| false
| 187
|
r
|
zzz.R
|
# message("\n----\nWarning: \n\nDevelopment of the PKPDsim package has moved to the InsightRX/PKPDsim repository, please get the latest version from the new repository.\n----\n")
|
e478adbd52e5f6deefb4a2e119e2235235a990a5
|
a57e1f283ce9473afc2459cd3cf7ba6ff424e88e
|
/plot2.R
|
29695d21cd97da80ebd0fa1fae6c5c6c7fa0fc9b
|
[] |
no_license
|
dusanmundjar/EDA-plots
|
aa31d052f7baf3921053d6ed4e39a9e87f4b8f55
|
85adb8274b5f49ad1d0e30de8be3adcc41d5b509
|
refs/heads/master
| 2021-01-13T04:34:42.931207
| 2015-02-08T19:03:03
| 2015-02-08T19:03:03
| 30,501,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 833
|
r
|
plot2.R
|
### Exploratory plot 2: Global Active Power over 2007-02-01/02 as a line chart
### from the UCI "household power consumption" data set. Missing values are
### coded as "?" in the raw file.

### import data (semicolon-separated) and keep the two target days
data <- read.csv("C:/exdata-data-household_power_consumption/household_power_consumption.txt", sep=";", na.strings="?")
data2 <- data[which(data$Date=="1/2/2007" | data$Date=="2/2/2007"),]

### build a proper date-time column from the separate Date and Time fields
dates <- data2$Date
times <- data2$Time
data2$DTime <- strptime(paste(dates, times), "%d/%m/%Y %H:%M:%S")

#### midnight of the first day, used to position the axis tick marks
b <- strptime("1/2/2007", "%d/%m/%Y")

### plotting device
# NOTE(review): the original call passed Windows-only arguments
# (type = c("windows", ...), restoreConsole, antialias = "cleartype"),
# which makes png() error on other platforms; they are dropped here.
png(filename = "plot2.png",
    width = 480, height = 480, units = "px", pointsize = 12,
    bg = "white")

## plot: line chart with day-of-week labels at each midnight
plot(data2$DTime, data2$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)",
     xaxt = "n", xlab = "")
# `labels=` spelled out (the original relied on partial matching via `lab=`).
axis(1, at = b + (0:2) * 24 * 3600, labels = c("Thu", "Fri", "Sat"))

### close the device
dev.off()
|
b8ea06ca4a29a0425e3f695efcbcbc4236bf7830
|
70e5bc555b9051a2fd043a3c55adcd82cd9bd847
|
/run_analysis.R
|
728fc714cfc5a8589da2052dd36c4cf214031c24
|
[] |
no_license
|
Bogstag/HumanActivityRecognitionUsingSmartphones
|
82e02e643a3372d95237fdac9e6434d4704df1e5
|
f7cfe73888e64423096c6ce8010aba13510055ce
|
refs/heads/master
| 2016-09-11T04:23:59.485750
| 2014-09-21T17:49:15
| 2014-09-21T17:49:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,044
|
r
|
run_analysis.R
|
library("plyr")

## Tidy the UCI HAR (Human Activity Recognition) data set: merge test/train,
## keep mean/std measurements, label activities with descriptive names, and
## write per-subject/per-activity averages to "tidydata.txt".

## Merges the training and the test sets to create one data set
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE)
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE, col.names = "subject")
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE, col.names = "subject")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE, col.names = "activity")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE, col.names = "activity")
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE, col.names = c("activityid", "activity"))
features <- read.table("UCI HAR Dataset/features.txt", header = FALSE)

# Attach subject and activity columns, then stack test on top of train.
x_train <- cbind(x_train, subject_train, y_train)
x_test <- cbind(x_test, subject_test, y_test)
ardata <- rbind(x_test, x_train)
## You have now a single dataset called ardata

# Name the measurement columns after the feature list, plus the two id columns.
feature_names <- c(as.vector(features$V2), "subject", "activity")
names(ardata) <- feature_names

## Extracts only the measurements on the mean and standard deviation for each
## measurement, keeping subject/activity located by name rather than by the
## previously hard-coded positions 562/563.
interesting_columns <- sort(c(grep("-mean\\(\\)|-std\\(\\)", names(ardata)),
                              which(names(ardata) %in% c("subject", "activity"))))
ardata <- subset(ardata, select = interesting_columns)
## Extraction complete

# Uses descriptive activity names to name the activities in the data set.
# (`labels=` spelled out; the original relied on partial matching of `label=`.)
ardata$activity <- factor(ardata$activity, levels = activity_labels$activityid,
                          labels = activity_labels$activity)

## Create a second, independent tidy data set with the average of each
## variable for each activity and each subject. Measurement columns are
## selected by excluding the id columns instead of the hard-coded 1:66.
tidydata <- ddply(ardata, c('subject', 'activity'),
                  function(part) colMeans(part[, !(names(part) %in% c("subject", "activity"))]))
write.table(tidydata, file = "tidydata.txt", row.names = FALSE)
|
a2e053b781b67c393055508075f090dff4f01df1
|
d57bfd5bbefab86d21ed46b4e15f1d489c61bcbc
|
/R/fisher_corr.R
|
eb2378bcaea0c0c7ec590b9097fb9ddc937ebf54
|
[] |
no_license
|
cran/smovie
|
553d3d6441a762a4b538699c47d028ad4e5c995a
|
9a05f94188335a1b79a98bdfa4011bbcbc3033e8
|
refs/heads/master
| 2021-11-24T16:55:00.478591
| 2021-10-31T04:30:02
| 2021-10-31T04:30:02
| 123,954,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,487
|
r
|
fisher_corr.R
|
#' Fisher's transformation of the product moment correlation coefficient
#'
#' Density, distribution function, quantile function and random generator
#' for the distribution of Fisher's transformation of product moment
#' correlation, based on a random sample from a bivariate normal distribution
#'
#' @param x,q Numeric vectors of quantiles.
#' @param p A numeric vector of probabilities in [0,1].
#' @param N Numeric vector. Number of observations, (N > 3).
#' @param rho Numeric vector. Population correlations, (-1 < rho < 1).
#' @param n Numeric scalar. The number of observations to be simulated.
#' If \code{length(n) > 1} then \code{length(n)} is taken to be the number
#' required.
#' @param log,log.p A logical scalar; if TRUE, probabilities p are given as
#' log(p).
#' @param lower.tail A logical scalar. If TRUE (default), probabilities
#' are P[X <= x], otherwise, P[X > x].
#' @details These functions rely on the
#' \code{\link[SuppDists:Pearson]{correlation coefficient}}
#' functions in the SuppDists package. SuppDists must be installed in order
#' for these functions to work.
#' @seealso \code{\link[SuppDists:Pearson]{correlation coefficient}} in the
#' SuppDists package for dpqr functions for the untransformed product moment
#' correlation coefficient.
#' @examples
#' dFcorr(-1:1, N = 10)
#' dFcorr(0, N = 11:20)
#'
#' pFcorr(0.5, N = 10)
#' pFcorr(0.5, N = 10, rho = c(0, 0.3))
#'
#' qFcorr((1:9)/10, N = 10, rho = 0.2)
#' qFcorr(0.5, N = c(10, 20), rho = c(0, 0.3))
#'
#' rFcorr(6, N = 10, rho = 0.6)
#' @seealso \code{\link{correlation}}: correlation sampling distribution movie.
#' @references Fisher, R. A. (1915). Frequency distribution of the values of
#' the correlation coefficient in samples of an indefinitely large
#' population. \emph{Biometrika}, \strong{10}(4), 507-521.
#' @references Fisher, R. A. (1921). On the "probable error" of a coefficient
#' of correlation deduced from a small sample. \emph{Metron}, \strong{1},
#' 3-32.
#' \url{https://digital.library.adelaide.edu.au/dspace/bitstream/2440/15169/1/14.pdf}
#' @name Fcorr
NULL
## NULL
# ------------------------------- dFcorr -----------------------------------
#' @rdname Fcorr
#' @export
dFcorr <- function (x, N, rho = 0.0, log = FALSE) {
  # Validate parameter ranges before doing any work.
  if (any(rho <= -1) | any(rho >= 1)) {
    stop("invalid rho: rho must be in (-1, 1)")
  }
  if (any(N < 4)) {
    stop("invalid N: N must be at least 4")
  }
  # Recycle all inputs to a common length.
  n_out <- max(length(x), length(N), length(rho))
  x <- rep_len(x, n_out)
  N <- rep_len(N, n_out)
  rho <- rep_len(rho, n_out)
  # Back-transform to the correlation scale and apply the change-of-variables
  # Jacobian: f_Z(z) = f_R(tanh(z)) * (1 - tanh(z)^2).
  r_vals <- tanh(x)
  dens <- SuppDists::dPearson(x = r_vals, N = N, rho = rho) * (1 - r_vals ^ 2)
  if (log) {
    dens <- log(dens)
  }
  dens
}
# ------------------------------- pFcorr -----------------------------------
#' @rdname Fcorr
#' @export
pFcorr <- function(q, N, rho = 0.0, lower.tail = TRUE, log.p = FALSE) {
  # Validate parameter ranges before doing any work.
  if (any(rho <= -1) | any(rho >= 1)) {
    stop("invalid rho: rho must be in (-1, 1)")
  }
  if (any(N < 4)) {
    stop("invalid N: N must be at least 4")
  }
  # Recycle all inputs to a common length.
  n_out <- max(length(q), length(N), length(rho))
  q <- rep_len(q, n_out)
  N <- rep_len(N, n_out)
  rho <- rep_len(rho, n_out)
  # The CDF is invariant under the monotone transform: P(Z <= z) equals
  # P(R <= tanh(z)), so delegate to pPearson on the correlation scale.
  SuppDists::pPearson(q = tanh(q), N = N, rho = rho,
                      lower.tail = lower.tail, log.p = log.p)
}
# ------------------------------- qFcorr -----------------------------------
#' @rdname Fcorr
#' @export
qFcorr <- function(p, N, rho = 0.0, lower.tail = TRUE, log.p = FALSE) {
  # Validate parameter ranges before doing any work.
  if (any(rho <= -1) | any(rho >= 1)) {
    stop("invalid rho: rho must be in (-1, 1)")
  }
  if (any(N < 4)) {
    stop("invalid N: N must be at least 4")
  }
  # Recycle all inputs to a common length.
  n_out <- max(length(p), length(N), length(rho))
  p <- rep_len(p, n_out)
  N <- rep_len(N, n_out)
  rho <- rep_len(rho, n_out)
  # Quantiles on the correlation scale, mapped to Fisher's z via atanh.
  r_quantiles <- SuppDists::qPearson(p = p, N = N, rho = rho,
                                     lower.tail = lower.tail, log.p = log.p)
  atanh(r_quantiles)
}
# ------------------------------- rFcorr -----------------------------------
#' @rdname Fcorr
#' @export
rFcorr <- function(n, N, rho = 0.0, lower.tail = TRUE, log.p = FALSE) {
  # Validate parameter ranges before doing any work.
  # NOTE(review): lower.tail and log.p are accepted for signature consistency
  # with the other Fcorr functions but are not forwarded to rPearson.
  if (any(rho <= -1) | any(rho >= 1)) {
    stop("invalid rho: rho must be in (-1, 1)")
  }
  if (any(N < 4)) {
    stop("invalid N: N must be at least 4")
  }
  # As for stats::rnorm etc.: length(n) > 1 means "this many draws".
  n_out <- ifelse(length(n) > 1, length(n), n)
  N <- rep_len(N, n_out)
  rho <- rep_len(rho, n_out)
  # Draw correlations, then map them to Fisher's z scale via atanh.
  r_draws <- SuppDists::rPearson(n = n, N = N, rho = rho)
  atanh(r_draws)
}
|
09a57885a59ac3d64a8045b3db789317ca83e3a5
|
7e1834f16d51844c3ec4fefa1badafe7382784e2
|
/app.R
|
a4385627e2410b018381c03877e7148e782b19a5
|
[] |
no_license
|
RforOperations2018/Project_2_afierro
|
36735fcd0a0d7ba702f577ee75f91feafde8a0ba
|
1fcad9172f6972d63cc1cb3b6ce7384458495ea1
|
refs/heads/master
| 2020-04-01T21:16:34.570361
| 2018-10-21T17:31:02
| 2018-10-21T17:31:02
| 153,647,327
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,426
|
r
|
app.R
|
library(shiny)
library(shinydashboard)
library(rgdal)
library(RSocrata)
library(leaflet)
library(leaflet.extras)
library(dplyr)
library(readxl)
library(stringr)
library(httr)
library(jsonlite)
library(ggplot2)
library(reshape2)
library(DT)
library(plotly)
# Load data -------------------------------------------------------------------
# NOTE(review): all of these are network reads performed once at app start-up.

# LAUSD school locations, re-projected to WGS84 lat/long for leaflet.
schools <- rgdal::readOGR("https://opendata.arcgis.com/datasets/70baf6da243e40298ba9246e9a67409b_0.geojson") %>%
  spTransform(CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"))

# Distinct school names (returnDistinctValues=true in the query) used to
# populate the "School:" dropdown on the map tab.
schools_list <- GET("https://maps.lacity.org/lahub/rest/services/LAUSD_Schools/MapServer/0/query?where=1%3D1&text=&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&outFields=FULLNAME&returnGeometry=false&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=true&resultOffset=&resultRecordCount=&f=pjson")
# Parse the JSON response body.
# (Renamed from `list`, which shadowed base::list in the original.)
schools_json <- content(schools_list)
schoolsList <- fromJSON(schools_json)$features

# LA County school district polygons.
districts <- rgdal::readOGR("https://opendata.arcgis.com/datasets/de0e7b572e2b4c9ba3c1f62227b88a96_8.geojson")

# Socrata open-data tables: Arts for All grants and community arts partners.
art_grants <- read.socrata("https://data.lacounty.gov/resource/ahzu-94ky.json")
community <- read.socrata("https://data.lacounty.gov/resource/gut7-6rmk.json")
# Define UI
header <- dashboardHeader(title = "Los Angeles County Arts in Schools Programs")
#Make Sidebar
sidebar <- dashboardSidebar(
sidebarMenu(
id = "tabs",
menuItem("LA County Schools & Districts", tabName = "Map", icon = icon("map-marker")),
menuItem("Arts for All Grants", tabName = "Chart1", icon = icon("bar-chart")),
menuItem("Community Arts Partners", tabName = "Chart2", icon = icon("bar-chart")),
menuItem("Table", tabName = "Table", icon = icon("table"))
)
)
#Make body
body<- dashboardBody(
tabItems(
#Map tab
tabItem("Map",
fluidRow(
box(
selectInput("school_select",
"School:",
choices = schoolsList,
multiple = TRUE,
selectize = TRUE),
actionButton("reset", "Reset Filters", icon = icon("refresh"))
)
),
fluidRow(
box(width = 12,
leafletOutput("schoolmap", height = 700)
)
)),
#Chart of arts for all grants & input
tabItem("Chart1",
fluidRow(
box(
selectizeInput("DistrictSelect",
"District:",
choices = sort(unique(art_grants$district)),
multiple = TRUE,
selected = c("East Whittier City Elementary", "Hacienda La Puente Unified"),
options = list(maxItems = 15)),
actionButton("reset", "Reset Filters", icon = icon("refresh"))
)
),
fluidRow(
box(
title = "Arts for All Advancement Grants",
width = 12,
(plotlyOutput("ArtGrantsPlot", height = 600))
)
)
),
#Chart of community partners & input
tabItem("Chart2",
fluidRow(
box(
selectizeInput("ComSchoolSelect",
"School:",
choices = sort(unique(community$school_name)),
multiple = TRUE,
selected = c("Agua Dulce Elementary", "Martha Baldwin Elementary"),
options = list(maxItems = 15)),
actionButton("reset", "Reset Filters", icon = icon("refresh"))
)
),
fluidRow(
box(
title = "Community Arts Partners serving LA County Public Schools",
width = 12,
(plotlyOutput("ComArtPlot", height = 600))
)
)
),
#Data table of the community partners
tabItem("Table",
fluidRow(
box(
selectInput("ComSchool2Select",
"School:",
choices = sort(unique(community$school_name)),
multiple = TRUE,
selectize = TRUE),
actionButton("reset", "Reset Filters", icon = icon("refresh"))
)
),
fluidRow(
box(width = 12,
DT::dataTableOutput("table")
)
)
)
)
)
ui <- dashboardPage(header, sidebar, body)
# Define server logic
server <- function(input, output, session = session) {
#This is the only input called with the API that works and you did it for me...so I don't think it counts
schoolsInput <- reactive({
filter <- ifelse(length(input$school_select) > 0,
gsub(" ", "+", paste0("FULLNAME+IN+%28%27", paste(input$school_select, collapse = "%27,%27"),"%27)")), # I added a gsub since most of the school names have spaces.
"1=1")
# paste0() doesn't add spaces paste() does. We don't want spaces in our URLs
url <- paste0("https://maps.lacity.org/lahub/rest/services/LAUSD_Schools/MapServer/0/query?where=", filter, "&text=&objectIds=&time=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&outFields=*&returnGeometry=true&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=4326&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=false&resultOffset=&resultRecordCount=&f=pjson")
# For Debugging
print(url)
# Make API Call
dat <- fromJSON(url)$features %>%
flatten()
# Remove "attributes." and "geometry." from rownames
colnames(dat) <- gsub("attributes.|geometry.", "", colnames(dat))
# Return the dataframe when the function is called
return(dat)
})
output$schoolmap <- renderLeaflet({
leaflet() %>%
addProviderTiles("Esri.WorldImagery") %>%
#No fill on polygons because changing the color of them all looked quite complicated
# I think subseting the districts to only show the ones that have schools on them would have been a nice addition here.
addPolygons(data = districts, fillOpacity = 0) %>%
addMarkers(data = schoolsInput(), lng = ~x, lat = ~y, clusterOptions = markerClusterOptions(), popup = schoolsInput()$TOOLTIP)
})
#This attempt at trying to figure out the API thing was wrong
# AwardAmountInput <- reactive({
# ifelse(length(input$DistrictSelect) > 0,
# (paste0("?where=school_district+IN+%28%27", paste(input$DistrictSelect, collapse = "%27,%27"), "%27",
# "")))
# url <- paste("https://data.lacounty.gov/resource/gut7-6rmk.json", filter)
# })
#This attempt was wrong as well. There was no clear answer on how to do a SELECT DISTINCT type query thing in SoQL so I tried this with no luck.
# AwardAmountInput <- reactive({
# read.socrata("https://data.lacounty.gov/resource/ahzu-94ky.json?$query=SELECTCOUNTDISTINCT(district)FROMhttps://data.lacounty.gov/resource/ahzu-94ky.json")
# })
#Selecting Districts for Chart 1
AwardAmountInput <- reactive({
DF <- art_grants
# ORG Filter
if (length(input$DistrictSelect) > 0 ) {
DF <- subset(DF, district %in% input$DistrictSelect)
}
return(DF)
})
#Chart 1
output$ArtGrantsPlot <- renderPlotly({
ggplot(data = AwardAmountInput(), aes(x = district, y = award_amount, fill = cycle)) +
geom_bar(stat = "identity") +
labs(x = "District", y = "Award Amount") +
scale_y_continuous(labels = function(x) format(x, big.mark = ",",
scientific = FALSE)) +
theme(axis.text.x = element_text(angle = 60, vjust = 1, hjust = 1))
})
#Selecting School for Chart 2
CommunityInput <- reactive({
DF <- community
# ORG Filter
if (length(input$ComSchoolSelect) > 0 ) {
DF <- subset(DF, school_name %in% input$ComSchoolSelect)
}
return(DF)
})
#Chart 2
output$ComArtPlot <- renderPlotly({
ggplot(data = CommunityInput(), aes(x = school_name, y = enrollment, fill = "value", na.rm = TRUE)) +
geom_bar(stat = "identity") +
theme(axis.text.x = element_text(angle = 60, vjust = 1, hjust = 1)) +
labs(x = "School", y = "Enrollment") +
theme(legend.position="none")
})
#Selecting school for data table
Community2Input <- reactive({
DF <- community
# ORG Filter
if (length(input$ComSchool2Select) > 0 ) {
DF <- subset(DF, school_name %in% input$ComSchool2Select)
}
return(DF)
})
#Data Table
output$table <- DT::renderDataTable({
(data = Community2Input())
})
#This should reset my inputs but it doesn't work for each action button. It only resets everything if you click the action reset button on the map tab...
# NOTE(review): updateSelectInput(session, "school_select") and
# updateSelectInput(session, "ComSchool2Select") are called with neither
# `selected` nor `choices`, so they send effectively empty updates -- likely
# why the reset appears not to work for those controls. TODO confirm the
# intended defaults and pass them via `selected =`.
observeEvent(input$reset, {
updateSelectInput(session, "school_select")
# Restore the two multi-selects to the app's initial default selections
updateSelectizeInput(session, "DistrictSelect", selected = c("East Whittier City Elementary", "Hacienda La Puente Unified"))
updateSelectizeInput(session, "ComSchoolSelect", selected = c("Agua Dulce Elementary", "Martha Baldwin Elementary"))
updateSelectInput(session, "ComSchool2Select")
# Confirm the reset to the user
showNotification("You have successfully reset the filters", type = "message")
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
f2594f9c4489a34e0d47c428460cff693eff8659
|
07469546c11552317e2efa00dcebb823846ba86e
|
/R/dictionaries-liwc_old.R
|
68a630a0e5412c83f6d673e51fc83f0957dc936f
|
[] |
no_license
|
LuigiC72/quanteda
|
b846a93e643ab30b419f5bb038592d19f743adf7
|
974b778322a9d56d5678d5a192e1fb69dcf01750
|
refs/heads/master
| 2021-07-08T04:10:25.684641
| 2017-10-06T08:42:27
| 2017-10-06T08:42:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,370
|
r
|
dictionaries-liwc_old.R
|
# Import a LIWC-formatted dictionary
#
# Make a flattened dictionary list object from a LIWC dictionary file.
# @param path full pathname of the LIWC-formatted dictionary file (usually a
#   file ending in .dic)
# @param encoding a valid input encoding for the file to be read, see
#   \link{iconvlist}; "" falls back to getOption("encoding"), and the stringi
#   reader otherwise auto-detects with a windows-1252 fallback
# @param toLower whether to lower-case the dictionary terms on import
# @return a dictionary class named list, where the name of each element is a
#   bottom-level category in the hierarchical dictionary; each element
#   is a list of the dictionary terms corresponding to that category
# @author Kenneth Benoit
# @export
read_dict_liwc_old <- function(path, encoding = "auto", toLower = FALSE) {
  if (encoding == "") encoding <- getOption("encoding")
  # d <- readLines(con <- file(path, encoding = encoding), warn = FALSE)
  d <- stringi::stri_read_lines(path, encoding = encoding, fallback_encoding = 'windows-1252')
  # close(con)
  # remove any lines with <of> (an unsupported LIWC conditional tag)
  oflines <- grep("<of>", d)
  if (length(oflines)) {
    catm("note: ", length(oflines), " term",
         if (length(oflines) > 1L) "s" else "",
         " ignored because contains unsupported <of> tag\n", sep = "")
    d <- d[-oflines]
  }
  # get the row number that signals the end of the category guide
  guideRowEnd <- max(grep("^%\\s*$", d))
  if (guideRowEnd < 1) {
    stop('Expected a guide (a category legend) delimited by percentage symbols at start of file, none found')
  }
  # extract the category guide: a two-column table of category number and name
  guide <- d[2:(guideRowEnd-1)]
  guide <- data.frame(do.call(rbind, as.list(tokens(guide))), stringsAsFactors = FALSE)
  colnames(guide) <- c('catNum', 'catName')
  guide$catNum <- as.integer(guide$catNum)
  # initialize the dictionary as list of NAs
  dictionary <- list()
  length(dictionary) <- nrow(guide)
  # assign category labels as list element names
  names(dictionary) <- guide[["catName"]]
  # make a list of terms with their category numbers
  catlist <- d[(guideRowEnd+1):length(d)]
  # remove odd parenthetical codes
  foundParens <- grep("^\\w+\\s+\\(.+\\)", catlist)
  if (length(foundParens)) {
    catm("note: ignoring parenthetical expressions in lines:\n")
    for (i in foundParens)
      # BUG FIX: report each offending line's own index (i), not the whole
      # foundParens vector on every iteration. The reported number is the
      # position within the post-guide (and post-<of>-removal) lines plus the
      # guide length, so it is approximate if <of> lines were removed above.
      catm("  [line ", i + guideRowEnd, ":] ", catlist[i], "\n", sep = "")
    catlist <- gsub("\\(.+\\)", "", catlist)
  }
  ## clean up irregular dictionary files
  # remove any repeated \t
  catlist <- gsub("\t\t+", "\t", catlist)
  # remove any spaces before a \t
  catlist <- gsub(" +\t", "\t", catlist)
  # replace any blanks that should be \t with \t (e.g. in Moral Foundations dictionary)
  catlist <- gsub("(\\d+) +(\\d+)", "\\1\t\\2", catlist)
  # remove any \t only lines or empty lines
  if (length(blanklines <- grep("^\\s*$", catlist)))
    catlist <- catlist[-blanklines]
  # remove spaces before and after
  catlist <- stri_trim_both(catlist)
  # split each line on whitespace into term + category numbers
  catlist <- strsplit(catlist, "\\s")
  catlist <- lapply(catlist, function(y) y[y != ""])
  # pad ragged rows with NA to the widest row, then bind into a data frame
  catlist <- as.data.frame(do.call(rbind, lapply(catlist, '[', 1:max(sapply(catlist, length)))), stringsAsFactors = FALSE)
  # coercion of the category-number columns; NAs from padding warn, so suppress
  suppressWarnings(catlist[, 2:ncol(catlist)] <- sapply(catlist[, 2:ncol(catlist)], as.integer))
  names(catlist)[1] <- "category"
  if (toLower) catlist$category <- char_tolower(catlist$category)
  # remove any blank rows
  blankRowIndex <- which(is.na(catlist$category))
  if (length(blankRowIndex))
    catlist <- catlist[-blankRowIndex, ]
  # remove any parentheses left embedded in terms
  catlist[["category"]] <- gsub("(\\s|\\w|\\b)[()](\\w|\\s)", "\\1\\2", catlist[["category"]])
  # merge terms that appear on more than one line: union of category numbers
  catlist <- split(catlist[, 2:ncol(catlist)], catlist$category)
  catlist <- lapply(catlist, function(y) sort(unique(unlist(y))))
  catnames <- names(catlist)
  catlist <- as.data.frame(do.call(rbind, lapply(catlist, '[', 1:max(sapply(catlist, length)))), stringsAsFactors = FALSE)
  rownames(catlist) <- catnames
  # terms: one entry per unique term, holding its vector of category numbers
  terms <- as.list(rep(NA, nrow(catlist)))
  names(terms) <- rownames(catlist)
  for (i in seq_len(nrow(catlist))) {
    terms[[i]] <- as.numeric(catlist[i, !is.na(catlist[i,])])
  }
  # invert the mapping: append each term to every category it belongs to
  for (ind in seq_along(terms)) {
    for(num in as.numeric(terms[[ind]])){
      tmpIndex <- which(guide$catNum == num)
      if (!length(tmpIndex))
        stop("Dictionary ", path, "\n  refers to undefined category ", num,
             " for term \"", names(terms[ind]), "\"", call. = FALSE)
      thisCat <- guide$catName[tmpIndex]
      thisTerm <- names(terms[ind])
      dictionary[[thisCat]] <- append(dictionary[[thisCat]], thisTerm)
    }
  }
  # check if any keys are empty, and remove them if so
  if (any(emptykeys <- sapply(dictionary, is.null))) {
    message("note: removing empty keys: ", paste(names(emptykeys[which(emptykeys)]), collapse = ", "))
    dictionary <- dictionary[-which(emptykeys)]
  }
  return(dictionary)
}
|
07efe30d307be2a7761b1fed1da090ac8bb2c7f6
|
84e94bd1a156115243b990c56bfa4a107c04a80d
|
/man/simulateOuterPerformance.Rd
|
36cbbe1ed8047c6f756318fc929f03f62ec27b55
|
[
"BSD-3-Clause"
] |
permissive
|
jakob-r/mlrOverfit
|
54868307513932e8c857a8133a4cfabe9d21a07d
|
5d8c1476ebe0156b2a075bd3384c94ec07972b4c
|
refs/heads/master
| 2021-01-01T18:51:05.241275
| 2020-07-01T16:42:16
| 2020-07-01T16:42:16
| 98,448,624
| 3
| 2
|
BSD-3-Clause
| 2019-07-23T09:11:52
| 2017-07-26T17:27:55
|
R
|
UTF-8
|
R
| false
| true
| 558
|
rd
|
simulateOuterPerformance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulateOuterPerformance.R
\name{simulateOuterPerformance}
\alias{simulateOuterPerformance}
\alias{SimulateOuterPerformanceResult}
\title{Simulates the outer test error}
\usage{
simulateOuterPerformance(outer.performance)
}
\arguments{
\item{outer.performance}{[\code{\link{OuterPerformanceResult}}]}
}
\value{
[\code{SimulateOuterPerformanceResult}]
}
\description{
Based on the best observed point in the inner resampling the performance on the outer test set will be chosen.
}
|
062b845ff7bd0facd04b749d70b638a42f3a75b7
|
3f90e417415a4b2808bbe4f4cff7601c2272a94e
|
/Class1RCode-updated.R
|
e27a41647624161329222d25adc2b4b3c4650a40
|
[] |
no_license
|
nuke705/MiscR
|
f977dcca8ce89e8e73c3b9bef8b5c0c8dd4f6a31
|
8c6005ed053925de0434c084ab5456ea26b6a22f
|
refs/heads/master
| 2020-04-03T07:57:22.358605
| 2018-10-28T21:49:52
| 2018-10-28T21:49:52
| 155,118,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,518
|
r
|
Class1RCode-updated.R
|
# This R Code demonstrates everything we did in class 1 with an example dataset.
# For more detailed and basic examples refer to the presentation.
# Before you work on the code, please set your working directory to the folder that
# contains the sample dataset. You can either do this by navigating to the folder in your RStudio
# Files tab, and then clicking on "More--> Set As Working Directory", or by using the setwd() command
# setwd("~/FIN 567/R")
# Setwd sets your working directory. I have used the filepath I have used to locate my folder.
# Please edit the filepath according to where your file is stored
df <- read.csv("sample data.csv")  # reading in the sample csv file containing stock data

# Some basic functions before manipulating the data
nrow(df)     # number of rows in df, the data frame containing stock data
ncol(df)     # number of columns
head(df, 5)  # display first 5 rows of df
tail(df)     # display the LAST 6 rows (6 is the default value of the argument)
str(df)      # function to compactly display the structure of df
summary(df)  # summary statistics of each column

ftse <- df[, 3]  # subsetting the 3rd column of df containing the FTSE index
dax <- df[, 4]   # doing the same for DAX, which is contained in the 4th column
gbp <- df[, 5]   # storing the GBP conversion rate separately
eur <- df[, 6]   # doing the same for EUR

# Convert the index levels to dollars by MULTIPLYING by the corresponding
# exchange rate. (An earlier comment said "dividing", contradicting the code;
# multiplying is correct when the rate is quoted as USD per unit of local
# currency -- verify the quoting convention of columns 5 and 6 in the data.)
newftse <- ftse * gbp
newdax <- dax * eur

# Note that in practice you probably won't save each column in a separate vector.
# Usually we would just combine these operations and store only the final value:
# newftse <- df[, 3] * df[, 5]
# newdax <- df[, 4] * df[, 6]

# New data frame combining the stock indices of the 3 markets in USD
newprices <- data.frame(sp500 = df[, 2], newFTSE = newftse, newDAX = newdax)

# Log returns of each index. CAUTION: the original `1:nrow(newprices)-1` parses
# as `(1:n) - 1` = 0:(n-1); the 0 index is silently dropped so it happened to
# select rows 1:(n-1). The parenthesized form below is explicit and equivalent.
logret <- log(newprices[1:(nrow(newprices) - 1), ]) - log(newprices[2:nrow(newprices), ])
# Combine logret with the "Date" column from the original data frame
logret <- data.frame(Date = df[1:nrow(logret), 1], logret)

ftse_rvrse <- ftse[length(ftse):1]  # reversing the order of the ftse vector
ftse_rvrse[length(ftse_rvrse)]      # last element of the reversed vector ...
ftse[1]                             # ... equals the first element of ftse

# Similarly, reverse the log-returns data frame to show the oldest dates first.
# Note the extra comma: we are reordering the ROWS of a data frame.
logret_rvrse <- logret[nrow(logret):1, ]
logret_rvrse[1, ]        # first row of the reversed data frame logret_rvrse
logret[nrow(logret), ]   # last row of logret -- note they are the same

## Example for matrix multiplication
mat1 <- matrix(1:4, 2, 2)
mat1
mat2 <- matrix(2:5, 2, 2)
mat2
mat_res <- mat1 * mat2     # element-wise multiplication
mat_res
mat_res1 <- mat1 %*% mat2  # matrix multiplication
mat_res1
mat3 <- matrix(2:7, 2, 3)
mat3
mat4 <- matrix(rep(10, 6), 2, 3)  # 2x3 matrix with all elements equal to 10
mat4
mat_res3 <- mat3 * mat4    # element-wise multiplication
mat_res3
# Matrix multiplication DELIBERATELY errors here: a 2x3 matrix cannot be
# multiplied by a 2x3 matrix (inner dimensions 3 and 2 do not match)
mat_res4 <- mat3 %*% mat4
|
36c6cf4c77339c2bb75f91b5f8aebaa8d8da2413
|
49274f1e603427e17419a480910df649680a02fc
|
/man/mgmL.Rd
|
5a1550b3755f43b18dd954562f70601cfc34333d
|
[] |
no_license
|
t-arae/prtclmisc
|
da7b21d9122f5b1a75565f9da1faad4b9e219555
|
08203daa321f63524562d33cca81db976febb1b6
|
refs/heads/master
| 2021-07-12T06:57:15.555934
| 2020-08-25T07:28:21
| 2020-08-25T07:28:21
| 195,718,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 196
|
rd
|
mgmL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/g_conc.R
\name{mgmL}
\alias{mgmL}
\title{mgmL}
\usage{
mgmL(x)
}
\arguments{
\item{x}{numeric}
}
\description{
mgmL
}
|
7656199ee7f77049d279b73d971cd3c2de07ba11
|
779b83a935336f68204aafb4954dfe81bea3b595
|
/demand/man/arima_day.Rd
|
536fd75b4e17aec233b8b1a4bed30e7475b75ec2
|
[
"MIT"
] |
permissive
|
aidowu/demand_acep
|
ba42f068efbc613c80b463f9edd98f1fb10fd4ec
|
57f838494c60140a17d7a930df4276341e2a0ce8
|
refs/heads/master
| 2020-05-05T05:55:25.442876
| 2019-08-28T03:13:19
| 2019-08-28T03:13:19
| 179,769,388
| 1
| 0
|
MIT
| 2019-06-25T04:09:34
| 2019-04-05T23:54:21
|
Jupyter Notebook
|
UTF-8
|
R
| false
| true
| 603
|
rd
|
arima_day.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecast.R
\name{arima_day}
\alias{arima_day}
\title{arima_day}
\usage{
arima_day(data, meter_name, end_point)
}
\arguments{
\item{data}{Input a dataframe which has peak power values of a virtual meter and the total 4 meters.}
\item{meter_name}{Name of a meter for plotting.}
\item{end_point}{The last value of the ordinal day; the model is fit on data up to this point and forecasts the remaining months.}
}
\description{
An ARIMA forecast model in terms of day will be plotted.
}
\examples{
arima_day(data, "PQ", 404)
}
\keyword{ARIMA}
|
69ccb3f75419c56ad217de77fa544f3352bb06b0
|
1f9579466118b5303c2681fcc3e87970e94a9eb3
|
/global.R
|
cb36923c646743555835b119a13be37439dde9e8
|
[] |
no_license
|
Arevaju/shiny-app
|
bf2d28761d1cbc7332083575ec30604ea334be3c
|
865b1958d586968e0c77b4d367f9f6e14e609112
|
refs/heads/master
| 2021-01-22T02:33:43.435410
| 2015-07-28T12:24:36
| 2015-07-28T12:24:36
| 21,005,475
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39
|
r
|
global.R
|
# global.R: sourced once at app startup; objects created here are visible to
# both the UI and server components of the Shiny app.
library(shiny)
# Load the pre-saved R objects the app uses from data.RData into the workspace.
load('data.RData')
|
0e2e082da4e54473ed15c5aa01e25bec5e05fbfa
|
06b9d2ece554bda6b4402785bc9c7b7a627a6c2f
|
/man/checkHandlingMortalityConsistency.Rd
|
42ed61bdc9b7686d0e0ac1a41c5009aaa8b95243
|
[
"MIT"
] |
permissive
|
wStockhausen/rTCSAM2015
|
4f2dd392b32d9a3ea9cce4703e25abde6440e349
|
7cfbe7fd5573486c6d5721264c9d4d6696830a31
|
refs/heads/master
| 2020-12-26T04:56:03.783011
| 2016-09-30T01:59:06
| 2016-09-30T01:59:06
| 26,103,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 870
|
rd
|
checkHandlingMortalityConsistency.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkHandlingMortalityConsistency.R
\name{checkHandlingMortalityConsistency}
\alias{checkHandlingMortalityConsistency}
\title{Check handling mortality/fishing equations consistency.}
\usage{
checkHandlingMortalityConsistency(tcsam = NULL, rsim = NULL,
showPlot = TRUE, pdf = NULL, width = 8, height = 6)
}
\arguments{
\item{tcsam}{- single TCSAM2015 model results object, or named list of such}
\item{rsim}{- single rsimTCSAM results object, or named list of such}
\item{showPlot}{- flag to show/print plots immediately}
\item{pdf}{- name of pdf file to record plot output to}
\item{width}{- pdf page width (in inches)}
\item{height}{- pdf page height (in inches)}
}
\value{
list of ggplot2 objects
}
\description{
Function to check handling mortality/fishing equations consistency.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.