blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb62e6e182ae4b5621687c139ae6b6e3ac56a5ec
|
a4e0c1fe8b8e43d6b7fbd1bbb3d16cac96518551
|
/man/horns_curve.Rd
|
a03c7084492f4943f8b1e2e09418059e3377f6c7
|
[] |
no_license
|
Najah-lshanableh/anomalyDetection
|
4a2ea3a10164faf5c149928cdde0aea3951a5db5
|
5095249c9443c97871867a4c9c3ad4287f5a7638
|
refs/heads/master
| 2020-09-23T23:37:52.573504
| 2019-10-13T14:06:12
| 2019-10-13T14:06:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,195
|
rd
|
horns_curve.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/horns_curve.R
\name{horns_curve}
\alias{horns_curve}
\title{Horn's Parallel Analysis}
\usage{
horns_curve(data, n, p, nsim = 1000L)
}
\arguments{
\item{data}{A matrix or data frame.}
\item{n}{Integer specifying the number of rows.}
\item{p}{Integer specifying the number of columns.}
\item{nsim}{Integer specifying the number of Monte Carlo simulations to run.
Default is \code{1000}.}
}
\value{
A vector of length \code{p} containing the averaged eigenvalues. The
values can then be plotted or compared to the true eigenvalues from a dataset
for a dimensionality reduction assessment.
}
\description{
Computes the average eigenvalues produced by a Monte Carlo simulation that
randomly generates a large number of \code{n}x\code{p} matrices of standard
normal deviates.
}
\examples{
# Perform Horn's Parallel analysis with matrix n x p dimensions
x <- matrix(rnorm(200 * 10), ncol = 10)
horns_curve(x)
horns_curve(n = 200, p = 10)
plot(horns_curve(x)) # scree plot
}
\references{
J. L. Horn, "A rationale and test for the number of factors in factor
analysis," Psychometrika, vol. 30, no. 2, pp. 179-185, 1965.
}
|
54b7d32ab98926c83686ef45385bad547c86a3e6
|
84ae96b78d2b515080b89ae16e604a0e5e140ffb
|
/R/Brandi/Energy Tree/Multivariate_sim_0.5/plot.R
|
b389f96d42599f768dd0ed5733270ea862e0c194
|
[] |
no_license
|
tulliapadellini/energytree
|
437162cbc999794da3d37df3fa4dd68951f99f64
|
ca13a1a92fbcf49a00c8f78a0f0b4507296fe822
|
refs/heads/master
| 2021-12-27T08:46:24.495563
| 2020-05-12T08:46:03
| 2020-05-12T13:47:36
| 182,380,327
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,934
|
r
|
plot.R
|
# Diagnostic plots for the multivariate functional-data simulation.
# NOTE(review): `data`, `datanew`, and `myt` must already exist in the
# workspace (fda-style objects indexable with `[`) -- this script performs
# no library() calls or data loading of its own; confirm the driver script.

# Raw curves: 3x3 grid, variables V1-V3 by group; groups are observations
# 1-50 (red), 51-100 (lightblue), 101-150 (green), each panel overlaying
# one representative curve in bold.
pdf("plot_multi.pdf", width = 12)
par(mfrow=c(3,3))
plot(data[[14]]$V1[1:50], col="red", main="V1")
lines(data[[14]]$V1[20], lwd=2)
plot(data[[14]]$V2[1:50], col="red", main="V2")
lines(data[[14]]$V2[20], lwd=2)
plot(data[[14]]$V3[1:50], col="red", main="V3")
lines(data[[14]]$V3[20], lwd=2)
plot(data[[14]]$V1[51:100], col="lightblue", main="")
lines(data[[14]]$V1[70], lwd=2)
plot(data[[14]]$V2[51:100], col="lightblue", main="")
lines(data[[14]]$V2[70], lwd=2)
plot(data[[14]]$V3[51:100], col="lightblue", main="")
lines(data[[14]]$V3[70], lwd=2)
plot(data[[14]]$V1[101:150], col="green", main="")
lines(data[[14]]$V1[120], lwd=2)
plot(data[[14]]$V2[101:150], col="green", main="")
lines(data[[14]]$V2[120], lwd=2)
plot(data[[14]]$V3[101:150], col="green", main="")
# NOTE(review): highlights curve 130 while the sibling green panels use
# 120 -- confirm whether this asymmetry is intentional.
lines(data[[14]]$V3[130], lwd=2)
dev.off()

# Same 3x3 layout for the reconstructed/estimated curves (fdata.est).
pdf("plot_multi_rec.pdf", width = 12)
par(mfrow=c(3,3))
plot(datanew$V1$fdata.est[1:50], col="red", main="V1")
lines(datanew$V1$fdata.est[20], lwd=2)
plot(datanew$V2$fdata.est[1:50], col="red", main="V2")
lines(datanew$V2$fdata.est[20], lwd=2)
plot(datanew$V3$fdata.est[1:50], col="red", main="V3")
lines(datanew$V3$fdata.est[20], lwd=2)
plot(datanew$V1$fdata.est[51:100], col="lightblue", main="")
lines(datanew$V1$fdata.est[70], lwd=2)
plot(datanew$V2$fdata.est[51:100], col="lightblue", main="")
lines(datanew$V2$fdata.est[70], lwd=2)
plot(datanew$V3$fdata.est[51:100], col="lightblue", main="")
lines(datanew$V3$fdata.est[70], lwd=2)
plot(datanew$V1$fdata.est[101:150], col="green", main="")
lines(datanew$V1$fdata.est[120], lwd=2)
plot(datanew$V2$fdata.est[101:150], col="green", main="")
lines(datanew$V2$fdata.est[120], lwd=2)
plot(datanew$V3$fdata.est[101:150], col="green", main="")
# NOTE(review): 130 vs 120, as in the raw plot above -- confirm.
lines(datanew$V3$fdata.est[130], lwd=2)
dev.off()

# Fitted tree object `myt`, with per-group fill colours matching the
# curve plots above.
pdf("multi_output.pdf", width=16)
plot(myt, tp_args = list(fill = c("red", "lightblue", "green")),
     ip_args = list(fill = c("cadetblue")))
dev.off()
|
fddb1bac1a67602d2cf744bfd1809a784c52cdbf
|
06b6a2c2008c7f5e8400f8eb402d490ebb4bfd54
|
/man/kir_frequencies.Rd
|
e1ade8c780bc9ada28a3cdb4d2c3954303947c3f
|
[
"MIT"
] |
permissive
|
BMEngineeR/midasHLA
|
55265be7baae2259c976bb5ea7f112737c0b7d1a
|
9ce02c8192852c16a296f63ecbd3e4791e5dbd83
|
refs/heads/master
| 2023-03-05T15:59:52.362313
| 2021-02-17T00:53:19
| 2021-02-17T00:53:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 743
|
rd
|
kir_frequencies.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{kir_frequencies}
\alias{kir_frequencies}
\title{KIR genes frequencies scraped from allelefrequencies.net}
\format{
A data frame with 3744 rows and 3 variables:
\describe{
\item{var}{allele number, character}
\item{population}{reference population name, character}
\item{frequency}{KIR genes carrier frequency in reference population, float}
}
}
\source{
\url{https://www.allelefrequencies.net}
}
\usage{
kir_frequencies
}
\description{
Accessed on 28.08.20
}
\details{
A dataset containing KIR genes frequencies across 16 genes.
For details visit the search results page in the allelefrequencies.net
database website.
}
\keyword{datasets}
|
72e0535a148d3c60254f4a7c69329a62631f7703
|
88c20b9bac5999fa1cc73d63ffb124958df2dc0d
|
/R/dbBind_MariaDBResult.R
|
284a0dc1ea6ab6744c365274f7da8185100318d6
|
[
"MIT"
] |
permissive
|
r-dbi/RMariaDB
|
2fb2656ba0e36391856943aa07d6391102802ebd
|
b5a54aecde60621ec600471ae894ed89e6eaeefe
|
refs/heads/main
| 2023-07-06T08:40:50.295370
| 2023-04-02T02:07:20
| 2023-04-02T02:07:20
| 96,334,875
| 103
| 38
|
NOASSERTION
| 2023-06-14T04:16:58
| 2017-07-05T15:35:16
|
R
|
UTF-8
|
R
| false
| false
| 389
|
r
|
dbBind_MariaDBResult.R
|
#' @name query
#' @usage NULL
dbBind_MariaDBResult <- function(res, params, ...) {
  # MariaDB prepared statements use anonymous (positional) placeholders,
  # so named parameters cannot be matched to positions -- reject up front.
  if (!is.null(names(params))) {
    stopc("Cannot use named parameters for anonymous placeholders")
  }
  # Coerce values to SQL-compatible form (sql_data is a package-internal
  # helper; warn = TRUE surfaces conversion warnings) before binding via
  # the external result pointer.
  params <- sql_data(as.list(params), res@conn, warn = TRUE)
  result_bind(res@ptr, params)
  # Return the result invisibly, per the DBI dbBind contract.
  invisible(res)
}
#' @rdname query
#' @export
setMethod("dbBind", "MariaDBResult", dbBind_MariaDBResult)
|
cfef4e634cc96304e8dd14e475ddcc9c5e462beb
|
00f252ce6e708818399572bcc7c2058e3e006965
|
/pedR2jl.R
|
dec4a5d525894d061633ce8f5b45386983f3a8ad
|
[] |
no_license
|
thewart/ppc4ped
|
81efdf998697d5dddbdf97a2869731e61c400bd8
|
e8d7a3bd1e1419062366d699568b26fa67f49707
|
refs/heads/master
| 2021-01-18T22:49:51.426145
| 2016-11-22T00:08:17
| 2016-11-22T00:08:17
| 32,103,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,445
|
r
|
pedR2jl.R
|
# Prepare pedigree and SNP genotype data for export to Julia.
# NOTE(review): relies on helpers from the sourced scripts
# (ped.matchnames, ped.replace, ped.trace, SNPdelink, cullSNP) and on
# data.table's fread; `pedigree` is assumed to be created by the first
# sourced script -- confirm.
source('~/Dropbox/monkeybris/rscript/pedigree.preproc.batch.R')
source('~/code/snparray/SNPpreproc.R')
dat <- fread("~/analysis/SNPannotation/SNPmaster_qc.csv")
SNPdat <- fread("~/analysis/SNPannotation/SNPdat.csv")
#source("~/code/snparray/processraw.R")

# Harmonise IDs between the SNP data and the pedigree, then reduce the
# pedigree to the lineages of the genotyped individuals.
reped <- ped.matchnames(dat$ID,pedigree$id)
pedigree <- ped.replace(pedigree,reped$oldID,reped$ID)
redped <- ped.trace(dat$ID,pedigree)

# Numeric pedigree matrix: ids 1..n; sire/dam as row indices into redped,
# with 0 marking an unknown parent (nomatch = 0).
n <- nrow(redped)
numped <- data.frame(id=1:n,sire=vector("numeric",n),dam=vector("numeric",n))
numped$sire <- match(redped$sire,redped$id,nomatch = 0)
numped$dam <- match(redped$dam,redped$id,nomatch = 0)
numped <- as.matrix(numped)

#fv <- cullSNP(as.data.frame(dat[,-1,with=F]),SNPdat,mthresh=0.25,lthresh = 0.1)

# Genotype matrix aligned to the reduced pedigree; -1 encodes missing
# (both ungenotyped individuals and NA calls).
X <- array(-1,dim = c(n,ncol(dat)-1))
X[match(dat$ID,redped$id),] <- as.matrix(dat[,-1,with=F])
X[is.na(X)] <- -1

# Optional SNP pruning, selected by a pre-existing `type` variable
# ("eigenanalysis", "LD", or "none").
if (!exists("type")) type = "none"
if (type == "eigenanalysis")
{
  # Drop SNPs flagged by SNPdelink; also load the imputed genotypes.
  shitlist <- SNPdelink(SNPdat)
  X <- X[,-shitlist]
  Xdat <- SNPdat[-shitlist]
  Ximp <- as.matrix(fread("~/analysis/SNPannotation/SNPmaster_qc_imputed.csv")[,-1,with=F])[,-shitlist]
} else if (type=="LD")
{
  # Drop isolated SNPs: a SNP whose only within-300kb same-chromosome
  # neighbour is itself (self-distance 0) has colSums(dist < 3e5) == 1.
  # NOTE(review): this branch uses `fv`, produced only by the
  # commented-out cullSNP call above -- it errors unless fv already
  # exists in the workspace; confirm intended usage.
  m <- nrow(fv$SNP)
  fv$SNP$chrom <- as.character(fv$SNP$chrom)
  chrom_match <- matrix(fv$SNP$chrom,m,m) == matrix(fv$SNP$chrom,m,m,byrow = T)
  dist <- abs(matrix(fv$SNP$loc,m,m) - matrix(fv$SNP$loc,m,m,byrow=T))
  dist[!chrom_match] <- Inf
  shitlist <- which(colSums(dist<3e5)==1)
  X <- X[,-shitlist]
  dist <- dist[-shitlist,-shitlist]
}
|
c41f22bb91f78e653dc3c65b5b1fab6a53d5c02b
|
6e7bc88d19eafe05c5923973ecbb64e775926c4c
|
/tests/testthat/test-utils.R
|
7ebb18029de721779b292abc47511bcc5d27bac4
|
[
"MIT"
] |
permissive
|
romainfrancois/ftExtra
|
a78371ca1649d85c534deaaff91a08b440fbb3de
|
7c47f662584de19ce7e725a91a5d72e844e34c16
|
refs/heads/master
| 2022-11-04T21:13:49.276775
| 2020-04-05T23:36:35
| 2020-04-05T23:36:35
| 279,871,415
| 1
| 0
|
NOASSERTION
| 2020-07-15T13:09:29
| 2020-07-15T13:09:28
| null |
UTF-8
|
R
| false
| false
| 288
|
r
|
test-utils.R
|
# Unit tests (testthat) for internal utilities.

# group_of() should return the grouping column names of a grouped_df and
# NULL for an ungrouped data frame.
test_that("group_of grouped_df", {
  expect_identical('Species', group_of(dplyr::group_by(iris, .data$Species)))
  expect_identical(NULL, group_of(iris))
})

# The null-coalescing operator keeps the left operand unless it is NULL.
test_that("%||% returns right if left is NULL", {
  expect_identical('a' %||% 'b', 'a')
  expect_identical(NULL %||% 'b', 'b')
})
|
973c20b8b57e27142968195aef5823a79694983d
|
2682acb673c9e0307088b7d67a8adbb031d20717
|
/man/confuMat.Rd
|
e234f11d2d0df2fd756efec097f67fe8504fa104
|
[] |
no_license
|
january3/myfuncs
|
fd9dec50902d39c1dba01a935a9778a22eaf0c03
|
e5cb8c2fa16a209f599cdb96cb4209735d5170b2
|
refs/heads/master
| 2020-06-04T07:21:56.996685
| 2020-03-27T09:28:32
| 2020-03-27T09:28:32
| 191,922,252
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 607
|
rd
|
confuMat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confuMat.R
\name{confuMat}
\alias{confuMat}
\title{Calculate a confusion matrix}
\usage{
confuMat(reality, predictions, as.text = FALSE)
}
\arguments{
\item{reality}{vector with actual assignments of groups to items}
\item{predictions}{vector with predicted assignments of groups to items}
\item{as.text}{if TRUE, return a single character value; if FALSE, return a matrix}
}
\value{
\code{confuMat} invisibly returns the confusion matrix
}
\description{
Calculate a confusion matrix
}
\details{
Calculate a confusion matrix
}
|
3200195d407f08b5dbcecc2b5ca1a45f1f6eee64
|
25c5d243ffac4b4f4f9efcd6a28cb41d51b23c90
|
/src/test/scripts/installDependencies.R
|
af89f2b936ef60565bb67022e8a81fbbf56a44bd
|
[
"Apache-2.0"
] |
permissive
|
apache/systemds
|
5351e8dd9aa842b693e8c148cf3be151697f07a7
|
73555e932a516063c860f5d05c84e6523cc7619b
|
refs/heads/main
| 2023-08-31T03:46:03.010474
| 2023-08-30T18:25:59
| 2023-08-30T18:34:41
| 45,896,813
| 194
| 167
|
Apache-2.0
| 2023-09-13T08:43:37
| 2015-11-10T08:00:06
|
Java
|
UTF-8
|
R
| false
| false
| 2,304
|
r
|
installDependencies.R
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Install the CRAN dependencies for the R test scripts.  Optionally takes
# one CLI argument: a library path to install into.
print("Starting install RScripts")
args <- commandArgs(TRUE)
# NOTE(review): CRAN is served over https nowadays; consider
# "https://cran.r-project.org" here.
options(repos=structure(c(CRAN="http://cran.r-project.org")))

# Install `pkg` only if it is not already present.  With no CLI argument
# the default library is used; with exactly one argument, that argument
# is the target library path.
# NOTE(review): with two or more arguments neither branch runs, so
# nothing is installed -- the two-argument invocation documented at the
# bottom of this script becomes a list-only mode; confirm this is
# intended.
custom_install <- function(pkg) {
  if(!is.element(pkg, installed.packages()[,1])) {
    # Installing to temp folder, if you want to permanently install change lib path
    if (length(args)==0) {
      install.packages(pkg);
    } else if (length(args) == 1){
      install.packages(pkg, lib= args[1]);
    }
  }
}
# Print the user-installed packages (those without a base/recommended
# Priority), showing package name and version, without row numbers.
list_user_pkgs <- function() {
  print("List of user installed packages:")
  pkg_table <- installed.packages()[, c(1, 3:4)]
  pkg_df <- as.data.frame(pkg_table)
  rownames(pkg_df) <- NULL
  user_only <- pkg_df[is.na(pkg_df$Priority), 1:2, drop = FALSE]
  print(user_only, row.names = FALSE)
}
# CRAN packages required by the R-based tests.
custom_install("Matrix");
custom_install("psych");
custom_install("moments");
custom_install("boot");
custom_install("matrixStats");
custom_install("outliers");
custom_install("caret");
custom_install("sigmoid");
custom_install("DescTools");
custom_install("mice");
custom_install("mclust");
custom_install("dbscan");
custom_install("imputeTS");
custom_install("FNN");
custom_install("class");
custom_install("unbalanced");
custom_install("naivebayes");
custom_install("BiocManager");
custom_install("mltools");
# rhdf5 is distributed via Bioconductor, not CRAN, hence BiocManager.
BiocManager::install("rhdf5");
print("Installation Done")
# supply any two parameters to list all user installed packages
# e.g. "sudo Rscript installDependencies.R a b"
if (length(args) == 2) {
  list_user_pkgs()
}
|
e42865d39e8a20690724b29b32fa6e810e7aff22
|
0ffe190b9d5e2baa9b0be6bece03b93c7f39537a
|
/custom_components/man/''ReactGridLayout.Rd
|
df3bbb4c8b15e3c8d1ea0f23abe8ad41f3c3ade3
|
[] |
no_license
|
lwang94/MDD
|
1f8587c74c9d8b860013326499eb75a73658206a
|
16f6e1cd8d0682c06ef2f806f3bdf4af6d488a33
|
refs/heads/master
| 2023-01-14T06:08:48.138203
| 2021-01-14T06:14:33
| 2021-01-14T06:14:33
| 252,357,300
| 0
| 0
| null | 2023-01-06T04:47:00
| 2020-04-02T04:43:58
|
JavaScript
|
UTF-8
|
R
| false
| false
| 748
|
rd
|
''ReactGridLayout.Rd
|
% Auto-generated: do not edit by hand
\name{''ReactGridLayout}
\alias{''ReactGridLayout}
\title{ReactGridLayout component}
\description{
A reactive, fluid grid layout with draggable, resizable components.
}
\usage{
''ReactGridLayout(autoSize=NULL, cols=NULL, className=NULL, style=NULL, draggableHandle=NULL,
draggableCancel=NULL, containerPadding=NULL, rowHeight=NULL, maxRows=NULL,
layout=NULL, margin=NULL, isDraggable=NULL, isResizable=NULL, isDroppable=NULL,
useCSSTransforms=NULL, transformScale=NULL, verticalCompact=NULL,
compactType=NULL, preventCollision=NULL, droppingItem=NULL, onLayoutChange=NULL,
onDragStart=NULL, onDrag=NULL, onDragStop=NULL, onResizeStart=NULL,
onResize=NULL, onResizeStop=NULL, onDrop=NULL)
}
\arguments{
}
|
6643b9a417fdbaa990a330314e39b7f11fd5acb3
|
7a14870def6bc4b5d9118405fb94ccc8c591ce33
|
/Exercise.R
|
f6e15e0031ccb88a4e5ab3ff2b146133ecc9b05d
|
[] |
no_license
|
RaninDarkhawaja/Exercise
|
e5104a71d941fe5d058b194f648a554ecb5ab257
|
5f39497a8541be547f8d144421e1badf9657c863
|
refs/heads/master
| 2023-03-22T16:09:27.801541
| 2021-03-04T13:08:35
| 2021-03-04T13:08:35
| 344,432,772
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,230
|
r
|
Exercise.R
|
# Exercise script: download the portal surveys data, subset the BRIGHT
# questionnaire data, and practise basic dplyr verbs and ggplot2 plots.
download.file(url = "https://ndownloader.figshare.com/files/2292169", destfile = "data_raw/portal_data_joined.csv")
library(tidyverse)

surveys <- read_csv("data_raw/portal_data_joined.csv")
view(surveys)

library(lubridate)

# BRIGHT questionnaire data: keep the demographic/clinical variables of
# interest.  (header = TRUE spelled out instead of the reassignable T.)
BRIGHT <- read.table("BRIGHT/Bright_imputed.txt", sep="\t", header=TRUE)
select(BRIGHT, A2_gend, age, educ_level5, work2,race, Depress, diabetes)
BRIGHT_Subset <- data.frame(select(BRIGHT, A2_gend, age, educ_level5, work2,race, Depress, diabetes))
summary(BRIGHT_Subset$age)

# Data frame with people less than 50 years old
Age_less_50 <- data.frame(filter(BRIGHT_Subset, age < 50))

# Data frame with people who have high depression
high_depression <- BRIGHT_Subset %>% filter(Depress > 20)
high_depression <- data.frame(high_depression)

library(ggplot2)
ggplot(data = surveys, aes(x = weight, y = hindfoot_length)) +geom_point()
ggplot(data = BRIGHT_Subset, aes(x = age, y = Depress)) +geom_point()

# Create a new column in the surveys with weight in Kg (result displayed
# only, not stored)
surveys %>% mutate(weight_kg = weight / 1000)

# To create a second new column based on the first new column with the same
# call of mutate.
# BUG FIX: the original assigned the whole mutated data frame into a single
# column (surveys$surveys_weight), producing a nested data-frame column;
# the intent stated in the comment is to add the two columns, so assign the
# result back to `surveys` instead.
surveys <- surveys %>% mutate(weight_kg = weight/ 1000, weight_lb = weight_kg*2.2)

view(BRIGHT_Subset)
|
8eb0a847bd0872e4364991fe66de112154d86ada
|
d121f587f7e0678030d33a4c5428e594c5978dad
|
/man/qtltoolsPrepareSE.Rd
|
f4def76fb5f05c3df796976d3b745f95766a764f
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/eQTLUtils
|
fcf0907721b3a8f19fe68e611cecb4f16d7a0c9d
|
26242562a4e244334fd9691d03bc1ef4d2d6c1d9
|
refs/heads/master
| 2023-03-05T19:10:45.247191
| 2023-03-03T13:33:08
| 2023-03-03T13:33:08
| 149,779,618
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 713
|
rd
|
qtltoolsPrepareSE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qtltools_normalisation_wrappers.R
\name{qtltoolsPrepareSE}
\alias{qtltoolsPrepareSE}
\title{Normalise phenotype SummarisedExperiment object for QTL mapping with QTLtools}
\usage{
qtltoolsPrepareSE(
se,
quant_method,
filter_rna_qc = TRUE,
filter_genotype_qc = TRUE,
keep_XY = FALSE
)
}
\arguments{
\item{se}{SummarizedExperiment object used for QTL mapping}
\item{quant_method}{Quantification method used to generate the data. Valid options are: featureCounts, array, leafcutter}
\item{filter_rna_qc}{Logical flag; default is \code{TRUE}.}
\item{filter_genotype_qc}{Logical flag; default is \code{FALSE}.}
\item{keep_XY}{Logical flag; default is \code{FALSE}.}
}
\value{
Normalised SummarizedExperiment object
}
\description{
Normalise phenotype SummarisedExperiment object for QTL mapping with QTLtools
}
|
4c2cfa9e453933c4ae2f54e46f2c1c17c1f83f72
|
61f039fe068688cd846b6c62bb5bb3bb124fd83e
|
/scripts/99_make_predictor_key.R
|
3c62e388c1eb97480413acda3c14bfeee182915d
|
[
"MIT"
] |
permissive
|
CNHLakes/beyond_land_use
|
f45a8af34f7fe0b529879a3176369a108c287d84
|
8af34fe492cbb75a6d147cf3f62e997bce94811e
|
refs/heads/master
| 2023-04-11T21:18:20.126586
| 2022-08-10T03:10:48
| 2022-08-10T03:10:48
| 255,707,943
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,574
|
r
|
99_make_predictor_key.R
|
# Build a lookup table mapping raw predictor variable names to pretty
# labels, a granularity class, and a thematic category, then save it to
# data/predictor_key.csv.  The four vectors below are parallel: element i
# of each describes the same predictor (30 entries each) -- keep them in
# lockstep when editing.
suppressMessages(library(dplyr))
pred_key <- data.frame(var = c(
  "nitrogen_atmospheric_deposition","maxdepth","iwsla_ratio",
  "hu12_baseflow_mean","hu12_ppt_mean",
  "stream_cultivated_crops","wetlands","ag", "forest", "rowvcropvpct",
  "buffer_cultivated_crops", "buffer_natural",
  "nonag", "nfixer", "clay_pct","lake_area_ha",
  "wetland_potential", "corn","nitrogen_livestock_manure",
  "soil_org_carbon", "soybeans","n_input",
  "nitrogen_fertilizer_use", "phosphorus_fertilizer_use","p_input",
  "hu4vnitrogenvatmosphericvdeposition",
  "phosphorus_livestock_manure", "wheat","pasture", "row_crop_pct"),
  # Human-readable labels for plots/tables.
  pretty = c(
    "N deposition", "Max depth", "Watershed-lake ratio",
    "Baseflow", "Precipitation",
    "Stream-buffer Ag", "Wetlands", "Ag", "Forest", "Row-crop",
    "Buffer Ag", "Buffer natural",
    "Non-ag", "N-fixer", "Clay", "Lake area",
    "Wetland potential", "Corn", "Manure N",
    "Soil organic carbon", "Soybeans", "N input",
    "Fertilizer N", "Fertilizer P", "P input",
    "N Deposition",
    "Manure P", "Wheat", "Pasture", "Row-crop Ag"),
  # Granular vs Aggregate vs Other classification of each predictor.
  granularity = c("Other", "Other", "Other",
                  "Other", "Other",
                  "Granular", "Other", "Aggregate", "Other", "Aggregate",
                  "Granular", "Granular",
                  "Aggregate", "Granular", "Other", "Other",
                  "Other", "Granular", "Granular",
                  "Other", "Granular", "Granular",
                  "Granular", "Granular", "Granular",
                  "Granular",
                  "Granular", "Granular", "Granular", "Aggregate"),
  # Thematic grouping used downstream.
  category = c(
    "Nutrient inputs", "Lake", "Lake",
    "Nutrient transport", "Nutrient transport",
    "Buffer configuration","Land-use cover","Land-use cover", "Land-use cover", "Land-use cover",
    "Buffer configuration", "Buffer configuration",
    "Land-use cover", "Land-use cover", "Nutrient transport","Lake",
    "Nutrient transport", "Land-use cover","Nutrient inputs",
    "Nutrient transport", "Land-use cover", "Nutrient inputs",
    "Nutrient inputs", "Nutrient inputs", "Nutrient inputs",
    "Nutrient inputs",
    "Nutrient inputs", "Land-use cover","Land-use cover", "Land-use cover"),
  stringsAsFactors = FALSE)

# Alias of var with underscores replaced by "v" (matching names like
# "rowvcropvpct" above).  NOTE(review): presumably for identifiers where
# underscores are not allowed downstream -- confirm against consumers.
pred_key$varv <- gsub("_", "v", pred_key$var)

# Drop predictors excluded from the analysis.
pred_key <- dplyr::filter(pred_key,
                          !(pretty %in% c("Lake area", "N-fixer",
                                          "Non-ag")))
# View(pred_key)
write.csv(pred_key, "data/predictor_key.csv", row.names = FALSE)
|
de1e2dc23133b390b26dc9650100a7dd1ec0cffb
|
4cf827146404badf6c4ffcc3237187ece23b6084
|
/demo/chapter11.R
|
e7e0a030c969ca6bd77ab847fa102ac24aef7542
|
[] |
no_license
|
Git294/ChemometricsWithR
|
1f883099604bfd375a54350ebdc067420e6037fe
|
9d15f50972ffa7fe254567c097eab7cbced586c6
|
refs/heads/master
| 2022-12-06T09:17:15.609101
| 2020-09-02T14:28:44
| 2020-09-02T14:28:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,347
|
r
|
chapter11.R
|
## Demo featuring all R code from Chapter 11 in the book "Chemometrics
## with R" by Ron Wehrens. The only additions to the book chapter code
## are loading the required libraries and data, and occasionally
## closing a graphics window.

# Section: missing values in the arabidopsis metabolomics data ----
graphics.off()
opar <- par(no.readonly = TRUE)

# NOTE(review): `arabidopsis` (and `vintages`, `arabidopsis.Y` below) come
# from the package's datasets, attached by the demo framework -- confirm.
data(arabidopsis)

# Keep only metabolites with fewer than 40% missing values.
naLimitPerc <- 40
naLimit <- floor(nrow(arabidopsis) * naLimitPerc / 100)
nNA <- apply(arabidopsis, 2, function(x) sum(is.na(x)))
naIdx <- which(nNA < naLimit)
X.ara <- arabidopsis[, naIdx]
plot(sort(nNA), type = "h", col = 4, xlab = "Variables (sorted)",
     ylab = "Number of missing values per metabolite",
     main = "Arabidopsis data")
abline(h = naLimit, col = "darkgray")
text(1, naLimit, naLimit, pos = 3)
abline(v = length(naIdx) + .5, col = "darkgray", lty = 2)
text(length(naIdx), max(nNA), length(naIdx), pos = 4)

# PCA via SVD of the pairwise-complete covariance of the log-scaled data,
# wrapped as a "PCA" object so scoreplot() can be reused.
X.ara.l <- log(X.ara)
X.ara.l.sc <- scale(X.ara.l)
X.ara.cov <- cov(t(X.ara.l.sc), use = "pairwise.complete.obs")
sum(is.na(X.ara.cov))
X.ara.svd <- svd(X.ara.cov, nu = 2, nv = 2)
ara.PCA.svd <-
  structure(
    list(scores = X.ara.svd$u %*% diag(sqrt(X.ara.svd$d[1:2])),
         var = X.ara.svd$d,
         totalvar = sum(X.ara.svd$d),
         centered.data = TRUE),
    class = "PCA")

# Imputation 1: replace NAs with the per-column minimum.
X.ara.imput1 <-
  apply(X.ara, 2,
        function(x) {
          x[is.na(x)] <- min(x, na.rm = TRUE)
          x
        })

# Imputation 2: PCA-based imputation (missMDA).
require(missMDA, quiet = TRUE)
X.ara.pcaimput <- imputePCA(X.ara.l, ncp = 2)$completeObs

# Compare the three score plots side by side.
par(mfrow = c(1, 3))
scoreplot(ara.PCA.svd, main = "PCA using cov")
ara.PCA.minimputation <- PCA(scale(log(X.ara.imput1)))
scoreplot(ara.PCA.minimputation,
          main = "PCA using minimum imputation")
ara.PCA.pcaimputation <- PCA(scale(X.ara.pcaimput))
scoreplot(ara.PCA.pcaimputation,
          main = "PCA using PCA imputation")

# (Repeated as in the book chapter.)
X.ara.imput1 <-
  apply(X.ara, 2,
        function(x) {
          x[is.na(x)] <- min(x, na.rm = TRUE)
          x
        })

# Overlay histograms of measured vs imputed values.
par(opar)
p1 <- hist(X.ara.l, plot = FALSE)
p2 <- hist(X.ara.pcaimput[is.na(X.ara.l)],
           plot = FALSE)
p3 <- hist(log(X.ara.imput1)[is.na(X.ara.l)], plot = FALSE,
           breaks = p2$breaks)
plot(p1, xlab = "Feature intensity (log scale)",
     main = "Arabidopsis data")
col3 <- rgb(.5, .5, 0, .2)
col2 <- rgb(0, .5, .5, .2)
plot(p3, col = col3, add = TRUE)
plot(p2, col = col2, add = TRUE)
legend("topright",
       legend = c("Measured values", "Imputed values (min)",
                  "Imputed values (PCA)"),
       fill = c("white", col3, col2), bty = "n")

# Multiple imputation (MIPCA) on the first 20 variables.
rownames(X.ara.l) <- rep("", nrow(X.ara.l))
colnames(X.ara.l) <- paste("V", 1:ncol(X.ara.l), sep = "")
countNAs <- apply(X.ara.l[, 1:20], 2, function(x) sum(is.na(x)))
countNAs[countNAs > 0]
ara.PCA.Minput <- MIPCA(X.ara.l[, 1:20], ncp = 2, scale = TRUE)
par(mfrow = c(1, 2))
plot(ara.PCA.Minput, choice = "dim", new.plot = FALSE)
plot(ara.PCA.Minput, choice = "var", new.plot = FALSE)
detach("package:missMDA", unload = TRUE)
# Section: robust statistics / robust PCA on the wine data ----
require(MASS, quiet = TRUE)
data(wines, package = "kohonen")
wine.classes <- as.integer(vintages)
wines.sc <- scale(wines)
wines.odd <- seq(1, nrow(wines), by = 2)
wines.even <- seq(2, nrow(wines), by = 2)
twowines <- wines[vintages != "Barolo",]
twovintages <- factor(vintages[vintages != "Barolo"])

# Classical vs MCD-based PCA of the Grignolino samples; the MCD
# covariance estimate downweights outlying samples.
par(mfrow = c(1, 2))
X <- wines[vintages == "Grignolino", ]
X.sc <- scale(X)
X.clPCA <- princomp(X.sc)
X.robPCA <- princomp(X.sc, covmat = cov.mcd(X.sc))
biplot(X.clPCA, main = "Classical PCA")
biplot(X.robPCA, main = "MCD-based PCA")

# ROBPCA (Hubert) with fixed and automatically chosen number of PCs.
require(rrcov, quiet = TRUE)
X.HubPCA5 <- PcaHubert(X.sc, k = 5)
summary(X.HubPCA5)
X.HubPCA <- PcaHubert(X.sc)
summary(X.HubPCA)
par(opar)
plot(X.HubPCA)
# Section: statistical process control charts ----
# Univariate charts (qcc): batch B1 is the in-control reference, batches
# B2/B3 are new data monitored against it.
require(qcc, quiet = TRUE)
metabNr <- 2
B1 <- which(arabidopsis.Y$Batch == "B1")
B23 <- which(arabidopsis.Y$Batch %in% c("B2", "B3"))
ara.qcc <- qcc(data = arabidopsis[B1, metabNr], type = "xbar.one",
               newdata = arabidopsis[B23, metabNr],
               plot = FALSE)
ara.cusum <- cusum(data = arabidopsis[B1, metabNr],
                   newdata = arabidopsis[B23, metabNr],
                   plot = FALSE)
par(mfrow = c(1, 2))
qcc.options(bg.margin = "transparent",
            beyond.limits = list(pch = 15, col = "red"),
            violating.runs = list(pch = 17, col = "orange"))
# Cosmetic relabelling so both charts show the same captions.
ara.qcc$data.name <- ara.cusum$data.name <- "Batch 1"
ara.qcc$newdata.name <- ara.cusum$newdata.name <- "Batches 2, 3"
names(ara.qcc$statistics) <- names(ara.cusum$statistics) <-
  names(ara.qcc$newstats) <- names(ara.cusum$newstats) <- " "
plot(ara.qcc, add.stats = FALSE, restore.par = FALSE)
plot(ara.cusum, add.stats = FALSE)

# Multivariate charts (MSQC), restricted to complete variables.
require(MSQC, quiet = TRUE)
par(mfrow = c(1, 2))
idx <- which(apply(arabidopsis, 2, function(x) sum(is.na(x)) == 0))
# capture.output() suppresses mult.chart's console printout.
chi2chart <-
  capture.output(mult.chart(arabidopsis[c(B1, B23), idx], type = "chi",
                            Xmv = colMeans(arabidopsis[B1, idx]),
                            S = cov(arabidopsis[B1, idx])))
abline(v = length(B1) + .5, lty = 2)
MCUSUMchart <- mult.chart(arabidopsis[c(B1, B23), idx], type = "mcusum2",
                          Xmv = colMeans(arabidopsis[B1, idx]),
                          S = cov(arabidopsis[B1, idx]))
abline(v = length(B1) + .5, lty = 2)
# Section: orthogonal signal correction / OPLS on the gasoline NIR data ----
require(pls, quiet = TRUE)
data(gasoline, package = "pls")
wavelengths <- seq(900, 1700, by = 2)
gas.odd <- seq(1, nrow(gasoline$NIR), by = 2)
gas.even <- seq(2, nrow(gasoline$NIR), by = 2)
# Mean-center the spectra on the training (odd) samples only.
gasolineSC <- gasoline
gasolineSC$NIR <-
  scale(gasolineSC$NIR, scale = FALSE,
        center = colMeans(gasolineSC$NIR[gas.odd, ]))
gasolineSC.pls <- plsr(octane ~ ., data = gasolineSC, ncomp = 5,
                       subset = gas.odd, validation = "LOO")

# First OSC component: project the loadings orthogonally to the PLS
# weights, then deflate X by the resulting score/loading pair.
ww <- gasolineSC.pls$loading.weights[, 1]
pp <- gasolineSC.pls$loadings[, 1]
w.ortho <- pp - c(crossprod(ww, pp)/crossprod(ww)) * ww
t.ortho <- gasolineSC$NIR[gas.odd, ] %*% w.ortho
p.ortho <- crossprod(gasolineSC$NIR[gas.odd, ], t.ortho) /
  c(crossprod(t.ortho))
Xcorr <- gasolineSC$NIR[gas.odd, ] - tcrossprod(t.ortho, p.ortho)
gasolineSC.osc1 <- data.frame(octane = gasolineSC$octane[gas.odd],
                              NIR = Xcorr)
gasolineSC.opls1 <- plsr(octane ~ ., data = gasolineSC.osc1,
                         ncomp = 5, validation = "LOO")

# Second OSC component, deflating the once-corrected matrix again.
pp2 <- gasolineSC.opls1$loadings[, 1]
w.ortho2 <- pp2 - c(crossprod(ww, pp2)/crossprod(ww)) * ww
t.ortho2 <- Xcorr %*% w.ortho2
p.ortho2 <- crossprod(Xcorr, t.ortho2) / c(crossprod(t.ortho2))
Xcorr2 <- Xcorr - tcrossprod(t.ortho2, p.ortho2)
gasolineSC.osc2 <- data.frame(octane = gasolineSC$octane[gas.odd],
                              NIR = Xcorr2)
gasolineSC.opls2 <- plsr(octane ~ ., data = gasolineSC.osc2,
                         ncomp = 5, validation = "LOO")

# Cross-validated RMSEP curves: plain PLS vs one and two OSC components.
par(opar)
plot(gasolineSC.pls, "validation", estimate = "CV",
     ylim = c(0.2, 1.5),
     main = "GasolineSC training data (validation)")
lines(0:5, c(RMSEP(gasolineSC.opls1, estimate = "CV"))$val,
      col = 2, lty = 2)
lines(0:5, c(RMSEP(gasolineSC.opls2, estimate = "CV"))$val,
      col = 4, lty = 4)
legend("topright", lty = c(1, 2, 4), col = c(1, 2, 4),
       legend = c("PLS", "OPLS: 1 OSC component",
                  "OPLS: 2 OSC components"))

# Apply the same OSC filters to the test (even) samples before predicting.
Xtst <- gasolineSC$NIR[gas.even, ]
t.tst <- Xtst %*% w.ortho
p.tst <- crossprod(Xtst, t.tst) / c(crossprod(t.tst))
Xtst.osc1 <- Xtst - tcrossprod(t.tst, p.tst)
gasolineSC.opls1.pred <- predict(gasolineSC.opls1,
                                 newdata = Xtst.osc1,
                                 ncomp = 2)
t.tst2 <- Xtst.osc1 %*% w.ortho2
p.tst2 <- crossprod(Xtst.osc1, t.tst2) / c(crossprod(t.tst2))
Xtst.osc2 <- Xtst.osc1 - tcrossprod(t.tst2, p.tst2)
gasolineSC.opls2.pred <- predict(gasolineSC.opls2,
                                 newdata = Xtst.osc2,
                                 ncomp = 1)
RMSEP(gasolineSC.pls, newdata = gasolineSC[gas.even, ],
      ncomp = 3, intercept = FALSE)
rms(gasolineSC$octane[gas.even], gasolineSC.opls1.pred)
rms(gasolineSC$octane[gas.even], gasolineSC.opls2.pred)
# Section: biomarker identification on the spiked-apples data ----
# Compare variable importance from a t-test, PLS coefficients, and the
# lasso on a two-class (control vs spiked) data set.
require(glmnet, quiet = TRUE)
set.seed(7)
data(spikedApples, package = "BioMark")
X <- sqrt(spikedApples$dataMatrix)
Y <- rep(0:1, each = 10)
apple.df <- data.frame(Y = Y, X = X)
apple.pls <- plsr(Y ~ X, data = apple.df, ncomp = 5,
                  validation = "LOO")
nPLS <- selectNcomp(apple.pls, method = "onesigma")
apple.lasso <- cv.glmnet(X, Y, family = "binomial")
tvals <- apply(X, 2,
               function(x) t.test(x[1:10], x[11:20])$statistic)
allcoefs <-
  data.frame(studentt = tvals,
             pls = c(coef(apple.pls, ncomp = nPLS)),
             lasso = coef(apple.lasso,
                          lambda = apple.lasso$lambda.1se)[-1])

# Pairs plot highlighting the known (spiked-in) biomarkers.
par(opar)
N <- ncol(spikedApples$dataMatrix)
biom <- spikedApples$biom
nobiom <- (1:N)[-spikedApples$biom]
pairs(allcoefs,
      panel = function(x, y, ...) {
        abline(h = 0, v = 0, col = "lightgray", lty = 2)
        points(x[nobiom], y[nobiom], col = "lightgray")
        points(x[biom], y[biom], cex = 2)
      })

# Same comparisons via BioMark, plus stability-based selection.
require(BioMark, quiet = TRUE)
biomarkerSets <-
  get.biom(X, factor(Y), type = "coef", ncomp = nPLS,
           fmethod = c("studentt", "pls", "lasso"))
set.seed(17)
apple.stab <- get.biom(X = X, Y = factor(Y), ncomp = 1:3,
                       type = "stab", fmethod = c("pls", "pcr"))
selected.variables <- selection(apple.stab)
unlist(sapply(selected.variables, function(x) sapply(x, length)))
data(shootout)
wl <- seq(600, 1898, by = 2)
indices <- which(wl >= 1100 & wl <= 1700)
nir.training1 <-
data.frame(X = I(shootout$calibrate.1[, indices]),
y = shootout$calibrate.Y[, 3])
mod1 <- plsr(y ~ X, data = nir.training1,
ncomp = 5, validation = "LOO")
RMSEP(mod1, estimate = "CV")
nir.training2 <-
data.frame(X = I(shootout$calibrate.2[, indices]),
y = shootout$calibrate.Y[, 3])
mod2 <- plsr(y ~ X, data = nir.training2,
ncomp = 5, validation = "LOO")
par(opar)
layout(matrix(c(1, 1, 1, 2, 2), nrow = 1))
plot(seq(1100, 1700, by = 2), coef(mod1, ncomp = 3), type = "l",
xlab = "wavelength (nm)", ylab = "model coefficients",
col = 1)
lines(seq(1100, 1700, by = 2), coef(mod2, ncomp = 3), col = "red",
lty = 2, lwd = 2)
legend("top", legend = c("set 1", "set 2"), bty = "n",
col = 1:2, lty = 1:2, lwd = 1:2)
plot(seq(1100, 1700, by = 2), coef(mod1, ncomp = 3), type = "l",
xlab = "wavelength (nm)", ylab = "model coefficients",
col = 1, xlim = c(1600, 1700))
lines(seq(1100, 1700, by = 2), coef(mod2, ncomp = 3), col = "red",
lty = 2, lwd = 2)
RMSEP(mod1, estimate = "test", ncomp = 3, intercept = FALSE,
newdata = data.frame(y = shootout$test.Y[, 3],
X = I(shootout$test.1[, indices])))
RMSEP(mod1, estimate = "test", ncomp = 3, intercept = FALSE,
newdata = data.frame(y = shootout$test.Y[, 3],
X = I(shootout$test.2[, indices])))
recalib.indices <- 1:5 * 10
F1 <- ginv(shootout$calibrate.2[recalib.indices, indices]) %*%
shootout$calibrate.1[recalib.indices, indices]
RMSEP(mod1, estimate = "test", ncomp = 3, intercept = FALSE,
newdata = data.frame(y = shootout$test.Y[, 3],
X = I(shootout$test.2[, indices] %*% F1)))
require(lattice, quiet = TRUE)
levelplot(F1, contour = TRUE)
require(fpc, quiet = TRUE)
# Average Bhattacharyya distance over all unordered pairs of batches.
# Xmat: numeric matrix (samples x variables); batches: factor of batch labels.
# Uses bhattacharyya.dist() from the fpc package; assumes >= 2 batch levels.
aveBhatta <- function(Xmat, batches) {
  lvls <- levels(batches)
  n.batch <- length(lvls)
  # Per-batch centroids and covariance matrices.
  centroids <- lapply(lvls, function(b) colMeans(Xmat[batches == b, ]))
  covmats <- lapply(lvls, function(b) cov(Xmat[batches == b, ]))
  # Sum the distance over every unordered pair of batches.
  pair.idx <- combn(n.batch, 2)
  total <- sum(apply(pair.idx, 2, function(p) {
    bhattacharyya.dist(centroids[[p[2]]], centroids[[p[1]]],
                       covmats[[p[2]]], covmats[[p[1]]])
  }))
  # Divide by the number of pairs, N * (N - 1) / 2.
  total / (0.5 * n.batch * (n.batch - 1))
}
par(opar)
Xsample <- arabidopsis
Xsample <- Xsample[, apply(Xsample, 2, function(x) !all(is.na(x)))]
for (i in 1:ncol(Xsample))
Xsample[is.na(Xsample[, i]), i] <- mean(Xsample[, i], na.rm = TRUE)
Xsample <- Xsample[, apply(Xsample, 2, sd, na.rm = TRUE) > 0]
X.PCA <- PCA(scale(Xsample))
pchs <- as.integer(arabidopsis.Y$Batch)
cols <- rep("gray", nrow(Xsample))
cols[arabidopsis.Y$Batch == "B1"] <- "red"
cols[arabidopsis.Y$Batch == "B2"] <- "blue"
cols[arabidopsis.Y$Batch == "B8"] <- "purple"
lcols <- rep("gray", 10)
lcols[c(1, 2, 8)] <- c("red", "blue", "purple")
scoreplot(X.PCA, col = cols, pch = pchs)
legend("bottomright", legend = levels(arabidopsis.Y$Batch),
ncol = 2, pch = 1:nlevels(arabidopsis.Y$Batch), col = lcols)
bhatta.orig <- aveBhatta(scores(X.PCA, 2), arabidopsis.Y$Batch)
title(main = paste("Average between-batch distance:",
round(bhatta.orig, 3)))
ara.df <- cbind(data.frame(X = arabidopsis[, 2]),
arabidopsis.Y)
ref.idx <- ara.df$SCode == "ref"
plot(X ~ SeqNr, data = ara.df, ylim = c(20, 24),
col = as.numeric(ref.idx) + 1,
pch = c(1, 19)[as.numeric(ref.idx) + 1],
xlab = "Injection number", ylab = "Intensity (log-scaled)",
main = paste("Metabolite 2 before correction"))
batch.lims <- aggregate(ara.df$SeqNr,
by = list(ara.df$Batch),
FUN = range)$x
abline(v = batch.lims[-1, 1] - 0.5, lty = 2)
BLines <- lm(X ~ SeqNr * Batch, data = ara.df,
subset = SCode == "ref")
ara.df$QCpredictions <- predict(BLines, newdata = ara.df)
for (ii in levels(ara.df$Batch))
lines(ara.df$SeqNr[ara.df$Batch == ii],
predict(BLines, newdata = ara.df[ara.df$Batch == ii, ]),
col = 2)
ara.df$corrected <-
ara.df$X - ara.df$QCpredictions + mean(ara.df$X)
plot(corrected ~ SeqNr, data = ara.df, ylim = c(20, 24),
col = as.numeric(ref.idx) + 1,
pch = c(1, 19)[as.numeric(ref.idx) + 1],
xlab = "Injection number", ylab = "Intensity (log-scaled)",
main = paste("Metabolite 2 after correction"))
abline(v = batch.lims[-1, 1] - 0.5, lty = 2)
BLines2 <- lm(corrected ~ SeqNr * Batch, data = ara.df,
subset = SCode == "ref")
for (ii in levels(ara.df$Batch))
lines(ara.df$SeqNr[ara.df$Batch == ii],
predict(BLines2, newdata = ara.df[ara.df$Batch == ii, ]),
col = 2)
# Injection-order drift correction for a single metabolite.
# Fits a per-batch linear trend in injection number (seqnr) and returns the
# intensities with that fitted trend removed.  NOTE: unlike the QC-based
# BLines fit earlier in the script, this fits on ALL samples passed in.
correctfun <- function(x, seqnr, batch) {
  drift.df <- data.frame(x = x, seqnr = seqnr, batch = batch)
  # seqnr * batch gives a separate intercept and slope per batch.
  drift.fit <- lm(x ~ seqnr * batch, data = drift.df)
  drift.df$trend <- predict(drift.fit, newdata = drift.df)
  drift.df$x - drift.df$trend
}
nna.threshold <- 50
x.idx <- apply(arabidopsis, 2, function(x) sum(is.na(x)) < 50)
correctedX <- apply(arabidopsis[, x.idx], 2, correctfun,
seqnr = arabidopsis.Y$SeqNr,
batch = arabidopsis.Y$Batch)
XsampleC <- correctedX
for (i in 1:ncol(XsampleC))
XsampleC[is.na(XsampleC[, i]), i] <- mean(XsampleC[, i], na.rm = TRUE)
Xc.PCA <- PCA(scale(XsampleC))
bhatta.corr <- aveBhatta(scores(Xc.PCA, 2), arabidopsis.Y$Batch)
X.PCA2 <- PCA(scale(Xsample[, x.idx]))
bhatta.origsub <- aveBhatta(scores(X.PCA2, 2), arabidopsis.Y$Batch)
scoreplot(Xc.PCA, col = cols, pch = pchs)
legend("topleft", legend = levels(arabidopsis.Y$Batch),
ncol = 2, pch = 1:nlevels(arabidopsis.Y$Batch), col = lcols)
title(main = paste("Average between-batch distance:",
round(bhatta.corr, 3)))
X <- arabidopsis[, x.idx]
na.idx <- is.na(X)
X[na.idx] <- min(X, na.rm = TRUE)
require(RUVSeq, quiet = TRUE)
idx <- which(arabidopsis.Y$SCode == "ref")
replicates.ind <-
matrix(-1, nrow(X) - length(idx) + 1, length(idx))
replicates.ind[1, ] <- idx
replicates.ind[-1, 1] <- (1:nrow(X))[-idx]
X.RUVcorrected <-
t(RUVs(x = t(X), cIdx = 1:ncol(X), k = 3,
scIdx = replicates.ind, isLog = TRUE)$normalizedCounts)
X.RUVcorrected[na.idx] <- NA
XR <- X.RUVcorrected
for (i in 1:ncol(XR))
XR[is.na(XR[, i]), i] <- mean(XR[, i], na.rm = TRUE)
XRUV.PCA <- PCA(scale(XR))
bhatta.RUV <- aveBhatta(scores(XRUV.PCA, 2), arabidopsis.Y$Batch)
data(bdata)
par(mar = c(0, 3, 0, 0), mfrow = c(1, 2))
persp(bdata$d1, phi = 20, theta = 34, expand = .5,
xlab = "Time", ylab = "Wavelength")
par(mar = c(7.1, 4.5, 4.1, 4.5))
matplot(cbind(c(bdata$sp1), c(bdata$sp2)), type = "l", lty = 1,
xlab = "Wavelength number", ylab = "Intensity")
# Evolving factor analysis: singular values of growing (forward) and
# shrinking (backward) submatrices of x, plus "pure component" profiles
# taken as the pointwise minimum of each forward curve and the matching
# backward curve (components paired in reversed order).
efa <- function(x, ncomp) {
  n.obs <- nrow(x)
  fwd <- matrix(0, n.obs, ncomp)
  bwd <- matrix(0, n.obs, ncomp)
  # Forward pass needs at least 3 rows, so rows 1-2 remain 0.
  for (i in 3:n.obs) {
    fwd[i, ] <- svd(scale(x[1:i, ], scale = FALSE))$d[1:ncomp]
  }
  # Backward pass: windows i..n; the last two rows remain 0.
  for (i in (n.obs - 2):1) {
    bwd[i, ] <- svd(scale(x[i:n.obs, ], scale = FALSE))$d[1:ncomp]
  }
  # Stack forward and (component-reversed) backward curves, take minima.
  paired <- array(c(fwd, bwd[, ncomp:1]), c(n.obs, ncomp, 2))
  list(forward = fwd, backward = bwd,
       pure.comp = apply(paired, c(1, 2), min))
}
par(opar)
par(mfrow = c(1, 2))
X <- bdata$d1
X.efa <- efa(X, 3)
matplot(X.efa$forward, type = "l", ylab = "Singular values", lty=1)
matplot(X.efa$backward, type = "l", ylab = "Singular values", lty=1)
par(opar)
matplot(X.efa$pure.comp, type = "l", ylab = "", lty = 1)
legend("topright", legend = paste("Comp", 1:3),
lty = 1, col = 1:3, bty="n")
require(alsace, quiet = TRUE)
X.opa <- opa(X, 3)
matplot(X.opa, type = "l", col = 1:3, lty = 1,
ylab = "response", xlab = "wavelength number")
legend("topleft", legend = paste("Comp", 1:3), col = 1:3,
lty = 1, bty = "n")
# Scale every row of S to unit Euclidean length.
normS <- function(S) {
  row.norms <- sqrt(rowSums(S^2))
  sweep(S, 1, row.norms, FUN = "/")
}
# Spectra update for MCR-ALS: least-squares estimate via the Moore-Penrose
# pseudoinverse of the concentration profiles C (ginv, MASS), with rows
# normalized to unit length by normS().
getS <- function(data, C) {
  S.raw <- ginv(C) %*% data
  normS(S.raw)
}
# Concentration update for MCR-ALS: least-squares estimate via the
# pseudoinverse of the spectra matrix S (ginv, MASS).
getC <- function(data, S) {
  S.pinv <- ginv(S)
  data %*% S.pinv
}
# Multivariate curve resolution by alternating least squares.
# x: data matrix; init: initial spectra (what = "row") or initial
# concentration profiles (what = "col").  Alternates getC()/getS() until the
# drop in RMS residual falls below `convergence` or `maxit` iterations run.
# Returns concentration profiles C, spectra S, the final residual matrix,
# and the RMS trace (initial value plus one entry per completed iteration).
mcr <- function(x, init, what = c("row", "col"),
                convergence = 1e-8, maxit = 50) {
  what <- match.arg(what)
  # Seed one factor from `init`; derive the other by least squares.
  if (what == "col") {
    C.cur <- init
    S.cur <- getS(x, C.cur)
  } else {
    S.cur <- normS(init)
    C.cur <- getC(x, S.cur)
  }
  rms.trace <- rep(NA, maxit + 1)
  rms.trace[1] <- sqrt(mean((x - C.cur %*% S.cur)^2))
  for (iter in 1:maxit) {
    C.cur <- getC(x, S.cur)
    S.cur <- getS(x, C.cur)
    res.mat <- x - C.cur %*% S.cur
    rms.trace[iter + 1] <- sqrt(mean(res.mat^2))
    # Stop once the improvement in RMS becomes negligible.
    if ((rms.trace[iter] - rms.trace[iter + 1]) < convergence) break
  }
  list(C = C.cur, S = S.cur, resids = res.mat,
       rms = rms.trace[!is.na(rms.trace)])
}
par(mfrow = c(1, 2))
X.mcr.efa <- mcr(X, X.efa$pure.comp, what = "col")
matplot(X.mcr.efa$C, type = "n",
main = "Concentration profiles", ylab = "Concentration")
abline(h = 0, col = "lightgray")
matlines(X.efa$pure.comp * 5, type = "l", lty = 2, col = 1:3)
matlines(X.mcr.efa$C, type = "l", col = 1:3, lty = 1, lwd = 2)
legend("topright", legend = paste("Comp", 1:3),
col = 1:3, lty = 1, bty = "n")
matplot(t(X.mcr.efa$S), col = 1:3, type = "l", lty = 1,
main = "Pure spectra", ylab = "Intensity")
abline(h = 0, col = "lightgray")
legend("topright", legend = paste("Comp", 1:3), lty = 1,
bty = "n", col = 1:3)
X.mcr.opa <- mcr(X, t(X.opa), what = "row")
X.mcr.efa$rms
X.mcr.opa$rms
X.als.efa <- als(CList = list(X.efa$pure.comp),
PsiList = list(X), S = matrix(0, 73, 3),
nonnegS = TRUE, nonnegC = TRUE,
normS = 0.5, uniC = TRUE)
X.als.opa <- als(CList = list(matrix(0, 40, 3)),
PsiList = list(X), S = X.opa,
nonnegS = TRUE, nonnegC = TRUE,
optS1st = FALSE, normS = 0.5, uniC = TRUE)
par(mfrow = c(2, 2))
matplot(X.als.efa$S, type = "n", main = "Pure spectra (EFA)",
ylab = "Intensity")
abline(h = 0, col = "lightgray")
matlines(X.als.efa$S, type = "l", lty = 1, col = 1:3)
legend("topright", legend = paste("Comp", 1:3), lty = 1,
bty = "n", col = 1:3)
matplot(X.als.efa$CList[[1]], type = "n",
main = "Concentration profiles (EFA)",
ylab = "Concentration")
abline(h = 0, col = "lightgray")
matlines(X.als.efa$CList[[1]], lty = 1, col = 1:3)
matplot(X.als.opa$S, type = "n", main = "Pure spectra (OPA)",
ylab = "Intensity")
abline(h = 0, col = "lightgray")
matlines(X.als.opa$S, type = "l", lty = 1, col = 1:3)
legend("topright", legend = paste("Comp", 1:3), lty = 1,
bty = "n", col = 1:3)
matplot(X.als.opa$CList[[1]], type = "n",
main = "Concentration profiles (OPA)",
ylab = "Concentration")
abline(h = 0, col = "lightgray")
matlines(X.als.opa$CList[[1]], lty = 1, col = 1:3)
C0 <- matrix(0, 40, 3)
X2.als.opa <-
als(CList = list(C0, C0),
PsiList = list(bdata$d1, bdata$d2),
S = X.opa, normS = 0.5,
nonnegS = TRUE, nonnegC = TRUE,
optS1st = FALSE, uniC = TRUE)
cor(X.als.opa$S, cbind(c(bdata$sp1), c(bdata$sp2)))
cor(X2.als.opa$S, cbind(c(bdata$sp1), c(bdata$sp2)))
|
2cc30a97c4ee1049bf2953986f4aa751572b4f0e
|
996ec6041006c378c197f4b40ac8be4d06fdcd82
|
/web_scrapping_zomato_mumbai.R
|
5bd013807ddc58a2410ae45b576e94d56f3463ad
|
[] |
no_license
|
akash1182/R
|
fc227d58e4378d244d72da810e26d35e98b641bf
|
282ab5183e32f2f80d095d73b271a3a48a68e285
|
refs/heads/main
| 2023-01-09T12:04:56.653768
| 2020-11-06T13:20:39
| 2020-11-06T13:20:39
| 310,593,013
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,065
|
r
|
web_scrapping_zomato_mumbai.R
|
# Scrape restaurant listings from Zomato's online-ordering page and assemble
# them into a data frame, then clean up the text columns.
# NOTE(review): the file/data-frame names say "mumbai" but the URL targets a
# Pune subzone (delivery_subzone=1165) -- confirm the intended city.
library(robotstxt)
# Check robots.txt before scraping (TRUE means the path may be crawled).
p = paths_allowed("https://www.zomato.com/pune/order-food-online?delivery_subzone=1165")
#reading the website
library(rvest)
web = read_html("https://www.zomato.com/pune/order-food-online?delivery_subzone=1165")
View(web)
#importing dplyr library to work with dataframes
library(dplyr)
#reading names of restaurants (the CSS selectors below were taken from the
#page markup at scrape time and will break if Zomato changes its layout)
name = web%>%html_nodes(".ln20")%>%html_text()
View(name)
#reading ratings
ratings = web%>%html_nodes(".rating-value")%>%html_text()
View(ratings)
#reading cuisines
cuisines = web%>%html_nodes(".clear+ .ln24")%>%html_text()
View(cuisines)
#reading average cost for 1 person
cost_for_one = web%>%html_nodes(".ln24+ .grey-text")%>%html_text()
View(cost_for_one)
#reading minimum cost and delivery time
min_cost_delivery_time = web%>%html_nodes(".ln24~ .grey-text+ .ln24")%>%html_text()
View(min_cost_delivery_time)
#reading payment methods
payment_methods = web%>%html_nodes(".ln24:nth-child(5)")%>%html_text()
View(payment_methods)
#reading total number of reviews
total_reviews = web%>%html_nodes("#orig-search-list .medium")%>%html_text()
View(total_reviews)
#creating a dataframe by combining the scraped vectors; data.frame() will
#error (or silently recycle) if the vectors differ in length
zomato_mumbai_df = data.frame(name, cuisines, ratings, total_reviews,cost_for_one, min_cost_delivery_time, payment_methods)
View(zomato_mumbai_df)
#analyze
dim(zomato_mumbai_df)
str(zomato_mumbai_df)
# Strip the "Cost ... for one" boilerplate, leaving just the amount.
# NOTE(review): "???" looks like a mangled currency symbol (probably the
# rupee sign) from an encoding round-trip -- verify against the original.
zomato_mumbai_df$cost_for_one = gsub("\\Cost ???|\\ for one","",zomato_mumbai_df$cost_for_one)
View(zomato_mumbai_df)
# Normalize review counts: drop parentheses, the word "Reviews" and
# thousands separators, and expand the "K" suffix.
# NOTE(review): "." is removed BEFORE "K" is expanded, so "1.2K" becomes
# "12000" rather than 1200 -- likely a bug; verify intended behavior.
zomato_mumbai_df$total_reviews = gsub("\\(|\\)","",zomato_mumbai_df$total_reviews)
zomato_mumbai_df$total_reviews = gsub("\\.|\\ Reviews","",zomato_mumbai_df$total_reviews)
zomato_mumbai_df$total_reviews = gsub("K","000",zomato_mumbai_df$total_reviews)
zomato_mumbai_df$total_reviews = gsub(",","",zomato_mumbai_df$total_reviews)
# Trim the "Accepts ... payments only" boilerplate from payment methods.
zomato_mumbai_df$payment_methods = gsub("\\Accepts |\\ payments only","",zomato_mumbai_df$payment_methods)
zomato_mumbai_df$payment_methods = gsub("payments","",zomato_mumbai_df$payment_methods)
|
ddbbf583a3c24261652fc903a2040cffc2b48c34
|
16dd0786662a4e84610742932b38917092ee20c7
|
/R/matrix.shuffle.r
|
95ceeb347dc0ba5ca84728b1852131317c092683
|
[] |
no_license
|
th86/gislkit
|
33ceab874425c0890ce0c24595e83eaf0bb160b5
|
0b40e769e811a2f4fe5e4e16fce88b550e0580aa
|
refs/heads/master
| 2021-01-17T07:49:58.925235
| 2018-10-09T00:36:30
| 2018-10-09T00:36:30
| 21,303,685
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 371
|
r
|
matrix.shuffle.r
|
# Shuffle a matrix, either by permuting whole rows ("by label", keeping each
# row intact) or by permuting every column independently.
#
# covar:   matrix to shuffle (dimnames are kept in their original order)
# bylabel: TRUE  -> permute rows as units (breaks only the row/label pairing)
#          FALSE -> permute each column separately (breaks all row structure)
matrix.shuffle <- function(covar, bylabel = FALSE) {
  M <- matrix(0, nrow(covar), ncol(covar))
  rownames(M) <- rownames(covar)
  colnames(M) <- colnames(covar)
  if (isTRUE(bylabel)) {
    # BUG FIX: the original permuted the rows of the zero-filled matrix M
    # (always returning all zeros); permute the rows of `covar` instead.
    # Assigning via M[] keeps the dimnames set above in original order,
    # consistent with the column-wise branch.
    M[] <- covar[sample.int(nrow(covar)), ]
  } else {
    for (protein.itr in seq_len(ncol(covar)))
      M[, protein.itr] <- covar[sample.int(nrow(M)), protein.itr]
  }
  return(M)
}
|
c5a7bf00cabee6d0d06267d9daa8f9b0bf4bb9e6
|
1486b7de41ac7a9882ecd92722d886802d49f9aa
|
/man/topo2.Rd
|
f9e4dc7b0bf70017233d3ee37c5fc54d38f7f6d0
|
[] |
no_license
|
richardsc/ocedata
|
4f155f7ad72ac90a42c6003fb316c5e41b7e5a8b
|
ad804a151f3d50ea41df6f7cfd401e0446ddac6a
|
refs/heads/master
| 2021-01-01T17:58:48.937286
| 2017-07-24T16:20:25
| 2017-07-24T16:20:25
| 98,209,705
| 0
| 0
| null | 2017-07-24T16:08:07
| 2017-07-24T16:08:07
| null |
UTF-8
|
R
| false
| false
| 1,004
|
rd
|
topo2.Rd
|
\name{topo2}
\docType{data}
\alias{topo2}
\title{World topography data, on a 2-degree grid.}
\description{A matrix containing world topography data, on a 2-degree grid.
This is provided for occasions where the higher resolution topography in
\link[oce]{topoWorld} is not needed. See \dQuote{Examples} for a plot that
illustrates the longitude and latitude grid for the data.}
\usage{data(topo2)}
\examples{
\dontrun{
# Compare with topoWorld in oce
library("oce")
data(topoWorld, package="oce")
w <- topoWorld
contour(w[['longitude']], w[['latitude']], w[['z']], level=0, drawlabels=FALSE)
data(topo2, package="ocedata")
lon <- seq(-179.5, 178.5, by=2)
lat <- seq(-89.5, 88.5, by=2)
contour(lon, lat, topo2, level=0, add=TRUE, col='red', lty='dotted', drawlabels=FALSE)
}
}
\source{The data are calculated by applying \code{\link[oce]{decimate}} to the
\code{topoWorld} dataset from the \code{oce} package, followed by
extraction of the \code{"z"} value.}
\keyword{datasets}
|
c06987abacfc3a3bd671f3e60b5edde1f1f2075f
|
b8a9e9b97cde8c739d40558870988e4d8adf4262
|
/analysis_sub3/plot_result_delta_missing.r
|
8998754becc5aa7027b7e484b8200a705e599bc7
|
[] |
no_license
|
GuanLab/phosphoproteome_prediction
|
7bdcd5d5b4feed63aa30bab0313fc1c0afc07e16
|
8ee9bc130de85b3cef1c5e2e3dd2b365d351229a
|
refs/heads/master
| 2020-04-19T14:54:39.181792
| 2019-07-25T23:22:56
| 2019-07-25T23:22:56
| 168,258,057
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,351
|
r
|
plot_result_delta_missing.r
|
library(ggplot2)
library(reshape2)
source("~/function/my_palette.r")
source("~/function/multiplot.R")
# delta-correlation (improvement after multisite)
b1=read.delim("../sub3/prediction/breast/multisite/cor_nrmse_b1.txt",header=F,row.names=1)
b2=read.delim("../sub3/prediction/breast/multisite/cor_nrmse_b2.txt",header=F,row.names=1)
b3=read.delim("../sub3/prediction/breast/multisite/cor_nrmse_b3.txt",header=F,row.names=1)
b4=read.delim("../sub3/prediction/breast/multisite/cor_nrmse_b4.txt",header=F,row.names=1)
b5=read.delim("../sub3/prediction/breast/multisite/cor_nrmse_b5.txt",header=F,row.names=1)
mat1=cbind(b1[,1],b2[,1],b3[,1],b4[,1],b5[,1])
mat3=cbind(b1[,2],b2[,2],b3[,2],b4[,2],b5[,2])
b1=read.delim("../sub3/prediction/breast/individual_transplant/cor_nrmse_b1.txt",header=F,row.names=1)
b2=read.delim("../sub3/prediction/breast/individual_transplant/cor_nrmse_b2.txt",header=F,row.names=1)
b3=read.delim("../sub3/prediction/breast/individual_transplant/cor_nrmse_b3.txt",header=F,row.names=1)
b4=read.delim("../sub3/prediction/breast/individual_transplant/cor_nrmse_b4.txt",header=F,row.names=1)
b5=read.delim("../sub3/prediction/breast/individual_transplant/cor_nrmse_b5.txt",header=F,row.names=1)
mat2=cbind(b1[,1],b2[,1],b3[,1],b4[,1],b5[,1])
mat4=cbind(b1[,2],b2[,2],b3[,2],b4[,2],b5[,2])
cor1=apply(mat1-mat2,1,mean,na.rm=T)
rmse1=apply(mat3-mat4,1,mean,na.rm=T)
mean(cor1,na.rm=T) #[1] 0.01384422
mean(rmse1,na.rm=T) #[1] -0.001436225
o1=read.delim("../sub3/prediction/ova/multisite/cor_nrmse_o1.txt",header=F,row.names=1)
o2=read.delim("../sub3/prediction/ova/multisite/cor_nrmse_o2.txt",header=F,row.names=1)
o3=read.delim("../sub3/prediction/ova/multisite/cor_nrmse_o3.txt",header=F,row.names=1)
o4=read.delim("../sub3/prediction/ova/multisite/cor_nrmse_o4.txt",header=F,row.names=1)
o5=read.delim("../sub3/prediction/ova/multisite/cor_nrmse_o5.txt",header=F,row.names=1)
mat1=cbind(o1[,1],o2[,1],o3[,1],o4[,1],o5[,1])
mat3=cbind(o1[,2],o2[,2],o3[,2],o4[,2],o5[,2])
o1=read.delim("../sub3/prediction/ova/individual_transplant/cor_nrmse_o1.txt",header=F,row.names=1)
o2=read.delim("../sub3/prediction/ova/individual_transplant/cor_nrmse_o2.txt",header=F,row.names=1)
o3=read.delim("../sub3/prediction/ova/individual_transplant/cor_nrmse_o3.txt",header=F,row.names=1)
o4=read.delim("../sub3/prediction/ova/individual_transplant/cor_nrmse_o4.txt",header=F,row.names=1)
o5=read.delim("../sub3/prediction/ova/individual_transplant/cor_nrmse_o5.txt",header=F,row.names=1)
mat2=cbind(o1[,1],o2[,1],o3[,1],o4[,1],o5[,1])
mat4=cbind(o1[,2],o2[,2],o3[,2],o4[,2],o5[,2])
cor2=apply(mat1-mat2,1,mean,na.rm=T)
rmse2=apply(mat3-mat4,1,mean,na.rm=T)
mean(cor2,na.rm=T) #[1] 0.04051451
mean(rmse2,na.rm=T) #[1] -0.004932747
# number of missing
b=as.matrix(read.delim("../sub3/data/trimmed_set/breast_phospho.txt",check.names=F,row.names=1))
o=as.matrix(read.delim("../sub3/data/trimmed_set/ova_phospho.txt",check.names=F,row.names=1))
num_miss1=apply(b,1,function(x){sum(is.na(x))})
num_miss2=apply(o,1,function(x){sum(is.na(x))})
# weak correlation
cor(cor1,num_miss1,use="pairwise.complete.obs") #[1] 0.2228953 p-value < 2.2e-16
cor(cor2,num_miss2,use="pairwise.complete.obs") #[1] 0.164645 p-value < 2.2e-16
avg_cor=c(mean(cor1[num_miss1>0 & num_miss1<=10],na.rm=T),
mean(cor1[num_miss1>10 & num_miss1<=20],na.rm=T),
mean(cor1[num_miss1>20 & num_miss1<=30],na.rm=T),
mean(cor1[num_miss1>30 & num_miss1<=40],na.rm=T),
mean(cor1[num_miss1>40 & num_miss1<=50],na.rm=T),
mean(cor1[num_miss1>50 & num_miss1<=60],na.rm=T),
mean(cor1[num_miss1>60 & num_miss1<=70],na.rm=T),
mean(cor1[num_miss1>70 & num_miss1<=80],na.rm=T),
mean(cor1[num_miss1>80 & num_miss1<=90],na.rm=T),
mean(cor1[num_miss1>90 & num_miss1<=100],na.rm=T))
dat=data.frame(x=seq(10,100,10),y=avg_cor)
p1=ggplot(data=dat, aes(x, y)) +
geom_bar(stat="identity",color=p_jco[6],fill=p_jco[6]) +
theme_light() +
ylim(0,0.1) +
geom_segment(aes(x = 5, y = 0.0138, xend = 105, yend = 0.0138), colour = p_jco[2], linetype=2, size=1) +
theme(plot.title = element_text(hjust = 0.5)) + # hjust=0.5 set the title in the center
labs(x='number of missing values',y="delta correlation",title="breast") + # customize titles
scale_x_continuous(breaks=seq(10,100,10),labels=c("(0,10]","(10,20]","(20,30]","(30,40]","(40,50]",
"(50,60]","(60,70]","(70,80]","(80,90]","(90,100]")) +
annotate("text", x = seq(10,100,10), y = 0.1, label=format(dat[,"y"],digits=2),size=3)
p1
avg_cor=c(mean(cor2[num_miss2>0 & num_miss2<=10],na.rm=T),
mean(cor2[num_miss2>10 & num_miss2<=20],na.rm=T),
mean(cor2[num_miss2>20 & num_miss2<=30],na.rm=T),
mean(cor2[num_miss2>30 & num_miss2<=40],na.rm=T),
mean(cor2[num_miss2>40 & num_miss2<=50],na.rm=T),
mean(cor2[num_miss2>50 & num_miss2<=60],na.rm=T),
mean(cor2[num_miss2>60 & num_miss2<=70],na.rm=T))
dat=data.frame(x=seq(10,70,10),y=avg_cor)
p2=ggplot(data=dat, aes(x, y)) +
geom_bar(stat="identity",color=p_jco[6],fill=p_jco[6]) +
theme_light() +
ylim(0,0.15) +
geom_segment(aes(x = 5, y = 0.0405, xend = 75, yend = 0.0405), colour = p_jco[2], linetype=2, size=1) +
theme(plot.title = element_text(hjust = 0.5)) + # hjust=0.5 set the title in the center
labs(x='number of missing values',y="delta correlation",title="ovary") + # customize titles
scale_x_continuous(breaks=seq(10,70,10),labels=c("(0,10]","(10,20]","(20,30]","(30,40]","(40,50]","(50,60]","(60,70]")) + # x-axis label
annotate("text", x = seq(10,70,10), y = 0.15, label=format(dat[,"y"],digits=2),size=3)
p2
# weak correlation
cor.test(rmse1,num_miss1,use="pairwise.complete.obs") #[1] -0.2672205 p-value < 2.2e-16
cor.test(rmse2,num_miss2,use="pairwise.complete.obs") #[1] -0.202962 p-value < 2.2e-16
avg_rmse=c(mean(rmse1[num_miss1>0 & num_miss1<=10],na.rm=T),
mean(rmse1[num_miss1>10 & num_miss1<=20],na.rm=T),
mean(rmse1[num_miss1>20 & num_miss1<=30],na.rm=T),
mean(rmse1[num_miss1>30 & num_miss1<=40],na.rm=T),
mean(rmse1[num_miss1>40 & num_miss1<=50],na.rm=T),
mean(rmse1[num_miss1>50 & num_miss1<=60],na.rm=T),
mean(rmse1[num_miss1>60 & num_miss1<=70],na.rm=T),
mean(rmse1[num_miss1>70 & num_miss1<=80],na.rm=T),
mean(rmse1[num_miss1>80 & num_miss1<=90],na.rm=T),
mean(rmse1[num_miss1>90 & num_miss1<=100],na.rm=T))
dat=data.frame(x=seq(10,100,10),y=avg_rmse)
p3=ggplot(data=dat, aes(x, y)) +
geom_bar(stat="identity",color=p_jco[6],fill=p_jco[6]) +
theme_light() +
ylim(0.003,-0.03) +
geom_segment(aes(x = 5, y = -0.00144, xend = 105, yend =-0.00144), colour = p_jco[2], linetype=2, size=1) +
theme(plot.title = element_text(hjust = 0.5)) + # hjust=0.5 set the title in the center
labs(x='number of missing values',y="delta NRMSE",title="breast") + # customize titles
scale_x_continuous(breaks=seq(10,100,10),labels=c("(0,10]","(10,20]","(20,30]","(30,40]","(40,50]",
"(50,60]","(60,70]","(70,80]","(80,90]","(90,100]")) +
annotate("text", x = seq(10,100,10), y = -0.029, label=format(dat[,"y"],digits=2),size=3)
p3
avg_rmse=c(mean(rmse2[num_miss2>0 & num_miss2<=10],na.rm=T),
mean(rmse2[num_miss2>10 & num_miss2<=20],na.rm=T),
mean(rmse2[num_miss2>20 & num_miss2<=30],na.rm=T),
mean(rmse2[num_miss2>30 & num_miss2<=40],na.rm=T),
mean(rmse2[num_miss2>40 & num_miss2<=50],na.rm=T),
mean(rmse2[num_miss2>50 & num_miss2<=60],na.rm=T),
mean(rmse2[num_miss2>60 & num_miss2<=70],na.rm=T))
dat=data.frame(x=seq(10,70,10),y=avg_rmse)
p4=ggplot(data=dat, aes(x, y)) +
geom_bar(stat="identity",color=p_jco[6],fill=p_jco[6]) +
theme_light() +
ylim(0,-0.03) +
geom_segment(aes(x = 5, y = -0.00493, xend = 75, yend = -0.00493), colour = p_jco[2], linetype=2, size=1) +
theme(plot.title = element_text(hjust = 0.5)) + # hjust=0.5 set the title in the center
labs(x='number of missing values',y="delta NRMSE",title="ovary") + # customize titles
scale_x_continuous(breaks=seq(10,70,10),labels=c("(0,10]","(10,20]","(20,30]","(30,40]","(40,50]","(50,60]","(60,70]")) + # x-axis label
annotate("text", x = seq(10,70,10), y = -0.028, label=format(dat[,"y"],digits=2),size=3)
p4
pdf(file="figure/result_delta_missing.pdf",width=12,height=6,useDingbats=F)
list_p=c(list(p1),list(p2),list(p3),list(p4))
mat_layout=matrix(1:4,nrow=2,byrow=T)
multiplot(plotlist=list_p,layout = mat_layout)
dev.off()
|
0e59e7c8e2374151aa354d79f71752a4fbccac5e
|
dd0228372735d2f34f1a022202f1df427c94cbd2
|
/FSTR04.build.FSdb.R
|
4e9860cda411be585d296255da04eb99cd4aac61
|
[] |
no_license
|
poyuliu/metaomics
|
90ced66649a1702253c8d2605ea6a3ab3f38ae5b
|
505170607bcfb9d972514bbcad11186f13953d91
|
refs/heads/master
| 2021-05-12T05:50:38.856703
| 2018-01-13T15:26:34
| 2018-01-13T15:26:34
| 117,205,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,449
|
r
|
FSTR04.build.FSdb.R
|
rm(list=ls())
setwd("~/FS_transcriptome/GSEA/")
load("2016-01-19/FSTR02.SIG.RData")
library(gage)
# Load KEGG KO sets
kegg.gs <- kegg.gsets(species = "ko", id.type = "kegg") # "ko":reference dataset
kegg.met <- kegg.gs$kg.sets[kegg.gs$met.idx] # metabolic pathway set
kegg.gs <- kegg.gs[[1]] # all pathways, without subset indice
#####20160905#####
## output K IDs by pathways
c1 <- substr(khier[which(khier[,1]=="Metabolism"),3],1,5)
c2 <- substr(khier[which(khier[,1]=="Genetic Information Processing"),3],1,5)
c3 <- substr(khier[which(khier[,1]=="Environmental Information Processing"),3],1,5)
c4 <- substr(khier[which(khier[,1]=="Cellular Processes"),3],1,5)
kegg.gs <- kegg.gsets(species = "ko", id.type = "kegg") # "ko":reference dataset
kegg.met <- kegg.gs$kg.sets[substr(names(kegg.gs$kg.sets),3,7) %in% c1]# metabolic pathway set
kegg.gip <- kegg.gs$kg.sets[substr(names(kegg.gs$kg.sets),3,7) %in% c2] # Genetic Information Processing
kegg.eip <- kegg.gs$kg.sets[substr(names(kegg.gs$kg.sets),3,7) %in% c3] # Environmental Information Processing
kegg.cep <- kegg.gs$kg.sets[substr(names(kegg.gs$kg.sets),3,7) %in% c4] # Cellular Processes
kegg.gs <- kegg.gs[[1]] # all pathways, without subset indice
save(kegg.gs,kegg.met,kegg.gip,kegg.eip,kegg.cep,file="~/FS_transcriptome/GSEA/kegg_path_20160905.RData")
#####20160905#####
# Gut compartmental K ID libraries
# QUALITATIVE!!!
lib.cc <- rownames(GC.mean)[which(GC.mean$CC!=0)]
lib.lc <- rownames(GC.mean)[which(GC.mean$LC!=0)]
lib.sc <- rownames(GC.mean)[which(GC.mean$SC!=0)]
# QUANTITATIVE >>> threshold??
# venn diagram
# library(VennDiagram)
# venn.diagram(x=list(Cecum=lib.cc,LargeIntestine=lib.lc,SmallIntestine=lib.sc),filename="ts_venn.tiff",
# fill=c("cornflowerblue","green","yellow"),alpha = 0.50,imagetype = "tiff",
# fontfamily = "serif", fontface = "bold",cat.cex = 1.5,cat.fontfamily = "serif")
# Restrict every pathway set in `gene.sets` to the K IDs present in the
# compartment library `lib` (qualitative membership; names are preserved).
subset.gs <- function(gene.sets, lib) {
  lapply(gene.sets, function(kids) kids[which(kids %in% lib)])
}
# Make sure the dated output folder exists, creating it when necessary.
# Same messages and side effects as the duplicated inline if/else blocks
# this replaces.
ensure.outdir <- function(dir) {
  if (paste0("./", dir) %in% list.dirs()) {
    print("Saving folder does exist!")
  } else {
    print("Saving folder does not exist!")
    system(paste("mkdir", dir))
  }
}
# DBs with all pathway sets (signalling, metabolic, disease pathways),
# one per gut compartment.
db.cc <- subset.gs(kegg.gs, lib.cc)   # Cecum
db.lc <- subset.gs(kegg.gs, lib.lc)   # Large intestine
db.sc <- subset.gs(kegg.gs, lib.sc)   # Small intestine
# Save FSdb.GS
ensure.outdir(Sys.Date())
save(db.cc, db.lc, db.sc, file = paste0(Sys.Date(), "/FSdb.GS.RData"))
# DBs restricted to the metabolic pathway set.
met.db.cc <- subset.gs(kegg.met, lib.cc)   # Cecum
met.db.lc <- subset.gs(kegg.met, lib.lc)   # Large intestine
met.db.sc <- subset.gs(kegg.met, lib.sc)   # Small intestine
# Save FSdb.MET
ensure.outdir(Sys.Date())
save(met.db.cc, met.db.lc, met.db.sc,
     file = paste0(Sys.Date(), "/FSdb.MET.RData"))
|
405ba4cbc15f74d2e5838f90323d6f57d63de107
|
6dfa40f0b4ca611b22562ab4b8561a4a2a6929d7
|
/R/blbglm.R
|
3210b54fda0002684f1669a27c4e5b932a118360
|
[
"MIT"
] |
permissive
|
McChickenNuggets/blblm
|
28566d8b5c0943ecf5d771fbccb6d47947bd24b3
|
3ff530a0da028624d005bf9259cf0d841b8b54ba
|
refs/heads/master
| 2023-03-22T02:01:42.557457
| 2021-03-14T10:10:49
| 2021-03-14T10:10:49
| 347,285,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,034
|
r
|
blbglm.R
|
#' Bag of little bootstraps for generalized linear models
#'
#' Splits `data` into `m` subsamples and fits `B` bootstrap GLMs on each,
#' optionally mapping over the subsamples in parallel via `future_map`.
#'
#' @param formula model formula passed to the per-subsample GLM fits
#' @param data data frame to split and resample
#' @param m number of subsamples to split the data into
#' @param B number of bootstrap replicates per subsample
#' @param family glm family to specify (e.g. `binomial`)
#' @param Parallel boolean value; if TRUE, use `future_map` for parallelization
#'
#' @return an object of class `blbglm` (returned invisibly) with elements
#'   `estimates` and `formula`
#' @export
#'
#' @examples
#' blbglm(Species ~ Sepal.Length * Sepal.Width, iris[1:100,], 3, 100, family = binomial)
#' blbglm(Species ~ Sepal.Length * Sepal.Width, iris[1:100,], 3, 100, binomial, TRUE)
blbglm <- function(formula, data, m = 10, B = 5000, family, Parallel = FALSE) {
  subsamples <- split_data(data, m)
  # purrr-style lambda shared by the sequential and parallel branches.
  fit_subsample <-
    ~ glm_each_subsample(formula = formula, data = ., n = nrow(.), B = B, family)
  if (Parallel) {
    estimates <- future_map(subsamples, fit_subsample)
  } else {
    estimates <- map(subsamples, fit_subsample)
  }
  result <- structure(list(estimates = estimates, formula = formula),
                      class = "blbglm")
  invisible(result)
}
|
594c04a92d29da6e8a5cb1bf30f648bf4eeadb0c
|
604924682e450b6911e6852f785e57542a6b4615
|
/man/format_console.Rd
|
b414c27752a971c6fe6ab799b50fdfb1f0d72375
|
[] |
no_license
|
tonyxv/mischelper
|
fbd2ebadb256b4f13dffe70fcd77da620f11b842
|
6cc79308b8f6ab8b9b8281f6d2b4152a109ff15f
|
refs/heads/master
| 2020-04-18T06:02:52.372256
| 2018-01-12T16:02:55
| 2018-01-12T16:02:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 357
|
rd
|
format_console.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{format_console}
\alias{format_console}
\title{Format console}
\usage{
format_console()
}
\description{
read console input and output from clipboard, format as script
}
\details{
The formatted script is written back to the clipboard and inserted at the
current cursor location.
}
|
5188a9cbed8ece47ab7de2fc0297954cc5df18d1
|
bdfeda061e183e96cdb4a82896578592b01db3ff
|
/Course 2 - Data Analytics Predict Customer Preferences 2017.3/Task 3.R
|
5de507717473409ee4916fc09bcaf9e247d0550e
|
[] |
no_license
|
asid72/UT-Big-Data-and-Data-Analytics-Program
|
a5deed8ad5889902bff34b47ff8b5c8eb3445fbf
|
43ea862786715134776a6ab101af34b085c8b96b
|
refs/heads/master
| 2022-10-13T10:49:12.304332
| 2020-06-09T14:42:27
| 2020-06-09T14:42:27
| 271,025,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,790
|
r
|
Task 3.R
|
# ---- Setup: working directory, reproducibility seed, packages ----
# get working directory
getwd()
# set working directory
# NOTE(review): hard-coded absolute Windows path — runs only on the original
# author's machine; prefer relative paths or here::here().
setwd("C:\\Users\\asiddiqui\\Documents\\UT Data Analytics Course\\Course Weeks\\Course 2 - Data Analytics Predict Customer Preferences 2017.3\\Task 3\\")
dir()
# set a value for seed (to be used in the set.seed function)
seed <- 123
################
# Load packages
################
# NOTE(review): install.packages() in a script re-installs on every run;
# normally run once interactively and keep only the library() calls here.
install.packages("caret")
install.packages("corrplot")
install.packages("readr")
library(caret)
library(corrplot)
#library(doMC)
#library(doParallel)
library(mlbench)
library(readr)
#####################
# Parallel processing
#####################
# NOTE: Be sure to use the correct package for your operating system.
#--- for WIN ---#
install.packages("doParallel") # install in 'Load packages' section above
library(doParallel) # load in the 'Load Packages' section above
detectCores() # detect number of cores
cl <- makeCluster(2) # select number of cores; 2 in this example
registerDoParallel(cl) # register cluster
getDoParWorkers() # confirm number of cores being used by RStudio
# Stop Cluster. After performing your tasks, make sure to stop your cluster.
# NOTE(review): stopping the cluster HERE (before the train() calls below)
# means no model training actually runs in parallel — move this to the end.
stopCluster(cl)
###############
# Import data
##############
#### --- Load raw datasets --- ####
# --- Load Train/Existing data (Dataset 1) --- #
existingproduct <- read.csv("existingproductattributes2017.csv", stringsAsFactors = FALSE)
class(existingproduct) # "data.frame"
str(existingproduct)
# --- Load Predict/New data (Dataset 2) --- #
newproductattributes <- read.csv("newproductattributes2017.csv", stringsAsFactors = FALSE)
class(newproductattributes) # "data.frame"
str(newproductattributes)
################
# Evaluate data (exploratory console output only; nothing is stored)
################
#--- Dataset 1 ---#
str(existingproduct) #80 obs. of 18 variables:
names(existingproduct)
summary(existingproduct)
head(existingproduct)
tail(existingproduct)
# plot
hist(existingproduct$Volume)
#plot(WholeYear$TimeofDay, WholeYear$SolarRad)
#qqnorm(WholeYear$SolarRad)
# check for missing values
anyNA(existingproduct)
is.na(existingproduct) # prints a full logical matrix; anyNA() above suffices
summary(existingproduct)
#--- Dataset 2 ---#
str(newproductattributes) #24 obs. of 18 variables:
names(newproductattributes)
summary(newproductattributes)
head(newproductattributes)
tail(newproductattributes)
# plot
hist(newproductattributes$Volume)
#plot(WholeYear$TimeofDay, WholeYear$SolarRad)
#qqnorm(WholeYear$SolarRad)
# check for missing values
anyNA(newproductattributes)
is.na(newproductattributes) # prints a full logical matrix; anyNA() above suffices
summary(newproductattributes)
#############
# Preprocess
#############
#--- Dataset 1 ---#
# dummify the data: one-hot encode the categorical ProductType column
# Dummy variables for ProductType
newDataFrame <- dummyVars(" ~ .", data = existingproduct)
existingproduct_readyData <- data.frame(predict(newDataFrame, newdata = existingproduct))
str(existingproduct_readyData)
# remove obvious features (e.g., ID, other)
existingproductDV <- existingproduct_readyData # make a copy
existingproductDV$ProductNum <- NULL # remove ProductNum since ID
existingproductDV$BestSellersRank <- NULL # remove BestSellersRank since it contains NAs
str(existingproductDV) # 80 obs. of 27 variables:
summary(existingproductDV)
# save preprocessed dataset
write.csv(existingproductDV,file="existingproductDV.csv")
#--- Dataset 2 ---#
# dummify the data (same encoding steps as Dataset 1)
# Dummy variables for ProductType
newDataFrame2 <- dummyVars(" ~ .", data = newproductattributes)
newproductattributes_readyData <- data.frame(predict(newDataFrame2, newdata = newproductattributes))
str(newproductattributes_readyData)
# remove obvious features (e.g., ID, other)
newproductattributesDV <- newproductattributes_readyData # make a copy
newproductattributesDV$ProductNum <- NULL # remove ProductNum since ID
newproductattributesDV$BestSellersRank <- NULL # remove BestSellersRank since it contains NAs
str(newproductattributesDV) # 24 obs. of 27 variables:
summary(newproductattributesDV)
# save preprocessed dataset
write.csv(newproductattributesDV,file="newproductattributesDV.csv")
###################
#Correlation Matrix
###################
#--- Dataset 1 ---#
corExistingProduct <- cor(existingproductDV)
corExistingProduct
corrplot(corExistingProduct)
corrplot(corExistingProduct, type="upper")
corrplot(corExistingProduct,method="number")
corrplot(corExistingProduct,method="number",type = "upper",number.digits = 2,number.cex=0.70, tl.cex=0.70)
# Remove any feature highly correlated to the dependent variable (Volume)
# correlation ~0.95 or above
existingproductDV$x5StarReviews <- NULL
# Remove any independent features that are highly correlated with each other
# correlation ~0.90 or above
existingproductDV$x1StarReviews <- NULL
existingproductDV$x3StarReviews <- NULL
str(existingproductDV)
#--- Dataset 2 ---#
# Drop the same columns so the prediction set matches the training schema.
corExistingProduct2 <- cor(newproductattributesDV)
corExistingProduct2
corrplot(corExistingProduct2, type="upper")
corrplot(corExistingProduct2,method="number",type = "upper",number.digits = 2,number.cex=0.40, tl.cex=0.30)
# Remove any feature highly correlated to the dependent variable (Volume)
# correlation ~0.95 or above
newproductattributesDV$x5StarReviews <- NULL
# Remove any independent features that are highly correlated with each other
# correlation ~0.90 or above
newproductattributesDV$x1StarReviews <- NULL
newproductattributesDV$x3StarReviews <- NULL
str(newproductattributesDV)
##################
# Train/test sets
##################
# create the training partition that is 75% of total obs
set.seed(seed) # set random seed
inTraining <- createDataPartition(existingproductDV$Volume, p=0.75, list=FALSE)
# create training/testing dataset
trainSet <- existingproductDV[inTraining,]
testSet <- existingproductDV[-inTraining,]
# verify number of obs
nrow(trainSet)
nrow(testSet)
################
# Train control
################
# set 10 fold cross validation (single repeat)
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
##############
# Train model
##############
?modelLookup()
modelLookup("rf")
## ------- LM ------- ##
# Linear regression baseline; set.seed before each train() so CV folds match.
# LM train/fit
set.seed(seed)
#lmFit1 <- train(SolarRad~., data=trainSet, method="leapSeq", trControl=fitControl)
lmFit1 <- train(Volume~., data=trainSet, method="lm", trControl=fitControl)
lmFit1
# Evaluate performance metrics, but don't add as comments to script file. Performance
# metrics will be added as comments to the script file in the Evaluate models below
# evaluate var importance
varImp(lmFit1)
## ------- RF ------- ##
# RF train/fit
set.seed(seed)
# NOTE(review): prefer importance=TRUE over T (T can be reassigned)
system.time(rfFit1 <- train(Volume~., data=trainSet, method="rf", importance=T, trControl=fitControl)) #importance is needed for varImp
rfFit1
varImp(rfFit1)
## ------- SVM ------- ##
# SVM train/fit
set.seed(seed)
svmFit1 <- train(Volume~., data=trainSet, method="svmLinear", trControl=fitControl)
svmFit1
varImp(svmFit1)
summary(svmFit1)
## ------- GBM --------##
# GBM train/fit
set.seed(seed)
gbmFit1 <- train(Volume~., data=trainSet, method="gbm", trControl=fitControl)
gbmFit1
varImp(gbmFit1)
summary(gbmFit1)
# C.5
#C50Fit <- train(brand~., data = trainSet, method = "C5.0", trControl=fitControl, tuneLength = 1)
#rfFit
#C50Fit
#################
# Evaluate models
#################
##--- Compare models ---##
# use resamples to compare cross-validated model performance
ModelFitResults1k <- resamples(list(lm=lmFit1, rf=rfFit1, svm=svmFit1,gbm=gbmFit1))
# output summary metrics for tuned models
summary(ModelFitResults1k)
# Results:
# RMSE
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# lm 140.14184 427.7932 470.3557 730.4873 678.9288 2769.594 0
# rf 35.70961 154.6262 297.2972 785.2678 919.9273 3147.148 0
# svm 410.24460 484.6206 738.8094 999.2661 1029.3892 3019.513 0
# gbm 337.97734 365.4316 414.8010 941.9466 635.7744 3944.205 0
#
# Rsquared
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# lm 0.1218093 0.7707961 0.9193350 0.7789685 0.9502848 0.9890099 0
# rf 0.7018239 0.8308051 0.9288055 0.9011142 0.9928239 0.9975329 0
# svm 0.1276351 0.7444579 0.8718971 0.7822891 0.9366675 0.9932980 0
# gbm 0.3446906 0.7801303 0.8424995 0.8108935 0.9380622 0.9847783 0
##--- Conclusion ---##
# Make a note of which model is the top model, and why
# (RF selected below: highest median/mean R-squared in the resamples above.)
########################
# Validate top model
########################
# make predictions on the held-out test set
Fit1 <- rfFit1
#Fit1 <- gbmFit1
#Fit1 <- svmFit1
#Fit1 <- lmFit1
rfPred1 <- predict(Fit1, testSet)
# performance measurement
postResample(rfPred1, testSet$Volume)
# RMSE Rsquared
# (make note of performance metrics)
# plot predicted verses actual
plot(rfPred1,testSet$Volume)
# print predictions
rfPred1
########################
# Predict with top model
########################
# make predictions on the new-products set (same preprocessing as training data)
finalPred <- predict(rfFit1, newproductattributesDV)
finalPred
########################
# Save validated model
########################
# attach predictions to the ORIGINAL (un-dummified) new-product data and export
output <- newproductattributes
output$predictions <- finalPred
write.csv(output, file="C2.T3output.csv", row.names = TRUE)
|
27a79deb553bd9c5aa63cd076a6f4993d93b5c74
|
e2781779f4ab24d79098e39f742092c75bc752b5
|
/Machine Learning - KMeans - Categorical variant/KMeans.R.r
|
4847aeed9d52e109e1cffd3d462049d6a74183e0
|
[] |
no_license
|
praveenM417/Data-Engineering-Data-Science
|
2589601f51f384c667edc5f3106dc984d3f1921f
|
9c9cf5a77714474c0a5be2a7d09061951a8d34d3
|
refs/heads/master
| 2022-12-20T20:33:06.561492
| 2020-09-22T02:12:31
| 2020-09-22T02:12:31
| 275,698,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,074
|
r
|
KMeans.R.r
|
#### A TWO-STEP APPROACH FOR CLUSTERING A MIX OF NUMERICAL AND CATEGORICAL DATA #####
#### STEPS 1 - 11 as narrated in the document
library(sqldf)
# Contraceptive Method Choice dataset; columns arrive unnamed as V1..V10.
d2 <- read.table("cmc.csv", sep = ",")
# Step 1 - Numerical attributes scaling
numericVec <- c('V1','V4')
categoryVec <- c('V2','V3','V5','V6','V7','V8','V9','V10')
# BUGFIX: d3 was referenced below (and later by kproto(d3, 4)) without ever
# being created, raising "object 'd3' not found". Keep d3 as a factor-typed
# copy of the RAW data for the k-prototypes comparison at the end of the script.
d3 <- d2
for (i in seq_along(categoryVec)) {
  d3[[categoryVec[i]]] <- as.factor(d3[[categoryVec[i]]])
}
# Sentinel for the lowest-variance search in Step 3 (must exceed any
# plausible summed standard deviation).
minVar <- 10000
length(numericVec)
# Min-max scale the two numeric attributes to [0, 1].
d2$V1 <- as.numeric(d2$V1)
d2$V4 <- as.numeric(d2$V4)
minV1 <- min(d2$V1)
maxV1 <- max(d2$V1)
minV4 <- min(d2$V4)
maxV4 <- max(d2$V4)
# NOTE(review): both helpers read the global min/max computed above.
normalize <- function(x)
{
  return((x - minV4) / (maxV4 - minV4))
}
normalize_V1 <- function(x)
{
  return((x - minV1) / (maxV1 - minV1))
}
# The helpers are vectorized arithmetic, so call them directly instead of
# the original element-wise sapply() (identical result, less work).
d2$V1 <- normalize_V1(d2$V1)
d2$V4 <- normalize(d2$V4)
# Step 2 - Find the base Categorical attribute (V2)
str(d2)
levels(d2$V2)
# Step 3 - Find the numerical variable with lowest variance for the base attribute (V4)
# For each numeric column, sum the per-V2-group standard deviations (via SQL
# stdev) and keep the column with the smallest total in minVec.
for(i in 1:length(numericVec))
{
#d2[,numericVec[i]] <- d2[,numericVec[i]].filter()
#d2_filter <- filter(d2,camera=='North'&week=='Week_1'&day==1&type=='In')
#d2[,numericVec[i]]<-scale(d2[,numericVec[i]],na.rm=TRUE)
print(numericVec[i])
tempSQL <- paste('select V2,stdev(',numericVec[i],') as variance from d2 group by V2',sep='')
print(tempSQL)
minVar2 <- as.numeric(sqldf(paste('select sum(variance) from (',tempSQL,')',sep='')))
print(minVar2)
if(minVar2<minVar)
{minVar<-minVar2
minVec<-numericVec[i]}
}
print(minVec) #V4
# Counting classes on the base attribute
baseFreq<-sqldf("select V2,count(*) as cnt from d2 group by V2")
#View(baseFreq)
baseFreq1<-sqldf("select V3,V2,count(*) as cnt from d2 group by V3,V2")
# Step 4 - Base attribute transformation into numerical one:
# each V2 level is mapped to the mean of the chosen numeric column (V4).
V4_change<-sqldf("select V2,avg(V4) as V4_cng from d2 group by V2")
#View(V4_change)
# Base attributes transformation needed
baseNumberVec <- vector()
baseVector <- c('V2','V3','V5','V6','V7','V8','V9','V10')
BaseCat<-'V2'
basecatCount <- sqldf("select V2,count(*) as cnt from d2 group by V2")
comboList <- vector()
comboDF <- list()
indvDF <- list()
sql<-''
# Step 5 - Co-occurrence derivation and conversion of non base categorical into numerical
# indvDF[[attr]]: marginal counts per level; comboDF[[attr-V2]]: joint counts with V2.
for(i in 1:length(baseVector))
{
sql0 <- paste("select", baseVector[i],",count(*) as cnt from d2 group by",baseVector[i],sep = " ")
indvDF[[baseVector[i]]] <- sqldf(sql0)
comboList[i] <- paste(baseVector[i],BaseCat,sep="-")
sql<-paste("select", baseVector[i],",V2, count(*) as cnt from d2 group by",baseVector[i],",V2",sep = " ")
##print(sql)
comboDF[[paste(baseVector[i],BaseCat,sep="-")]]<-sqldf(sql)
}
# For every row and every categorical attribute, compute a numeric surrogate:
# sum over V2 levels of (Jaccard-style co-occurrence ratio * mean-V4 of that
# level). Results are memoised per attribute level in `mylist` so each level
# is computed only once.
newDF <- data.frame()
for(i in 1:length(baseVector))
{#print(baseVector[i])
# ##print(-1)
mylist <- list()
newVec<-vector()
mainTable <- baseVector[i]
for(j in 1: nrow(d2))
{
temp<-as.data.frame(comboDF[[i]])
filter1<-toString(d2[[baseVector[i]]][j])
#print(filter1)
if(filter1 %in% names(mylist))
perEle <- mylist[[filter1]]
else
{
#print('else')
sql_temp_bs <- paste('select * from temp where ',mainTable,' = \'',filter1,'\'',sep = "")
#print(sql_temp_bs)
df1 <- sqldf(sql_temp_bs)
#print(2)
perEle<-0
#print(1)
for(k in 1:nrow(df1))
{
v1<-vector()
v2<-vector()
filter2<-df1$V2[k]
sql_temp <- paste('select cnt from temp where ',mainTable,' = \'',filter1,'\' and V2 = \'',filter2,'\'',sep = "")
#print(sql_temp)
val1<-sqldf(sql_temp)
#print(val1)
temp2<-as.data.frame(indvDF[[i]])
sql_temp2 <- paste('select cnt from temp2 where ',mainTable,' = \'',filter1,'\'',sep="")
#print(sql_temp2)
val2<-sqldf(sql_temp2)
#print('val2')
#print(val2)
sql_temp3 <- paste('select cnt from baseFreq where V2 = \'',filter2,'\'',sep="")
val3<-sqldf(sql_temp3)
#print('val3')
#print(val3)
# co-occurrence = |A∩B| / |A∪B| (Jaccard on joint vs marginal counts)
co_occ<-abs(val1)/(abs(val2)+abs(val3)-abs(val1))
#print('co_occ')
#print(co_occ)
sql_temp4 <- paste('select V4_cng from V4_change where V2 = \'',filter2,'\'',sep="")
bs_num<-sqldf(sql_temp4)
#print('bs_num')
#print(bs_num)
perEleMul<-co_occ*bs_num
#print(perEleMul)
perEle<-perEle+perEleMul
#print(perEle)
}
#print('filter')
#print(filter1)
str(filter1)
mylist[[toString(filter1)]] <- perEle
#print(mylist)
}
# NOTE(review): growing a vector element-by-element is O(n^2); acceptable
# for this dataset size but the main cost here is the per-row sqldf calls.
newVec[length(newVec)+1]<-perEle
}
d2[,paste(baseVector[i],'new',sep='_')]<-as.vector(unlist(newVec))
#d2<-cbind(d2, newVec)
}
# V2 itself is replaced directly by its mean-V4 mapping from Step 4.
newDF<-sqldf('select b.V4_cng as v2_num from d2 a join V4_change b on a.V2=b.V2')
d2$V2_new <- newDF$v2_num
keeps <- c("V1", "V2_new","V3_new","V4","V5_new","V6_new","V7_new","V8_new","V9_new","V10_new")
alterDF<-(d2[keeps])
str(alterDF)
# NOTE(review): alterDF has no V2 column (only V2_new), so this assignment
# creates a new NA-free-by-coercion column — likely leftover; verify intent.
alterDF$V2<-as.numeric(as.character(alterDF$V2))
# Step 6 - Verifying if our dataframe consists of only numerical attributes
final <- alterDF[complete.cases(alterDF), ]
final$V2<-as.numeric(as.character(final$V2))
# Step 7 - hierarchical clustering and plots
#newDF<-sqldf('select b.V8_cng as v6_num from d2 a join V8_change b on a.V6=b.V6')
dist_mat <- dist(final, method = 'euclidean')
hclust_avg <- hclust(dist_mat, method = 'average')
cut_avg <- cutree(hclust_avg, k = 491)
#install.packages('dendextend', dependencies = TRUE)
# BUGFIX: rect.hclust() annotates an EXISTING dendrogram plot; without plotting
# first it fails with "plot.new has not been called yet".
plot(hclust_avg)
rect.hclust(hclust_avg , k = 491, border = 2:6)
abline(h = 3, col = 'red')
library(dendextend)
library(dplyr)
avg_dend_obj <- as.dendrogram(hclust_avg)
avg_col_dend <- color_branches(avg_dend_obj, h = 3)
plot(avg_col_dend)
# Attach the hierarchical cluster id to each observation.
final_new<-final
final_cl <- mutate(final_new, cluster = cut_avg)
final_cl$cluster <- as.factor(final_cl$cluster)
levels(final_cl$cluster)
# Step 8 - Centroids Calculation -- can be tweaked as mode for categorical variables
centroids <-sqldf('select cluster,avg(V1) as v1,avg(V2_new) as v2,avg(V3_new) as v3,avg(V4) as v4,avg(V5_new) as v5,avg(V6_new) as v6,avg(V7_new) as v7,avg(V8_new) as v8,avg(V9_new) as v9,avg(V10_new) as v10 from final_cl group by cluster')
#test<-sqldf('select cluster,V1_new,V4_new,1 as value from final_cl')
library(reshape)
# Step 9 - Additional attributes added to centroids: per-cluster level counts of
# each transformed attribute, pivoted wide with reshape::cast and joined on.
new_data <- sqldf('select *,1 as value from final_cl')
centroids_sort<-sqldf('select * from centroids order by cluster')
v2 <- cast(new_data, cluster~V2_new, sum)
# BUGFIX: the sqldf() call was missing, so v2_sort was assigned the SQL text
# itself instead of the sorted data frame.
v2_sort <- sqldf('select * from v2 order by cluster')
v2_centroids <-sqldf('select a.*,b.* from centroids_sort a join v2 b on a.cluster=b.cluster')
View(v2_centroids)
v2_centroids <- v2_centroids[,-12] # drop the duplicated join-key column
v3 <- cast(new_data, cluster~V3_new, sum)
v2_v3_centroids <-sqldf('select a.*,b.* from v2_centroids a join v3 b on a.cluster=b.cluster')
colnames(v2_v3_centroids)
v2_v3_centroids <- v2_v3_centroids[,-16]
v5 <- cast(new_data, cluster~V5_new, sum)
v2_v3_v5centroids <-sqldf('select a.*,b.* from v2_v3_centroids a join v5 b on a.cluster=b.cluster')
colnames(v2_v3_v5centroids)
v2_v3_v5centroids <- v2_v3_v5centroids[,-20]
v6 <- cast(new_data, cluster~V6_new, sum)
v2_v3_v5_v6centroids <-sqldf('select a.*,b.* from v2_v3_v5centroids a join v6 b on a.cluster=b.cluster')
colnames(v2_v3_v5_v6centroids)
v2_v3_v5_v6centroids <- v2_v3_v5_v6centroids[,-22]
v7 <- cast(new_data, cluster~V7_new, sum)
v2_v2_v5_v6_v7centroids <-sqldf('select a.*,b.* from v2_v3_v5_v6centroids a join v7 b on a.cluster=b.cluster')
colnames(v2_v2_v5_v6_v7centroids)
v2_v2_v5_v6_v7centroids <- v2_v2_v5_v6_v7centroids[,-24]
View(v7)
v8 <- cast(new_data, cluster~V8_new, sum)
v2_v2_v5_v6_v7_v8centroids <-sqldf('select a.*,b.* from v2_v2_v5_v6_v7centroids a join v8 b on a.cluster=b.cluster')
colnames(v2_v2_v5_v6_v7_v8centroids)
v2_v2_v5_v6_v7_v8centroids <- v2_v2_v5_v6_v7_v8centroids[,-28]
v9 <- cast(new_data, cluster~V9_new, sum)
View(v9)
v2_v2_v5_v6_v7_v8_v9centroids <-sqldf('select a.*,b.* from v2_v2_v5_v6_v7_v8centroids a join v9 b on a.cluster=b.cluster')
colnames(v2_v2_v5_v6_v7_v8_v9centroids)
v2_v2_v5_v6_v7_v8_v9centroids <- v2_v2_v5_v6_v7_v8_v9centroids[,-32]
v10 <- cast(new_data, cluster~V10_new, sum)
v2_v2_v5_v6_v7_v8_v9_v10centroids <-sqldf('select a.*,b.* from v2_v2_v5_v6_v7_v8_v9centroids a join v10 b on a.cluster=b.cluster')
# BUGFIX: View() referenced an undefined name (v1_v4_v5_v6_v7_v9_v10centroids);
# show the data frame that was actually just built.
View(v2_v2_v5_v6_v7_v8_v9_v10centroids)
colnames(v2_v2_v5_v6_v7_v8_v9_v10centroids)
v2_v2_v5_v6_v7_v8_v9_v10centroids <- v2_v2_v5_v6_v7_v8_v9_v10centroids[,-34]
# Keep a copy that retains the cluster id; then drop it from the feature matrix
# that is fed to kmeans() in Step 10.
cluster_hier_clustering <- v2_v2_v5_v6_v7_v8_v9_v10centroids
v2_v2_v5_v6_v7_v8_v9_v10centroids <- v2_v2_v5_v6_v7_v8_v9_v10centroids[,-1]
# Step 10 - K-means clustering on the hierarchical-cluster centroids
nclusters <- 8 ##### give alternatives as 2,3 and 4
km_res <- kmeans(v2_v2_v5_v6_v7_v8_v9_v10centroids, nclusters,nstart=1)
cluster_kmeans_centroid <- cbind(v2_v2_v5_v6_v7_v8_v9_v10centroids, cluster_kmeans = km_res$cluster)
cluster_kmeans_centroid <- cbind(cluster_kmeans_centroid, cluster_hier = cluster_hier_clustering$cluster)
# Map each original observation to its final k-means cluster via its
# hierarchical cluster id.
Both_final_clusters<-sqldf('select a.*,b.cluster_kmeans as kmeans from final_cl a left join cluster_kmeans_centroid b on a.cluster = b.cluster_hier')
colnames(Both_final_clusters)
# Step 11 - Entropy calculation ("entrophy" spelling kept from original)
s1<-sqldf('select * from Both_final_clusters where kmeans=2')
# change the base vector - based on this class the cluster disorder is calculated
#baseVector <- c('V2_new','V3_new','V5_new','V6_new','V7_new','V9_new','V10_new')
baseVector <- c('V3_new') # try giving 'V3_new'
entrophy<-0
# library(sqldf)
# Weighted average of per-cluster class entropy; weight = cluster size / 1473
# (1473 = total rows of the cmc dataset — hard-coded; use nrow(d2) if the
# input ever changes).
for(i in 1:nclusters)
{ sql_temp_bs <- paste('select * from Both_final_clusters where kmeans = ',i,sep = "")
df1 <- sqldf(sql_temp_bs)
perEle<-0
inter<-0
for(k in 1:length(baseVector))
{
v1<-vector()
v2<-vector()
class_levels <- levels(as.factor(df1[[baseVector[k]]]))
classes <- length(class_levels)
c<-0
for(z in 1:classes)
{
sql_temp <- paste('select ',baseVector[k],',count(*) as cnt from df1 where round(',baseVector[k],',4) = round(',class_levels[z],',4) group by ',baseVector[k],sep = "")
print(sql_temp)
sql_temp_df <- sqldf(sql_temp)
val1<-as.integer(sqldf('select cnt from sql_temp_df'))
if(val1!=0)
{
# accumulate p*log(p); NOTE(review): the sign flip (#c <- -(c)) is
# commented out here but active in the k-prototypes copy below, so the
# two entropy figures have opposite signs — verify which is intended.
c<-c+(((val1/nrow(df1)) * log(val1/nrow(df1))))
if(z==classes)
{ #c <- -(c)
inter<- inter+ c}
}
}
}
entrophy = entrophy+( (nrow(df1)/1473)*inter )
}
print(entrophy)
# Additional Work - Computation of k-prototype clustering
# (requires d3: the factor-typed copy of the raw data from Step 1)
library(clustMixType)
kpres <- kproto(d3, 4)
#clprofiles(kpres, d3)
#View(sqldf("select * from d3 where kmeans = 3"))
Both_final_clusters_1 <- cbind(d3, cluster_kmeans = km_res$cluster)
#View(Both_final_clusters_1)
baseVector <- c('V3')
entrophy_1<-0
# library(sqldf)
# Same weighted-entropy computation as above, but on the raw categorical V3
# and the k-means labels attached to the raw data.
for(i in 1:4)
{
#print(baseVector[i])
# ##print(-1)
sql_temp_bs <- paste('select * from Both_final_clusters_1 where cluster_kmeans = ',i,sep = "")
df1 <- sqldf(sql_temp_bs)
#print(2)
perEle<-0
#print(1)
inter<-0
for(k in 1:length(baseVector))
{
v1<-vector()
v2<-vector()
#filter2<-df1$V6[k]
class_levels <- levels(as.factor(df1[[baseVector[k]]]))
classes <- length(class_levels)
c<-0
for(z in 1:classes)
{
sql_temp <- paste('select ',baseVector[k],',count(*) as cnt from df1 where ',baseVector[k],' = ',class_levels[z],' group by ',baseVector[k],sep = "")
sql_temp_df <- sqldf(sql_temp)
val1<-sqldf('select cnt from sql_temp_df')
#print(val1)
if(val1!=0)
{
c<-c+(((val1/nrow(df1)) * log(val1/nrow(df1))))
if(z==classes)
{ c <- -(c)
inter<- inter+ c}
}
}
}
entrophy_1 = entrophy_1+( (nrow(df1)/1473)*inter )
}
print(entrophy_1)
|
2bc1cea05b490de5ebcb4393eaa79d75994f12f5
|
c3fced9fa3881b8d07000adfb5bebe4213eaa4a4
|
/ANALYSIS/PlotVariablesOverTime.R
|
b43452355c65c0069fafbd91e2e57d5045a81172
|
[] |
no_license
|
rafael-schuetz/Pareto
|
ea9c06cb588113bbdf6a3b5da27a2d2a22f37dc8
|
74c414268d429373b83ccfb27bf222ae25b97c32
|
refs/heads/master
| 2022-04-13T11:36:56.587595
| 2020-04-08T18:31:48
| 2020-04-08T18:31:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,597
|
r
|
PlotVariablesOverTime.R
|
# inspect influence of variables over time
# in percentages
# set the variables:
# responset = every column name of mergedData (each becomes a plotted outcome)
responset = names(mergedData)
# [5:213]
# expl= names(mergedData)[4]
responset = purrr::set_names(responset)
responset
# explt = the single explanatory variable ("year")
explt = purrr::set_names("year")
explt
# create function HERE (ggplot)
# Plot the stacked distribution of outcome column `y` across column `x`
# (e.g. "year"). `x` and `y` are column NAMES of the global `mergedData`,
# passed as strings.
plotOutcomeOverTime = function(x, y){
  # aes_string() is deprecated in ggplot2 >= 3.0; the .data pronoun is the
  # supported way to map string column names.
  ggplot(mergedData, aes(fill = .data[[y]], x = .data[[x]], y = .data[[y]])) +
    # geom_bar(position="stack", stat="identity") +
    geom_col(position = position_stack(reverse = TRUE)) +
    theme_bw() +
    theme(axis.title.y = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank())
}
# plotOutcomeOverTimeTest = function(x, y){
# ggplot(mergedData, aes_string(fill = y, x = x, y = y) ) +
# geom_bar(position = position_stack(reverse = TRUE), stat="identity") +
# # geom_col(position = position_stack(reverse = TRUE)) +
# theme_bw()
# }
# check if the function works
test <- plotOutcomeOverTime("year", "lessIllOrdinal")
# plotOutcomeOverTimeTest("year", "lessIllOrdinal")
# map the function: one plot per (outcome, explanatory) pair —
# all_yearPlots is a nested list indexed as all_yearPlots$<outcome>$<expl>
all_yearPlots = map(responset,
~map(explt, plotOutcomeOverTime, y = .x) )
# print the plots # all variables can be plotted # examples
# all_yearPlots$age[1:2]
# all_yearPlots$eatersPerMealNo[1:2]
# all_yearPlots$newKidsNo[1:2]
# all_yearPlots$cateringNo[1:2]
# all_yearPlots$mealsInInstitutionNo[1:2]
# all_yearPlots$weeklyCooks
# all_yearPlots$monthlyCooks
# all_yearPlots$tasksLunch
# all_yearPlots$parentalDialog
# all_yearPlots$qualitySatisfies
# all_yearPlots$trainingCompletedNo
# all_yearPlots$trainingStartedNo
# all_yearPlots$selfworth
# all_yearPlots$influenceHome
# all_yearPlots$moreIndependent
# all_yearPlots$dayToDaySkills
# all_yearPlots$dayToDaySkillsOrdinal
#all_yearPlots$lessIll_ordered
# plot for presentation .. explain subsidy value
plotOutcomeOverTime("tripsSubsidy", "tripsNo")
TripsSub_plot<- plotOutcomeOverTime("realTripsSubsidy", "tripsNo")
MealsSub_plot<- plotOutcomeOverTime("realSubsidy", "mealsNo")
#### plots we need for the paper
lessIll_Time<- plotOutcomeOverTime("year", "lessIll_ordered")
appreciateHealthy_Time<- plotOutcomeOverTime("year", "appreciateHealthy_ordered")
dietaryKnowledge_Time<- plotOutcomeOverTime("year", "dietaryKnowledge_ordered")
# NOTE(review): plot_grid() comes from cowplot — presumably loaded elsewhere
# in the project; confirm, since no library(cowplot) appears in this file.
Health_Year <- plot_grid(lessIll_Time, dietaryKnowledge_Time, appreciateHealthy_Time,
ncol = 1, nrow = 3, align = "v",
labels = "AUTO",
label_x = 0, label_y = 0, hjust = -3, vjust =
-1.5, label_fontface = "plain", label_size = 11)
# persist the composed and individual figures for the paper
saveRDS(Health_Year, "./ANALYSIS/GRAPHS/PAPER/Health_Year.Rds")
saveRDS(lessIll_Time, "./ANALYSIS/GRAPHS/PAPER/lessIll_Time.Rds")
saveRDS(dietaryKnowledge_Time, "./ANALYSIS/GRAPHS/PAPER/dietary_Time.Rds")
saveRDS(appreciateHealthy_Time, "./ANALYSIS/GRAPHS/PAPER/appreciate_Time.Rds")
selfworth_Time <- plotOutcomeOverTime("year", "selfworth_ordered")
dayToDaySkills_Time <- plotOutcomeOverTime("year", "dayToDaySkills_ordered")
saveRDS(selfworth_Time, "./ANALYSIS/GRAPHS/PAPER/selfworth_Time.Rds")
saveRDS(dayToDaySkills_Time, "./ANALYSIS/GRAPHS/PAPER/dayToDay_Time.Rds")
Equality_Year <- plot_grid(selfworth_Time, dayToDaySkills_Time,
ncol = 1, nrow = 2, align = "v",
labels = "AUTO",
label_x = 0, label_y = 0, hjust = -3, vjust =
-1.5, label_fontface = "plain", label_size = 11)
saveRDS(Equality_Year, "./ANALYSIS/GRAPHS/PAPER/Equality_Year.Rds")
|
c44e34cc11779ef134573dfb3a052c76f1b09a91
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mvinfluence/examples/Fertilizer.Rd.R
|
39785c5fa4d141007d8a9d53b8763f65c6f2074d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 479
|
r
|
Fertilizer.Rd.R
|
# Auto-extracted example code from the mvinfluence::Fertilizer help page.
library(mvinfluence)
### Name: Fertilizer
### Title: Fertilizer Data
### Aliases: Fertilizer
### Keywords: datasets
### ** Examples
data(Fertilizer)
# simple plots
plot(Fertilizer, col=c('red', rep("blue",7)), cex=c(2,rep(1.2,7)), pch=as.character(1:8))
biplot(prcomp(Fertilizer))
# fit multivariate linear model (two responses: grain and straw)
mod <- lm(cbind(grain, straw) ~ fertilizer, data=Fertilizer)
# NOTE(review): Anova() is from the car package — presumably attached via
# mvinfluence's dependencies; confirm before running standalone.
Anova(mod)
# influence plots (m=1)
influencePlot(mod)
influencePlot(mod, type='LR')
influencePlot(mod, type='stres')
|
ac3b3f623a94fd9af2f4a0148758360a97887bb8
|
5b91d691fd5d951bfced93bdb8715621f7abe2c8
|
/man/getTaskDictionary.Rd
|
e451339533a8fcc8be219132f9be11e2ceb62b01
|
[
"BSD-3-Clause"
] |
permissive
|
jakob-r/mlrHyperopt
|
9a28ace02b85a712194d0eb0404b386044355147
|
33720662daf90bc6b4c68c7dc927a57daa861d4e
|
refs/heads/master
| 2022-01-26T07:03:23.592194
| 2022-01-05T22:12:12
| 2022-01-05T22:12:12
| 78,555,466
| 31
| 6
| null | 2017-09-19T11:48:53
| 2017-01-10T17:10:29
|
HTML
|
UTF-8
|
R
| false
| true
| 921
|
rd
|
getTaskDictionary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getTaskDictionary.R
\name{getTaskDictionary}
\alias{getTaskDictionary}
\title{Create a dictionary based on the task.}
\usage{
getTaskDictionary(task)
}
\arguments{
\item{task}{[\code{Task}]
The Task}
}
\value{
[\code{\link[base]{list}}]. Used for evaluating the expressions
within a parameter, parameter set or list of parameters.
}
\description{
Returns a dictionary, which contains the \link{Task} itself
(\code{task}), the number of features (\code{p}) the model is trained on, the number of
observations (\code{n.task}) of the task in general, the number of observations (\code{n})
in the current subset, the task type (\code{type}) and in case of
classification tasks, the number of class levels (\code{k}) in the general task.
}
\examples{
task = makeClassifTask(data = iris, target = "Species")
getTaskDictionary(task)
}
\concept{task}
|
7b776fde0bff62eca91e74ff36b92c071b9548e1
|
e7d3e2c2172084243aec2bee959ea2b89a4977ee
|
/codes/var_preprocess.R
|
a121d763db6228e57aea3186181eb2d26a71f42c
|
[] |
no_license
|
DylanJoo/var.retail
|
8190ec8d6ae630eef5a34179211352e0583a4478
|
939ba4d2051bfe81678a11f15f03410fe2ac7edb
|
refs/heads/main
| 2023-07-18T17:00:50.915899
| 2021-09-27T04:24:12
| 2021-09-27T04:24:12
| 360,149,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,654
|
r
|
var_preprocess.R
|
# ---- Load prepared IRI retail data and build category-level series ----
# NOTE(review): hard-coded absolute path — runs only on the author's machine.
setwd("/Users/jhjoo/Desktop/var/final.codes/")
# tsFixed() and tsTransform() are defined in var_utils.R (project helper file).
source("codes/var_utils.R")
load(file="data/IRI.652759.prep.cate.RData")
load(file="data/IRI.652759.prep.sku.RData")
#### category level ####
# Outer-join all category data frames on WEEK into one wide table.
data = Reduce(function(x, y) merge(x=x, y=y, by="WEEK", all=T), IRI.652759.prep.cate)
data = tsFixed(data)
# Column groups selected by name pattern; tsTransform args are
# (series, <flag>, <transform name>, 0) — see var_utils.R for semantics.
dollars = sapply(data[, grep('SALES', colnames(data))],
function(x) tsTransform(x, T, 'raw', 0))
salesG = sapply(data[, grep('SALES', colnames(data))],
function(x) tsTransform(x, T, 'log.diff', 0))
promotion = sapply(data[, grep('PR.RATE', colnames(data))],
function(x) tsTransform(x, F, 'raw', 0))
pz.adjust = sapply(data[, grep('PZ.ADJ', colnames(data))],
function(x) tsTransform(x, F, 'raw', 0))
advertising = sapply(data[, grep('AD.RATE', colnames(data))],
function(x) tsTransform(x, F, 'raw', 0))
display = sapply(data[, grep('DI.RATE', colnames(data))],
function(x) tsTransform(x, F, 'raw', 0))
# Exogenous regressor matrices: old spec uses promotion rate, new spec uses
# the adjusted price variable.
exog.cate.old=cbind(promotion, advertising, display)
exog.cate=cbind(pz.adjust, advertising, display)
dollars.cate=tsFixed(dollars)
salesG.cate=tsFixed(salesG)
colnames(salesG.cate)=gsub("SALES", "SALESG", colnames(salesG.cate))
exog.cate=tsFixed(exog.cate)
exog.cate.old=tsFixed(exog.cate.old)
##############################################
# SALESG:(2:53) Dollar sales growth of category
# PZ.REDUCT:(2:53) average price reduction ratio of (each SKU/category)
# AD: (2:53)advertising rate of category
# DI: (2:53)display rate of category
############################################
save(dollars.cate, file="data/dollars.cate.RData")
save(salesG.cate, file="data/salesG.cate.RData")
save(exog.cate, file="data/exog.cate.RData")
save(exog.cate.old, file="data/exog.cate.old.RData")
#### SKU level ####
# Same pipeline as the category level, but on the per-SKU data frames.
data.sku = Reduce(function(x, y) merge(x=x, y=y, by="WEEK", all=T), IRI.652759.prep.sku)
data.sku = tsFixed(data.sku)
# 'PR\\.' / 'AD\\.' / 'DI\\.' anchor the dot so e.g. 'PR.RATE' columns match
# literally at SKU level.
pr.sku = sapply(data.sku[, grep('PR\\.', colnames(data.sku))],
function(x) tsTransform(x, F, 'raw', 0))
pz.adj.sku = sapply(data.sku[, grep('PZ.ADJ', colnames(data.sku))],
function(x) tsTransform(x, F, 'raw', 0))
advertising.sku = sapply(data.sku[, grep('AD\\.', colnames(data.sku))],
function(x) tsTransform(x, F, 'raw', 0))
display.sku = sapply(data.sku[, grep('DI\\.', colnames(data.sku))],
function(x) tsTransform(x, F, 'raw', 0))
exog.sku=cbind(pz.adj.sku, advertising.sku, display.sku)
exog.sku=tsFixed(exog.sku)
##############################################
# PZ.REDUCT.sku:(2:53) price reduction respect to fixed price
# AD,sku: (2:53) advertising rate of category
# DI.sku: (2:53) display rate of category
############################################
save(exog.sku, file="data/exog.sku.RData")
###########################################
# ADF unit root test
###########################################
load(file='data/dollars.cate.RData')
load(file='data/salesG.cate.RData')
library(tseries)
# Column-wise ADF p-values (H0: series has a unit root, i.e. non-stationary).
# Factored into a helper to remove the duplicated loop; seq_len() is safe for
# zero-column input, and vapply guarantees a numeric result.
adf_pvalues <- function(mat) {
  vapply(seq_len(ncol(mat)),
         function(j) adf.test(mat[, j], alternative = 'stationary')$p.value,
         numeric(1))
}
# Raw dollar sales: expect most categories to fail rejection (non-stationary).
adf.before <- adf_pvalues(dollars.cate)
plot(adf.before, type = 'h', xlab='Category', ylab='P-Value', lwd=5)
abline(h=0.05, col=2)
# Log-differenced growth: expect stationarity (p-values below 0.05).
adf.after <- adf_pvalues(salesG.cate)
plot(adf.after, type = 'h', xlab='Category', ylab='P-Value', lwd=5)
abline(h=0.05, col=2)
|
6bca51c10796abc2ee0f4e5de15d3f24cb43f1f5
|
a4d5770c9a1fb3f8a36c302bc171156bd5c7118c
|
/Code/simper_models_secondary_traits_pa.R
|
8ae23b305c92a22fe38a4743ec0a7268f1f12e41
|
[
"CC-BY-4.0"
] |
permissive
|
CHANGElab-AlbacoreTuna/traits-review
|
c40a5b2d5ec5529e5292ef8d3f779d67ea1b1f63
|
73ad8ca0d26c3d66d551476445f0c653c2e1c5b3
|
refs/heads/master
| 2023-04-25T04:12:53.453286
| 2021-05-18T09:42:14
| 2021-05-18T09:42:14
| 292,337,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,881
|
r
|
simper_models_secondary_traits_pa.R
|
##########
##########
# This code contains the multivariate modeling component of the analysis presented in
# in Green et al. (2020)
# A review on the use of traits in ecological research
##########
##########
# AUTHOR: Cole B. Brookson
# DATE OF CREATION: 2020-06-30
##########
##########
library(knitr)
library(tidyverse)
library(vegan)
library(viridis)
library(PNWColors)
library(mvabund)
library(reshape2)
library(here)
################################## Simper for the different groups
#load data
# Secondary-classification presence/absence data: the first 10 columns are
# study-level categorical descriptors, the remainder are dummy-coded traits.
secondary_traits = read_csv(here('./Data/Cole-Output-Data(readyforanalysis)/secondary_traits_dummy_pa_models.csv'))
secondary_categorical = secondary_traits[,1:10]
secondary_dummy = secondary_traits[,11:ncol(secondary_traits)]
######################## Ecosystem
# SIMPER decomposition: identifies the traits contributing most to the
# dissimilarity between each pair of ecosystem groups.
simper_secondary_pa_ecos = simper(secondary_dummy, secondary_categorical$Ecosystem, permutations = 1000)
summary_ecos = summary(simper_secondary_pa_ecos)
# One CSV per pairwise ecosystem contrast. The component names of the simper
# object are the "GroupA_GroupB" pair labels, which also name the output files
# (identical to the six explicit write_csv calls this loop replaces).
for (contrast in c("Freshwater_Terrestrial", "Freshwater_Marine", "Freshwater_Broad",
                   "Terrestrial_Marine", "Terrestrial_Broad", "Marine_Broad")) {
  write_csv(simper_secondary_pa_ecos[[contrast]],
            here(paste0("./Data/Cole-Output-Simper/Secondary-Classification-PA/simper_secondary_pa_ecos_",
                        contrast, ".csv")))
}
######################## Filter
# SIMPER on the trait-filter classification (ecological / fundamental /
# physical / trophic).
simper_secondary_pa_filter = simper(secondary_dummy, secondary_categorical$Filter, permutations = 1000)
summary_filter = summary(simper_secondary_pa_filter)
# One CSV per pairwise filter contrast (same files as the six explicit
# write_csv calls this loop replaces).
for (contrast in c("Ecological_Fundamental", "Ecological_Physical", "Ecological_Trophic",
                   "Fundamental_Physical", "Fundamental_Trophic", "Physical_Trophic")) {
  write_csv(simper_secondary_pa_filter[[contrast]],
            here(paste0("./Data/Cole-Output-Simper/Secondary-Classification-PA/simper_secondary_pa_filter_",
                        contrast, ".csv")))
}
######################## Global Change
# Binary contrast: studies addressing global change ("yes") vs. not ("no").
simper_secondary_pa_gc = simper(secondary_dummy, secondary_categorical$GlobalChangeCat, permutations = 1000)
summary_gc = summary(simper_secondary_pa_gc)
# Only one pairwise component exists for a two-level factor.
write_csv(simper_secondary_pa_gc$yes_no,
here("./Data/Cole-Output-Simper/Secondary-Classification-PA/simper_secondary_pa_gc_yes_no.csv"))
######################## Type of Study
# SIMPER on study type (5 levels -> 10 pairwise contrasts).
simper_secondary_pa_tos = simper(secondary_dummy, secondary_categorical$TOS, permutations = 1000)
summary_tos = summary(simper_secondary_pa_tos)
# One CSV per pairwise study-type contrast (identical to the ten explicit
# write_csv calls this loop replaces).
for (contrast in c("Observational_Experiment", "Observational_Review",
                   "Observational_Metanalysis", "Observational_TModel",
                   "Experiment_Review", "Experiment_Metanalysis", "Experiment_TModel",
                   "Review_Metanalysis", "Review_TModel", "Metanalysis_TModel")) {
  write_csv(simper_secondary_pa_tos[[contrast]],
            here(paste0("./Data/Cole-Output-Simper/Secondary-Classification-PA/simper_secondary_pa_tos_",
                        contrast, ".csv")))
}
######################## Prediction
# Binary contrast on the Predictive flag; the component name is the
# backticked "0_1" level pair.
# NOTE(review): unlike the other sections, no summary() result is stored
# here -- confirm that is intentional.
simper_secondary_pa_predict = simper(secondary_dummy, secondary_categorical$Predictive, permutations = 1000)
write_csv(simper_secondary_pa_predict$`0_1`,
here("./Data/Cole-Output-Simper/Secondary-Classification-PA/simper_secondary_pa_PREDICT.csv"))
|
9ec62f263c7c0903e247639c863ee39ccd377acf
|
f41146d721917805eecaacf1079d635a79f24a6a
|
/man/matrix_idx.Rd
|
e2a5cab6dbbf444183c4982540e6fc53f733e8bc
|
[
"MIT"
] |
permissive
|
krzjoa/matricks
|
13fc2e6547534f7578202da8332fb86ef1a5f650
|
fd9987b6a4ee41be2520f0abfa935144fef06477
|
refs/heads/master
| 2020-09-14T05:39:12.488603
| 2020-03-03T22:15:06
| 2020-03-03T22:15:06
| 223,035,650
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 617
|
rd
|
matrix_idx.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{matrix_idx}
\alias{matrix_idx}
\title{Get available matrix indices}
\usage{
matrix_idx(mat, n.row = NULL, n.col = NULL, mask = NULL)
}
\arguments{
\item{mat}{matrix}
\item{n.row}{number of rows; default: NULL}
\item{n.col}{number of columns; default: NULL}
\item{mask}{logical matrix; default: NULL}
}
\description{
Get available matrix indices
}
\examples{
T <- TRUE; F <- FALSE
mat <- matrix(0, 3, 3)
mask <- m(T, T, F | T, F, T | F, F, T)
# All possible indices of the matrix
matrix_idx(mat)
matrix_idx(mat, mask = mask)
matrix_idx(mask = mask)
}
|
05da7c19414232add0aa80f34a50f2bf14b160c7
|
3a95ff9a53da18ab8d554c34b75a83d3ac6da37e
|
/man/stratasnew.sp.Rd
|
6d0767682c64426ed8c5d23dc3fb78ce539f76f6
|
[] |
no_license
|
sigurdurthorjonsson/gisland
|
0c3213bcbd6aa6add9032af890ca7c6729ffc96c
|
41bc0f1f65e2fa34ae70938b87de3f5595642162
|
refs/heads/master
| 2021-01-19T21:09:12.999641
| 2017-05-31T17:18:04
| 2017-05-31T17:18:04
| 88,616,380
| 0
| 0
| null | 2017-04-18T11:16:24
| 2017-04-18T11:16:24
| null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
stratasnew.sp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_misc.R
\docType{data}
\name{stratasnew.sp}
\alias{stratasnew.sp}
\title{stratasnew.sp}
\format{\code{SpatialPolygonsDataFrame}}
\usage{
stratasnew.sp
}
\description{
New strata (STRATAS) as a \code{SpatialPolygonsDataFrame}
}
\details{
Modified object from \code{/net/hafkaldi/export/u2/reikn/Splus5/HAUSTRALLNewStrata/Stratifiering/.RData}
}
\author{
Höskuldur Björnsson <hoski@hafro.is>
}
\keyword{datasets}
|
48c511a86b54f94421c847be95d71d7b93fea63a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/etasFLP/examples/magn.plot.Rd.R
|
a640541a6ac27dd86f67b3546ad2cc8c56620c7d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 297
|
r
|
magn.plot.Rd.R
|
# Extracted example code for etasFLP::magn.plot (auto-generated from the Rd
# file). The example body is wrapped in "Not run" and is never executed here.
library(etasFLP)
### Name: magn.plot
### Title: Transformed plot of the magnitudes distribution of an
### earthquakes catalog
### Aliases: magn.plot
### Keywords: magnitude Gutenberg-Richter
### ** Examples
## Not run:
##D data(italycatalog)
##D magn.plot(italycatalog)
## End(Not run)
|
db06d95e10d310d7916dfa8d6368f0fd028b634c
|
9a91cb5e7d7a26d2d2b227d17328dff8211bcaf7
|
/brd_r_tools/AELB(plot point).R
|
7d51c3567acfc8c60d027027fab3bb8085e0b3ba
|
[] |
no_license
|
aazamuddin4/Data-Mapping-Using-R
|
def1b84bbbdb5cc544242759ff137c28fca9d22b
|
152cd5cba41e5e5cfbfcc081c440fea49e1c2fbc
|
refs/heads/main
| 2023-05-31T00:14:29.136804
| 2021-06-16T09:45:46
| 2021-06-16T09:45:46
| 373,898,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 861
|
r
|
AELB(plot point).R
|
# Libraries for spatial handling, plotting, and Google basemap retrieval.
library(sp)
library(sf)
library(viridis)
library(RColorBrewer)
library(readxl)
library(gstat)
library(tidyverse)
library(mapview)
library(lwgeom)
library(classInt)
library(grid)
library(raster)
library(lubridate)
library(ggplot2)
library(ggmap)
library(leaflet)
# Dose-rate survey points: keep the first three columns (lat, lon, dose) and
# tag each row with its source file.
data_1 <- read.csv("data/data_aelb1.csv")
data_1 <- dplyr::select(data_1, c(1, 2, 3))
colnames(data_1) <- c("lat", "lon", "dose")
data_1 <- mutate(data_1, source = "data_aelb1")
summary(data_1)
# SECURITY FIX: a Google Maps API key was previously hard-coded here and
# committed to version control -- that key must be revoked. Supply the key
# through the GOOGLE_MAPS_API_KEY environment variable instead.
register_google(key = Sys.getenv("GOOGLE_MAPS_API_KEY"))
# Hybrid satellite basemap centred on the survey site; points coloured by
# dose rate (green = low, red = high).
map <- get_map(location = c(101.7543083, 2.8967), zoom = 18, maptype = "hybrid", legend = 'bottomleft')
ggmap(map, extent = "device") +
  geom_point(aes(x = lon, y = lat, colour = dose), alpha = 1, size = 3, data = data_1) +
  scale_colour_gradient(low = "green", high = "red", name = "doserate(μSv/hr)")
|
ddf9ed77c1ac69f9a826bb5ae033d483d9db4bde
|
8ed400a40b6a1cfe5ce4022e14428f94ea01cbec
|
/scratch/PracticalMachineLearning/covariate.R
|
d308dd13ca18feef5c5f0610b1a63f02351f2eab
|
[] |
no_license
|
mkm29/DataScience
|
16269be78373faf013313eccf0e92f55b506308c
|
f046d2af1fb0caa9fb3504ae3006a9c9adec0e39
|
refs/heads/master
| 2022-02-13T21:22:51.200242
| 2022-02-02T01:08:47
| 2022-02-02T01:08:47
| 120,350,319
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,149
|
r
|
covariate.R
|
# Covariate (feature) creation examples on the ISLR Wage data.
library(caret); library(ISLR); data(Wage)
inTrain <- createDataPartition(y = Wage$wage, p = 0.7, list = FALSE)
training <- Wage[inTrain, ]; testing <- Wage[-inTrain, ]
# visualize the data in order to get insight in creating new covariates/features
table(training$jobclass)
# Dummy variables: expand the jobclass factor into indicator columns.
dummies <- dummyVars(wage ~ jobclass, data = training)
head(predict(dummies, newdata = training))
# Near-zero-variance covariates carry almost no information; candidates to drop.
nsv <- nearZeroVar(training, saveMetrics = TRUE)
nsv
# so remove the region covariate
table(training$race) # dominated by a single level
# BASIS - instead of linear models we might want a curve
library(splines)
# bs() builds a degree-3 (cubic) spline basis over age.
bsBasis <- bs(training$age, df = 3)
head(bsBasis)
lm1 <- lm(wage ~ bsBasis, data = training)
par(mfrow = c(1, 1))
plot(training$age, training$wage, pch = 19, cex = 0.5)
points(training$age, predict(lm1, newdata = training), col = "red", pch = 19, cex = 0.5)
# Apply the SAME training-set basis to the test-set ages.
# BUG FIX: the original called predict(bsBasis, age = testing$age); predict.bs
# has no 'age' argument, so the test ages were silently swallowed by '...' and
# the unchanged training basis was returned. The new-data argument is 'newx'.
predict(bsBasis, newx = testing$age)
|
1a166e49a937994bec7b29569e063aebf65e8c92
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/gdalcubes/inst/testfiles/libgdalcubes_set_threads/libFuzzer_libgdalcubes_set_threads/libgdalcubes_set_threads_valgrind_files/1609875248-test.R
|
2f05885cf53543749d4a01e84f46ef9aadcade74
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 412
|
r
|
1609875248-test.R
|
# Auto-generated fuzz/regression test case (valgrind run) for the internal
# C++ entry point gdalcubes:::libgdalcubes_set_threads. The integer vector
# below is the fuzzer-derived input, reproduced verbatim.
testlist <- list(n = c(2864897L, 0L, 16777467L, -8462337L, -83951350L, 772410039L, 179789057L, -53760L, 1572274176L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 65535L, -1L, -1L, -1L, -16711681L, -49345L, -1058214676L, -320017172L, -320017172L, -320017172L, -320017172L, -320065300L, -320017172L, -320017172L, -320017408L, 33292158L, -536872193L))
# Invoke the entry point with the fuzzed arguments and display the result.
result <- do.call(gdalcubes:::libgdalcubes_set_threads,testlist)
str(result)
|
b90f95d062c6d52c3f4e64084fb510abf4713ded
|
ed22113450e39b35d827f5dda10fe2fe6a1b62ce
|
/explo_ana_assignment2_plotq3_3.R
|
3c3f0e8dde1a667bb88d429e520d04e029d88003
|
[] |
no_license
|
luciferrkr007/explo-project-2
|
15861f858dc3139ad7b5aa68693811d87e9b7fa1
|
0826b086fe63a4b1562fa0b82d6232e213556e7e
|
refs/heads/master
| 2022-11-06T22:39:05.584714
| 2020-06-21T08:45:15
| 2020-06-21T08:45:15
| 273,866,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 457
|
r
|
explo_ana_assignment2_plotq3_3.R
|
# Plot question 3: total PM2.5 emissions in Baltimore City, by source type.
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Baltimore City records only (fips 24510), totalled per year and source type.
NEIF <- subset(NEI, fips == "24510")
data <- aggregate(Emissions ~ year + type, NEIF, sum)
# Render to PNG. BUG FIX: the ggplot object must be print()ed explicitly --
# when this script is run via source(), top-level ggplot objects are not
# auto-printed and the resulting PNG would be blank.
png("plotq3.png")
g <- ggplot(data, aes(year, Emissions, color = type))
print(g + geom_line() + xlab("Year") + ylab(expression("Total PM"[2.5]*" Emissions")) +
        ggtitle("Total Emissions per type in Baltimore"))
dev.off()
|
0785828d8d0ebcdbde0d330b1281ed022177f372
|
ba014fadb8ff77ed44803095960c82a1308ee9cb
|
/R/clean_cloud.R
|
d4fa01bfdcbadab2f884e28a5174fdd88f9e08b2
|
[
"MIT"
] |
permissive
|
AdamWilsonLab/meshSDM
|
2ac6f2c14a74b5daf11d7f1f2286eb21517f7478
|
de58b57d0d89d65e07050afe138aee843888818b
|
refs/heads/master
| 2023-04-15T12:56:58.522947
| 2023-03-29T15:05:31
| 2023-03-29T15:05:31
| 134,459,202
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,745
|
r
|
clean_cloud.R
|
#' Import and clean a point-cloud export, deriving hole/curvature metrics
#'
#' Reads a point-cloud CSV, standardises column names, computes each point's
#' distance and angle to its smoothed position, derives signed hole and
#' curvature metrics, and (optionally) suffixes the derived columns.
#'
#' @param file Path to the point-cloud CSV. The first data row is dropped
#'   (assumed to be a non-data row in the export -- TODO confirm).
#' @param prefix Optional string appended as "_<prefix>" to every derived
#'   column name; coordinates (X,Y,Z), colours (Rf,Gf,Bf) and normals
#'   (Nx,Ny,Nz) keep their names.
#' @return A data frame of cleaned points with derived metrics.
#'   NOTE: relies on helpers `fdist()` and `angle3D()` defined elsewhere.
#' @import plotly
#' @import dplyr
#' @import scales
clean_cloud <- function(file, prefix = NULL) {
  message(paste("################# Importing data"))
  d = read_csv(file, progress = F, col_types = cols()) %>% slice(-1)
  # Standardise column names; one_of()/contains() tolerate the two export
  # variants ("Rf" vs "R", etc.).
  env =
    select(d,
           X = "Coord. X",
           Y = "Coord. Y",
           Z = "Coord. Z",
           Rf = one_of(c("Rf", "R")),
           Gf = one_of(c("Gf", "G")),
           Bf = one_of(c("Bf", "B")),
           Nx = Nx,
           Ny = Ny,
           Nz = Nz,
           gc = contains("Gaussian curvature"),
           Xs = contains("Coord. X.smooth"),
           Ys = contains("Coord. Y.smooth"),
           Zs = contains("Coord. Z.smooth"),
           rough = contains("Roughness"),
           aspect = "Dip direction (degrees)",
           density = contains("Volume density"),
           slope = "Dip (degrees)") %>%
    mutate(dist = fdist(X, Xs, Y, Ys, Z, Zs))  # distance to smoothed position
  message(paste("################# Calculating hole metrics for quad:"))
  # Angle between each point's normal and the vector to its smoothed position.
  env$angle = apply(env[, c("X", "Y", "Z",
                            "Xs", "Ys", "Zs",
                            "Nx", "Ny", "Nz")], 1, angle3D)
  # Sign the hole/curvature metrics: angle < 90 degrees means the point sits
  # on the near side of the smoothed surface (sign = -1); sign2 is the smooth
  # cosine version of the same idea.
  env = mutate(env,
               sign = ifelse(angle < 90, -1, 1),
               sign2 = -cos(angle * (pi / 180)),
               hole = dist * sign,
               gcs = gc * sign,
               gcs_log = log1p(gc) * sign,
               hole2 = dist * sign2,
               gcs2 = gc * sign2,
               gcs_log2 = log1p(gc) * sign2
  )
  ## Optionally suffix the derived columns with "_<prefix>".
  ## BUG FIX: the original built the renamed frame as a separate `env2`
  ## variable but then returned `env`, silently discarding the suffixed
  ## columns; assign back to `env` so the prefixed frame is returned.
  if (!is.null(prefix)) {
    env = cbind.data.frame(
      select(env, X, Y, Z, Rf, Gf, Bf, Nx, Ny, Nz),
      select(env, -X, -Y, -Z, -Rf, -Gf, -Bf, -Nx, -Ny, -Nz) %>%
        select_all(.funs = funs(paste0(., "_", prefix)))  # NOTE: funs() is deprecated in newer dplyr
    )
  }
  return(env)
}
|
6fca25dfcb4db30e957bbdcf3ab313c56ce6f0c0
|
4248c944ada8ee64779f9318fc180cb6f732decd
|
/cachematrix.R
|
11de1fdd7f7a8f3695730c3faa7fa82459bcb239
|
[] |
no_license
|
RonWilkinson/ProgrammingAssignment2
|
4a55feedee1ab534d6acbc07dd0e4991824aa235
|
0b2c47f7af05b3cdd00629bacd7eacbe84e9bbbd
|
refs/heads/master
| 2020-04-05T07:45:58.969908
| 2017-01-22T02:21:26
| 2017-01-22T02:21:26
| 32,333,807
| 0
| 0
| null | 2015-03-16T15:18:38
| 2015-03-16T15:18:37
| null |
UTF-8
|
R
| false
| false
| 4,287
|
r
|
cachematrix.R
|
#########################################################################################################################
##
## cachematrix.R code written by Ronald Wilkinson for Coursera's R Programming course, Programming Assignment 2
##
## Originally written when I took the course standalone in 2015. Redoing course as part of certificate program
## in 2017. The only change in 2017 is a slight revision of the comments.
##
#########################################################################################################################
##
## This code defines functions used to store a matrix and cache its inverse in a custom object class one could call
## a "cacheMatrix".
##
## makeCacheMatrix creates an object that holds:
##
## an internal storage location for a matrix
## an internal storage location for the inverse of the matrix
## get and set functions for the matrix storage location
## getinverse and setinverse functions for the inverse matrix storage location
##
## It returns a list containing the get and set functions for both matrix and inverse
## so their values can be accessed from outside the object.
##
##
## cacheSolve returns the inverse of a matrix that is stored in a "cacheMatrix" object.
##
## If the inverse is already cached, cacheSolve returns the cached copy.
##
## Otherwise, cacheSolve:
##
## 1. Calculates the inverse using the solve() function, passing it any optional arguments.
## 2. Caches the calculated inverse in the "CacheMatrix" object for later retrieval.
## 3. Returns the calculated inverse for immediate use.
##
## Note that if just returning the cached inverse, the optional arguments are ignored, so if you want to recalculate
## the inverse of a cacheMatrix x using different optional arguments, it is necessary to first clear the cache using
## x$setinverse(NULL) before calling cacheSolve(x).
##
##########################################################################################################################
makeCacheMatrix <- function(x = matrix()) {
  ## Constructor for a "cacheMatrix": a closure-backed object pairing a matrix
  ## with a cache slot for its inverse, exposed through accessor functions.
  xinverse <- NULL  # NULL means "inverse not yet computed"

  ## Replace the stored matrix; any previously cached inverse is now stale,
  ## so the cache is cleared at the same time. <<- writes to the enclosing
  ## (object-level) environment rather than the function's own frame.
  set <- function(y) {
    x <<- y
    xinverse <<- NULL
  }

  ## Retrieve the stored matrix.
  get <- function() {
    x
  }

  ## Store a computed inverse in the cache slot.
  setinverse <- function(xi) {
    xinverse <<- xi
  }

  ## Retrieve the cached inverse (NULL when nothing has been cached).
  getinverse <- function() {
    xinverse
  }

  ## Return the four accessors so callers can reach the closed-over state.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held in cacheMatrix `x`, computing it
  ## only when no cached copy exists. Extra arguments are forwarded to
  ## solve() on a cache miss (ignored on a hit).
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute the inverse, memoise it for next time, return it.
  computed <- solve(x$get(), ...)
  x$setinverse(computed)
  computed
}
|
206165c411f33aa1c9f01c3e414faac982db2840
|
373fca8cc4489c7be1776f2e2881cf3385184bf0
|
/R/03_process_Imnaha_data.R
|
270c6afefde0e36ce65f4f756b9f3f811be1a07a
|
[
"Apache-2.0"
] |
permissive
|
ryankinzer/Imnaha_PITtracking
|
713aa078a204bd329d9e987efa988f82ce5ec1b0
|
948cd38c96cc511d93c90377faa3fd4291bfb9d2
|
refs/heads/master
| 2021-07-04T22:18:36.018451
| 2020-08-03T14:58:19
| 2020-08-03T14:58:19
| 136,219,141
| 0
| 1
|
Apache-2.0
| 2019-06-28T20:23:47
| 2018-06-05T18:34:15
|
HTML
|
UTF-8
|
R
| false
| false
| 7,888
|
r
|
03_process_Imnaha_data.R
|
#------------------------------------------------------------------------------
# Exploratory Data Analysis
# Joseph Feldhaus (ODFW) & Ryan Kinzer (NPT)
# Date: 6/11/18 Modified: 5/30/19
#------------------------------------------------------------------------------
# The purpose of this script is to process Imnaha only detections for more
# precise weir management and tracking of fish in the system. THe script
# requires the PITcleanR processed capture history file output by the "02..."
# R script.
#------------------------------------------------------------------------------
# load packages
library(tidyverse)
library(lubridate)
library(PITcleanr)
library(xlsx)
# Set Return Year ----
# The current calendar year parameterises every input and output file name.
yr = year(Sys.Date())
# Load PITcleanR Data ----
PITcleanr_chs_bull<-readRDS(paste0("./data/PITcleanr_",yr,"_chs_bull.rds"))#I prefer RDS because we can explicity name the file
#load(paste0("./data/PITcleanr_",yr,"_chs_bull.rda"))
# filter for Imnaha River only
# Keep only Imnaha River detections first observed on/after April 1 of the
# return year.
PITcleanr_chs_bull <- PITcleanr_chs_bull %>%
filter(Group == 'ImnahaRiver',
firstObsDateTime>=ymd_hm(paste0(yr,"/04/01 00:00")))#the configuration file from the 02_script contains node_order where "Group" is defined
# Trap Install Date ----
trap_install <- TRUE # TRUE OR FALSE
#if(trap_install){
install_date <- ymd_hm(paste0(yr,"/06/21 15:00")) # we could add time and second if we wanted
#}
#Write xlsx file and auto fit the column widths
#https://stackoverflow.com/questions/27322110/define-excels-column-width-with-r
PITcleanr_chs_bull2<-PITcleanr_chs_bull%>%select(-AutoProcStatus)
write.xlsx2(as.data.frame(PITcleanr_chs_bull2),paste0("./data/PITcleanr_",yr,"_chs_bull.xlsx"),row.names=FALSE)
# Re-open the workbook just written so the column widths can be adjusted.
wb <- loadWorkbook(paste0("./data/PITcleanr_",yr,"_chs_bull.xlsx"))
sheets <- getSheets(wb)
# autosize column widths
autoSizeColumn(sheets[[1]], colIndex=1:ncol(PITcleanr_chs_bull))#reference number of columns in original excel file
saveWorkbook(wb,paste0("./data/PITcleanr_",yr,"_chs_bull.xlsx"))
####Create the detection history##########
##This step seems to want the AutoProcStatus from the Original PITcleanr file
# this should be part of the load file and read-in!!!!!!!!
# We should add a field for TrapStatus - will help for summarization and grouping later
# need another field for passage route - use TagPath for ifelse (if IR5 was it IMNAHW = Handled); (if IR5 was it IML = Ladder Attempt ); if (IR5 was it IR4 = No Ladder Attempt)
# might need to consider fish going downstream
# MaxTimes: for each tag, the LAST observation time at each of the four weir
# sites, spread to one column per site (IR4_max .. IR5_max).
MaxTimes <- PITcleanr_chs_bull %>%
filter(SiteID %in% c('IR4', 'IML','IMNAHW','IR5')) %>%
select(TagID, lastObsDateTime, SiteID) %>%
mutate(SiteID = factor(SiteID,levels=c('IR4', 'IML','IMNAHW','IR5'))) %>%
group_by(TagID, SiteID) %>%
slice(which.max(lastObsDateTime)) %>%
spread(SiteID, lastObsDateTime, drop = FALSE) %>%
rename(IR4_max=IR4,IML_max=IML, IMNAHW_max=IMNAHW, IR5_max=IR5)
# need to use drop = FALSE in spread with all factor levels
# detect_hist_simple: for each tag, the FIRST observation time at each site
# (one column per site), joined with tag metadata, estimated spawn location
# (PITcleanr::estimateSpawnLoc), and the MaxTimes columns above.
detect_hist_simple <- PITcleanr_chs_bull %>%
filter(!SiteID %in% c('COC', 'BSC')) %>%
select(TagID, firstObsDateTime, SiteID) %>% # removed Mark.Species, Origin, ReleaseDate
mutate(SiteID = factor(SiteID,levels=c("IR1","IR2","IR3","IR4","IML","IMNAHW","IR5"))) %>%
group_by(TagID, SiteID) %>%
slice(which.min(firstObsDateTime)) %>%
spread(SiteID, firstObsDateTime, drop = FALSE) %>%
left_join(PITcleanr_chs_bull %>%
select(TagID, Mark.Species, Origin, Release.Site.Code, Release.Date) %>%
distinct(),
by = 'TagID') %>%
left_join(PITcleanr_chs_bull %>%
mutate(UserProcStatus = AutoProcStatus) %>%
rename(ObsDate = firstObsDateTime, lastObsDate = lastObsDateTime) %>%
estimateSpawnLoc(),
by = 'TagID') %>%
left_join(MaxTimes,by='TagID') %>%
select(Mark.Species, Origin, Release.Site.Code, Release.Date, everything())%>%
arrange(Mark.Species,Origin,TagID)
write.xlsx2(as.data.frame(detect_hist_simple),paste0("./data/",yr,"_detect_hist_simple.xlsx"),row.names=FALSE)
###I separated the mutate statements to calculate travel days because they don't work well with missing dates##
###We need detections at an interrogation site before the calculations work###
# Travel times (days) between successive detection sites, plus weir arrival
# date (first non-missing of IR4 / IML / IMNAHW / IR5, in that priority) and
# NewTag flag for bull trout tagged after the trap was installed.
# NOTE(review): NewTag is the STRING "TRUE"/"FALSE", not a logical -- kept
# as-is since downstream Excel/AWS outputs may depend on it.
detect_hist <- detect_hist_simple%>%
mutate(min_IR1orIR2 = if_else(is.na(IR1), IR2, IR1),
IR1_IR3 = difftime(IR3, min_IR1orIR2, units = 'days'),
IR3_IR4 = difftime(IR4, IR3, units = 'days'),
IR4_IML = difftime(IML, IR4, units = 'days'),
IML_IMNAHW = difftime(IMNAHW, IML, units = 'days'),
IR4_IMNAHW = difftime(IMNAHW, IR4, units = 'days'),
IR4_IR5 = difftime(IR5, IR4, units = 'days')) %>%
mutate(NewTag = case_when(
Mark.Species == "Bull Trout" & trap_install & Release.Date > install_date ~ "TRUE", TRUE ~ "FALSE"),
WeirArrivalDate = if_else(!is.na(IR4), IR4, #if IR4 has a date, use IR4,
if_else(!is.na(IML), IML, # use IML,
if_else(!is.na(IMNAHW), IMNAHW, IR5))), # if IMNAHW has a date use IMNAHW otherwise use IR5
Arrival_Month = month(WeirArrivalDate, label = TRUE, abbr = FALSE))
###I separated into two parts to help with some error checking. The second half is more complicated
# Classify each tag's status relative to the weir from its TagPath, then the
# trap panel status at arrival, then the passage route for fish that passed.
detect_hist2<-detect_hist%>%
mutate(TagStatus = ifelse(grepl("(IR4|IML|IMNAHW|IR5)",TagPath) & WeirArrivalDate <= install_date, paste0("Passed: <",format(install_date, "%d-%B")),
ifelse(grepl("IR5", TagPath),"Passed",
ifelse(grepl("IMNAHW", TagPath), "Trapped",
ifelse(grepl("IML", TagPath), "Attempted Ladder",
ifelse(grepl("IR4", TagPath), "At Weir", paste0("Last obs: ", AssignSpawnSite)))))))%>%
mutate(TrapStatus = ifelse(is.na(WeirArrivalDate), "No obs at weir sites",
ifelse(WeirArrivalDate <= install_date, "Panels Open", "Panels Closed")))%>%
mutate(PassageRoute = ifelse(!grepl("Passed", TagStatus), NA,
ifelse(grepl("IMNAHW", TagPath), "Handled",
ifelse(grepl("IML", TagPath), "IML obs = T", "IML obs = F"))))
# Manual overrides: fish handled at the trap that were later seen back below
# the weir (and possibly returned upstream afterwards).
detect_hist2$TagStatus[detect_hist2$IR4_max>detect_hist2$IMNAHW&detect_hist2$IMNAHW>install_date]<-"Trapped: Obs Below Weir"#tags without a detection at IR5 that fall below the weir
detect_hist2$TagStatus[detect_hist2$TagStatus=="Trapped: Obs Below Weir"&detect_hist2$IR5>detect_hist2$IR4_max]<-"Passed"#fell below weir, but made it back to IR5
detect_hist2$PassageRoute[detect_hist2$IMNAHW>install_date]<-"Handled"#tag paths that end at the trap
#detect_hist$PassageRoute[detect_hist$TagID=="3D9.1C2D90A52D"]<-"Handled 6/5/18"#tag paths that end at the trap
#Rearange variable names
detect_hist_out<-detect_hist2%>%select(TagID,Mark.Species,Origin,NewTag,TagStatus,TrapStatus,PassageRoute,Release.Date,WeirArrivalDate,everything())
saveRDS(detect_hist_out, file = paste0("./data/",yr,"_detect_hist.rds"))
#save(detect_hist_out, file = paste0("./data/",yr,"_detect_hist.rda")) #Save as rda
write.xlsx2(as.data.frame(detect_hist_out), paste0("./data/",yr, "_detect_hist.xlsx"),row.names=FALSE)
wb2 <- loadWorkbook(paste0("./data/",yr, "_detect_hist.xlsx"))
sheets2 <- getSheets(wb2)
#autoSizeColumn(sheets2[[1]], colIndex=1:ncol(detect_hist))# autosize column widths
setColumnWidth(sheets2[[1]],colIndex=1:ncol(detect_hist_out),colWidth=18)
saveWorkbook(wb2,paste0("./data/",yr, "_detect_hist.xlsx"))
# Compile pdf document.
#knitr::knit("2019_chinook_bull_report.Rmd")
##Amazon and Shiny####
# Push the final table to S3 for the Shiny tracking app; credentials come
# from the sourced key file.
source('./R/aws_keys.R')
setKeys()
aws.s3::s3write_using(detect_hist_out, FUN = write.csv,
bucket = "nptfisheries-pittracking",
object = paste0("detection_history_",yr))
# Clean the R-environment
# NOTE(review): rm(list = ls()) in a script is an anti-pattern -- it wipes
# the caller's workspace when this file is source()d. Consider removing.
rm(list = ls())
|
58a6c4deb6e956db8e61b95beaba7f68e4c213dc
|
a9f98eb379f416c232f50c69ba332968ad948e38
|
/NBA Project (hustle).R
|
7644f344099961fa8c4c56e059153c558afadf81
|
[] |
no_license
|
jkatz22/NBA_clustering_analysis
|
7d5946a6557c06182631c69155330236f0878adb
|
b06bd4ffcea09cfbd0f3bddb53a6a631d99f5a2e
|
refs/heads/main
| 2023-04-12T23:25:55.851237
| 2021-03-30T17:59:43
| 2021-03-30T17:59:43
| 350,870,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,472
|
r
|
NBA Project (hustle).R
|
# Cluster NBA players on hustle-related metrics (steal % and rim frequency).
# FIX: this script uses %>%, filter/mutate, ggplot and geom_label_repel but
# never attached the packages providing them; load them explicitly so the
# script runs standalone.
library(dplyr)
library(ggplot2)
library(ggrepel)
data <- read.csv("/Users/joshkatz/Desktop/Junior Year/Sports Analytics/NBA Project/box_advanced.csv")
# Restrict to rotation players: >= 12.4688 minutes per game and >= 28 games.
data <- data %>%
  filter(MP_PG >= 12.4688 & GP >= 28)
# Standardise the two hustle metrics and flag a handful of players widely
# regarded as high-hustle, used only to label the plot.
hustle_data <- data %>%
  mutate(scaled_STLP = scale(STLP), scaled_RimFreq = scale(RimFreq)) %>%
  mutate(
    hustle_known = ifelse(
      PersonName == "Chris Paul" | PersonName == "Kawhi Leonard" |
      PersonName == "Russell Westbrook" | PersonName == "Jimmy Butler" |
      PersonName == "John Wall" | PersonName == "Marcus Smart" |
      PersonName == "Draymond Green",1,0
    )
  )
# Hierarchical clustering (complete linkage) on the two scaled metrics.
distance_hustle <- dplyr::select(hustle_data, scaled_STLP, scaled_RimFreq) %>%
  dist()
matrix_hustle <- as.matrix(distance_hustle)
hier_clust_hustle <- hclust(distance_hustle, method = "complete")
plot(hier_clust_hustle)
hustle_cluster_hier <- cutree(hier_clust_hustle, k = 3)
hustle_data$hustle_cluster <- as.factor(hustle_cluster_hier)
# Scatter of the raw metrics coloured by cluster, labelling the known hustlers.
ggplot(hustle_data, aes(x= STLP, y= RimFreq, color=hustle_cluster, label=PersonName))+
  geom_point() + geom_label_repel(aes(label=ifelse(hustle_known==1,as.character(PersonName),'')),
                                  box.padding = 0.35, point.padding = 1, segment.color = "grey50") +
  ggtitle("Quantifying Hustle in the NBA")
# Split by cluster for inspection. NOTE(review): which cluster number maps to
# which behavioural group depends on the data -- verify the labels below.
no_hustle <- hustle_data %>%
  filter(hustle_cluster == 1)
high_hustle <- hustle_data %>%
  filter(hustle_cluster == 2)
no_steal <- hustle_data %>%
  filter(hustle_cluster == 3)
# Three-point percentage for the high-hustle cluster.
high_hustle$X3P <- high_hustle$X3FGM_PG/high_hustle$X3FGA_PG
|
5af3ab479acd72c1757c391a681f5faed37e92a9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bit/examples/chunk.rd.R
|
c39b0af93786817035c9223bf67724d75a9e6f43
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
chunk.rd.R
|
# Extracted example code for bit::chunk (auto-generated from the Rd file).
library(bit)
### Name: chunk
### Title: Chunked range index
### Aliases: chunk chunk.default
### Keywords: data
### ** Examples
# Split the range 1..100 into chunks of length 30, with the default and the
# "seq" chunking methods.
chunk(1, 100, by=30)
chunk(1, 100, by=30, method="seq")
## Not run:
##D require(foreach)
##D m <- 10000
##D k <- 1000
##D n <- m*k
##D message("Four ways to loop from 1 to n. Slowest foreach to fastest chunk is 1700:1
##D on a dual core notebook with 3GB RAM\n")
##D z <- 0L;
##D print(k*system.time({it <- icount(m); foreach (i = it) %do% { z <- i; NULL }}))
##D z
##D
##D z <- 0L
##D print(system.time({i <- 0L; while (i<n) {i <- i + 1L; z <- i}}))
##D z
##D
##D z <- 0L
##D print(system.time(for (i in 1:n) z <- i))
##D z
##D
##D z <- 0L; n <- m*k;
##D print(system.time(for (ch in chunk(1, n, by=m)){for (i in ch[1]:ch[2])z <- i}))
##D z
##D
##D message("Seven ways to calculate sum(1:n).
##D Slowest foreach to fastest chunk is 61000:1 on a dual core notebook with 3GB RAM\n")
##D print(k*system.time({it <- icount(m); foreach (i = it, .combine="+") %do% { i }}))
##D
##D z <- 0;
##D print(k*system.time({it <- icount(m); foreach (i = it) %do% { z <- z + i; NULL }}))
##D z
##D
##D z <- 0; print(system.time({i <- 0L;while (i<n) {i <- i + 1L; z <- z + i}})); z
##D
##D z <- 0; print(system.time(for (i in 1:n) z <- z + i)); z
##D
##D print(system.time(sum(as.double(1:n))))
##D
##D z <- 0; n <- m*k
##D print(system.time(for (ch in chunk(1, n, by=m)){for (i in ch[1]:ch[2])z <- z + i}))
##D z
##D
##D z <- 0; n <- m*k
##D print(system.time(for (ch in chunk(1, n, by=m)){z <- z+sum(as.double(ch[1]:ch[2]))}))
##D z
##D
## End(Not run)
|
0e629015683087d3af95d51ebcc228fedd6ba151
|
0037521945d019b1176142e88c20b7c34d8a34ec
|
/cachematrix.R
|
be4938e8a32282a38f492dead64af9999b49d3c0
|
[] |
no_license
|
Wiwi0404/ProgrammingAssignment2
|
91a8eaaebcb31b73a531c6049ae1f4063d003ee9
|
28f58369f7efe5bd4c510312e42f6720c30e8a0f
|
refs/heads/master
| 2022-12-20T16:49:25.427454
| 2020-09-19T06:07:20
| 2020-09-19T06:07:20
| 296,786,265
| 0
| 0
| null | 2020-09-19T04:31:09
| 2020-09-19T04:31:08
| null |
UTF-8
|
R
| false
| false
| 684
|
r
|
cachematrix.R
|
## Construct a "cacheMatrix": a closure-based container pairing a matrix with
## a memoised slot for its inverse. Returns a list of accessor functions
## (set / get / setinvert / getinvert) that all share the same state.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # inverse cache; NULL means "not yet computed"
  list(
    set = function(y) {
      ## Swapping in a new matrix invalidates any stale cached inverse.
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setinvert = function(invert) cached <<- invert,
    getinvert = function() cached
  )
}
## Return the inverse of the matrix held in the special object created by
## makeCacheMatrix, computing and caching it only on the first call.
## Extra arguments are forwarded to solve() on a cache miss.
cacheSolve <- function(x, ...) {
  memo <- x$getinvert()
  if (is.null(memo)) {
    ## Cache miss: invert, then memoise for subsequent calls.
    memo <- solve(x$get(), ...)
    x$setinvert(memo)
  } else {
    message("getting cached data")
  }
  memo
}
|
49ecc56bfe393ba22b02c282475b60e170fd47ec
|
5e9c50112a24d984d1a12c6f395a4c885a10c798
|
/plot2.R
|
5c819b237bdfa04b542a324d1c31d1e60545b14c
|
[] |
no_license
|
soynicola/ExData
|
5740c83fe20d5ae9c6671de642d08b429fd57e20
|
9f907b840dd9396ff0e26a9367a88c2fbdca7a07
|
refs/heads/master
| 2020-05-09T20:40:24.711957
| 2015-09-12T04:52:05
| 2015-09-12T04:52:05
| 42,344,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 725
|
r
|
plot2.R
|
# make sure you've unzipped exdata_data_household_power_consumption file
## and your working directory is set at the exdata file.
## read data from household power consumption data ("?" marks missing values;
## every column is read as character because of colClasses)
dat <- read.table("household_power_consumption.txt", header=TRUE, na.strings="?", colClasses="character", sep=";")
## Extract only the data with date = 2007/2/1 & 2007/2/2
data <- dat[dat$Date %in% c("1/2/2007", "2/2/2007"),]
## combine Date & Time into a single date-time value for the x axis
data$DateTime <- strptime(paste(data$Date, data$Time, sep=" "), format="%d/%m/%Y %H:%M:%S")
## BUG FIX: since everything was read as character, the power column must be
## converted to numeric before plotting; plot() cannot take a character y.
data$Global_active_power <- as.numeric(data$Global_active_power)
## generate plot
png(file= "plot2.png", width= 480, height= 480)
plot(data$DateTime, data$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
9c8fb595dd97fbac0f0d17e0f15ff5171551a78a
|
a55208f9bc54864b57d34e2f082fe08c984406c6
|
/3. Generate Cross Validation Data/CreateCVSTWam/CreateCVSTWam/LOOCV.R
|
d1ffc5e61adb17d4098f529b4c6e2283d3c393d8
|
[
"MIT"
] |
permissive
|
heathyates/AffectiveIntelligenceBuiltEnvironments
|
fe46f87e3912643841729087192b1cdd0140a210
|
acbdf3544e90bcf2fba5c9e0278e9acdf983c01d
|
refs/heads/master
| 2020-03-09T04:56:43.452727
| 2018-04-08T20:55:13
| 2018-04-08T20:55:13
| 128,599,774
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,509
|
r
|
LOOCV.R
|
#################################################################################################
#Author: Heath Yates #
#Date: 03/14/2018 #
#Description: This conducts leave one out cross validation into test and validation sets #
#################################################################################################
library(openxlsx)
library(plyr)
#Section 1: Primary Functions
CreateLOOCVTrainingAndTestingData <- function(masterData, LOOCVTrainingPath, LOOCVTestPath) {
  #Create leave-one-out cross-validation (LOOCV) training and testing sets.
  #
  #Args:
  #  masterData:        the normalized or aggregated master data; must
  #                     contain a PARTICIPANTID column
  #  LOOCVTrainingPath: directory where train_<fold>.csv files are written
  #  LOOCVTestPath:     directory where test_<fold>.csv files are written
  #
  #Returns:
  #  None (but does write/output training and testing data sets)

  #Step 0: Obtain the unique participants in the data
  participantsInData <- unique(masterData$PARTICIPANTID)

  #Step 1: Create the folds, leaving out one participant at a time.
  #Fold i trains on everyone except participantsInData[i] and tests on
  #participantsInData[i] alone.
  #BUGFIX: filter by the actual participant ID, not the loop counter --
  #the old code compared PARTICIPANTID against `i`, which silently built
  #wrong/empty folds whenever IDs are non-contiguous (e.g. after a
  #participant has been excluded upstream).
  for (i in seq_along(participantsInData)) {
    print(paste0("Working on LOOCV training and testing fold: ", i))
    heldOut <- participantsInData[i]

    #Step 1.1: Training set = all rows except the held-out participant
    train <- masterData[masterData$PARTICIPANTID != heldOut, ]

    #Step 1.2: Testing set = the held-out participant only
    test <- masterData[masterData$PARTICIPANTID == heldOut, ]

    #Step 1.3: Output training set
    trainFileName <- paste0("train_", i, ".csv")
    write.csv(train, file.path(LOOCVTrainingPath, trainFileName))

    #Step 1.4: Output testing set
    testFileName <- paste0("test_", i, ".csv")
    write.csv(test, file.path(LOOCVTestPath, testFileName))
    print(paste0("Files output for LOOCV training and testing fold: ", i))
  }
  print(paste0("LOOCV complete"))
}
#Section 2: Main function
LOOCV <- function(masterDataFilePath = "C:/Users/heath/OneDrive/Documents/Research/Current/Dissertation/3. WAM/10. Appendix - Normalized Cleaned Data/normalized master data/all_participants_master_with_normalized.csv",
                  exportDirectory = "C:/Users/heath/OneDrive/Documents/Research/Current/Dissertation/3. WAM/12. Appendix - ST Training and Test Data") {
  #Driver: performs leave-one-out cross-validation on the master data.
  #
  #Args:
  #  masterDataFilePath: path of the master CSV to read. Defaults to the
  #                      original hard-coded location so existing calls
  #                      (LOOCV()) keep working.
  #  exportDirectory:    parent directory under which LOOCV/Train and
  #                      LOOCV/Test are created.
  #
  #Returns:
  #  None (writes the per-fold CSV files as a side effect)

  #Step 0: Define the parent, training, and testing paths
  LOOCVParentPath <- file.path(exportDirectory, "LOOCV")
  LOOCVTrainingPath <- file.path(LOOCVParentPath, "Train")
  LOOCVTestPath <- file.path(LOOCVParentPath, "Test")

  #Step 1: Create the paths
  dir.create(LOOCVParentPath)
  dir.create(LOOCVTrainingPath)
  dir.create(LOOCVTestPath)

  #Step 2: Load the master data and order it by participant, then time.
  #Base-R order() replaces plyr::arrange -- same ordering, no hidden
  #dependency; row names are reset to match arrange()'s behavior so the
  #exported CSVs are unchanged. Participant 19 is excluded from the study.
  masterData <- read.csv(masterDataFilePath, header = TRUE)
  masterData <- masterData[order(masterData$PARTICIPANTID, masterData$TIME), ]
  rownames(masterData) <- NULL
  masterData <- masterData[masterData$PARTICIPANTID != 19, ]

  #Step 3: Create training and testing data for LOOCV
  CreateLOOCVTrainingAndTestingData(masterData, LOOCVTrainingPath, LOOCVTestPath)
}
LOOCV()
|
f0528d0bec632e9e66a7d81ee6a8d3ad9c099c1c
|
c22589ab508ff216ca8ca6d56fbd99d90a5f8654
|
/other_code/Making_RNA_Data_OG.R
|
5e888524e34a2ed71e36345d762cf344e2330739
|
[] |
no_license
|
pupster90/10k_Immunomes
|
52578ee396c7894db5dd294d830bd5f96be63e62
|
6164300f158ab357f7e9715076fdbaec0d5e760c
|
refs/heads/master
| 2021-07-14T13:55:15.401359
| 2020-08-08T09:29:19
| 2020-08-08T09:29:19
| 188,519,728
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,299
|
r
|
Making_RNA_Data_OG.R
|
# Making_RNA_Data_OG.R -- build a gene x sample RNA-seq count table with
# HGNC symbols, filter low-expression genes, export a CSV, and draw an
# example per-gene expression plot.
####################################
###        Set Up File           ###
####################################
### Scratch Paper: ensambl -> count
# USEFUL LINK:
# https://support.bioconductor.org/p/105430/
library(BiocManager)
#BiocManager::install("DESeq2")
library( DESeq2 )
#org.Hs.eg.db
#BiocManager::install("org.Hs.eg.db")
#BiocManager::install("AnnotationDbi")
library( org.Hs.eg.db )
####################################
###        Create Map            ###
####################################
# Load one counts file to obtain the (shared) ensembl gene-ID column
counts <- read.table( "/Users/elliott/Desktop/SD1412/ResultFiles/RNA_sequencing_result/RNA_P09_D0.725156.count", sep = "\t", stringsAsFactors=FALSE )
counts$V2 <- NULL
names(counts) <- c("ensembl")
# Map ensembl IDs -> HGNC symbols (AnnotationDbi::select on org.Hs.eg.db)
hgnc_map <- select(org.Hs.eg.db, counts$ensembl , c("SYMBOL"),"ENSEMBL")
# Attach the first matching symbol per ensembl ID. match() takes the first
# hit -- exactly what the old which(...)[1] loop did -- but in O(n) rather
# than growing a vector element by element.
counts$hgnc <- hgnc_map$SYMBOL[ match(counts$ensembl, hgnc_map$ENSEMBL) ]
####################################
###    Read All Data as Table    ###
####################################
# Get list of files
files <- list.files("/Users/elliott/Desktop/SD1412/ResultFiles/RNA_sequencing_result", pattern="*.count")
for( a_file in files ){
  # get data
  file_path <- paste("/Users/elliott/Desktop/SD1412/ResultFiles/RNA_sequencing_result/",a_file,sep="")
  mini_counts <- read.table( file_path, sep = "\t", stringsAsFactors=FALSE )
  names(mini_counts) <- c("ensembl","counts")
  # check that every file lists the same genes in the same order
  if( length(mini_counts$ensembl)!=length(counts$ensembl) || !all(mini_counts$ensembl==counts$ensembl) ){
    print("something bad happend")
    break
  }
  # add this sample's counts; column named by the participant/day token
  col_name <- substr( a_file, start=5, stop=10 )
  counts[col_name] <- mini_counts$counts
}
####################################
###         Clean Data           ###
####################################
# remove genes with super low expression (total count <= 30 across samples)
row_sums <- rowSums(counts[3:length(counts)])
counts2 <- counts[which(row_sums>30),]
# remove genes with no hgnc name
counts3 <- counts2[ which( !is.na(counts2$hgnc) ), ]
#######################################
###        Export to CSV            ###
#######################################
# Export file
write.csv(counts3, file = "/Users/elliott/Desktop/Infants_RNA.csv",row.names=FALSE)
# check that it exported correctly
counts <- read.table( "/Users/elliott/Desktop/Infants_RNA.csv", stringsAsFactors=FALSE, header=TRUE,sep="," )
head(counts)
dim(counts)
#######################################
###          Make Graph             ###
#######################################
which(counts3$hgnc=="CD4")
# Reload the exported table and pick the gene row to plot (row `num`;
# the row index is now used consistently instead of a repeated literal)
counts3 <- read.table( "/Users/elliott/Desktop/Infants_RNA.csv", stringsAsFactors=FALSE, header=TRUE,sep="," )
num <- 216
dim(counts3)
gene_name <- counts3[num,2]
a_row <- counts3[num,3:dim(counts3)[2]]
# Put data in data frame
to_graph <- data.frame( counts = as.numeric(a_row) )
# group samples by the day suffix (last two characters of the column name)
groups <- c()
for( i in names(a_row) ){
  groups <- c( groups, substr(i, start=nchar(i)-1, stop=nchar(i) ))
}
to_graph$groups <- as.factor( groups )
# x position = day (0/1/3/7) plus small jitter so points don't overlap
`%+=%` <- function(e1,e2) eval.parent(substitute(e1 <- e1 + e2))
to_graph$X <- rnorm( length(a_row) , mean = 0, sd = .15 )
to_graph$X[ grep("*D0", names(a_row)) ] %+=% 0
to_graph$X[ grep("*D1", names(a_row)) ] %+=% 1
to_graph$X[ grep("*D3", names(a_row)) ] %+=% 3
to_graph$X[ grep("*D7", names(a_row)) ] %+=% 7
# Plot it (BUGFIX: a stray ")" after theme_gdocs() previously made this
# script a syntax error)
library(ggplot2)
library(plotly)
library(ggthemes)
graph_og <- ggplot( to_graph, aes(x=X, y=counts, color=groups) ) +
  geom_point() +
  labs(x='Days Alive', y="Gene Expression (counts)", title=paste(gene_name," Expression in Infants"), fill = NULL, colour = NULL ) +
  theme_gdocs()
#axis.title.y = element_text(margin = margin(t = 0, r = 20, b = 0, l = 0)) ) +#,
#panel.background = element_rect(fill = "white", colour = "gray"),
#panel.grid.major = element_line(size = 0.2, linetype = 'solid',colour = "gray"),#+
#ggtitle("Plot of length by dose") #+
#theme(plot.title = element_text( size=16, face="bold.italic")) #+
graph_og
graph <- ggplotly(graph_og)
graph
|
c519712dcf43211b39d1560a49b3bd68625c36d7
|
45dee4934f377fba8a4fde7030f4741f0b610cb2
|
/man/lmList-class.Rd
|
da2eea6105cdb5cd10d99532eaf03a2f300ae4a5
|
[] |
no_license
|
jknowles/lme4
|
e7649d74ea5c6ed90686ce5cd2ea3376b7592c50
|
f8f760a512434199901db78b9269c245cca82e1f
|
refs/heads/master
| 2021-01-16T19:50:54.277800
| 2015-02-01T04:04:03
| 2015-02-01T04:04:03
| 30,161,374
| 1
| 0
| null | 2015-02-01T22:00:09
| 2015-02-01T22:00:08
| null |
UTF-8
|
R
| false
| false
| 463
|
rd
|
lmList-class.Rd
|
\docType{class}
\name{lmList-class}
\alias{lmList-class}
\alias{show,lmList-method}
\title{Class "lmList" of 'lm' Objects on Common Model}
\description{
Class \code{"lmList"} is an S4 class with basically a
list of objects of class \code{\link{lm}} with a common
model.
}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{new("lmList", ...)} or, more commonly, by a call to
\code{\link{lmList}}.
}
\keyword{classes}
|
71810f0007dd5b336fc524d346e9611702691afb
|
c7b26db73a4ae88a969ed0ea6a0793f0eb3d1854
|
/plot3.R
|
17f88e74c54cdc3da84d27bad91a21b9785aa9b6
|
[] |
no_license
|
albermm/Data-Analysis-Project-2
|
92bff884acfab5cdd5042faa974edfe30d58baf5
|
a7de383ff9f4909443fb5d1daa8cb0b041e87148
|
refs/heads/master
| 2020-06-04T17:02:24.063015
| 2014-08-20T17:40:55
| 2014-08-20T17:40:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 882
|
r
|
plot3.R
|
## plot3.R -- total Baltimore City PM2.5 emissions by source type and year.
## Requires summarySCC_PM25.rds and Source_Classification_Code.rds in the
## working directory.

## Read data from files
emissions <- readRDS("./summarySCC_PM25.rds")
classification <- readRDS("./Source_Classification_Code.rds")
## Subset to Baltimore City (fips == "24510") and total the emissions
## per (type, year) group
library(plyr)
Baltimore <- emissions[ which(emissions$fips == "24510"), ]
plotData <- ddply(Baltimore, c("type", "year"), summarise,
                  total = sum(Emissions))
## Plot data. The ggplot object is explicitly print()ed so the chart is
## rendered into the PNG device even when the script is run via source(),
## where unassigned top-level expressions are not auto-printed.
library(ggplot2)
png("plot3.png")
g <- ggplot(plotData, aes(year, total))
print(g + geom_line(aes(color=type)) +
  facet_grid(.~ type) +
  labs(x="Year") +
  labs(y = expression("Total" ~ PM[2.5] ~ "Emissions (tons)")) +
  labs(title = expression("Baltimore City" ~ PM[2.5] ~ "Emissions by Source Type and Year")))
dev.off()
|
5928101e4836c186abd9c35ffdf431756a1990bb
|
d6469f9b60d3574cd178bf589ee169bd4a87e631
|
/man/cwdata.Rd
|
dde45d2f4e442711c6db3c62b3a0b5c09b88d6db
|
[] |
no_license
|
ledugan/ecoclim
|
ad6bd5fd92964e62b131214bbccfb553ce32eff0
|
b76eb1b127439d99c12316f6b8d31318823b4a1b
|
refs/heads/master
| 2020-03-23T00:40:31.337980
| 2015-10-29T02:46:31
| 2015-10-29T02:46:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 320
|
rd
|
cwdata.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/colorwheel.R
\name{cwdata}
\alias{cwdata}
\title{Generate circular legend data.}
\usage{
cwdata(data, xvar, yvar, resolution = 10, origin = c(0, 0))
}
\description{
Construct a data frame to be used in color wheel legend plotting.
}
|
b4173d2a60e31ab58efe8d37f22a4bca90fac132
|
018c485b22cb818125068e09ac3c0948864050fd
|
/man/FLCore-package.Rd
|
3c97a53fc49f69c7152f22b2bd32cee449d5649c
|
[] |
no_license
|
cran/FLCore
|
7f72953f57d1c42b8fec9dc0250d722dc884b0d9
|
1f4f639a9ddba6bf5bda5103295dfe54d7f9321f
|
refs/heads/master
| 2020-06-04T11:21:00.661163
| 1977-08-08T00:00:00
| 1977-08-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,792
|
rd
|
FLCore-package.Rd
|
\name{FLCore-package}
\alias{FLCore-package}
\alias{FLCore}
\docType{package}
\title{
Core package of FLR, fisheries modelling in R.
}
\description{
FLCore contains the core classes and methods for FLR, a
framework for fisheries modelling and management strategy
simulation in R. Developed by a team of fisheries scientists in
various countries. More information can be found at
http://flr-project.org/, including a development mailing list.
}
\details{
\tabular{ll}{
Package: \tab FLCore\cr
Version: \tab 2.0-2\cr
Date: \tab 2005\cr
Depends: \tab methods, R(>= 2.3.0), graphics, stats, lattice(>= 0.13-8)\cr
License: \tab GPL 2 or above\cr
Collate: \tab FLQuant.R FLQuants.R FLAccesors.R FLCohort.R FLStock.R
FLStocks.R io.FLStock.R FLBiol.R FLBiols.R FLCatch.R FLFleet.R
FLFleets.R FLIndex.R FLIndices.R io.FLIndices.R FLSR.R zzz.R\cr
Packaged: \tab Sat May 13 10:25:52 2006; imosqueira\cr
Built: \tab R 2.3.1; ; 2006-10-11 12:26:16; unix\cr
}
Index:
\preformatted{
FLBiol-class Class FLBiol
FLBiols-class Class FLBiols
FLCatch-class Class FLCatch
FLCohort-class Class "FLCohort" for information by cohort
FLCore-accesors Accesor and replacement methods for slots of
complex objects
FLFleet-class FLFleet class and methods
FLFleets-class Class FLFleets
FLGenerics-methods FLCore S4 Generic Functions
FLIndex-class Class FLIndex
FLIndices-class Class FLIndices
FLQuant FLQuant
FLQuant-class FLQuant class and methods
FLQuants-class Class "FLQuants"
FLSR FLSR
FLSR-class Class FLSR
FLStock-class Class FLStock for fish stock data and modelling
output
FLStocks-class Class FLStocks
as.FLBiol Method for creating an FLBiol object from other
classes
as.FLFleet as.FLFleet method
as.FLIndex-methods Method for creating an FLIndex object from
other classes
as.FLQuant as.FLQuant
as.FLSR Method for creating an FLSR object from other
classes.
as.FLStock FLStock
bubbles-methods Lattice style bubble plots
ccplot-methods Catch curves plot method
computeCatch Methods for estimating aggregated catch time
series from dissagregated data
convert6d Converts FLR objects from version 1.* to 2.*
createFLAccesors Create accesor and replecement methods for
slots of complex classes
dims Provide information onn the dimensions and
range of an object
flc2flq-methods Coerce "FLCohort" to "FLQuant"
is.FLBiol FLBiol
is.FLBiols Checks for objects of class FLBiols
is.FLFleet FLFleet
is.FLFleets FLFleets
is.FLIndex FLIndex
is.FLIndices FLIndices
is.FLSR FLSR
is.FLStock FLStock
is.FLStocks FLStocks
iter-methods Methods for getting information on, accessing
and modifying iterations of an FLQuant
ple4 FLCore datasets
propagate propagate for FLQuants based classes
quant Quant, first dimension of FLQuant
quantSums Common summary operations for FLQuants
read.FLIndices Import FLIndices data from a file
read.FLStock Import stock data from a file
revenue Calculate the revenue of a fleet
setPlusGroup setPlusgroup for FLStock
sop Calculates the sum of products correction
sr Stock-recruitment model function
srlkhd Likelihood of the S-R parameters
ssb Method for calculating Spawning Stock Biomass
ssbpurec Method for calculating Spawning Stock Biomass
per unit recruit
steepvirginbiom Change to steepness/virgin biomass
parameterisation
tofrm Method for generating fromulas from FLQuant
objects.
trim Method for trimming FLQuant objects.
units Extract and modify the units slot of an object
window window for FLQuants based classes
write.FLStock Write FLStock data to a file
xyplot,formula,FLQuant-method
FLCore lattice methods
}
}
\author{
FLR Team and various contributors. Initial design by Laurence
T. Kell & Philippe Grosjean.
Maintainer: FLR Team <flr-devel@lists.sourceforge.net>
}
\references{
}
\keyword{package}
\seealso{
}
\examples{
}
|
d1cb7d0f1be164b71ccf69f4dff738c8a1f8d158
|
4042c33db4606452e80bf20e763d388be2785372
|
/pkg/R/vaccine_dynamics.R
|
50f7165d6f413ff866749730d99c3f00b96edd25
|
[] |
no_license
|
RodrigoAnderle/EvoNetHIV
|
992a5b735f8fac5255ea558ec2758251634ffaab
|
4bef435b0c5739d6baba9f80c93ac046e540647e
|
refs/heads/master
| 2023-08-25T14:01:38.754647
| 2021-09-24T16:48:29
| 2021-09-24T16:48:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,360
|
r
|
vaccine_dynamics.R
|
#' Per-time-step vaccine bookkeeping for the simulation.
#'
#' Initializes newly vaccinated agents, updates their per-agent vaccine
#' parameters (mu, sigma, phi), refreshes the age-based network attribute,
#' and -- one `trial_status_time_switch` before the vaccine rollout --
#' assigns trial status to a random sample of agents. The actual
#' modification of transmission probabilities happens in
#' "transmission_vaccine", not here.
#'
#' @param dat master simulation object (carries `param` and `attr` lists)
#' @param at  current time step (days, judging by the *365 conversions below)
#' @return the updated \code{dat} object
#' @export
vaccine_dynamics <- function(dat,at){
  #note: actual modification of transmission dynamics occurs
  # in "transmission_vaccine" function
  dat <- initialize_vaccine_agents(dat,at)
  dat <- update_mu(dat,at)
  dat <- update_sigma(dat,at)
  dat <- initialize_phi(dat,at)
  dat <- update_phi(dat,at)
  #if mean degree by age (risk groups) for GF model:
  #re-bin every agent into an age category (attr$att1) from the
  #[min, max] pairs stored in param$age_nw_groups
  if(length(dat$param$age_nw_groups)>1){
    age_cats <- 1:length(dat$param$age_nw_groups)
    for(ii in 1:length(age_cats)){
      age1 <- dat$param$age_nw_groups[[ii]][1]
      age2 <- dat$param$age_nw_groups[[ii]][2]
      #agents with age in (age1, age2+1) fall into category ii
      ix <- which(dat$attr$age > age1 & dat$attr$age < age2+1)
      dat$attr$att1[ix] <- ii
    }
  }
  #vaccine trial setup----------
  #assign trial status to agents at dat$param$trial_status_time_switch (default=365 days)
  #so network filters out pairings of trial participants by time
  #vaccine trial starts (dat$param$vaccine.rollout.year[1]*365)
  if( at== (dat$param$vaccine.rollout.year[1]*365-dat$param$trial_status_time_switch) & dat$param$vaccine_trial ) {
    #enroll a fraction.vaccinated share of all agents, chosen uniformly
    #at random without replacement
    trial_samples <- round(length(dat$attr$Status)*dat$param$fraction.vaccinated)
    trial_index <- sample(1:length(dat$attr$Status), trial_samples,replace=F)
    dat$attr$trial_status[trial_index] <- 1
  }
  #---------------------------
  return(dat)
}
|
e1238a2fd59ab952fb0a15f8ec9d8d241b1639f7
|
9ac72f57681a91fad1b56143af156697fa9cc669
|
/Final Project Marco Dibo.R
|
20b39399605ae5997707d81257fefec47230bfea
|
[] |
no_license
|
mdibo/quantstrat
|
e1f57a6fcdeae9e3666d7fbff1f4b87a12c6ec5f
|
87e96669959fd6480e23f2b92c14bb1ed90962a0
|
refs/heads/master
| 2021-01-20T18:44:34.957953
| 2016-08-05T15:15:28
| 2016-08-05T15:15:28
| 65,023,427
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,224
|
r
|
Final Project Marco Dibo.R
|
# ------------------------------------------------------------------
# Pairs-trading backtest on the Citigroup (C) vs Bank of America (BAC)
# price spread, built with quantstrat/blotter.
# ------------------------------------------------------------------
.blotter <- new.env()
.strategy <- new.env()
ls(.blotter) # .blotter holds the portfolio and account object
ls(.strategy) # .strategy holds the orderbook and strategy object
# 1) Load Quantstrat library
library(quantstrat)
library(tseries)
library(IKTrading)
library(PerformanceAnalytics)
# 2) Initialize currency
currency('USD')
# 3) Initialize dates and initial equity
initDate <- '2008-12-31'
startDate <- '2009-01-01'
endDate <- '2012-12-31'
initEq <- 100000
# Strategy parameters:
#   N         - lookback window (bars) for the rolling mean/sd of the ratio
#   N.ADF     - window (bars) for the rolling augmented Dickey-Fuller test
#   alpha     - ADF p-value threshold gating entries (1 = gate effectively off)
#   buyThresh/sellThresh - z-score entry thresholds (+/- 2)
#   exitlong/exitshort   - z-score exit thresholds
N = 20
N.ADF = 60
alpha = 1
buyThresh = -2
sellThresh = -buyThresh
exitlong = 1
exitshort = 1
Sys.setenv(TZ = 'UTC')
# 4) fetch market data and build the OHLC spread series C - BAC
symb1 <- 'C'
symb2 <- 'BAC'
getSymbols(symb1, from=startDate, to=endDate, adjust=TRUE)
getSymbols(symb2, from=startDate, to=endDate, adjust=TRUE)
spread <- OHLC(C)-OHLC(BAC)
colnames(spread)<-c("open","high","low","close")
head(spread)
symbols <- c("spread")
stock(symbols, currency = 'USD', multiplier = 1)
# Quick visual check of the spread with a 20-bar EMA overlay
chart_Series(spread)
add_TA(EMA(Cl(spread), n=20), on=1, col="blue", lwd=1.5)
legend(x=5, y=50, legend=c("EMA 20"),
       fill=c("blue"), bty="n")
# 5) Inititalize strategy, portfolio, account and orders
qs.strategy <- 'pairStrat'
initPortf(qs.strategy, symbols = symbols, initDate=initDate)
initAcct(qs.strategy, portfolios=qs.strategy, initDate=initDate, initEq=initEq)
initOrders(qs.strategy,initDate=initDate)
# 6) Save strategy
strategy(qs.strategy, store = TRUE)
# rm.strat(pairStrat) # only when trying a new test
ls(.blotter) # .blotter holds the portfolio and account object
ls(.strategy) # .strategy holds the orderbook and strategy object
# 7) Add indicators
# a) Z-Score
# Log10 ratio of close prices for a pair of symbols.
# `x` is a length-2 character vector of symbol names; the corresponding
# price objects are looked up by name in the enclosing environments.
PairRatio <- function(x) {
  left <- get(x[1])
  right <- get(x[2])
  ratio <- log10(Cl(left) / Cl(right))
  colnames(ratio) <- "Price.Ratio"
  ratio
}
Price.Ratio <- PairRatio(c(symb1[1], symb2[1]))
# Rolling N-period mean of the price ratio (N is a script-level constant).
MaRatio <- function(x){
  avg <- rollapply(x, N, mean)
  colnames(avg) <- "Price.Ratio.MA"
  avg
}
Price.Ratio.MA <- MaRatio(Price.Ratio)
# Rolling N-period standard deviation of the price ratio.
Sd <- function(x){
  sdev <- rollapply(x, N, sd)
  colnames(sdev) <- "Price.Ratio.SD"
  sdev
}
Price.Ratio.SD <- Sd(Price.Ratio)
# Z-score of the price ratio: (ratio - rolling mean) / rolling sd.
# `x` carries the three columns Price.Ratio, Price.Ratio.MA and
# Price.Ratio.SD (built via merge() at the call site).
ZScore <- function(x){
  z <- (x$Price.Ratio - x$Price.Ratio.MA) / x$Price.Ratio.SD
  colnames(z) <- "Z.Score"
  z
}
# Compute the indicator series once up-front for inspection/plotting
# (the strategy recomputes them through add.indicator at apply time)
Z.Score <- ZScore(x=merge(Price.Ratio,Price.Ratio.MA,Price.Ratio.SD))
dev.new()
plot(main = "Z-Score Time Series", xlab = "Date" , ylab = "Z-Score",Z.Score, type = "l" )
# Entry thresholds at +/- 2 standard deviations
abline(h = 2, col = 2, lwd = 3 ,lty = 2)
abline(h = -2, col = 3, lwd = 3 ,lty = 2)
# b) Augmented Dickey Fuller
# p-value of the augmented Dickey-Fuller test on one window of the ratio;
# used as the rolling stationarity gate for entries (see Pval below).
ft2 <- function(x) {
  result <- adf.test(x)
  result$p.value
}
# Rolling ADF p-value over windows of N.ADF observations.
Pval <- function(x){
  pvals <- rollapply(x, width = N.ADF, ft2)
  colnames(pvals) <- "P.Value"
  pvals
}
P.Value <- Pval(Price.Ratio)
# Register the two indicators with the strategy object
add.indicator(strategy = qs.strategy, name = "ZScore", arguments =
                list(x=merge(Price.Ratio,Price.Ratio.MA,Price.Ratio.SD)))
add.indicator(strategy = qs.strategy, name = "Pval", arguments =
                list(x=quote(Price.Ratio)))
summary(get.strategy(qs.strategy))
# 8) Add signals
# Long entry: z-score below buyThresh AND ADF p-value below alpha
add.signal(qs.strategy, name="sigThreshold",arguments=list(column="Z.Score", threshold=buyThresh,
                                                           relationship="lt", cross=FALSE),label="longEntryZ")
add.signal(qs.strategy, name="sigThreshold",arguments=list(column="P.Value", threshold= alpha,
                                                           relationship="lt", cross=FALSE),label="PEntry")
add.signal(qs.strategy, name="sigAND",
           arguments=list(columns=c("longEntryZ", "PEntry"), cross=FALSE),
           label="longEntry")
# Long exit: z-score rises back above exitlong
add.signal(qs.strategy, name="sigThreshold",arguments=list(column="Z.Score", threshold= exitlong,
                                                           relationship="gt", cross=FALSE),label="longExit")
# Short entry: z-score above sellThresh AND the same ADF gate
add.signal(qs.strategy, name="sigThreshold",arguments=list(column="Z.Score", threshold=sellThresh,
                                                           relationship="gt", cross=FALSE),label="shortEntryZ")
add.signal(qs.strategy, name="sigAND", arguments=list(columns=c("shortEntryZ", "PEntry"), cross=FALSE),
           label="shortEntry")
# Short exit: z-score falls back below exitshort
add.signal(qs.strategy, name="sigThreshold",arguments=list(column="Z.Score", threshold= exitshort,
                                                           relationship="lt", cross=FALSE),label="shortExit")
summary(get.strategy(qs.strategy))
# Position limits: at most +/- 3000 units of the spread
addPosLimit( portfolio = qs.strategy, # add position limit rules
             symbol = 'spread',
             timestamp = initDate,
             maxpos = 3000,
             longlevels = 1,
             minpos = -3000)
# 9) Add rules: market orders at next open, sized by osMaxPos
add.rule(qs.strategy, name='ruleSignal',arguments = list(sigcol="longEntry",
                                                         sigval=TRUE, orderqty=3000, osFUN = osMaxPos, replace = FALSE, ordertype='market',
                                                         orderside='long', prefer = "open"), type='enter' )
add.rule(qs.strategy, name='ruleSignal', arguments = list(sigcol="shortEntry",
                                                          sigval=TRUE, orderqty=-3000, osFUN = osMaxPos, replace = FALSE,ordertype='market',
                                                          orderside='short', prefer = "open"), type='enter')
# NOTE(review): the "longExit" rule is attached with orderside='short' and
# "shortExit" with orderside='long'. quantstrat exit rules normally name
# the side of the position being exited -- confirm these two are not swapped.
add.rule(qs.strategy, name='ruleSignal', arguments = list(sigcol="longExit",
                                                          sigval=TRUE, orderqty= 'all', ordertype='market', orderside='short', prefer = "open"), type='exit')
add.rule(qs.strategy, name='ruleSignal', arguments = list(sigcol="shortExit",
                                                          sigval=TRUE, orderqty= 'all' , ordertype='market', orderside='long', prefer = "open"), type='exit')
summary(get.strategy(qs.strategy))
# 10) Apply strategy
applyStrategy(strategy = qs.strategy, portfolios = qs.strategy, mktdata = spread)
tns <-getTxns(Portfolio=qs.strategy, Symbol= symbols)
# 11) Update portfolio, account, equity
updatePortf(qs.strategy)
#dateRange <- time(getPortfolio(qs.strategy)$summary)[-1]
updateAcct(qs.strategy)
updateEndEq(qs.strategy)
# 12) Plot the results
# chart.P2: lightly adapted copy of blotter's position-charting routine
# (chart.Posn-style). Plots the price series with buy/sell markers, the
# position in a sub-panel, cumulative trading P&L and drawdown for one
# symbol of the named portfolio.
#   Portfolio - portfolio name (string, looked up via getPortfolio)
#   Symbol    - defaults to the first symbol in the portfolio
#   Dates     - xts-style date range string; NULL = full price range
#   ...       - forwarded to chart_Series (may carry `prefer` for
#               non-OHLC price extraction, read via match.call below)
#   TA        - extra TA expressions for chart_Series
chart.P2 = function (Portfolio, Symbol, Dates = NULL, ..., TA = NULL)
{
  pname <- Portfolio
  Portfolio <- getPortfolio(pname)
  if (missing(Symbol))
    Symbol <- ls(Portfolio$symbols)[[1]]
  else Symbol <- Symbol[1]
  # price object is looked up by symbol name in the search path
  Prices = get(Symbol)
  if (!is.OHLC(Prices)) {
    # pull `prefer` out of ... (it is not a formal argument)
    if (hasArg(prefer))
      prefer = eval(match.call(expand.dots = TRUE)$prefer)
    else prefer = NULL
    Prices = getPrice(Prices, prefer = prefer)
  }
  # bar length in seconds for the detected periodicity
  freq = periodicity(Prices)
  switch(freq$scale, seconds = {
    mult = 1
  }, minute = {
    mult = 60
  }, hourly = {
    mult = 3600
  }, daily = {
    mult = 86400
  }, {
    mult = 86400
  })
  if (!isTRUE(freq$frequency * mult == round(freq$frequency,
                                             0) * mult)) {
    n = round((freq$frequency/mult), 0) * mult
  }
  else {
    n = mult
  }
  # NOTE(review): `tzero` and `n` are computed but never used below --
  # leftovers from the function this was adapted from.
  tzero = xts(0, order.by = index(Prices[1, ]))
  if (is.null(Dates))
    Dates <- paste(first(index(Prices)), last(index(Prices)),
                   sep = "::")
  # restrict transactions and P&L to the requested date range
  Portfolio$symbols[[Symbol]]$txn <- Portfolio$symbols[[Symbol]]$txn[Dates]
  Portfolio$symbols[[Symbol]]$posPL <- Portfolio$symbols[[Symbol]]$posPL[Dates]
  Trades = Portfolio$symbols[[Symbol]]$txn$Txn.Qty
  Buys = Portfolio$symbols[[Symbol]]$txn$Txn.Price[which(Trades >
                                                           0)]
  Sells = Portfolio$symbols[[Symbol]]$txn$Txn.Price[which(Trades <
                                                            0)]
  Position = Portfolio$symbols[[Symbol]]$txn$Pos.Qty
  if (nrow(Position) < 1)
    stop("no transactions/positions to chart")
  # pad the position series with a leading zero if prices start earlier
  if (as.POSIXct(first(index(Prices))) < as.POSIXct(first(index(Position))))
    Position <- rbind(xts(0, order.by = first(index(Prices) -
                                                1)), Position)
  Positionfill = na.locf(merge(Position, index(Prices)))
  CumPL = cumsum(Portfolio$symbols[[Symbol]]$posPL$Net.Trading.PL)
  if (length(CumPL) > 1)
    CumPL = na.omit(na.locf(merge(CumPL, index(Prices))))
  else CumPL = NULL
  if (!is.null(CumPL)) {
    # drawdown = distance below the running P&L maximum (<= 0)
    CumMax <- cummax(CumPL)
    Drawdown <- -(CumMax - CumPL)
    Drawdown <- rbind(xts(-max(CumPL), order.by = first(index(Drawdown) -
                                                          1)), Drawdown)
  }
  else {
    Drawdown <- NULL
  }
  if (!is.null(Dates))
    Prices = Prices[Dates]
  # base chart plus overlays: buys (green), sells (red), position (blue),
  # cumulative P&L (dark green) and drawdown (dark red)
  chart_Series(Prices, name = Symbol, TA = TA, ...)
  if (!is.null(nrow(Buys)) && nrow(Buys) >= 1)
    (add_TA(Buys, pch = 2, type = "p", col = "green", on = 1))
  if (!is.null(nrow(Sells)) && nrow(Sells) >= 1)
    (add_TA(Sells, pch = 6, type = "p", col = "red", on = 1))
  if (nrow(Position) >= 1) {
    (add_TA(Positionfill, type = "h", col = "blue", lwd = 2))
    (add_TA(Position, type = "p", col = "orange", lwd = 2,
            on = 2))
  }
  if (!is.null(CumPL))
    (add_TA(CumPL, col = "darkgreen", lwd = 2))
  if (!is.null(Drawdown))
    (add_TA(Drawdown, col = "darkred", lwd = 2, yaxis = c(0,
                                                          -max(CumMax))))
  plot(current.chob())
}
# Render the trades/position/P&L chart and the performance summary
dev.new()
chart.P2(qs.strategy, "spread", prefer = "close")
returns <- PortfReturns(qs.strategy)
dev.new()
charts.PerformanceSummary(returns, geometric=FALSE, wealth.index=TRUE, main = "Pair Strategy Returns")
# 13) Get statistics (per-trade stats, rounded to 4 decimals, transposed
# for a one-column-per-symbol printout)
tStats <- tradeStats(qs.strategy, use="trades", inclZeroDays=FALSE)
tStats[,4:ncol(tStats)] <- round(tStats[,4:ncol(tStats)], 4)
tStats <- print(data.frame(t(tStats[,-c(1,2)])))
# 14) Get the order book
orderBook <- getOrderBook(qs.strategy)
|
f401ce146a9352cc119d04d1b9a3f01a311700ba
|
94bd847092dd13661cd64a2380fa4ba6d7491cf7
|
/R/createDataframe.R
|
2de65f62c0896de4cedebf61a782557c01e3b9d6
|
[] |
no_license
|
yoavram/diskImageR
|
e7ce9a08db284c3214d4769f284208dbab179bc2
|
767e2096c234a54fb260d1ffa697450a905c06c6
|
refs/heads/master
| 2021-01-21T18:51:01.155601
| 2015-03-19T21:28:28
| 2015-03-19T21:28:28
| 33,185,879
| 0
| 0
| null | 2015-03-31T13:06:53
| 2015-03-31T13:06:53
| null |
UTF-8
|
R
| false
| false
| 10,654
|
r
|
createDataframe.R
|
#' Dataframe creation
#' @description Writes the main dataframe with resistance, tolerance and sensitivity parameters
#' @inheritParams maxLik
#' @param nameVector either a logial value or a character vector. Supported values are \code{nameVector} = "TRUE" to assign the photograph name to the 'name' column, \code{nameVector} = "FALSE" to assign th photograph number to the 'name' column, or \code{nameVector} = a vector the same length as the number of photographs indicating the desired names.
#' @param typeVector a logical value. \code{typeVector} = "TRUE" will add a 'type' vector to the dataframe using values found in the \code{typePlace} position of the photograph names (see \code{\link{IJMacro}} for more details) while \code{typeVector} = "FALSE" will not add a type column.
#' @param typePlace a number that indicates the position of the photograph name to be stored as the 'type' vector'. Defaults to 2. For more details see \code{\link{IJMacro}}
#' @param typeName a character string that indicates what to name the typeVector. Defaults to "type".
#' @details A dataframe with 11 columns:
#' \itemize{
#' \item\bold{name:} determined by \code{nameVector}, either photograph names, photograph numbers, or a user-supplied list of names
#' \item\bold{line:} the first components of the \code{namesVector}; everything that comes before the first "_" in the photograph name
#' \item\bold{type:} the location within the \code{name} of the phograph type is supplied by \code{typePlace}. Use \code{\link{addType}} if more than one type column are desired.
#' \item\bold{ZOI80, ZOI50, ZOI20:} resistance parameters, coresponding to the distance in mm of 80\%, 50\% and 20\% reduction in growth
#' \item\bold{fAUC80, fAUC50, fAUC20:} tolerance parameters, coresponding to the fraction of growth achieved above the 80\%, 50\% and 20\% reduction in growth points
#' \item\bold{slope:} sensitivity parameter, indicating the slope at the midpoint, i.e., how rapidly the population changes from low growth to full growth
#' }
#' @return A dataframe "projectName.df" is saved to the global environment and a .csv file "projectName_df.csv" is exported to the "parameter_files" directory.
#' @export
#' @author Aleeza C. Gerstein
#addin removal of blank disk plate
#try to automate clearHalo
createDataframe <- function(projectName, clearHalo, diskDiam = 6, maxDist = 30, nameVector=TRUE, typeVector=TRUE, typePlace=2, typeName = "type"){
  #Build the main parameter dataframe (resistance ZOI*, tolerance fAUC*,
  #sensitivity slope), write it to
  #parameter_files/<projectName>/<projectName>_df.csv and assign
  #"<projectName>.df" into the global environment. See the roxygen block
  #above for the column-by-column description.

  #Interactively ask for the clear-halo photograph number if it was not
  #supplied by the caller.
  if(!(hasArg(clearHalo))){
    cont <- readline(paste("Please specify photograph number with a clear halo ", sep=""))
    clearHalo <- as.numeric(cont)
  }
  #Resolve the project objects by name. get() replaces the fragile
  #eval(parse(text = ...)) idiom; the lookup walks the same environment
  #chain, so objects created in the global environment are still found.
  data <- get(projectName)
  dotedge <- diskDiam/2 + 0.4
  standardLoc <- 2.5
  #Output locations, created under the current working directory if missing.
  newdir <- file.path(getwd(), "parameter_files")
  newdir2 <- file.path(getwd(), "parameter_files", projectName)
  newdir3 <- file.path(getwd(), "figures", projectName)
  filename <- file.path(getwd(), "parameter_files", projectName, paste(projectName, "_df.csv", sep=""))
  if (!file.exists(newdir)){
    dir.create(newdir, showWarnings = FALSE)
    cat(paste("\n\tCreating new directory: ", newdir), sep="")
  }
  if (!file.exists(newdir2)){
    dir.create(newdir2, showWarnings = FALSE)
    cat(paste("\nCreating new directory: ", newdir2), sep="")
  }
  if (!file.exists(newdir3)){
    dir.create(newdir3, showWarnings = FALSE)
    cat(paste("\nCreating new directory: ", newdir3), sep="")
  }
  df <- data.frame(row.names = seq(1, length(data)))
  #Maximum-likelihood fit objects produced earlier in the pipeline.
  ML <- get(paste(projectName, ".ML", sep=""))
  ML2 <- get(paste(projectName, ".ML2", sep=""))
  #Standardize each photograph's intensity against the brightest photograph
  #at the standard location (2.5 mm from the disk).
  dotMax <- max(sapply(data, function(x) {x[which(x[,1] > standardLoc)[1], 2]}))
  stand <-c( sapply(data, function(x) {dotMax-x[which(x[,1] > standardLoc)[1], 2]}))
  #Baseline intensity taken from the user-designated clear-halo photograph,
  #trimmed to (dotedge+0.5, maxDist-0.5) mm and re-zeroed at the disk edge.
  clearHaloData <- data[[clearHalo]]
  startX <- which(clearHaloData[,1] > dotedge+0.5)[1]
  stopX <- which(clearHaloData[,1] > maxDist - 0.5)[1]
  clearHaloData <- clearHaloData[startX:stopX, 1:2]
  clearHaloData$x <- clearHaloData$x + stand[clearHalo]
  clearHaloData$distance <- clearHaloData$distance - (dotedge+0.5)
  clearHaloStand <- clearHaloData[1,2]
  #Per-photograph sensitivity (slope) and AUC/ZOI quantities.
  slope <- sapply(c(1:length(data)), .findSlope, data=data, ML=ML, stand = stand, dotedge = dotedge, maxDist = maxDist, clearHaloStand = clearHaloStand)
  AUC.df <- sapply(c(1:length(data)), .findAUC, data=data, ML=ML, ML2 = ML2, stand = stand, dotedge = dotedge, maxDist = maxDist, clearHaloStand = clearHaloStand, standardLoc = standardLoc)
  x80 <- unlist(AUC.df[1,])
  x50 <- unlist(AUC.df[2,])
  x20 <- unlist(AUC.df[3,])
  AUC80 <- unlist(AUC.df[4,])
  AUC50 <- unlist(AUC.df[5,])
  AUC20 <- unlist(AUC.df[6,])
  maxAUC <- unlist(AUC.df[7,])
  maxAUC80 <- unlist(AUC.df[8,])
  maxAUC50 <- unlist(AUC.df[9,])
  maxAUC20 <- unlist(AUC.df[10,])
  #Photographs with a very shallow slope (< 5) are treated as having no
  #inhibition zone: AUCs blanked, ZOI distances forced to 1.
  AUC80[slope < 5] <- NA
  AUC50[slope < 5] <- NA
  AUC20[slope < 5] <- NA
  x80[slope < 5] <- 1
  x50[slope < 5] <- 1
  x20[slope < 5] <- 1
  #NOTE(review): aveAUC* and maxAUC are computed but not used below;
  #kept for backward compatibility with any interactive use.
  aveAUC80 <- AUC80/x80
  aveAUC50 <- AUC50/x50
  aveAUC20 <- AUC20/x20
  param <- data.frame(ZOI80 =round(x80, digits=0), ZOI50 = round(x50, digits=0), ZOI20 = round(x20, digits=0), fAUC80 = round(AUC80/maxAUC80, digits=2), fAUC50 = round(AUC50/maxAUC50, digits=2), fAUC20 = round(AUC20/maxAUC20, digits=2), slope=round(slope, digits=1))
  #Assemble the name/line (and optional type) columns per the arguments.
  if (is.logical(nameVector)){
    if (nameVector){
      line <- unlist(lapply(names(data), function(x) strsplit(x, "_")[[1]][1]))
      df <- data.frame(name = names(data), line)
    }
    if (!nameVector){
      line <- seq(1, length(data))
      df <- data.frame(name = names(data), line, df)
    }
  }
  if (!is.logical(nameVector)){
    line <- nameVector
    names <- unlist(lapply(names(data), function(x) strsplit(x, "_")[[1]][1]))
    df <- data.frame(names=names, line=line, df)
  }
  if (typeVector){
    type <- unlist(lapply(names(data), function(x) strsplit(x, "_")[[1]][typePlace]))
    df <- data.frame(df, type, param)
  }
  else {
    df$type <- 1
    df <- data.frame(df, param)
  }
  names(df)[3] <- typeName
  df <- df[order(df$line),]
  #Cap fractional AUCs at 1, and force them to 1 where no zone exists.
  df$fAUC80[df$fAUC80 >1] <- 1
  df$fAUC50[df$fAUC50 >1] <- 1
  df$fAUC20[df$fAUC20 >1] <- 1
  df$fAUC80[df$ZOI80 == 1] <- 1
  df$fAUC50[df$ZOI50 == 1] <- 1
  df$fAUC20[df$ZOI20 == 1] <- 1
  #Persist to CSV and publish "<projectName>.df" globally.
  write.csv(df, file=filename, row.names=FALSE)
  dfName <- paste(projectName, ".df", sep="")
  cat(paste("\n", dfName, " has been written to the global environment", sep=""))
  cat(paste("\nSaving file: ", filename, sep=""))
  cat(paste("\n", projectName, "_df.csv can be opened in MS Excel.", sep=""))
  assign(dfName, df, envir=globalenv())
}
#Determine the slope
# Estimate the slope of the fitted log-distance dose-response curve near its
# midpoint (the distance where the curve crosses exp(par[2])), via a local
# linear fit over up to 10 grid points on either side.
.findSlope <- function(data, ML, i, stand, clearHaloStand, dotedge = 3.4, maxDist = 35){
  # Restrict the i-th intensity profile to the analysis window.
  prof <- data[[i]]
  firstRow <- which(prof[,1] > dotedge+0.5)[1]
  lastRow <- which(prof[,1] > maxDist - 0.5)[1]
  prof <- prof[firstRow:lastRow, 1:2]
  # Standardize intensities and re-zero distances at the dot edge.
  prof$x <- prof$x + stand[i] - clearHaloStand
  prof$distance <- prof$distance - (dotedge+0.5)
  # Evaluate the fitted curve on a 200-point log-distance grid.
  logGrid <- seq(log(prof$distance[1]), log(max(prof[,1])), length=200)
  fitVals <- .curve(ML[[i]]['par'][1]$par[1], ML[[i]]['par'][1]$par[2], ML[[i]]['par'][1]$par[3], logGrid)
  # First grid point past the curve's midpoint distance exp(par[2]).
  midIdx <- which.max(exp(logGrid) > exp(ML[[i]]['par'][1]$par[2]))
  if ((midIdx-10) > 1){
    window <- (midIdx-10):(midIdx+10)
  }
  else {
    window <- 1:(midIdx+10)
  }
  # Local variable names kept so the returned coefficient keeps the name
  # "xxSlope", matching the original behavior.
  xxSlope <- logGrid[window]
  yySlope <- fitVals[window]
  slope <- lm(yySlope ~ xxSlope)$coefficients[2]
  return(slope)
}
# Compute zone-of-inhibition breakpoints (20/50/80% of the asymptote) and
# areas under the fitted dose-response curve for the i-th profile, returning
# a one-row data frame of rounded summary statistics.
# NOTE(review): relies on the file-local helper .curve2 and on ML/ML2 being
# lists of optim-style fits (each with a $par vector) built elsewhere.
.findAUC <- function(data, ML, ML2, stand, clearHaloStand, dotedge = 3.4, maxDist = 35, standardLoc = 2.5, i){
# Trim the profile to the window just past the dot edge, up to maxDist.
startX <- which(data[[i]][,1] > dotedge+0.5)[1]
stopX <- which(data[[i]][,1] > maxDist - 0.5)[1]
data[[i]] <- data[[i]][startX:stopX, 1:2]
# Shift intensities onto the standardized scale and re-zero distances.
data[[i]]$x <- data[[i]]$x + stand[i] - clearHaloStand
data[[i]]$distance <- data[[i]]$distance - (dotedge+0.5)
# Evaluate the double-logistic fit on a 200-point log-distance grid.
xx <- seq(log(data[[i]]$distance[1]), log(max(data[[i]][,1])), length=200)
yy<- .curve2(ML2[[i]]$par[1], ML2[[i]]$par[2], ML2[[i]]$par[3], ML2[[i]]$par[5], ML2[[i]]$par[6], ML2[[i]]$par[7], xx)
# ic50 <- ML[[i]]$par[2]
# NOTE(review): ploty is computed but never used in this function.
ploty <- data[[i]]$x
ploty[ploty < 0] <-0
# slope <- ML[[i]]$par[3]
# Asymptote of the single-logistic fit, shifted back to the intensity scale.
asym <- (ML[[i]]$par[1]+min(data[[i]]$x))
xx <- seq(log(data[[i]]$distance[1]), log(max(data[[i]][,1])), length=200)
yy <- (yy+min(data[[i]]$x))
yy[yy < 0] <- 0
# Log-distances where the curve first reaches 20/50/80% of the asymptote;
# x80 = distance of 80% growth reduction, x50 = 50%, x20 = 20%.
x80 <- xx[which.max(yy> asym * 0.2)]
x50 <- xx[which.max(yy> asym * 0.5)]
x20 <- xx[which.max(yy> asym * 0.8)]
# Fallback when the ordering is inconsistent: threshold against the curve's
# right-end value instead of the asymptote.
if (x20 < x50) x20 <- xx[which.max(yy> yy[length(yy)] * 0.8)]
# Build integration grids from the start of the profile up to each breakpoint
# (falling back to the first two distances when the breakpoint is < 1).
if(exp(x80)>1) xx80 <- seq(log(data[[i]]$distance[1]), log(round(exp(x80))), length=200)
else xx80 <- seq(log(data[[i]]$distance[1]), log(data[[i]]$distance[2]), length=200)
if(exp(x50)>1) xx50 <- seq(log(data[[i]]$distance[1]), log(round(exp(x50))), length=200)
else xx50 <- seq(log(data[[i]]$distance[1]), log(data[[i]]$distance[2]), length=200)
if(exp(x20)>1) xx20 <- seq(log(data[[i]]$distance[1]), log(round(exp(x20))), length=200)
else xx20 <- seq(log(data[[i]]$distance[1]), log(data[[i]]$distance[2]), length=200)
# Re-evaluate the fitted curve on the full grid and each truncated grid.
yy <- .curve2(ML2[[i]]$par[1], ML2[[i]]$par[2], ML2[[i]]$par[3], ML2[[i]]$par[5], ML2[[i]]$par[6], ML2[[i]]$par[7], xx)
yy80 <- .curve2(ML2[[i]]$par[1], ML2[[i]]$par[2], ML2[[i]]$par[3], ML2[[i]]$par[5], ML2[[i]]$par[6], ML2[[i]]$par[7], xx80)
yy50<- .curve2(ML2[[i]]$par[1], ML2[[i]]$par[2], ML2[[i]]$par[3], ML2[[i]]$par[5], ML2[[i]]$par[6], ML2[[i]]$par[7], xx50)
yy20<- .curve2(ML2[[i]]$par[1], ML2[[i]]$par[2], ML2[[i]]$par[3], ML2[[i]]$par[5], ML2[[i]]$par[6], ML2[[i]]$par[7], xx20)
# Shift onto the intensity scale; floor at 0.1 to keep areas positive.
yy <- (yy+min(data[[i]]$x))
yy[yy < 0] <- 0.1
yy80 <- (yy80+min(data[[i]]$x))
yy80[yy80 < 0] <- 0.1
yy50 <- (yy50+min(data[[i]]$x))
yy50[yy50 < 0] <- 0.1
yy20 <- (yy20+min(data[[i]]$x))
yy20[yy20 < 0] <- 0.1
id <- order(xx)
id80 <- order(xx80)
id50 <- order(xx50)
id20 <- order(xx20)
# Trapezoidal integration (diff of the grid times the rolling mean of the
# curve). NOTE(review): maxAUC integrates over log-distance (diff(xx)) while
# the AUC* integrals below use exp()-transformed grids -- confirm intended.
maxAUC <- sum(diff(xx[id])*zoo::rollmean(yy[id], 2))
# Rectangular upper bounds used to express each AUC as a fraction later.
maxAUC80 <- exp(x80)*max(yy80)
maxAUC50 <- exp(x50)*max(yy50)
maxAUC20 <- exp(x20)*max(yy20)
AUC80 <- sum(diff(exp(xx80[id80]))*zoo::rollmean(yy80[id80], 2))
AUC50 <- sum(diff(exp(xx50[id50]))*zoo::rollmean(yy50[id50], 2))
AUC20 <- sum(diff(exp(xx20[id20]))*zoo::rollmean(yy20[id20], 2))
# Collect rounded summaries; x* columns are reported on the distance scale.
param <- data.frame(x80 = round(exp(x80), digits=0), x50 = round(exp(x50), digits=2), x20 = round(exp(x20), digits=0) , AUC80 = round(AUC80, digits=0), AUC50= round(AUC50, digits=0), AUC20= round(AUC20, digits=0), maxAUC = round(maxAUC, digits=0), maxAUC80 = round(maxAUC80, digits=0), maxAUC50 = round(maxAUC50, digits=0), maxAUC20 = round(maxAUC20, digits=0))
# NOTE(review): param$x80 already holds round(exp(x80)), so exp() here is
# applied a second time -- the guards below likely intend `param$x80 < 1`
# etc. Left unchanged; confirm against upstream usage before fixing.
if (exp(param$x80)<1){
param$x80 <- 1}
if (exp(param$x50)<1){
param$x50 <- 1}
if (exp(param$x20)<1){
param$x20 <- 1}
return(param)
}
|
307fa49555ac4f64ca27ec45f878337b26022369
|
5e41eead74305b7bfed2f52a17288f830bff42fc
|
/plot3.R
|
f7ad80c2c48f64e2fe8165193a511d887c61efc9
|
[] |
no_license
|
avshatr/ExData_Plotting1
|
416f8e8a6f8d79db49ba6847983c56423a38fff8
|
34bc1e96e67e77546f50d99e946ad362e137953a
|
refs/heads/master
| 2021-01-09T08:08:41.324772
| 2016-01-07T16:41:57
| 2016-01-07T16:41:57
| 49,204,899
| 0
| 0
| null | 2016-01-07T13:01:48
| 2016-01-07T13:01:47
| null |
UTF-8
|
R
| false
| false
| 1,160
|
r
|
plot3.R
|
# Plot 3: household energy sub-metering over 1-2 Feb 2007, saved to plot3.png.
# Fetch and unpack the UCI household power consumption data set.
zip_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
download.file(zip_url, destfile = 'PrAss1data.zip')
unzip('PrAss1data.zip')
# Read the raw table; '?' marks missing values. data.table package required.
library(data.table)
power <- fread('household_power_consumption.txt', na.strings = '?')
# Keep only the two days of interest.
power <- power[power$Date == '1/2/2007' | power$Date == '2/2/2007']
# Build POSIX timestamps from the separate Date and Time columns.
stamps <- strptime(paste(power$Date, power$Time, sep = " "), '%d/%m/%Y %H:%M:%S')
# Draw the three sub-metering series into a PNG device.
png(filename = 'plot3.png', width = 480, height = 480)
plot(stamps, power$Sub_metering_1, col = 'black', main = '', xlab = '',
     ylab = 'Energy sub metering', type = 'l', xaxt = 'n')
lines(stamps, power$Sub_metering_2, type = 'l', col = 'red')
lines(stamps, power$Sub_metering_3, type = 'l', col = 'blue')
# Custom x axis: one tick per day boundary, labelled by weekday.
tick_stamps <- strptime(c('1/2/2007 00:00:00', '2/2/2007 00:00:00', '3/2/2007 00:00:00'),
                        '%d/%m/%Y %H:%M:%S')
axis(side = 1, at = as.numeric(tick_stamps), labels = c('Thu', 'Fri', 'Sat'))
# Legend identifying the three series.
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       col = c('black', 'red', 'blue'), lty = c(1, 1))
dev.off()
|
bfca869e6ac3d8c5e5039a0b425ce79fa739ea3e
|
3ccb8544a8408ccad37c39a907db6c0ffa65f706
|
/Neural Network/50 Startups/NN-startups-R code.R
|
f2bbb85d3aa00dc696e8d7173c9b933fbdb877fb
|
[] |
no_license
|
mahesh-sakharpe/Machine_Learning
|
f0fbc2dd4f9e724b0cdbbef6a06e3ce3e6f87cda
|
f4e2a38b17a88ab7eedcf66773b9f7377fc96799
|
refs/heads/master
| 2022-11-24T05:29:32.574272
| 2020-07-31T06:53:57
| 2020-07-31T06:53:57
| 283,955,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,648
|
r
|
NN-startups-R code.R
|
##### Neural Network #####
normalize_dummy <- function(x){
  # Min-max scale every numeric/integer column of `x` into [0, 1] and expand
  # every factor column into dummy (one-hot) indicator columns.
  #
  # Args:
  #   x: a data frame.
  # Returns:
  #   A data frame: the non-factor columns (numerics rescaled) followed by the
  #   dummy columns built from each factor column; factor columns are dropped.
  # Note: the `dummies` package is only required when factor columns exist.
  for (i in seq_len(ncol(x))) {
    if (is.numeric(x[, i])) {
      minx <- min(x[, i])
      maxx <- max(x[, i])
      # Vectorized min-max scaling (replaces the original per-cell loop).
      # A constant column (maxx == minx) maps to all zeros, matching the
      # original ifelse() guard and avoiding 0/0 = NaN.
      if (maxx == minx) {
        x[, i] <- rep(0, nrow(x))
      } else {
        x[, i] <- (x[, i] - minx) / (maxx - minx)
      }
    }
  }
  # Collect dummy expansions of factor columns, remembering their positions.
  dummy_frames <- list()
  factor_cols <- integer(0)
  for (i in seq_len(ncol(x))) {
    if (is.factor(x[, i])) {
      dummy_frames[[length(dummy_frames) + 1L]] <- data.frame(dummies::dummy(x[, i]))
      factor_cols <- c(factor_cols, i)
    }
  }
  if (length(factor_cols) == 0) {
    return(x)
  }
  # Drop the factor columns and append their dummy expansions.
  data.frame(x[, -factor_cols], do.call(cbind, dummy_frames))
}
# Invert min-max normalization: map x in [0, 1] back onto the scale [min, max].
# Note: the parameter names `max`/`min` intentionally mirror the original API.
denormalize <- function(x,max,min){
  min + x * (max - min)
}
#install.packages(c("neuralnet","nnet"))
library(nnet)
library(neuralnet)
# Question no 1 ----
"Build a Neural Network model for 50_startups data to predict profit "
# Interactively choose the 50_Startups CSV.
statups <- read.csv(file.choose())
head(statups)
str(statups)
# Target is Profit
plot(statups[,-4])
# NOTE(review): install.packages() inside a script re-installs on every run;
# better moved to a one-time setup step.
install.packages("corrplot")
library(corrplot)
corrplot::corrplot(cor(statups[,-4]))
colSums(is.na(statups)) # No NA values are present in our data
# Min-max normalize numerics and one-hot encode State (see normalize_dummy above).
df_statups <- data.frame(normalize_dummy(statups))#,profit=statups$Profit)
colnames(df_statups)[5:7] <- c("California","Florida","NewYork")
head(df_statups)
boxplot(statups)
# Train Test Splitting
set.seed(101)
Split_s <- sample(x = 1:nrow(df_statups),size = round(nrow(df_statups)*0.3),replace = F)
Train_St <- df_statups[-Split_s,];dim(Train_St)
Test_St <- df_statups[Split_s,];dim(Test_St)
# Model Building
head(df_statups)
set.seed(4) #4
# Model 1: default single hidden unit.
model_St1 <- neuralnet(Profit~.,data=Train_St)
plot(model_St1,rep = "best")
pred_St1 <- compute(model_St1,Test_St)
cor(pred_St1$net.result,Test_St$Profit) # 0.9778
# NOTE(review): denormalization uses max/min of the *test subset* of Profit,
# not the full-data range used when normalizing -- confirm this is intended.
pred_St_n1 <- denormalize(pred_St1$net.result,max(statups$Profit[Split_s]),min(statups$Profit[Split_s]))
cor(statups$Profit[Split_s],pred_St_n1) # 0.9778239
RMSE_S1 <- sqrt(mean((statups$Profit[Split_s]-pred_St_n1)^2)) # 14478.8
plot(statups$Profit[Split_s],pred_St_n1)
# Model 2: five hidden units.
model_St2 <- neuralnet(Profit~.,data=Train_St,hidden = 5)
plot(model_St2,rep = "best")
pred_St2 <- compute(model_St2,Test_St)
cor(pred_St2$net.result,Test_St$Profit) # 0.9600603
pred_St_n2 <- denormalize(pred_St2$net.result,max(statups$Profit[Split_s]),min(statups$Profit[Split_s]))
cor(statups$Profit[Split_s],pred_St_n2) # 0.9600603
RMSE_S2 <- sqrt(mean((statups$Profit[Split_s]-pred_St_n2)^2)) # 13479.83
plot(statups$Profit[Split_s],pred_St_n2)
|
d176f2359638ae5845f3a7e104be8acecb978790
|
869117e5c2d2a1d31103cde4c1aff64b6993bc83
|
/ode_solve.R
|
dc84a61f413af758035da3ea44facb1e2ebbbbfe
|
[] |
no_license
|
mcatalano/Chemical_Kinetics_in_R
|
5c38d43696fd022255fcf449a092644ae1a25b05
|
31f3e27d00b7dbc9290726af02cc7e3ac6571b01
|
refs/heads/master
| 2016-09-05T21:52:39.310844
| 2015-03-15T17:48:59
| 2015-03-15T17:48:59
| 32,272,723
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,416
|
r
|
ode_solve.R
|
## Set working directory and load package requirements
## NOTE(review): hard-coded absolute Windows path -- as written this script
## only runs on the original author's machine.
setwd("C:/Documents and Settings/Michael/Desktop/dA-AP EM/DNA Data/ODE Solve in R")
library(ggplot2)
library(reshape2)
library(deSolve)
library(minpack.lm)
## Read concentration/time data into data frame (no header row; columns are
## named below: time plus the four species A-D)
df <- read.csv('KS35pH7.csv', header=F)
names(df) <- c('time','A','B','C','D')
## The 'rate' function calculates results to the ODEs associated with the specified
## kinetic model and returns the results as a list
## Scheme: A <-> B (k1 forward / k2 back), A -> C (k3), C <-> D (k4 / k5).
rate <- function(t,c,params){
  # Unpack the rate constants from the parameter list.
  k1 <- params$k1
  k2 <- params$k2
  k3 <- params$k3
  k4 <- params$k4
  k5 <- params$k5
  # Current concentrations, looked up by species name.
  A <- c[['A']]
  B <- c[['B']]
  C <- c[['C']]
  D <- c[['D']]
  # Derivative vector kept at length(c), as deSolve::ode requires the
  # derivative to match the state vector's length.
  r <- rep(0, length(c))
  r[1] <- -k1*A + k2*B - k3*A
  r[2] <- -k2*B + k1*A
  r[3] <- -k4*C + k3*A + k5*D
  r[4] <-  k4*C - k5*D
  # deSolve expects the derivatives wrapped in a list.
  list(r)
}
## The 'resid' function solves the ODEs specified by calling the 'rate' function in ode(),
## then returns the residual between calculated and measured values
## NOTE(review): reads the measured data from the global `df` (set at the top
## of this script) rather than taking it as an argument, and its name masks
## the stats::resid() generic for the rest of the session.
resid <- function(params){
## Set initial concentrations
cinit <- c(A=100,B=0,C=0,D=0)
## Vector of time points
t <- df$time
## k values set to 'params' elements passed to function
k1 <- params[1]
k2 <- params[2]
k3 <- params[3]
k4 <- params[4]
k5 <- params[5]
## Integrate the kinetic model at exactly the measured time points.
output <- ode(y=cinit, times=t, func=rate, parms=list(k1=k1,k2=k2,k3=k3,k4=k4,k5=k5))
## Store 'output' matrix as data frame
outdf <- data.frame(output)
## Convert solution and measured data to long format
preddf <- melt(outdf, id.var="time", variable.name="species", value.name="yield")
expdf <- melt(df, id.var="time", variable.name="species", value.name="yield")
## Compute residual (predicted minus measured, stacked across species)
res <- preddf$yield - expdf$yield
return(res)
}
## Vector of estimated rate constants
params <- c(k1=.06, k2=.03, k3=.004, k4=.003, k5=.005)
## Least squares minimization of residual by adjusting specified parameters
fitval <- nls.lm(par=params, fn=resid)
## Store resulting parameters as a list
parest <- as.list(coef(fitval))
## Create a results matrix of solutions from least squares minimization by passing
## the solution parameters list, 'parest', to ode()
## Set initial concentrations
cinit <- c(A=100,B=0,C=0,D=0)
## Create an arbitrarily incremented time sequence
t <- seq(0, df[length(df$time),1], 0.1)
## Set parameters
## NOTE(review): `parest` is already a list, so as.list() here is a no-op.
params <- as.list(parest)
## Solve
output <- ode(y=cinit, times=t, func=rate, parms=params)
## Convert ODE solution matrix to labeled data frame
outdf <- data.frame(output)
names(outdf) <- c('time','A_pred','B_pred','C_pred','D_pred')
## Rearrange data to long format
## NOTE(review): the name `exp` below masks base::exp for the rest of the script.
pred <- melt(outdf, id.var=c('time'), variable.name='species', value.name='yield')
exp <- melt(df, id.var=c('time'), variable.name='species', value.name='yield')
## Open graphics device
png(file = "Time_course.png")
## Plot results: fitted curves as lines, measured data as points.
p <- ggplot(data=pred, aes(x=time, y=yield, color=species)) + geom_line()
p <- p + geom_point(data=exp, aes(x=time, y=yield, color=species))
p <- p + labs(x='Time (hr)', y='% Yield')
print(p)
## Close device
dev.off()
|
65a398e49e066193c0bd6ca23589590e5f04549d
|
7c4a5ed177862f9d85acf5034455f025f512514d
|
/man/findGoodPoints.Rd
|
22871a94a0c4090a984c52ec7c6999aeaa083c4c
|
[] |
no_license
|
cran/moveWindSpeed
|
22be24be88b474fd52e6cc0f31041a61300cc9ef
|
2ad07d45bbf331c6a387c4481b1a7f4abfabd31e
|
refs/heads/master
| 2023-06-22T22:43:45.248186
| 2023-06-07T07:20:02
| 2023-06-07T07:20:02
| 71,349,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,017
|
rd
|
findGoodPoints.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimatePhi.R
\name{findGoodPoints}
\alias{findGoodPoints}
\title{Function to find good points for estimation of phi}
\usage{
findGoodPoints(
data,
maxPointsToUseInEstimate,
phiInitialEstimate,
windowSize,
...
)
}
\arguments{
\item{data}{A move object.}
\item{maxPointsToUseInEstimate}{The number of desired windows.}
\item{phiInitialEstimate}{The initial value used for the autocorrelation when calculating the wind speed for finding suitable windows.}
\item{windowSize}{An odd number providing the window size}
\item{...}{passed on to getWindEstimates}
}
\value{
a logical vector with the focal locations
}
\description{
The function tries to find non overlapping windows for phi optimization.
}
\examples{
data(storks)
which(findGoodPoints( storks[[2]],
windowSize = 29, isSamplingRegular = 1,
isThermallingFunction = getDefaultIsThermallingFunction(360, 4), maxPointsToUseInEstimate = 10,
phiInitialEstimate = 0 ))
}
|
97b40cae3356fc704cd91ac3fb98a1640078fbd1
|
5a0a3df2d8beb5e4c6af6579ff4f26e1dbc6dbed
|
/man/stfu.Rd
|
ba98225f8175c2a12f3b1b8a51eed9e59a7a78c7
|
[] |
no_license
|
AABoyles/ShRoud
|
0c13864cdae7578d2171966cae0f34ed3952bd6a
|
d6c4bedfc226cdf5d86011835abbdb44cd74bacf
|
refs/heads/master
| 2020-12-24T07:18:31.434183
| 2017-12-22T19:08:37
| 2017-12-22T19:08:37
| 57,148,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 404
|
rd
|
stfu.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stfu.R
\name{stfu}
\alias{stfu}
\title{Shut the File Up}
\usage{
stfu(expr)
}
\arguments{
\item{expr}{The expression to evaluate with console output suppressed.}
}
\value{
whatever expr should return
}
\description{
Given an expression, stfu evaluates and returns the value of the expression, but suppresses output to the console.
}
\examples{
stfu(print('Foo'))
stfu(cat('Foo'))
}
|
bce5a4a970cfd5fae42289cd48b701bc0cdaa09d
|
904819e704f9cb1851351d6d9c1013baf7b42719
|
/check_check_check.R
|
a69229c50e42109675f3bcf7bb64a0a6c2548137
|
[] |
no_license
|
youyugithub/illustration_of_lasso_and_lars
|
bdd55208276677b078c97af28d7ef36843a20fc0
|
890226e662db8a184f5ff7fd12e870f4f3cbf778
|
refs/heads/master
| 2020-03-27T18:43:56.228483
| 2018-09-06T15:34:03
| 2018-09-06T15:34:03
| 146,939,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,311
|
r
|
check_check_check.R
|
##################################
# Lasso using coordinate descent #
##################################
# Simulate a Gaussian vector and whiten it: coordinate descent on (L, Lxx)
# then corresponds to the lasso on the original problem.
# NOTE(review): depends on random_varcov_mat() and mvrnorm() (MASS) being in
# scope; neither is defined or loaded in this chunk.
set.seed(10000)
ndim<-10
Sigma<-random_varcov_mat(ndim)
Sigma_inv<-solve(Sigma)
# Cholesky factor of the precision matrix (upper triangular).
L<-chol(Sigma_inv)
xxx<-mvrnorm(1,rep(0,ndim),Sigma)
Lxx<-L%*%xxx
# Coefficient estimates, initialized at zero.
est<-rep(0,ndim)
S<-function(z,g){
temp<-abs(z)-g
temp[temp<0]<-0
return(sign(z)*temp)
}
# Decreasing lambda path; betpat stores one coefficient vector per lambda.
lstlam<-seq(1,0.01,-0.01)
betpat<-matrix(NA,ndim,length(lstlam))
est<-rep(0,ndim)
# Variant 1: coordinate descent using only rows 1:idx of column idx of L
# (the nonzero part of the upper-triangular Cholesky factor).
# NOTE(review): the shrinkage level lam/abs(Lxx[idx]) differs from variant 2
# below -- confirm which scaling is intended.
for(idxlam in 1:length(lstlam)){
lam<-lstlam[idxlam]
# Warm start: est carries over from the previous (larger) lambda.
for(iter in 1:20){
for(idx in 1:ndim){
first<-sum(L[1:idx,idx]*(Lxx[1:idx]-L[1:idx,-idx]%*%est[-idx]))
est[idx]<-S(first,lam/abs(Lxx[idx]))/sum(L[1:idx,idx]^2)
}
}
betpat[,idxlam]<-est
}
matplot(t(betpat),type="l")
# Variant 2: standard coordinate descent on the full residual Lxx - L %*% est.
lstlam<-seq(1,0.01,-0.01)
betpat<-matrix(NA,ndim,length(lstlam))
est<-rep(0,ndim)
for(idxlam in 1:length(lstlam)){
lam<-lstlam[idxlam]
for(iter in 1:20){
for(idx in 1:ndim){
# Partial residual with coordinate idx zeroed out.
tmpest<-est
tmpest[idx]<-0
rrr<-Lxx-L%*%tmpest
arg1<-t(L[,idx])%*%rrr
est[idx]<-S(arg1,lam)/sum(L[,idx]^2)
}
}
betpat[,idxlam]<-est
}
matplot(t(betpat),type="l")
# Reference solution path from glmnet; lambda is rescaled by 1/ndim,
# presumably to match glmnet's averaged loss -- verify the scaling.
temp<-glmnet(L,Lxx,intercept=FALSE,standardize=FALSE,lambda=lstlam/ndim)
matplot(t(temp$beta),type="l")
# _________
# / \
# | THE SAME! |
# \ /
# ```````````
#
|
00875f7d29049053d57a71916920174731b51f80
|
1487481b28a414841650f597766ff230043e7118
|
/man/new_QKResults.Rd
|
fdbfd04a5e0f6b069defe5bc7993ca23fb4bb94a
|
[
"MIT"
] |
permissive
|
LLNL/quantkriging
|
5ace2bbcef5494ea189550532559d4d1f9438ed6
|
6cd541def13f5d19ec1738e05a50fb87abb02af4
|
refs/heads/master
| 2023-06-03T21:29:18.951960
| 2020-02-19T23:21:22
| 2020-02-19T23:21:22
| 224,298,363
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 430
|
rd
|
new_QKResults.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QKResults_class.R
\name{new_QKResults}
\alias{new_QKResults}
\title{QKResult Constructor}
\usage{
new_QKResults(qkList)
}
\arguments{
\item{qkList}{A list with elements quants, yquants, g, l, ll, beta0, nu, xstar, ystar, Ki, quantv, mult, ylisto, and type.}
}
\value{
New class QKResults
}
\description{
Create Quantile Kriging Results class from list
}
|
d70d77bfc656574c8f7136100590c427c6a35f5b
|
43c7074e7e0f453db079ad08884ad13b8fc59e28
|
/dc-superheroes.R
|
505f41f38151a277349a38f91270140491c474e6
|
[] |
no_license
|
MaxGfeller/experiments-with-R
|
4e7d652343e5507769f558b8b6496b6f11211606
|
c787c7dc7b94a75c32726a416f01aa2eea0dcb3d
|
refs/heads/master
| 2021-01-22T02:58:27.502492
| 2015-01-21T14:36:27
| 2015-01-21T14:36:27
| 29,206,778
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
dc-superheroes.R
|
# install.packages("ggplot2", dependencies=c("Depends", "Imports"), repos = "http://cran.us.r-project.org")
library("ggplot2")
# read data from csv file (second positional argument of read.csv is `header`,
# so TRUE here means header = TRUE)
data <- read.csv("test-data/comic-characters/dc-wikia-data.csv", TRUE)
# create an empty data frame; addToFrame() below appends one row per
# publication year and increments the per-sex counters
characters <- data.frame(
Year=integer(),
femaleCharacters=integer(),
maleCharacters=integer()
)
# Tally one character row into the global `characters` table.
# NOTE(review): called via apply(data, 1, ...), so `x` arrives as a *character*
# vector (apply coerces each row) -- hence the as.integer() conversions.
# Mutates the global `characters` with <<- instead of returning a value.
addToFrame <- function(x) {
if (!is.na(x["YEAR"]) && !is.null(x["YEAR"])) {
# First character seen for this year: append a fresh all-zero row,
# keyed by the year used as the row name.
if (!(as.integer(x["YEAR"]) %in% row.names(characters))) {
# add a new year
newYear <- data.frame(
Year=as.integer(x["YEAR"]),
femaleCharacters=0,
maleCharacters=0
)
row.names(newYear) <- as.integer(x["YEAR"])
characters <<- rbind(characters, newYear)
}
columnToUpdate <- "femaleCharacters"
if (x["SEX"] == "Male Characters") {
columnToUpdate <- "maleCharacters"
}
# Row lookup is by row name (the year as a string), not by position.
characters[x["YEAR"], columnToUpdate]<<-as.integer(characters[x["YEAR"], columnToUpdate]) + 1
}
}
# Tally every character row into `characters`, then sort rows by year.
invisible(apply(data, 1, addToFrame))
invisible(characters <- characters[order(rownames(characters)), ])
# Render male vs. female character-creation counts per year to a JPEG.
jpeg("output/dc-superheroes/male-vs-female-superheroes.jpg", width=1200, height=400)
p <- ggplot(data=characters, aes(
  x=Year
)) +
  labs(y="Characters created") +
  scale_color_discrete(name="Sex") +
  scale_x_continuous(breaks = round(seq(min(characters$Year), max(characters$Year), by = 5), 1)) +
  # BUG FIX: `ylab` is not an aesthetic and had no effect inside aes();
  # the y-axis label is already set via labs() above.
  geom_line(aes(y=maleCharacters, colour="male")) +
  geom_line(aes(y=femaleCharacters, colour="female"))
# BUG FIX: the ggplot object was never printed (no auto-print when source()d)
# and the jpeg device was never closed, so no image file was produced.
print(p)
dev.off()
|
924dff07e50cffccd6bfffb1634363421e4defc8
|
947c736c5c15dc19786384a506a0ee1646c76e1c
|
/Cap_12/tabela_freq.R
|
736c80c9eabb16c3d3f79e134a07df2a596cb6dd
|
[] |
no_license
|
shIsmael/DSA-PowerBI
|
b34396ce2a1445b96e09bc893ecb2a2aaa4865b3
|
42ec9c022c094f12bcb0ce1efbd88cea42ff53c3
|
refs/heads/master
| 2023-05-03T14:06:25.832994
| 2021-05-29T05:07:18
| 2021-05-29T05:07:18
| 369,346,365
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
r
|
tabela_freq.R
|
# Frequency table example (comments translated from Portuguese)
df = read.csv("Cap_12/Usuarios.csv")
# Absolute frequencies of education level (grau_instrucao)
freq <- table(df$grau_instrucao)
View(freq)
# Relative frequencies
freq_rel <- prop.table(freq)
View(freq_rel)
# Append a "Total" row
# NOTE(review): names(freq)[4] assumes exactly 3 education levels; with a
# different number of levels the label lands on the wrong entry.
freq <- c(freq, sum(freq))
names(freq)[4] <- "Total"
View(freq)
|
857fbca3daf42714ef8460b8579952145863458b
|
12f05e3b57214c3cbd42f11b10d81e23cc8598f6
|
/R/target.R
|
4c9df5c4802c27c1881d81be1e1440ee5ac7c0e3
|
[] |
no_license
|
dapperjapper/workshop
|
25a82011fe4db46d1084e2c8f964fe9cc1caf2cb
|
55e67da3b310239d018fe0b55cef33468ab3d099
|
refs/heads/master
| 2021-01-02T13:40:42.516821
| 2020-07-08T12:27:04
| 2020-07-08T12:27:04
| 239,646,444
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,398
|
r
|
target.R
|
#' Build every out-of-date instance of a file target
#'
#' Expands the `:dimension` placeholders in `filepath_spec` into every
#' combination of specified dimension values, then (re)builds each instance
#' asynchronously with `method`, skipping instances whose cached hash matches.
#'
#' @param filepath_spec Path template containing `:dimension` placeholders.
#' @param method Function that builds one instance and calls `save_target()`.
#' @param cache Target cache used for up-to-date checks and result metadata.
#' @param log_trackables If `TRUE`, write each instance's "trackables" (the
#'   objects that are hashed for cache invalidation) to disk for debugging.
#' @param force_rebuild If `TRUE`, rebuild every instance regardless of hashes.
#' @return Invisibly, the list of job records (each with `printed`/`future`).
#' @importFrom digest digest
#' @importFrom dplyr combine
#' @importFrom tidyr expand_grid
#' @importFrom purrr transpose walk reduce2
#' @importFrom rlang env_bind rep_along
#' @importFrom fs path_ext_remove
#' @importFrom stringr str_trim
#' @importFrom future future value
#' @export
target <- function(filepath_spec, method,
cache = default_cache(),
log_trackables = F, force_rebuild = F) {
dimensions <- spec_dimensions(filepath_spec)
ext <- path_ext(filepath_spec)
# Process method's args according to special arg syntax
args <- process_method_args(method, cache)
# Process method's dependencies that aren't explicitly specified as args,
# and bake them into the function environment
pure_method <- purify_function(method)
# Maybe this is necessary?? If only extension in filepath is changed, nothing will
# invalidate cache...
pure_method$trackables$ext <- ext
# What are the various dimensions that each arg operates over?
# Dimension args operate over themselves, other args that reference
# dimensions may operate over multiple dimensions
arg_dimensions <- map(args, "dimensions")
# For each dimension, retrieve all the values that it can take.
# For example, consider the below arg_dimensions:
# arg_dimensions <-
# list(
# size = list(size = c("big", "small")),
# name = list(name = c("ed",
# "edd", "eddy")),
# some_file = list(name = "ed", size = "big"),
# local_var = NULL
# )
# This would give specified dimensions list(size = "big", name = "ed").
# Since the name dimension isn't repeated in full for the some_file arg,
# we limit it to just the values that ARE reported.
specified_dimensions <- arg_dimensions %>%
map(names) %>%
combine() %>%
unique() %>%
set_names() %>%
map(function(dim) {
dim_values_for_args <- map(arg_dimensions, dim) %>% discard(is.null)
if (length(dim_values_for_args) > 1 && !dim_values_for_args %>% reduce(identical)) {
warning(
"For dimension `", dim, "`, the specified values are not equal between args...")
}
dim_values_for_args %>% reduce(intersect)
})
# Dimensions referenced by args must also appear in the path template,
# otherwise distinct instances would collide on the same file.
dimensions_missing_in_spec <- setdiff(names(specified_dimensions), dimensions)
if (length(dimensions_missing_in_spec)) {
stop(
"Dimensions ",
str_c("`", dimensions_missing_in_spec, "`", collapse = ", "),
" need to be present in the target spec,",
" or you must explicitly aggregate over them using dep_rollup."
)
}
# The converse of the above
# dynamic_branching_dimensions <- setdiff(dimensions, names(specified_dimensions))
# Dummy dimension if none present
if (length(specified_dimensions) == 0) {
specified_dimensions = list(id = T)
}
cat("Running target:", filepath_spec, "\n")
# Loop over every combination of all the specified dimensions
# TODO: is this appropriate behavior?
futures <- expand_grid(!!!specified_dimensions) %>%
transpose() %>%
reduce2(., 1:length(.), .init = list(), function(past_targets, these_dims, i) {
# Loop over the colors and create printer
color <- job_colors[[(i-1)%%length(job_colors)+1]]
dim_str <- list_to_str(these_dims)
if (identical(these_dims, list(id = T))) {
these_dims <- list()
dim_str <- "\u2600" # idk
}
# Per-job logger: prefixes every line with the colored dimension tag.
printer <- function(...) {
cat(color("[", dim_str, "] ", ..., "\n", sep = ""))
}
this_future <- run_target(
these_dims,
printer = printer,
filepath_spec = filepath_spec,
pure_method = pure_method,
args = args,
options = list(
log_trackables = log_trackables,
force_rebuild = force_rebuild
),
cache = cache
)
# What a confusing variable name! Who came up with that?
# Basically we throw our new asynchronous future on the pile,
# and then we go back and check to see if anything on the
# pile has resolved.
past_targets <- c(past_targets, list(list(printed = FALSE, future = this_future)))
past_targets <- print_resolved_futures(past_targets)
return(past_targets)
})
# TODO: remove targets from yaml that fit spec but were not touched?
# and remove their files?
if (!all(map_lgl(futures, "printed"))) {
cat("All jobs initiated. Waiting for target to complete...\n")
}
# Block until everything is complete
while (!all(map_lgl(futures, "printed"))) {
Sys.sleep(1)
futures <- print_resolved_futures(futures)
}
# TODO: last time this target took XX (time per target), this time it took XX
invisible(futures)
}
# Build (or skip) one instance of a target identified by `these_dims`.
# Internal helper for target(): decides via trackables hashing whether the
# instance is stale and, if so, launches the method inside a future.
# Returns the future, or NULL when the instance is already up to date.
run_target <- function(these_dims, printer,
filepath_spec, pure_method, args, options, cache) {
# TODO: try/catch
# Fill in the dimensions we have in the path, leave others still in :dimension format
filepath_spec_partial <- encode_spec(these_dims, filepath_spec, allow_missing = T)
# Determine if method needs to be re-run. Need to check:
# 1. The code of the method itself
# 2. The method's args
# 3. Any functions that the method accesses
# 3a. Just check package versions
# 3b. For local functions (or devtools shim functions),
# need to recursively track code!
# Assemble the hashes for the method's args (dependent on dimension)
pure_method$trackables$formals <- map(args, "hash") %>%
map(do.call, args = these_dims)
# Optionally dump the trackables next to the target for debugging.
if (options$log_trackables) {
trackables_dir <- str_c(path_dir(filepath_spec_partial), "/trackables")
dir_create(trackables_dir)
write_rds(pure_method$trackables,
str_c(trackables_dir, "/", path_file(filepath_spec_partial), "_trackables.rds"))
}
# TODO: is order of items in trackables object relevant? sort?
# this might happen if user specifies target formals in different order
# Get the hash of this run
trackables_hash <- digest(pure_method$trackables)
# Get the hashes of the last run
# (there may be multiple bc of unspecified dimensions... so
# we must check that hash is equal across all these)
target_hash <- path_ext_remove(filepath_spec_partial) %>%
read_matching_targets_cache(cache) %>%
map("hash") %>%
unique()
# Normalize to "" (which never equals a digest) when there is no single
# agreed-upon prior hash, forcing a rebuild below.
if (length(target_hash) != 1) {
target_hash <- ""
}
# Return if target is up to date
if (length(target_hash) &&
target_hash == trackables_hash &&
!options$force_rebuild) {
# TODO: Check that the result files still exist??
printer("Target is up to date. ", # `", path_ext_remove(filepath_spec_partial), "`
sample(encouragement, 1))
return()
}
# OK let's build this frickin target then
printer("Running target...") #`", path_ext_remove(filepath_spec_partial), "`
start_time <- Sys.time()
times <- list()
# Closure handed to the method: records the elapsed minutes of the phase
# that just finished (into `times`) and restarts the phase clock.
timer_phase_end <- function(phase_name = "Unnamed phase") {
end_time <- Sys.time()
# Come up with a unique name for the phase
name_suffix <- ""
while (str_trim(str_c(phase_name, " ", name_suffix)) %in% names(times)) {
if (name_suffix == "") {
name_suffix <- 2
} else {
name_suffix <- name_suffix + 1
}
}
mins <- as.numeric(end_time - start_time, units = "mins")
times[[str_trim(str_c(phase_name, " ", name_suffix))]] <<- mins
# Double assignment sets start_time at the top level
start_time <<- Sys.time()
return(mins)
}
# TODO what to do when func doesn't have a save_target in it?
# Closure handed to the method: persists one result instance, records its
# hash/build-time metadata in the cache, and resets the phase timers.
save_target <- function(result, ...) {
timer_phase_end("Processing")
printer("Saving ", list_to_str(list(...)), "...")
# TODO: error when unnecessary dimensions provided?
filepath <- encode_spec(list(...), filepath_spec_partial)
metadata <- save_target_result(filepath, result)
timer_phase_end("Saving")
upsert_target_cache(
cache = cache,
target = path_ext_remove(filepath),
val = list(
hash = trackables_hash,
build_min = times,
metadata = metadata
)
)
times <<- list()
start_time <<- Sys.time()
invisible()
}
# Special values for use inside method (note: `cat` is rebound to the
# per-job printer; the real cat stays reachable as `.realcat`)
environment(pure_method$value) %>%
env_bind(
.dimensions = these_dims,
.cache = cache,
.realcat = cat,
cat = printer,
save_target = save_target,
timer_phase_end = timer_phase_end
)
# TODO: automatic inclusion of S3 functions in packages?
# like the following:
# months <- lubridate:::months.numeric
# packages_to_load <- pure_method$trackables$globals %>%
# map_chr("package") %>%
# unique()
# TODO: futures return the cache update item, target() collects
# and writes yaml -- save_target does not write yaml
# Git 'r dun
this_future <- future({
loaded_args <- map(args, "load") %>%
map(do.call, args = these_dims)
# TODO: only do this if there *were* dependencies to load
timer_phase_end("Loading dependencies")
do.call(pure_method$value, loaded_args)
# Probably a good time to garbage collect
rm(loaded_args)
gc()
printer("Complete!")
})
return(this_future)
}
#' @importFrom future resolve resolved
print_resolved_futures <- function(futures_list) {
  # Relay the output of any newly finished jobs and mark them as printed,
  # returning the updated job list.
  map(futures_list, function(job) {
    if (!job$printed && resolved(job$future)) {
      # Resolving with stdout/signal relays the job's logging as soon as we
      # notice it finished, keeping output as contemporaneous as possible.
      resolve(job, stdout = TRUE, signal = TRUE)
      job$printed <- TRUE
    }
    job
  })
}
# Dummy function
# Placeholder exported so `save_target` resolves outside a build; run_target()
# env_bind()s a real implementation into the method's environment at run time.
#' @export
save_target <- function(...) {
stop("Please only use save_target() inside a target!")
}
# Messages appended at random to "Target is up to date." log lines.
encouragement <- c(
"Huzzah!",
"Great news :)",
"Good job.",
"Nice work.",
"Chill B^)",
"Stan SOPHIE and 100 gecs",
"You're doing a great job ;^)",
"Remember to drink water!"
)
# crayon styles cycled across concurrent jobs so their interleaved log
# lines are visually distinguishable.
job_colors <- list(
# crayon::red, # Looks too much like an error
crayon::green,
crayon::yellow,
crayon::blue,
crayon::magenta,
crayon::cyan,
crayon::green$underline,
crayon::yellow$underline,
crayon::blue$underline,
crayon::magenta$underline,
crayon::cyan$underline
)
# Format a named list as `name="value", name2="value2"` for log output.
# Unnamed elements are keyed by their position (imap's fallback).
list_to_str <- function(l) {
  pieces <- imap(l, function(value, key) str_c(key, '="', value, '"'))
  str_c(pieces, collapse = ", ")
}
|
ffba049890e00918202e93588a5119053690cc1e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tclust/examples/ctlcurves.Rd.R
|
d841e7e1cb84abf5892b07f8455f253d929eaf90
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,438
|
r
|
ctlcurves.Rd.R
|
# Example code for tclust::ctlcurves, apparently auto-extracted from the
# package's Rd examples; only the seed-setting below executes -- the example
# bodies live in the `## Not run:` block that follows.
library(tclust)
### Name: ctlcurves
### Title: Classification Trimmed Likelihood Curves
### Aliases: ctlcurves print.ctlcurves
### Keywords: hplot multivariate robust cluster
### ** Examples
## Don't show:
set.seed (0)
## End(Don't show)
## Not run:
##D #--- EXAMPLE 1 ------------------------------------------
##D
##D sig <- diag (2)
##D cen <- rep (1, 2)
##D x <- rbind(mvtnorm::rmvnorm(108, cen * 0, sig),
##D mvtnorm::rmvnorm(162, cen * 5, sig * 6 - 2),
##D mvtnorm::rmvnorm(30, cen * 2.5, sig * 50)
##D )
##D
##D ctl <- ctlcurves (x, k = 1:4)
##D
##D ## ctl-curves
##D plot (ctl) ## --> selecting k = 2, alpha = 0.08
##D
##D ## the selected model
##D plot (tclust (x, k = 2, alpha = 0.08, restr.fact = 7))
##D
##D #--- EXAMPLE 2 ------------------------------------------
##D
##D data (geyser2)
##D ctl <- ctlcurves (geyser2, k = 1:5)
##D
##D ## ctl-curves
##D plot (ctl) ## --> selecting k = 3, alpha = 0.08
##D
##D ## the selected model
##D plot (tclust (geyser2, k = 3, alpha = 0.08, restr.fact = 5))
##D
##D
##D #--- EXAMPLE 3 ------------------------------------------
##D
##D data (swissbank)
##D ctl <- ctlcurves (swissbank, k = 1:5, alpha = seq (0, 0.3, by = 0.025))
##D
##D ## ctl-curves
##D plot (ctl) ## --> selecting k = 2, alpha = 0.1
##D
##D ## the selected model
##D plot (tclust (swissbank, k = 2, alpha = 0.1, restr.fact = 50))
## End(Not run)
|
9ba60ef5bd08805b36fcb97b10580f37b9aad7b9
|
d11815dd635c4ce6f1f7adb3449ae616d0e9423c
|
/ARMAParamEstimation.R
|
e9f2603c1e92414aae3c11f083913a6af5c1f35c
|
[] |
no_license
|
erv4gen/DS-TimeSeries
|
0cae69247230221e02e26c01700c6bbd4d8afb96
|
0c51e7c4fbcce5ce162cc11a4a4931713f69c15a
|
refs/heads/master
| 2020-04-16T22:07:00.291912
| 2019-06-08T19:04:14
| 2019-06-08T19:04:14
| 165,953,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,295
|
r
|
ARMAParamEstimation.R
|
#Source : https://www.youtube.com/channel/UCpviBv-De2_oeuSU_b968BQ
library(mgcv)
dat <-read.csv('c:\\data\\Datasets\\Medicine\\example_patients_in_emergency\\govhack3.csv'
,header = TRUE
,stringsAsFactors = FALSE,skip = 1)
dat <- dat[,1:2]
dat$Date <- as.Date(dat$Date,format = "%d-%b-%Y")
dat$Attendance <- as.integer(dat$Attendance)
attach(dat)
Attendance.ts <-sqrt(Attendance+3/8)
time.pts <- c(1:length(Attendance))
time.pts <- c(time.pts-min(time.pts)/max(time.pts))
month <-as.factor(months(Date))
week <-as.factor(weekdays(Date))
gam.fit.seastr.2 <- gam(Attendance.ts ~s(time.pts)+month+week)
gam.fit.seastr.2.fitted <-fitted(gam.fit.seastr.2)
resid.process <- Attendance.ts - gam.fit.seastr.2.fitted
par(mfrow = c(2,1))
acf(resid.process,lag.max = 12*4, main="ACF:Resid")
pacf(resid.process,lag.max = 12*4, main="PACF:Resid plot")
##Fit the AR(q) process for q<=order.max
mod <- ar(resid.process,order.max=20)
print(mod$order)
summary(mod)
##Plot AIC values on the log scale to easy identify minimum
plot(c(0:20),
mod$aic,type='b',log='y',
xlab='order',ylab='log-AIC')
#Are the roots in fitted AR within the unit circle?
roots <- polyroot(c(1,(-mod$ar)))
plot(roots,xlim=c(-1.2,1.2),ylim=c(-1.2,1.2))
resid <- mod$resid[(mod$order+1):length(mod$resid)]
#Plot the residuals
par(mfrow=c(2,2))
plot(resid,xlab='',ylab='Model Residuals')
acf(resid,main='ACF')
pacf(resid,main='PACF')
qqnorm(resid)
#Fit ARMA(1,1)
modarma <-arima(resid.process,order=c(1,0,1),method='ML')
par(mfrow=c(2,2))
plot(resid(modarma),ylab='Std Resid')
abline(h=0)
acf(resid(modarma,main='ACF'))
pacf(as.vector(resid(modarma)),main='PACF')
qqnorm(resid(modarma))
qqline(resid(modarma))
##ORDER Selection
#Use EACF
library(TSA)
eacf(resid.process,ar.max=6,ma.max = 6)
#use AICC
n <- length(resid.process)
norder <- 6
p <- c(1:norder) - 1 ; q = c(1:norder) - 1
aic <- matrix(0,norder,norder)
for(i in 1:norder) {
for(j in 1:norder) {
modij <- arima(resid.process,order= c(p[i],0,q[j]),method='ML')
aic[i,j] <- modij$aic - 2*(p[i]+q[j] +1) +2 * (p[i]+q[j]+1)*n/(n-p[i]-q[j]-2)
}
}
#Which order to select?
aicv <-as.vector(aic)
plot(aicv,ylab='AIC val')
indexp <- rep(c(1:norder),norder)
indexq <- rep(c(1:norder), each=norder)
indexaic <- which(aicv==min(aicv))
porder <- indexp[indexaic] -1
qorder <- indexq[indexaic] -1
#Final Model
final_model <- arima(resid.process,
order = c(porder,0,qorder),
method='ML')
par(mfrow=c(2,2))
plot(resid(final_model),ylab='Std Resid')
abline(h=0)
acf(resid(final_model,main='ACF'))
pacf(as.vector(resid(final_model)),main='PACF')
qqnorm(resid(final_model))
qqline(resid(final_model))
###Test for independence for final model
Box.test(final_model$residuals,
lag=(porder+qorder+1),
type='Box-Pierce',
fitdf=(porder+qorder))
Box.test(final_model$residuals,
lag=(porder+qorder+1),
type='Ljung-Box',
fitdf=(porder+qorder))
### Test for independence for smaller model
Box.test(modarma$residuals,
lag=(porder+qorder+1),
type='Box-Pierce',
fitdf=(porder+qorder))
Box.test(modarma$residuals,
lag=(porder+qorder+1),
type='Ljung-Box',
fitdf=(porder+qorder))
|
87d1ae814c16a5aa07a17cb31a4f820a4221d30e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ade4/examples/statico.Rd.R
|
773b0a76ac371cdb0e0172e39df1661acc98c65f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
r
|
statico.Rd.R
|
library(ade4)
### Name: statico
### Title: STATIS and Co-Inertia : Analysis of a series of paired
### ecological tables
### Aliases: statico
### Keywords: multivariate
### ** Examples
data(meau)
wit1 <- withinpca(meau$env, meau$design$season, scan = FALSE, scal = "total")
spepca <- dudi.pca(meau$spe, scale = FALSE, scan = FALSE, nf = 2)
wit2 <- wca(spepca, meau$design$season, scan = FALSE, nf = 2)
kta1 <- ktab.within(wit1, colnames = rep(c("S1","S2","S3","S4","S5","S6"), 4))
kta2 <- ktab.within(wit2, colnames = rep(c("S1","S2","S3","S4","S5","S6"), 4))
statico1 <- statico(kta1, kta2, scan = FALSE)
plot(statico1)
kplot(statico1)
|
f0ef79432c60dde35aa1e3aea79eae658b6091ba
|
47d22168f647e7d8e4b07732438418351da61f92
|
/Hierarcial Clustering.R
|
1b9a911d2e691c2551e11577354c4a84b5241ece
|
[] |
no_license
|
NomanAhsan/DS_Assignment_Task
|
45cbcdacd7eb349418d77c5bdfc1aebd3dc9a624
|
ba9a8def07b5871f85696a1cfac3fc4480486e43
|
refs/heads/master
| 2022-07-26T13:03:00.472697
| 2020-05-19T06:22:43
| 2020-05-19T06:22:43
| 264,996,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 832
|
r
|
Hierarcial Clustering.R
|
#Hierarchical Clustering
getwd()
install.packages('cluster')
#importing Dataset
dataset <- read.csv('Mall_Customers.csv')
X <- dataset[4:5]
#Finding Optimal number of cluster through dendogram
dendogram <- hclust(dist(X,method = 'euclidean'), method = 'ward.D')
plot(dendogram,
main = paste('Dendogram'),
xlab = 'Customers',
ylab = 'Eculidean Distance')
#Applying hierarchical clustering on mall dataset
HC <- hclust(dist(X,method = 'euclidean'), method = 'ward.D')
Y_HC <- cutree(dendogram, 5)
Y_HC
#Visualization
library(cluster)
clusplot(X,
Y_HC,
lines = 0,
shade = TRUE,
color = TRUE,
labels= 2,
plotchar = FALSE,
span = TRUE,
main = paste('Clusters of customers'),
xlab = 'Annual Income',
ylab = 'Spending Score')
|
cc1f69297f46ebae492d98887976bb5aa89598d0
|
1ee90109eb327d979b228174d4c310022439ed0b
|
/modules/map.R
|
1db206e0413d6210c4117a60b5a4eeeff98b5c6e
|
[] |
no_license
|
jakubsob/plants
|
d66346d7360c44d95fb8c49edb57287b234f3cb9
|
bef449f4d62f23250256d2474ceb8ab719f7b8d3
|
refs/heads/master
| 2023-04-30T16:41:25.054063
| 2021-05-13T15:49:42
| 2021-05-13T15:49:42
| 365,287,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,746
|
r
|
map.R
|
box::use(
shiny[...],
shiny.fluent[...],
leaflet[...],
maps[...],
dplyr[...],
purrr[...],
glue[...],
owmr[...],
promises[...],
future[...],
ui_utils = ./ui_utils[card]
)
#' @export
ui <- function(id) {
ns <- NS(id)
reactOutput(ns("info"))
}
#' @export
server <- function(map_id) {
moduleServer(map_id, function(input, output, session) {
ns <- session$ns
data_manager <- session$userData$data_manager
selected <- session$userData$selected
icon <- makeIcon("www/plant.png", iconWidth = 32, iconHeight = 32)
output$info <- renderReact({
req(!data_manager()$empty())
if (length(selected()) != 1) return(empty_info_card())
plant <- data_manager()$get(selected())
tagList(
leafletOutput(ns("map"), height = "600px"),
ui_utils$card(
"Native",
glue_collapse(plant$distributions, sep = ", ")
)
)
})
output$map <- renderLeaflet({
req(!data_manager()$empty())
plant <- data_manager()$get(selected())
native <- region_to_country(plant$distributions)
countries <- maps::world.cities %>%
filter(capital == 1, country.etc %in% native)
countries %>%
select(country = country.etc, lat, lng = long) %>%
leaflet() %>%
addProviderTiles(providers$CartoDB.Positron) %>%
addMarkers(
lng = ~ lng,
lat = ~ lat,
label = ~ country,
icon = icon
)
})
observeEvent(input$map_marker_click, {
click <- input$map_marker_click
if (is.null(click)) return()
proxy <- leafletProxy("map", session)
proxy %>% clearPopups()
future_promise({
resp <- get_current(lon = click$lng, lat = click$lat, units = "metric")
addPopups(
map = proxy,
lng = click$lng,
lat = click$lat,
popup = if (resp$cod == 200) make_whether_prompt(resp) else resp$message
)
})
})
})
}
empty_info_card <- function() {
ui_utils$card("Empty", "Select a plant from sidebar menu.", style = "height: 600px;")
}
region_to_country <- function(country) {
countries <- unique(maps::world.cities$country.etc)
map_chr(country, function(cn) {
res <- countries[map_lgl(countries, ~ grepl(.x, cn))][1]
if (length(res) == 0) return("")
res
})
}
make_whether_prompt <- function(owm_data) {
glue("
{icon(leaflet::icons(owmr::get_icon_url(owm_data$weather$icon)))}
<b>{stringr::str_to_sentence(owm_data$weather$description)}</b></br>
<b>Temp:</b> {owm_data$main$temp} °C</br>
<b>Humidity:</b> {owm_data$main$humidity}</br>
<b>Wind speed:</b> {owm_data$wind$speed}</br>
")
}
|
d239ab32f638722d3d38f23923a96aa973163cac
|
bec91c94d51586f69b5c5e8fa6ad03fd62e33e70
|
/generate_report.R
|
33a24daa36886be0889ff7c7bb5d445878c04fd3
|
[] |
no_license
|
Militeee/GDA_exam
|
83839ca97bf03093bddaf7dd84fed95d073260b3
|
a1c30591ea742bf96557b5846bec8725760510d5
|
refs/heads/master
| 2022-11-07T10:41:12.301500
| 2020-06-23T13:06:44
| 2020-06-23T13:06:44
| 261,563,582
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,370
|
r
|
generate_report.R
|
require(argparse, quietly = T)
## SQL queries
alter_range = "alter table {table}
add column range int8range"
range_update = "update {table} set \"range\"= subquery.range
from (
select (\"chr\", \"start\", \"end\") as tp,
int8range(cast(\"start\" as INT),
cast(\"end\" as INT), '[]') as range
from {table}
) as subquery
where (\"chr\", \"start\", \"end\") = subquery.tp"
overlap_syndrome = "select \"sample id\", \"sindrome\", st.\"chr\" , st.\"start\", st.\"end\", s.\"start\" as syn_start, s.\"end\" as syn_end,
((upper(st.range * s.range) - lower(st.range * s.range))::numeric / (upper(st.range) - lower(st.range)) * 100) as \"perc overlap\",
\"classe\", \"cnv value\", \"comment\"
from syndrome as s inner join {sample_table} as st on s.chr = st.chr
where st.range && s.range and st.\"cnv conf\" >= {qcoff} and ({filter})
"
gene_annotation = "
create table gene_snp_annot as (
select distinct \"sample id\",st.\"chr\", st.\"start\", st.\"end\",gn.\"gene name\" ,gn.\"start\" as gene_start, gn.\"end\" as gene_end, st.\"cnv value\",
((upper(st.range * gn.range) - lower(st.range * gn.range))::numeric / (upper(gn.range) - lower(gn.range)) * 100) as \"perc overlap\",
st.\"comment\"
from {sample_table} as st inner join gene_annotations as gn on st.\"chr\" = gn.\"chr\"
where st.range && gn.range and st.\"cnv conf\" >= {qcoff} and ({filter})
)"
overlap_ddg2p = " select dd.\"gene_symbol\", st.\"chr\",st.\"sample id\", st.\"perc overlap\", dd.\"allelic_requirement\",dd.\"ddd.category\", st.\"cnv value\", dd.\"organ.specificity.list\", st.\"comment\"
from gene_snp_annot as st inner join ddg2p as dd on st.\"gene name\" = dd.\"gene_symbol\"
"
overlap_clingen = "
select cg.\"symbol\", st.\"chr\",st.\"sample id\", st.\"perc overlap\", cg.\"haploinsufficiency\",cg.\"triplosensitivity\", st.\"cnv value\", cg.\"online_report\", st.\"comment\"
from gene_snp_annot as st inner join clingen_dosage as cg on st.\"gene name\" = cg.\"symbol\"
"
overlap_OMIM = "
select ph.\"symbol\", st.\"chr\",st.\"start\", st.\"end\", st.\"gene_start\", st.\"gene_end\",st.\"sample id\", st.\"perc overlap\", ph.\"phenotype\", st.\"cnv value\", st.\"comment\"
from gene_snp_annot as st inner join morbidmap_ok as ph on st.\"gene name\" = ph.\"symbol\"
"
get_sample = "
select *
from {sample_table}
"
get_sample_filt = "
select *
from {sample_table} as st
where st.\"cnv conf\" >= {qcoff} and ({filter})
"
## Parsing arguments
parser <- ArgumentParser()
parser$add_argument("-db", "--database", type= "character", help = "Name of the Postgres database where you want to insert/read annot tables")
parser$add_argument("-dbh", "--database-host",type= "character")
parser$add_argument("-dbp", "--database-port",type= "character")
parser$add_argument("-dbu", "--database-user",type= "character")
parser$add_argument("-dbpw", "--database-password", type= "character")
parser$add_argument("--quality-cutoff", type="integer", default=30)
parser$add_argument("--annotation-dir", type= "character", default ="annot", help = "Directory of annotation table")
parser$add_argument("--delim", type= "character", default =",", help = "Separator for annotation tables")
parser$add_argument("-f","--filename", type= "character", help= "Input file, needs to be tab separated")
parser$add_argument("-o","--out-prefix", type= "character", default = "output", help = "Output prefix to be appendend to the two output files")
parser$add_argument("--do-not-force-annot", action="store_true", default = FALSE, help = "If selected the script will not insert all the tables but only the one different from those present in the Postgres DB")
parser$add_argument("-stn","--sample-table-name", type = "character", default = "sample_table", help = "Name of sample table in the df")
parser$add_argument("--do-not-create-df", action="store_true", default = FALSE , help = "Don't write or create any table on the database, just read")
parser$add_argument("--no-filter-diploid-X", action="store_true", default = FALSE , help = "Do not filter samples with CNV value = 2 on the X chromosome")
parser$add_argument("-gv","--genome-version", type= "character", default ="hg19", help = "Version of the ref [hg38 or hg19] genome to be used for gene position")
parser$add_argument("--custom-filter", type= "character", default =NULL, help = "A custom filter for the sample file")
parser$add_argument("--save-tables", action= "store_true", default =FALSE, help = "Save an RData object with the results of the queries")
parser$add_argument("--verbose", action= "store_true", default =FALSE, help ="Show output and warnings of functions")
args <- parser$parse_args()
suppressPackageStartupMessages({
require(RPostgreSQL, quietly = T)
require(tidyverse, quietly = T)
require(glue, quietly = T)
require(kableExtra, quietly = T)
require(rmarkdown, quietly = T)
require(cowplot, quietly = T)
require(gtools, quietly = T)
require(scales, quietly = T)
require(formattable, quietly = T)})
## Connecting to the db
if(!args$verbose){
options(warn=-1)
col <- cols()
} else{
col <- NULL
}
cat("Connecting to DB")
cat("\n")
con <- dbConnect(RPostgres::Postgres(), dbname = args$database, host=args$database_host, port=args$database_port, user=args$database_user, password= args$database_password)
## Make sure to not reload all the tables if the database when not neaded
files <- dir(args$annotation_dir, full.names = T)
if(!(args$genome_version %in% c("hg19", "hg38"))) stop("Genome version should be hg19 or hg38")
files_no_hg <- grep(files, pattern = "hg", value = T, invert = T)
files_hg <- grep(files, pattern = args$genome_version, value = T)
files <- c(files_hg, files_no_hg)
nms <- sub("(.*/)","",files)
nms <- sub("\\..*","",nms)
nms <- sub(paste0("_",args$genome_version),"",nms)
tables <- dbListTables(con)
tables1 <- setdiff(nms, tables)
if(!args$do_not_create_df & (length(tables1) != 0 | !args$do_not_force_annot)){
cat("Reading and inserting annotation tables")
cat("\n")
# create tables for the annotations
for(i in seq_along(files)){
annot_df <- read_delim(files[i], delim = args$delim,col_types = col)
colnames(annot_df) <- tolower(colnames(annot_df))
if(args$verbose)
cat(paste0("Reading ", nms[i], " \n"))
dbWriteTable(con, nms[i], annot_df, overwrite = T)
# add range if the table has the right columns
if(all(c("start", "end", "chr") %in% colnames(annot_df)) ){
dbExecute(con, glue(alter_range, table = nms[i]))
dbExecute(con, glue(range_update, table = nms[i]))
}
}
}
if(is.null(args$custom_filter)){
if(args$no_filter_diploid_X){
filter = "st.\"cnv value\" <> 2 or st.chr = 'X'"
} else {
filter = " st.\"cnv value\" <> 2 "
}
} else {
filter = args$custom_filter
}
quality_cutoff <- paste(args$quality_cutoff)
## Processing sample file
cat("Processing sample table")
cat("\n")
if(!args$do_not_create_df){
sample_file <- read_delim(args$filename, delim = "\t", na = c("","NA"), col_types = col)
colnames(sample_file) <- tolower(colnames(sample_file))
dbWriteTable(con, args$sample_table_name, sample_file, overwrite = T)
k_ <- dbExecute(con, glue(alter_range, table = args$sample_table_name))
k_ <- dbExecute(con, glue(range_update, table = args$sample_table_name))
} else {
sample_file <- dbGetQuery(con, glue(get_sample, sample_table = args$sample_table_name))
}
## Calculating overlappings
cat("Calculating overlaps")
cat("\n")
syndrome_overlaps <- dbGetQuery(con, glue(overlap_syndrome, sample_table = args$sample_table_name, filter = filter, qcoff = quality_cutoff))
if(!args$do_not_create_df){
k_ <- dbExecute(con, "drop table if exists gene_snp_annot")
k_ <- dbExecute(con, glue(gene_annotation,sample_table = args$sample_table_name, qcoff = quality_cutoff, filter = filter))
}
annotated_ddg2p <- dbGetQuery(con, glue(overlap_ddg2p))
annotated_clingen <- dbGetQuery(con, glue(overlap_clingen))
annotated_morbidmap <- dbGetQuery(con, glue(overlap_OMIM))
sample_file_filtered <- dbGetQuery(con, glue(get_sample_filt,
sample_table = args$sample_table_name,
qcoff = quality_cutoff, filter = filter))
## Save datasets temporarly for rendering the report
cat("Saving temporary file")
cat("\n")
save(sample_file, sample_file_filtered, syndrome_overlaps, annotated_ddg2p,
annotated_clingen, annotated_morbidmap,file = "tables.RData")
## Prepare datasets for the summary plot
cat("Plotting")
cat("\n")
# DF for plotting the percentage of the different CNVs
plt_data1 <- sample_file_filtered %>% group_by(`cnv value`) %>%
summarize(counts = n()) %>% arrange(counts) %>% mutate(freq = counts/sum(counts), y_pos = cumsum(freq) - 0.4*freq)
# DF for plotting the percentage of amplifications and deletions
plt_data2 <- sample_file_filtered %>% mutate(del_dup = if_else(`cnv value` > 2, "ampl", "del")) %>%
mutate(del_dup = if_else(`cnv value` == 2, "norm", del_dup)) %>% group_by(del_dup) %>%
summarize(counts = n()) %>% arrange(counts) %>%
mutate(freq = counts/sum(counts), y_pos = cumsum(freq) - 0.5*cumsum(freq))
# DF for plotting the chrs prevalence of CNVs
plt_data3 <- sample_file_filtered %>% group_by(chr) %>% summarize(counts = n()) %>% mutate(chr = factor(.$chr, levels = mixedsort(.$chr)))
# DF for plotting the overlap sample ids and datasets
plt_data4 <- data.frame(syndrome = length(intersect(sample_file_filtered$`sample id`, syndrome_overlaps$`sample id`)),
ddg2p = length(intersect(sample_file_filtered$`sample id`, annotated_ddg2p$`sample id`)),
omim = length(intersect(sample_file_filtered$`sample id`, annotated_morbidmap$`sample id`)),
clingen = length(intersect(sample_file_filtered$`sample id`, annotated_clingen$`sample id`)))
plt_data4 <- suppressMessages(plt_data4 %>% reshape2::melt())
## Generate summary plots
pie_1 <- ggplot(data = plt_data1, aes(x = "", y = freq, fill = paste(`cnv value`))) + geom_bar(width = 1, stat = "identity") +
coord_polar("y", start=0)+ ggtitle("Perc of CNVs in the table") + scale_fill_discrete( "CNV value") + ylab("") + xlab("") +
theme_minimal() + theme(axis.text.x = element_blank())
pie_2 <- ggplot(data = plt_data2, aes(x = "", y = freq, fill = paste(del_dup))) + geom_bar(width = 1, stat = "identity") +
coord_polar("y", start=0)+ ggtitle("Perc of amp/del in the table") + scale_fill_discrete( "CNV value") + ylab("") + xlab("") +
theme_minimal() + theme(axis.text.x = element_blank())
hist_1 <- ggplot(data = plt_data3, aes(x = chr, y = counts/sum(counts) * 100, fill = chr)) +
geom_bar(stat = "identity", show.legend = FALSE)+ ggtitle("Distribution of CNVs in the chromosome") + ylab("") +
theme_minimal()
hist_2 <- ggplot(data = plt_data4, aes(x = variable, y = value, fill = variable)) +
geom_bar(stat = "identity", show.legend = FALSE) + ggtitle("Number of annotated samples for db") + ylab("") +
theme_minimal() + theme(axis.text.x = element_text(angle = 90))
## Save summary plots
cowplot::plot_grid(
pie_1,
pie_2,
hist_1,
hist_2,
nrow = 2, ncol = 2, align = "h"
) %>% ggsave(filename = paste0(args$out_prefix,"_summary_plots.pdf"), device = "pdf", width = 10, height = 10)
## Render the report
cat("Generating report")
cat("\n")
rmarkdown::render("print_report.Rmd", output_file = paste0(args$out_prefix,"_CNV_report.html"), quiet = T)
cat("Remove temporary files")
cat("\n")
## Remove the temporary datasets
if(!args$save_tables)
system("rm tables.RData")
cat("BYE!")
cat("\n")
quit()
|
a48d35e8138a6fc79faf28d080e7564f6757005f
|
553992ae66d19695a240b2c8df4357b09f99bb69
|
/SAMR2014/PCA/0_RequiredPackages.R
|
889ec0cc708342c432e46c8baaf4e62b6e726d5b
|
[] |
no_license
|
Alfiew/Workshops
|
839ec14d5c4b95cd39474044e9bdb2946d2dece9
|
4ac40823e13ed285bcabc44eb4449d4d1be4cd05
|
refs/heads/master
| 2023-04-19T05:48:15.096172
| 2021-04-27T01:12:07
| 2021-04-27T01:12:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,119
|
r
|
0_RequiredPackages.R
|
###Required packages
### This file will be updated periodically between now (whenever you are seeing this)
### and April 2nd (the day before the workshop!)
#####
### If you download this file now, please be sure to check back for updates
### as the conference approaches.
################################################################################################
################### ###################
#### Please be sure to check this file before the workshop. ####
################### ###################
################################################################################################
##ANOVA workshop:
install.packages('car',dependencies=TRUE)
install.packages('ez',dependencies=TRUE)
install.packages('gplot',dependencies=TRUE)
install.packages('foreign',dependencies=TRUE)
install.packages('ggplots2',dependencies=TRUE)
##PCA workshop:
##We need prettyGraphs, ExPosition, and InPosition. But that's easy:
install.packages('InPosition',dependencies=TRUE) ##InPosition depends on the others -- and will grab them all.
|
361a979c967f5bb866e4b17bcf2d3197347ad4f1
|
ca77596fe70e47acde07f303904c2c6707634f51
|
/Exploratory_Project1_Graph1.R
|
8362b50cca4aa261dec977352187e10acf592e42
|
[] |
no_license
|
omt88/Exploratory_DA_Project1
|
a0936889cc3c6edfd6a13e10490d9e5b8b9acc3b
|
aaaf57efbdc2ed1d54f8fafcce16f9deed67c1cb
|
refs/heads/master
| 2020-05-18T05:51:33.826160
| 2015-04-11T15:34:47
| 2015-04-11T15:34:47
| 33,781,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 834
|
r
|
Exploratory_Project1_Graph1.R
|
## Exploratory Data Analysis
## Project 1
setwd("D:/Users/omartineztr/Desktop/CEMEX/Coursera/Data Science Specialization/04 Exploratory Data Analysis/Projects/Project 1")
Dataset <- data.frame(
read.table(
"household_power_consumption.txt",
header=TRUE,
sep =";",
na.strings="?",
)
)
Dataset$Date2 <- as.Date(Dataset$Date, format = "%d/%m/%Y")
Datasubset <- subset(Dataset, Date2 >= "2007-02-01" & Date2 <= "2007-02-02")
hist(Datasubset$Global_active_power,
col = "red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)",
ylab = "Frequency"
)
dev.copy(png, file="plot1.png", height = 480, width = 480)
dev.off()
|
3b291ab9241d5a4697134a714e48c2a085780cd5
|
361e8f9adb76dba05bad3d585bdd16980c07d2ae
|
/man/geocodes.Rd
|
703cc5cd2ad3e7b4ce4a118769b2aa53390b84b7
|
[
"MIT"
] |
permissive
|
junkka/movement
|
7179e3a8581a36dc854a0b5970bf9b5f8a19a5d2
|
bc713dc2e165f3135c8c369e9042e42de8b518ee
|
refs/heads/master
| 2021-01-01T17:48:55.693909
| 2017-07-24T07:50:49
| 2017-07-24T07:50:49
| 98,161,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 472
|
rd
|
geocodes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{geocodes}
\alias{geocodes}
\title{Geocodes}
\format{A data frame with 11385 rows and 3 variables:
\describe{
\item{geoid}{int Geolocation id}
\item{lon}{num longitude }
\item{lat}{num latitude}
}}
\usage{
geocodes
}
\description{
Geocodes for places in popular movement data. Projection RT90
ESPG:2400.
}
\author{
Johan Junkka \email{johan.junkka@umu.se}
}
|
af27e5844451da0c85bc59a0147df3296e5bf2ad
|
28056fe82b5eafa30fb633082497e2920d544819
|
/cachematrix.R
|
8ce2ca87950d1f38ad0f0caea6445b13fedc06fd
|
[] |
no_license
|
AbimbolaSP/ProgrammingAssignment2
|
a041800f2f7e117d7bccaa668638df1eb9214dfb
|
273c3f79d9e992e53358df4c0ee903a762260620
|
refs/heads/master
| 2021-08-15T20:48:00.380752
| 2017-11-18T08:38:29
| 2017-11-18T08:38:29
| 111,094,348
| 0
| 0
| null | 2017-11-17T11:08:43
| 2017-11-17T11:08:42
| null |
UTF-8
|
R
| false
| false
| 1,072
|
r
|
cachematrix.R
|
## Two functions - makeCacheMatrix & cacheSolve - are created to compute and
## cache the inverse of a matrix when the matrix is recurring.
## The first function creates an object that can cache the inverse of a matrix:
## sets the content of the matrix
## gets the content of the matrix
## sets the inverse
## gets the inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set=set, get=get,
setinverse=setinverse,
getinverse=getinverse)
}
## The second function calculates the inverse of the matrix called above, if non-existent,
## otherwise retrieves cached inverse and returns the inverse of the matrix
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if (!is.null(i)) {
message("get cached data")
return(x)
}
data <- x$get()
i <- solve(data,...)
x$setinverse(i)
i ## Returns a matrix that is the inverse of 'x'
}
|
18607cb7f49985268fbb32961d6a2bc2b786394c
|
cb5a28b5c2e21f9eef15cbbe802db5c802a5e81f
|
/code/schedR/man/FracWorkingPlot.Rd
|
2364dd2fd1b3bccf97f1eda672ce5baeacf543cc
|
[] |
no_license
|
johnjosephhorton/sched
|
96f50b255daa5546e66e26a6b22a74ed1f942e5b
|
a5066c4ff1d123c77450b9688a8f5a377e1539c5
|
refs/heads/master
| 2021-01-13T16:31:48.438858
| 2017-01-19T05:07:20
| 2017-01-19T05:07:20
| 44,535,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 351
|
rd
|
FracWorkingPlot.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/FracWorkingPlot.R
\name{FracWorkingPlot}
\alias{FracWorkingPlot}
\title{Fraction of retail workers working on the sample day, over time}
\usage{
FracWorkingPlot()
}
\value{
data frame
}
\description{
Fraction of retail workers working on the same day, over time
}
|
32bc6a2a6589b8c7b433739fd190be2fe888759a
|
8e20060c5475f00e9a513f76725bcf6e54f2068a
|
/man/subgraph_isomorphisms.Rd
|
99c72475842b871cf4a42a2b3c81eba7e019cb19
|
[] |
no_license
|
DavisVaughan/rigraph
|
8cc1b6c694ec03c1716d8b471d8f910e08c80751
|
a28ac7fe7b45323a38ffe1f13843bb83bdb4278f
|
refs/heads/master
| 2023-07-18T20:34:16.631540
| 2021-09-20T22:55:53
| 2021-09-20T22:55:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,014
|
rd
|
subgraph_isomorphisms.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topology.R
\name{subgraph_isomorphisms}
\alias{subgraph_isomorphisms}
\alias{graph.get.subisomorphisms.vf2}
\title{All isomorphic mappings between a graph and subgraphs of another graph}
\usage{
subgraph_isomorphisms(pattern, target, method = c("lad", "vf2"), ...)
}
\arguments{
\item{pattern}{The smaller graph, it might be directed or
undirected. Undirected graphs are treated as directed graphs with
mutual edges.}
\item{target}{The bigger graph, it might be directed or
undirected. Undirected graphs are treated as directed graphs with
mutual edges.}
\item{method}{The method to use. Possible values: \sQuote{auto},
\sQuote{lad}, \sQuote{vf2}. See their details below.}
\item{...}{Additional arguments, passed to the various methods.}
}
\value{
A list of vertex sequences, corresponding to all
mappings from the first graph to the second.
}
\description{
All isomorphic mappings between a graph and subgraphs of another graph
}
\section{\sQuote{lad} method}{
This is the LAD algorithm by Solnon, see the reference below. It has
the following extra arguments:
\describe{
\item{domains}{If not \code{NULL}, then it specifies matching
restrictions. It must be a list of \code{target} vertex sets, given
as numeric vertex ids or symbolic vertex names. The length of the
list must be \code{vcount(pattern)} and for each vertex in
\code{pattern} it gives the allowed matching vertices in
\code{target}. Defaults to \code{NULL}.}
\item{induced}{Logical scalar, whether to search for an induced
subgraph. It is \code{FALSE} by default.}
\item{time.limit}{The processor time limit for the computation, in
seconds. It defaults to \code{Inf}, which means no limit.}
}
}
\section{\sQuote{vf2} method}{
This method uses the VF2 algorithm by Cordella, Foggia et al., see
references below. It supports vertex and edge colors and have the
following extra arguments:
\describe{
\item{vertex.color1, vertex.color2}{Optional integer vectors giving the
colors of the vertices for colored graph isomorphism. If they
are not given, but the graph has a \dQuote{color} vertex attribute,
then it will be used. If you want to ignore these attributes, then
supply \code{NULL} for both of these arguments. See also examples
below.}
\item{edge.color1, edge.color2}{Optional integer vectors giving the
colors of the edges for edge-colored (sub)graph isomorphism. If they
are not given, but the graph has a \dQuote{color} edge attribute,
then it will be used. If you want to ignore these attributes, then
supply \code{NULL} for both of these arguments.}
}
}
\seealso{
Other graph isomorphism:
\code{\link{count_isomorphisms}()},
\code{\link{count_subgraph_isomorphisms}()},
\code{\link{graph_from_isomorphism_class}()},
\code{\link{isomorphic}()},
\code{\link{isomorphism_class}()},
\code{\link{isomorphisms}()},
\code{\link{subgraph_isomorphic}()}
}
\concept{graph isomorphism}
|
95c9193b9eb6c192b9f452319483901e31354047
|
de85e86d2c7724c264918cc1518f3745d00273b4
|
/r-project/original-code-and-results/Figure 4/Figure 4 final.R
|
e90e6a1fdd77e0c7b254cd0fdfe28ed98ffb6eb9
|
[
"MIT"
] |
permissive
|
amirmasoudabdol/bakker-et-al-2012-reproduction-using-sam
|
44571058f6797f5916ae57e2d1e045949949ce3f
|
518ab1cebaa80c19a12e92db8ae87386512ae053
|
refs/heads/main
| 2023-08-02T10:24:22.850001
| 2021-09-18T11:37:02
| 2021-09-18T11:37:02
| 391,968,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,523
|
r
|
Figure 4 final.R
|
#This R-code is used to generate Figure 4 and the expected values of the meta-
#analyses performed for this figure. If you only want the Figure, set nSim on 1
#(more efficient).
#required packages
library(MASS)
library(meta)
library(metafor)
library(gplots)
#variables
es=c(0,.2,.5,.8) #selected values of underlying ES
nSamp=100 #number of studies included in each meta analysis
cbdv=.5 #correlation between the two dependent variables
nStudies=5 #maximum number of small studies performed
nSim=10 #number of simulations to create expected values of these meta analyses
nb=10 #number of subjects added (per cell)
#functions
power=function(A,s,alpha,df){1 - pt (qt (1-alpha/2, df)-A/s, df) + pt (-qt(1-alpha/2, df)-A/s, df)}
p0=function(x){ifelse(x<.001,"p<.001",paste("p=",round(x,3),sep=""))}
#simulation
#create arrays for the expected values of the metaanalyses
RExVal=PRExVal=ESExVal=QExVal=PQExVal=IExVal=PIExVal=array(NA,c(length(es),4,nSim))
for(l in 1:nSim)
{
#create arrays for the study characteristics
resL=resLQRP=resS=resSQRP=array(NA,c(length(es),nSamp,10)) ##
for(i in 1:length(es))
{
for(j in 1:nSamp)
{
#draw total sample size of small study
tS=rnbinom(1,mu=30,size=2)+10
tC=round(tS/2) #cell size of small study
tL=nStudies*tC #cell size of large study
resk=reskQRP=matrix(NA,5,10) #create matrix for results 5 small studies
#perform 'nStudies' studies
for(k in 1:nStudies)
{
g1=mvrnorm(tC,rep(es[i],2),matrix(c(1,cbdv,cbdv,1),2,2))
g2=mvrnorm(tC,rep(0,2),matrix(c(1,cbdv,cbdv,1),2,2))
m1=mean(g1[,1])
m2=mean(g2[,1])
sd1=sd(g1[,1])
sd2=sd(g2[,1])
obsES=(m1-m2)/sqrt(.5*(var(g1[,1])+var(g2[,1])))
vi=(tC+tC)/(tC*tC)+obsES^2/(tC+tC)
sei=sqrt(vi)
pv=t.test(g1[,1],g2[,1],var.equal=T)$p.value
resk[k,]=c(tC,m1,m2,sd1,sd2,obsES,vi,sei,pv,tC)
#QRP cascade for a small study: if the primary DV is not significantly
#positive, successively (1) test the second DV, (2) add nb subjects per cell,
#(3) remove outliers (|z|>2), and finally (4) report the best-looking result.
if(pv>.05|obsES<0)
{
#test second dependent variable
m1_2=mean(g1[,2])
m2_2=mean(g2[,2])
sd1_2=sd(g1[,2])
sd2_2=sd(g2[,2])
obsES_2=(m1_2-m2_2)/sqrt(.5*(var(g1[,2])+var(g2[,2])))
vi_2=(tC+tC)/(tC*tC)+obsES_2^2/(tC+tC)
sei_2=sqrt(vi_2)
pv_2=t.test(g1[,2],g2[,2],var.equal=T)$p.value
if(pv_2>.05|obsES_2<0)
{
#add nb extra subjects per cell for both dependent variables
#NOTE(review): extra subjects are drawn with mean es[1] (=0) rather than
#es[i]; the large-study branch does the same -- confirm this is intended.
g1_set2=mvrnorm(nb,rep(es[1],2),matrix(c(1,cbdv,cbdv,1),2,2))
g2_set2=mvrnorm(nb,rep(0,2),matrix(c(1,cbdv,cbdv,1),2,2))
g1_3=rbind(g1,g1_set2)
g2_3=rbind(g2,g2_set2)
pv_3_v1=t.test(g1_3[,1],g2_3[,1],var.equal=T)$p.value
pv_3_v2=t.test(g1_3[,2],g2_3[,2],var.equal=T)$p.value
obsES_3_v1=(mean(g1_3[,1])-mean(g2_3[,1]))/sqrt(.5*(var(g1_3[,1])+var(g2_3[,1])))
obsES_3_v2=(mean(g1_3[,2])-mean(g2_3[,2]))/sqrt(.5*(var(g1_3[,2])+var(g2_3[,2])))
m1_3_v1=mean(g1_3[,1])
m2_3_v1=mean(g2_3[,1])
sd1_3_v1=sd(g1_3[,1])
sd2_3_v1=sd(g2_3[,1])
vi_3_v1=(tC+tC+2*nb)/((tC+nb)*(tC+nb))+obsES_3_v1^2/(tC+tC+2*nb)
sei_3_v1=sqrt(vi_3_v1)
m1_3_v2=mean(g1_3[,2])
m2_3_v2=mean(g2_3[,2])
sd1_3_v2=sd(g1_3[,2])
sd2_3_v2=sd(g2_3[,2])
vi_3_v2=(tC+tC+2*nb)/((tC+nb)*(tC+nb))+obsES_3_v2^2/(tC+tC+2*nb)
sei_3_v2=sqrt(vi_3_v2)
#bug fix: '<0' was missing after obsES_3_v2, so any nonzero ES was coerced
#to TRUE; the pattern everywhere else is (p>.05 | ES<0)
if((pv_3_v1>.05|obsES_3_v1<0)&(pv_3_v2>.05|obsES_3_v2<0))
{
#remove outliers
g1_4=g1_3
g2_4=g2_3
g1_4[which(abs((g1_4[,1]-mean(g1_4[,1]))/sd(g1_4[,1]))>2)]=NA
g2_4[which(abs((g2_4[,1]-mean(g2_4[,1]))/sd(g2_4[,1]))>2)]=NA
g1_4[which(abs((g1_4[,2]-mean(g1_4[,2]))/sd(g1_4[,2]))>2)]=NA
g2_4[which(abs((g2_4[,2]-mean(g2_4[,2]))/sd(g2_4[,2]))>2)]=NA
pv_4_v1=t.test(g1_4[,1],g2_4[,1],var.equal=T,na.rm=T)$p.value
pv_4_v2=t.test(g1_4[,2],g2_4[,2],var.equal=T,na.rm=T)$p.value
obsES_4_v1=(mean(g1_4[,1],na.rm=T)-mean(g2_4[,1],na.rm=T))/sqrt(.5*(var(g1_4[,1],na.rm=T)+var(g2_4[,1],na.rm=T)))
obsES_4_v2=(mean(g1_4[,2],na.rm=T)-mean(g2_4[,2],na.rm=T))/sqrt(.5*(var(g1_4[,2],na.rm=T)+var(g2_4[,2],na.rm=T)))
TC4_1_v1=length(na.omit(g1_4[,1]))
TC4_2_v1=length(na.omit(g2_4[,1]))
TC4_1_v2=length(na.omit(g1_4[,2]))
TC4_2_v2=length(na.omit(g2_4[,2]))
m1_4_v1=mean(g1_4[,1],na.rm=T)
m2_4_v1=mean(g2_4[,1],na.rm=T)
sd1_4_v1=sd(g1_4[,1],na.rm=T)
sd2_4_v1=sd(g2_4[,1],na.rm=T)
vi_4_v1=(length(na.omit(g1_4[,1]))+length(na.omit(g2_4[,1])))/((length(na.omit(g1_4[,1])))*(length(na.omit(g2_4[,1]))))+obsES_4_v1^2/(length(na.omit(g1_4[,1]))+length(na.omit(g2_4[,1])))
sei_4_v1=sqrt(vi_4_v1)
m1_4_v2=mean(g1_4[,2],na.rm=T)
m2_4_v2=mean(g2_4[,2],na.rm=T)
sd1_4_v2=sd(g1_4[,2],na.rm=T)
sd2_4_v2=sd(g2_4[,2],na.rm=T)
vi_4_v2=(length(na.omit(g1_4[,2]))+length(na.omit(g2_4[,2])))/((length(na.omit(g1_4[,2])))*(length(na.omit(g2_4[,2]))))+obsES_4_v2^2/(length(na.omit(g1_4[,2]))+length(na.omit(g2_4[,2])))
sei_4_v2=sqrt(vi_4_v2)
#bug fix: '<0' was missing after obsES_4_v2 (same coercion problem as above)
if((pv_4_v1>.05|obsES_4_v1<0)&(pv_4_v2>.05|obsES_4_v2<0))
{
#if none is significant select the best result
all_ps=c(pv,pv_2,pv_3_v1,pv_3_v2,pv_4_v1,pv_4_v2)
all_es=c(obsES,obsES_2,obsES_3_v1,obsES_3_v2,obsES_4_v1,obsES_4_v2)
if(length(which(all_es>0))>0)
{
p_5=min(all_ps[all_es>0])
}else{
p_5=max(all_ps[all_es<0])
}
obsES_5=all_es[which(all_ps==p_5)][1]
w=which(all_ps==p_5)[1]
if(w==1)
{
reskQRP[k,]=c(tC,m1,m2,sd1,sd2,obsES,vi,sei,pv,tC)
}
if(w==2)
{
reskQRP[k,]=c(tC,m1_2,m2_2,sd1_2,sd2_2,obsES_2,vi_2,sei_2,pv_2,tC)
}
if(w==3)
{
reskQRP[k,]=c(tC+10,m1_3_v1,m2_3_v1,sd1_3_v1,sd2_3_v1,obsES_3_v1,vi_3_v1,sei_3_v1,pv_3_v1,tC+10)
}
if(w==4)
{
reskQRP[k,]=c(tC+10,m1_3_v2,m2_3_v2,sd1_3_v2,sd2_3_v2,obsES_3_v2,vi_3_v2,sei_3_v2,pv_3_v2,tC+10)
}
if(w==5)
{
reskQRP[k,]=c(TC4_1_v1,m1_4_v1,m2_4_v1,sd1_4_v1,sd2_4_v1,obsES_4_v1,vi_4_v1,sei_4_v1,pv_4_v1,TC4_2_v1)
}
if(w==6)
{
reskQRP[k,]=c(TC4_1_v2,m1_4_v2,m2_4_v2,sd1_4_v2,sd2_4_v2,obsES_4_v2,vi_4_v2,sei_4_v2,pv_4_v2,TC4_2_v2)
}
}else{
#a positive-ES result survived outlier removal: report the best of the two DVs
w2=which(c(pv_4_v1,pv_4_v2)==min(c(pv_4_v1,pv_4_v2)[which(c(obsES_4_v1,obsES_4_v2)>0)]))
if(w2==1)
{
reskQRP[k,]=c(TC4_1_v1,m1_4_v1,m2_4_v1,sd1_4_v1,sd2_4_v1,obsES_4_v1,vi_4_v1,sei_4_v1,pv_4_v1,TC4_2_v1)
}
if(w2==2)
{
reskQRP[k,]=c(TC4_1_v2,m1_4_v2,m2_4_v2,sd1_4_v2,sd2_4_v2,obsES_4_v2,vi_4_v2,sei_4_v2,pv_4_v2,TC4_2_v2)
}
}
}else{
#a positive-ES result appeared after adding subjects: report the better DV
w3=which(c(pv_3_v1,pv_3_v2)==min(c(pv_3_v1,pv_3_v2)[which(c(obsES_3_v1,obsES_3_v2)>0)]))
if(w3==1)
{
reskQRP[k,]=c(tC+10,m1_3_v1,m2_3_v1,sd1_3_v1,sd2_3_v1,obsES_3_v1,vi_3_v1,sei_3_v1,pv_3_v1,tC+10)
}
if(w3==2)
{
reskQRP[k,]=c(tC+10,m1_3_v2,m2_3_v2,sd1_3_v2,sd2_3_v2,obsES_3_v2,vi_3_v2,sei_3_v2,pv_3_v2,tC+10)
}
}
}else{
#second DV was significantly positive: report it
reskQRP[k,]=c(tC,m1_2,m2_2,sd1_2,sd2_2,obsES_2,vi_2,sei_2,pv_2,tC)
}
}else{
#primary DV was significantly positive: no QRPs needed
reskQRP[k,]=c(tC,m1,m2,sd1,sd2,obsES,vi,sei,pv,tC)
}
}
#select first significant small study with positive ES
if(length(which(resk[,6]>0&resk[,9]<.05))>0)
{
resS[i,j,]=resk[which(resk[,6]>0&resk[,9]<.05)[1],]
}else{
if(length(which(resk[,6]>0))>0)
{
minp=min(resk[which(resk[,6]>0),9])
resS[i,j,]=resk[which(resk[,9]==minp)[1],]
}else{
resS[i,j,]=resk[which(resk[,9]==max(resk[,9])),]
}
}
#select first significant small study with QRP with positive ES
if(length(which(reskQRP[,6]>0&reskQRP[,9]<.05))>0)
{
resSQRP[i,j,]=reskQRP[which(reskQRP[,6]>0&reskQRP[,9]<.05)[1],]
}else{
if(length(which(reskQRP[,6]>0))>0)
{
minpQRP=min(reskQRP[which(reskQRP[,6]>0),9])
resSQRP[i,j,]=reskQRP[which(reskQRP[,9]==minpQRP)[1],]
}else{
resSQRP[i,j,]=reskQRP[which(reskQRP[,9]==max(reskQRP[,9])),]
}
}
#large study
reskL=rep(NA,9)
g1L=mvrnorm(tL,rep(es[i],2),matrix(c(1,cbdv,cbdv,1),2,2))
g2L=mvrnorm(tL,rep(0,2),matrix(c(1,cbdv,cbdv,1),2,2))
m1L=mean(g1L[,1])
m2L=mean(g2L[,1])
sd1L=sd(g1L[,1])
sd2L=sd(g2L[,1])
obsESL=(m1L-m2L)/sqrt(.5*(var(g1L[,1])+var(g2L[,1])))
viL=(tL+tL)/(tL*tL)+obsESL^2/(tL+tL)
seiL=sqrt(viL)
pvL=t.test(g1L[,1],g2L[,1],var.equal=T)$p.value
resL[i,j,1:10]=c(tL,m1L,m2L,sd1L,sd2L,obsESL,viL,seiL,pvL,tL)
#QRP cascade for the large study, mirroring the small-study branch above:
#(1) test the second DV, (2) add nb subjects per cell, (3) remove outliers,
#(4) otherwise report the best-looking of all six results.
if(pvL>.05|obsESL<0)
{
#test second dependent variable
m1L_2=mean(g1L[,2])
m2L_2=mean(g2L[,2])
sd1L_2=sd(g1L[,2])
sd2L_2=sd(g2L[,2])
obsESL_2=(m1L_2-m2L_2)/sqrt(.5*(var(g1L[,2])+var(g2L[,2])))
viL_2=(tL+tL)/(tL*tL)+obsESL_2^2/(tL+tL)
seiL_2=sqrt(viL_2)
pvL_2=t.test(g1L[,2],g2L[,2],var.equal=T)$p.value
if(pvL_2>.05|obsESL_2<0)
{
#add nb extra subjects per cell for both dependent variables
#NOTE(review): extra subjects are drawn with mean es[1] (=0) rather than
#es[i]; this mirrors the small-study branch -- confirm it is intended.
g1L_set2=mvrnorm(nb,rep(es[1],2),matrix(c(1,cbdv,cbdv,1),2,2))
g2L_set2=mvrnorm(nb,rep(0,2),matrix(c(1,cbdv,cbdv,1),2,2))
g1L_3=rbind(g1L,g1L_set2)
g2L_3=rbind(g2L,g2L_set2)
pvL_3_v1=t.test(g1L_3[,1],g2L_3[,1],var.equal=T)$p.value
pvL_3_v2=t.test(g1L_3[,2],g2L_3[,2],var.equal=T)$p.value
obsESL_3_v1=(mean(g1L_3[,1])-mean(g2L_3[,1]))/sqrt(.5*(var(g1L_3[,1])+var(g2L_3[,1])))
obsESL_3_v2=(mean(g1L_3[,2])-mean(g2L_3[,2]))/sqrt(.5*(var(g1L_3[,2])+var(g2L_3[,2])))
m1L_3_v1=mean(g1L_3[,1])
m2L_3_v1=mean(g2L_3[,1])
sd1L_3_v1=sd(g1L_3[,1])
sd2L_3_v1=sd(g2L_3[,1])
viL_3_v1=(tL+tL+2*nb)/((tL+nb)*(tL+nb))+obsESL_3_v1^2/(tL+tL+2*nb)
seiL_3_v1=sqrt(viL_3_v1)
m1L_3_v2=mean(g1L_3[,2])
m2L_3_v2=mean(g2L_3[,2])
sd1L_3_v2=sd(g1L_3[,2])
sd2L_3_v2=sd(g2L_3[,2])
viL_3_v2=(tL+tL+2*nb)/((tL+nb)*(tL+nb))+obsESL_3_v2^2/(tL+tL+2*nb)
seiL_3_v2=sqrt(viL_3_v2)
#bug fix: '<0' was missing after obsESL_3_v2 (nonzero ES coerced to TRUE);
#the pattern everywhere else is (p>.05 | ES<0)
if((pvL_3_v1>.05|obsESL_3_v1<0)&(pvL_3_v2>.05|obsESL_3_v2<0))
{
#remove outliers
g1L_4=g1L_3
g2L_4=g2L_3
g1L_4[which(abs((g1L_4[,1]-mean(g1L_4[,1]))/sd(g1L_4[,1]))>2)]=NA
g2L_4[which(abs((g2L_4[,1]-mean(g2L_4[,1]))/sd(g2L_4[,1]))>2)]=NA
g1L_4[which(abs((g1L_4[,2]-mean(g1L_4[,2]))/sd(g1L_4[,2]))>2)]=NA
g2L_4[which(abs((g2L_4[,2]-mean(g2L_4[,2]))/sd(g2L_4[,2]))>2)]=NA
pvL_4_v1=t.test(g1L_4[,1],g2L_4[,1],var.equal=T,na.rm=T)$p.value
pvL_4_v2=t.test(g1L_4[,2],g2L_4[,2],var.equal=T,na.rm=T)$p.value
obsESL_4_v1=(mean(g1L_4[,1],na.rm=T)-mean(g2L_4[,1],na.rm=T))/sqrt(.5*(var(g1L_4[,1],na.rm=T)+var(g2L_4[,1],na.rm=T)))
obsESL_4_v2=(mean(g1L_4[,2],na.rm=T)-mean(g2L_4[,2],na.rm=T))/sqrt(.5*(var(g1L_4[,2],na.rm=T)+var(g2L_4[,2],na.rm=T)))
TCL_1_v1=length(na.omit(g1L_4[,1]))
TCL_2_v1=length(na.omit(g2L_4[,1]))
TCL_1_v2=length(na.omit(g1L_4[,2]))
TCL_2_v2=length(na.omit(g2L_4[,2]))
m1L_4_v1=mean(g1L_4[,1],na.rm=T)
m2L_4_v1=mean(g2L_4[,1],na.rm=T)
sd1L_4_v1=sd(g1L_4[,1],na.rm=T)
sd2L_4_v1=sd(g2L_4[,1],na.rm=T)
#bug fix: variance formula used the small-study obsES_4_v1 instead of the
#large-study obsESL_4_v1 (copy-paste from the small-study branch)
viL_4_v1=(length(na.omit(g1L_4[,1]))+length(na.omit(g2L_4[,1])))/((length(na.omit(g1L_4[,1])))*(length(na.omit(g2L_4[,1]))))+obsESL_4_v1^2/(length(na.omit(g1L_4[,1]))+length(na.omit(g2L_4[,1])))
seiL_4_v1=sqrt(viL_4_v1)
m1L_4_v2=mean(g1L_4[,2],na.rm=T)
m2L_4_v2=mean(g2L_4[,2],na.rm=T)
sd1L_4_v2=sd(g1L_4[,2],na.rm=T)
sd2L_4_v2=sd(g2L_4[,2],na.rm=T)
#bug fix: same copy-paste error -- obsES_4_v2 replaced by obsESL_4_v2
viL_4_v2=(length(na.omit(g1L_4[,2]))+length(na.omit(g2L_4[,2])))/((length(na.omit(g1L_4[,2])))*(length(na.omit(g2L_4[,2]))))+obsESL_4_v2^2/(length(na.omit(g1L_4[,2]))+length(na.omit(g2L_4[,2])))
seiL_4_v2=sqrt(viL_4_v2)
#bug fix: '<0' was missing after obsESL_4_v2
if((pvL_4_v1>.05|obsESL_4_v1<0)&(pvL_4_v2>.05|obsESL_4_v2<0))
{
#if none is significant select the best result
allL_ps=c(pvL,pvL_2,pvL_3_v1,pvL_3_v2,pvL_4_v1,pvL_4_v2)
allL_es=c(obsESL,obsESL_2,obsESL_3_v1,obsESL_3_v2,obsESL_4_v1,obsESL_4_v2)
if(length(which(allL_es>0))>0)
{
pL_5=min(allL_ps[allL_es>0])
}else{
pL_5=max(allL_ps[allL_es<0])
}
#bug fix: index used the small-study p_5 instead of pL_5
obsESL_5=allL_es[which(allL_ps==pL_5)][1]
wL=which(allL_ps==pL_5)[1]
if(wL==1)
{
reskL=c(tL,m1L,m2L,sd1L,sd2L,obsESL,viL,seiL,pvL,tL)
}
if(wL==2)
{
reskL=c(tL,m1L_2,m2L_2,sd1L_2,sd2L_2,obsESL_2,viL_2,seiL_2,pvL_2,tL)
}
if(wL==3)
{
reskL=c(tL+10,m1L_3_v1,m2L_3_v1,sd1L_3_v1,sd2L_3_v1,obsESL_3_v1,viL_3_v1,seiL_3_v1,pvL_3_v1,tL+10)
}
if(wL==4)
{
reskL=c(tL+10,m1L_3_v2,m2L_3_v2,sd1L_3_v2,sd2L_3_v2,obsESL_3_v2,viL_3_v2,seiL_3_v2,pvL_3_v2,tL+10)
}
if(wL==5)
{
reskL=c(TCL_1_v1,m1L_4_v1,m2L_4_v1,sd1L_4_v1,sd2L_4_v1,obsESL_4_v1,viL_4_v1,seiL_4_v1,pvL_4_v1,TCL_2_v1)
}
if(wL==6)
{
reskL=c(TCL_1_v2,m1L_4_v2,m2L_4_v2,sd1L_4_v2,sd2L_4_v2,obsESL_4_v2,viL_4_v2,seiL_4_v2,pvL_4_v2,TCL_2_v2)
}
}else{
#a positive-ES result survived outlier removal: report the better DV
wL2=which(c(pvL_4_v1,pvL_4_v2)==min(c(pvL_4_v1,pvL_4_v2)[which(c(obsESL_4_v1,obsESL_4_v2)>0)]))
if(wL2==1)
{
reskL=c(TCL_1_v1,m1L_4_v1,m2L_4_v1,sd1L_4_v1,sd2L_4_v1,obsESL_4_v1,viL_4_v1,seiL_4_v1,pvL_4_v1,TCL_2_v1)
}
if(wL2==2)
{
reskL=c(TCL_1_v2,m1L_4_v2,m2L_4_v2,sd1L_4_v2,sd2L_4_v2,obsESL_4_v2,viL_4_v2,seiL_4_v2,pvL_4_v2,TCL_2_v2)
}
}
}else{
#a positive-ES result appeared after adding subjects: report the better DV
wL3=which(c(pvL_3_v1,pvL_3_v2)==min(c(pvL_3_v1,pvL_3_v2)[which(c(obsESL_3_v1,obsESL_3_v2)>0)]))
if(wL3==1)
{
reskL=c(tL+10,m1L_3_v1,m2L_3_v1,sd1L_3_v1,sd2L_3_v1,obsESL_3_v1,viL_3_v1,seiL_3_v1,pvL_3_v1,tL+10)
}
if(wL3==2)
{
reskL=c(tL+10,m1L_3_v2,m2L_3_v2,sd1L_3_v2,sd2L_3_v2,obsESL_3_v2,viL_3_v2,seiL_3_v2,pvL_3_v2,tL+10)
}
}
}else{
#second DV was significantly positive: report it
reskL=c(tL,m1L_2,m2L_2,sd1L_2,sd2L_2,obsESL_2,viL_2,seiL_2,pvL_2,tL)
}
}else{
#primary DV was significantly positive: no QRPs needed
reskL=c(tL,m1L,m2L,sd1L,sd2L,obsESL,viL,seiL,pvL,tL)
}
resLQRP[i,j,1:10]=reskL
}
#perform meta-analyses
m1=metagen(resL[i,,6],resL[i,,8],sm="SMD")
m2=metagen(resLQRP[i,,6],resLQRP[i,,8],sm="SMD")
m3=metagen(resS[i,,6],resS[i,,8],sm="SMD")
m4=metagen(resSQRP[i,,6],resSQRP[i,,8],sm="SMD")
m1b=rma.uni(yi=resL[i,,6],sei=resL[i,,8],method="FE")
m2b=rma.uni(yi=resLQRP[i,,6],sei=resLQRP[i,,8],method="FE")
m3b=rma.uni(yi=resS[i,,6],sei=resS[i,,8],method="FE")
m4b=rma.uni(yi=resSQRP[i,,6],sei=resSQRP[i,,8],method="FE")
#collect estimated ES
ESExVal[i,1,l]=m1$TE.fixed
ESExVal[i,2,l]=m2$TE.fixed
ESExVal[i,3,l]=m3$TE.fixed
ESExVal[i,4,l]=m4$TE.fixed
#collect Q values and accompanying p values
QExVal[i,1,l]=m1$Q
QExVal[i,2,l]=m2$Q
QExVal[i,3,l]=m3$Q
QExVal[i,4,l]=m4$Q
PQExVal[i,1,l]=1-pchisq(m1$Q,99)
PQExVal[i,2,l]=1-pchisq(m2$Q,99)
PQExVal[i,3,l]=1-pchisq(m3$Q,99)
PQExVal[i,4,l]=1-pchisq(m4$Q,99)
#collect Bias Z and p values
RExVal[i,1,l]=regtest(m1b)$zval
RExVal[i,2,l]=regtest(m2b)$zval
RExVal[i,3,l]=regtest(m3b)$zval
RExVal[i,4,l]=regtest(m4b)$zval
PRExVal[i,1,l]=regtest(m1b)$pval
PRExVal[i,2,l]=regtest(m2b)$pval
PRExVal[i,3,l]=regtest(m3b)$pval
PRExVal[i,4,l]=regtest(m4b)$pval
#calculate expected values and collect observed values and perform Ioannidis test
Exp1=sum(power(m1$TE.fixed,sqrt(1/resL[i,,1]+1/resL[i,,10]),.05,Inf))
Obs1=length(which(resL[i,,9]<.05))
Exp2=sum(power(m2$TE.fixed,sqrt(1/resLQRP[i,,1]+1/resLQRP[i,,10]),.05,Inf))
Obs2=length(which(resLQRP[i,,9]<.05))
Exp3=sum(power(m3$TE.fixed,sqrt(1/resS[i,,1]+1/resS[i,,10]),.05,Inf))
Obs3=length(which(resS[i,,9]<.05))
Exp4=sum(power(m4$TE.fixed,sqrt(1/resSQRP[i,,1]+1/resSQRP[i,,10]),.05,Inf))
Obs4=length(which(resSQRP[i,,9]<.05))
IExVal[i,1,l]=(Obs1-Exp1)^2/Exp1 + (Obs1-Exp1)^2/(nSamp-Exp1)
IExVal[i,2,l]=(Obs2-Exp2)^2/Exp2 + (Obs2-Exp2)^2/(nSamp-Exp2)
IExVal[i,3,l]=(Obs3-Exp3)^2/Exp3 + (Obs3-Exp3)^2/(nSamp-Exp3)
IExVal[i,4,l]=(Obs4-Exp4)^2/Exp4 + (Obs4-Exp4)^2/(nSamp-Exp4)
PIExVal[i,1,l]=1-pchisq(IExVal[i,1,l],1)
PIExVal[i,2,l]=1-pchisq(IExVal[i,2,l],1)
PIExVal[i,3,l]=1-pchisq(IExVal[i,3,l],1)
PIExVal[i,4,l]=1-pchisq(IExVal[i,4,l],1)
}
if(l==1)
{
#create Figure 4 from first simulated meta-analysis
pdf("funnel plots final_test.pdf",16,16)
layout(matrix(c(25,17,17,17,18,18,18,19,19,19,20,20,20,
21,1,1,1,2,2,2,3,3,3,4,4,4,
21,1,1,1,2,2,2,3,3,3,4,4,4,
21,1,1,1,2,2,2,3,3,3,4,4,4,
22,5,5,5,6,6,6,7,7,7,8,8,8,
22,5,5,5,6,6,6,7,7,7,8,8,8,
22,5,5,5,6,6,6,7,7,7,8,8,8,
23,9,9,9,10,10,10,11,11,11,12,12,12,
23,9,9,9,10,10,10,11,11,11,12,12,12,
23,9,9,9,10,10,10,11,11,11,12,12,12,
24,13,13,13,14,14,14,15,15,15,16,16,16,
24,13,13,13,14,14,14,15,15,15,16,16,16,
24,13,13,13,14,14,14,15,15,15,16,16,16
),13,13,T))
for(i in 1:4)
{
m1=metagen(resL[i,,6],resL[i,,8],sm="SMD")
funnel(m1,yaxis="invse",contour.levels=.95,xlim=c(-1.5,1.5),ylim=c(2,12),col.contour="light grey",main=paste("1 large study; True ES = ",es[i],sep=""),cex=1.1,cex.lab=1.2,cex.main=1.4)
legend(-1.7,12,c(paste("Est ES =",round(ESExVal[i,1,l],3)),
paste("I-Chi(1)=",round(IExVal[i,1,l],3),", ",p0(PIExVal[i,1,l]),sep=""),
paste("Z=",round(RExVal[i,1,l],3),", ",p0(PRExVal[i,1,l]),sep="")),bty="n",cex=1)
m2=metagen(resLQRP[i,,6],resLQRP[i,,8],sm="SMD")
funnel(m2,yaxis="invse",contour.levels=.95,xlim=c(-1.5,1.5),ylim=c(2,12),col.contour="light grey",main=paste("1 large study with QRPs; True ES = ",es[i],sep=""),cex=1.1,cex.lab=1.2,cex.main=1.4)
legend(-1.7,12,c(paste("Est ES =",round(ESExVal[i,2,l],3)),
paste("I-Chi(1)=",round(IExVal[i,2,l],3),", ",p0(PIExVal[i,2,l]),sep=""),
paste("Z=",round(RExVal[i,2,l],3),", ",p0(PRExVal[i,2,l]),sep="")),bty="n",cex=1)
m3=metagen(resS[i,,6],resS[i,,8],sm="SMD")
funnel(m3,yaxis="invse",contour.levels=.95,xlim=c(-2,2),ylim=c(1,7.5),col.contour="light grey",main=paste("5 small studies; True ES = ",es[i],sep=""),cex=1.1,cex.lab=1.2,cex.main=1.4)
legend(-2.27,7.5,c(paste("Est ES =",round(ESExVal[i,3,l],3)),
paste("I-Chi(1)=",round(IExVal[i,3,l],3),", ",p0(PIExVal[i,3,l]),sep=""),
paste("Z=",round(RExVal[i,3,l],3),", ",p0(PRExVal[i,3,l]),sep="")),bty="n",cex=1)
m4=metagen(resSQRP[i,,6],resSQRP[i,,8],sm="SMD")
funnel(m4,yaxis="invse",contour.levels=.95,xlim=c(-2,2),ylim=c(1,7.5),col.contour="light grey",main=paste("5 small studies with QRPs; True ES = ",es[i],sep=""),cex=1.1,cex.lab=1.2,cex.main=1.4)
legend(-2.27,7.5,c(paste("Est ES =",round(ESExVal[i,4,l],3)),
paste("I-Chi(1)=",round(IExVal[i,4,l],3),", ",p0(PIExVal[i,4,l]),sep=""),
paste("Z=",round(RExVal[i,4,l],3),", ",p0(PRExVal[i,4,l]),sep="")),bty="n",cex=1)
}
textplot("1",cex=2.5)
textplot("2",cex=2.5)
textplot("3",cex=2.5)
textplot("4",cex=2.5)
textplot("A",cex=2.5)
textplot("B",cex=2.5)
textplot("C",cex=2.5)
textplot("D",cex=2.5)
dev.off()
}
print(l)
flush.console()
}
#save values
#save(ESExVal,file="ESExVal.dat")
#save(QExVal,file="QExVal.dat")
#save(PQExVal,file="PQExVal.dat")
#save(RExVal,file="RExVal.dat")
#save(PRExVal,file="PRExVal.dat")
#save(IExVal,file="IExVal.dat")
#save(PIExVal,file="PIExVal.dat")
#load values
#load("ESExVal.dat")
#load("QExVal.dat")
#load("PQExVal.dat")
#load("RExVal.dat")
#load("PRExVal.dat")
#load("IExVal.dat")
#load("PIExVal.dat")
#calculate expected values for tabel in appendix
ExpValAll=array(NA,c(4,4,7))
for(i in 1:4)
{
for(j in 1:4)
{
ExpValAll[i,j,1]=mean(ESExVal[i,j,])
ExpValAll[i,j,2]=mean(QExVal[i,j,])
ExpValAll[i,j,3]=length(which(PQExVal[i,j,]<.05))/nSim
ExpValAll[i,j,4]=mean(RExVal[i,j,])
ExpValAll[i,j,5]=length(which(PRExVal[i,j,]<.05))/nSim
ExpValAll[i,j,6]=mean(IExVal[i,j,])
ExpValAll[i,j,7]=length(which(PIExVal[i,j,]<.05))/nSim
}
}
round(ExpValAll[,,1],3)
round(ExpValAll[,,2],3)
round(ExpValAll[,,3],3)
round(ExpValAll[,,4],3)
round(ExpValAll[,,5],3)
round(ExpValAll[,,6],3)
round(ExpValAll[,,7],3)
#save expected values
#save(ExpValAll,file="ExpValAll.dat")
|
590e2d5ccf66d28beb48e75539fa71c35da6a891
|
8a01b427af49871c2fe03f9d0b6a7d2b6aca0cd8
|
/man/run.Rd
|
250339fa05463f6ae76c2a469c0ea467bdffc900
|
[] |
no_license
|
yxlin/commotions
|
1040342ded468655ee1be4a145bf8770b6877753
|
7cef1248bcb0cf45ffcccc8ed5a058aa5b5ac144
|
refs/heads/main
| 2023-04-07T06:27:23.984884
| 2021-03-25T00:33:15
| 2021-03-25T00:33:15
| 350,555,487
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 327
|
rd
|
run.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{run}
\alias{run}
\title{Simulate Either Walkers' and Vehicles' Trajectory}
\usage{
run(time, para, pos, goal, so, ao, ss, sa, obstacles, cd)
}
\description{
This function uses the (new) Agent class. It has not yet been fully tested.
}
|
6582dfd91c8e5c904faa3ee50d6d803699bc0a3d
|
10ef68adbb781072d456c259179eb35d3a1d26ac
|
/LPPL_parallel_GSPC_2014.R
|
31eae22f4273eba49af8d37fc5bd166072241565
|
[
"Apache-2.0"
] |
permissive
|
IgorCavaca/lppl-model-r
|
4544407d99347734a98dfd75ef072c34c7071514
|
6f0399a75a6a0bfa44456bfaa85ab0926ab59b40
|
refs/heads/master
| 2021-05-18T08:03:55.704359
| 2018-11-05T22:28:36
| 2018-11-05T22:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,854
|
r
|
LPPL_parallel_GSPC_2014.R
|
#import packages
require(foreach)
require(doParallel)
require(lubridate)
require(quantmod)
require(minpack.lm)
require(plyr)
require(gplots)
require(fPortfolio)
# clear workspace
rm(list=ls())
# dev.off()
# 2014 crash prediction
startDate = as.Date("2012-01-30")
endDate = as.Date("2014-10-01")
getSymbols("^GSPC", src="yahoo", from=startDate, to=endDate)
chartSeries(GSPC, theme='white')
guess <- list(A=2000, B=-100, C=0.5, a=0.33, tc=length(GSPC[,6])+150, w=16.36, phi=0.1)
cm <- nls.lm.control(maxiter=250, maxfev=1600)
ubnd <- c(5000,0,5,1,length(GSPC[,6])+750,Inf,Inf)
#setup parallel backend to use N processors
cl<-makeCluster(2)
registerDoParallel(cl)
#start time
timer0 <- proc.time()
# Fit the LPPL (log-periodic power law) model to a trailing window of the
# GSPC adjusted-close series.
# i, j: window offsets -- the last j observations are dropped, and the fit
#   then uses the most recent d2 = d1-i of the remaining d1 points.
# GSPC: price series from quantmod; column 6 is the adjusted close.
# ubnd: upper bounds for the seven LPPL parameters (A, B, C, a, tc, w, phi).
# pars: starting values for nlsLM; cm: nls.lm.control() settings.
# Returns the fitted nlsLM model on success. On warning/error the handler's
# print() value (a character string) is returned instead, so callers must
# guard access to the result -- the outer loop does so with its own tryCatch.
fitter <- function(i,j,GSPC,ubnd,pars,cm) {
d1 = length(GSPC[,6])-j
d2 = d1-i
t <- tail(head(1:length(GSPC[,6]),d1),d2)
y <- tail(head(GSPC[,6],d1),d2) # adjusted close price
# lower bounds: critical time tc must lie beyond the fitted window (>= d1+1)
lbnd <- c(0,-5000,0,0,d1+1,-Inf,-Inf)
x <- tryCatch({
mod <- nlsLM(y ~ A + B*((tc-t)^a)*(1+C*cos(w*log(tc-t)+phi)), start=pars, control=cm,
lower=lbnd, upper=ubnd, model=TRUE)
}, warning = function(war) {
print(paste("warning: ", war))
}, error = function(err) {
print(paste("error: ", err))
}, finally = {
})
}
#loop
model <- list()
models <- rep(list(list()),250)
models<-foreach( j = tail(seq(0,250,1),250), .packages="minpack.lm") %dopar% {
pars <- guess
model<-list()
for (i in tail(seq(0,250,1),250) ) {
temp <- fitter(i,j,GSPC,ubnd,pars,cm)
pars <- tryCatch({
x <- temp$m$getPars()
model[[i]] = x[[5]]
x
}, warning = function(war) {
print(paste("warning: ", war))
}, error = function(err) {
print(paste("error: ", err))
}, finally = {
})
}
to.models<-model
to.models
}
proc.time()-timer0
stopCluster(cl)
# turn results into a matrix
e2 <- do.call(cbind,models) # applies rbind to all lists within
dims <- dim(e2)
e2 <- as.numeric(e2)
dim(e2) <- dims
# examine all dates
e2 <- e2 - length(GSPC[,6])
e2[is.na(e2)] <- 0
d2 <- density(e2,bw=2)
plot(d2)
tc_pred <- d2$x[which.max(d2$y)] # peak predictor
crashDate1 <- endDate + tc_pred*7/5
crashDate1
# examine all FUTURE dates
e3 <- e2
e3[e3<0 | e3>250] <- 0
d3 <- density(e3[e3>0],bw=1)
plot(d3)
tc_pred <- d3$x[which.max(d3$y)] # peak predictor
crashDate2 <- endDate + tc_pred*7/5
crashDate2
# examine all FUTURE dates with stability offset
e4 <- e3[2,]
d4 <- density(e4[e4>0],bw=1)
plot(d4)
tc_pred <- d4$x[which.max(d4$y)] # peak predictor
crashDate3 <- endDate + tc_pred*7/5
crashDate3
heatmap.2(e2,dendrogram="none", Rowv=NULL, Colv=NULL, trace="none")
plot(e2[1,])
heatmap.2(e3,dendrogram="none", Rowv=NULL, Colv=NULL, trace="none")
plot(e3[1,])
plot(e3[2,])
plot(e3[3,])
write.table(e2,"./parallel_250x250_20141231.csv",sep=",")
|
6729aa9705b524c3b118ebf450b9a73cf095fe1a
|
aba4026c593dc205b2b12735ec51427d0a1d1bd1
|
/lfr_plots.R
|
abd98acd5cfaa9baf31a7ed72af07347dc1c1ea3
|
[] |
no_license
|
Adnanbukhari123/SMA
|
023256d2ad59ae7c9de8188a0947133df298c187
|
a4a77795b2053aee370f272f2245597292b46bed
|
refs/heads/main
| 2023-08-22T00:46:46.737724
| 2021-11-01T13:38:56
| 2021-11-01T13:38:56
| 423,462,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,880
|
r
|
lfr_plots.R
|
library(ggplot2)
library(dplyr)
library(tidyverse)
library(readxl)
library(grid)
df_lfr = read.csv("SMA_Project/LFR.csv")
avg_data <- df_lfr %>% group_by(n, mu) %>% summarise_at(vars(multilevel_community_modularity, walktrap_community_modularity,
infomap_community_modularity,louvain_community_modularity, label_pop_community_modularity,
fast_community_modularity), mean) %>% data.frame()
avg_data %>% ggplot(mapping=aes(n,multilevel_community_modularity, color=factor(n)))+geom_line()+geom_point()+facet_wrap(~n)
#avg_data %>% ggplot(mapping = aes(mu,multilevel_community_modularity, color=factor(n))) + geom_line()+geom_point()
mc <-avg_data %>% ggplot(mapping=aes(mu,multilevel_community_modularity, color=factor(n), shape=factor(n)))+labs(x='Mixing Parameter( μ )',y='Multilevel Modularity', title = " Graph LFR")+
geom_line(linetype="solid",size=1)+geom_point(size=2)
wc <-avg_data%>% ggplot(mapping = aes(mu, walktrap_community_modularity, color=factor(n), shape=factor(n)))+labs(x='Mixing Parameter( μ )', y='Walktrap Modularity', title = " Graph LFR")+
geom_line(linetype="dotted",size=1)+geom_point(size=2)
ic <- avg_data%>% ggplot(mapping = aes(mu, infomap_community_modularity, color=factor(n), shape=factor(n)))+labs(x='Mixing Parameter( μ )', y='Infomap Modularity', title = "Graph LFR")+
geom_line(linetype="dotted",size=1)+geom_point(size=2)
lc <-avg_data%>% ggplot(mapping = aes(mu, louvain_community_modularity, color=factor(n), shape=factor(n)))+labs(x='Mixing Parameter( μ )', y='Louvain Modularity', title = "Graph LFR")+
geom_line(linetype="dotted",size=1)+geom_point(size=2)
labc <-avg_data%>% ggplot(mapping = aes(mu, label_pop_community_modularity, color=factor(n), shape=factor(n)))+labs(x='Mixing Parameter( μ )', y='Labelpop Modularity', title = "Graph LFR")+
geom_line(linetype="dotted", size=1)+geom_point(size=2)
fc <- avg_data%>% ggplot(mapping = aes(mu, fast_community_modularity, color=factor(n), shape=factor(n)))+labs(x='Mixing Parameter( μ )', y='Fast Modularity', title = "Graph LFR")+
geom_line(linetype="dotted", size=1)+geom_point(size=2)
df=as.data.frame(t(df_lfr))
avg_modularity <- df_lfr %>% group_by(n) %>% summarise_at(vars(multilevel_community_modularity, walktrap_community_modularity,
infomap_community_modularity,louvain_community_modularity, label_pop_community_modularity,
fast_community_modularity), mean) %>% data.frame()
y_values <- gather(avg_modularity,Algorithm, y, -n)
ggplot(y_values, aes(n, y, color = Algorithm, shape=Algorithm)) + labs(x='Network size',y='Modularities')+geom_line(linetype="dotted", size=1)+geom_point(size=2)
|
2b920775ec383914865687c554ba40d9265b6acc
|
11dc0bf1c11898f8142a6f89326cf65dbbaecafe
|
/data/bialystok/BialystokData.R
|
3131ad63aeabb0199ccf906d13c4a41da749ef25
|
[] |
no_license
|
kontrabanda/mgr-2
|
4669eeb21a6e3e8365d9a203c275c08daf01bd76
|
151bbba7005621e4ef1502d105e2c2c8a7b149eb
|
refs/heads/master
| 2021-05-05T18:17:24.743810
| 2018-05-16T19:37:14
| 2018-05-16T19:37:14
| 117,591,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,927
|
r
|
BialystokData.R
|
library(lubridate)
source(file="./data/DataBase.R")
# Reference class wrapping the Bialystok crime data set. Reads the raw CSV,
# derives calendar features (day of week, month, year) from the date column,
# and exposes helpers that build one-vs-rest classification frames.
# Inherits from DataBase (data/DataBase.R); 'name' and 'const' come from there
# and the sourced environment respectively -- confirm against DataBase.R.
BialystokData <- setRefClass(
Class="BialystokData",
fields=list(
rawData="data.frame", # parsed observations (lat/lng/day/month/year/category)
categories="character", # all category labels found in the raw file
allColnames="character", # column names used when building rawData
propertiesColnames="character" # feature columns exposed to classifiers
),
methods = list(
initialize = function() {
name <<- "bialystok" # presumably a field declared in DataBase -- verify
allColnames <<- c("lat", "lng", "day", "month", "category")
propertiesColnames <<- c("lat", "lng", "day", "month")
},
# Read and parse the CSV; populates 'categories' and 'rawData'.
# 'params' is accepted for interface compatibility but unused here.
extractData = function(params = NULL) {
data <- readData()
categories <<- as.character(unique(data$KAT))
rawData <<- parseData(data)
},
# Load the raw CSV from the path configured in the global 'const' list.
readData = function() {
read.csv(const$bialystokDataPath, sep = ",")
},
# Convert raw columns (DATA/LAT/LNG/KAT) into the internal schema.
# Note: a 'year' column is added on top of allColnames, so the result
# carries six columns even though allColnames lists five.
parseData = function(inputData) {
inputData$DATA <- as.Date(inputData$DATA, "%y/%m/%d")
data <- setNames(data.frame(matrix(ncol = length(allColnames), nrow = nrow(inputData))), allColnames)
data$month <- as.factor(month(as.Date(inputData$DATA, "%y/%m/%d")))
data$day <- as.factor(weekdays(as.Date(inputData$DATA, "%y/%m/%d")))
data$year <- as.factor(year(as.Date(inputData$DATA, "%y/%m/%d")))
data$lat <- inputData$LAT
data$lng <- inputData$LNG
data$category <- inputData$KAT
data <- removeIncompeleteData(data)
data
},
# Drop records before 2009 and the incomplete months Nov/Dec 2016.
removeIncompeleteData = function(data) {
years <- as.numeric(as.character(data$year))
months <- as.numeric(as.character(data$month))
data[years >= 2009 & !(years == 2016 & months == 12) & !(years == 2016 & months == 11), ]
},
# One-vs-rest training frame: feature columns plus a binary 'label'
# factor (1 = observation belongs to 'category', 0 otherwise).
getData = function(category) {
data <- rawData[, propertiesColnames]
data$label <- as.factor(ifelse(rawData$category==category, 1, 0))
data
},
# Unlabelled feature frame for prediction.
getTestData = function() {
data <- rawData[, propertiesColnames]
data
},
# Category labels present in the parsed data.
getClassificationCategories = function() {
unique(rawData$category)
}
),
contains=c("DataBase")
)
|
b391c35c868dec2b4d5a9a0128c0e11c12392f79
|
8ab617657cab50e73bd72272c8655b9d46486f9a
|
/man/CompetingRiskFrailty-internal.Rd
|
d7d318faa51d392f00c8108ee42f164e8fa2f8e0
|
[] |
no_license
|
zhaoyiqi97/CompetingRiskFrailty
|
0e705089a6ea69e023977d48e180ca63823fb290
|
2099f8e40e4a9a492eb9ee5631ca7f0004d86495
|
refs/heads/master
| 2021-05-29T07:41:23.905279
| 2007-11-24T00:00:00
| 2007-11-24T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 420
|
rd
|
CompetingRiskFrailty-internal.Rd
|
\name{CompetingRiskFrailty-internal}
\alias{CompetingRiskFrailtyOptim}
\title{Internal function for fitting of the competing-risks-with-frailties survival model}
\description{The function 'CompetingRiskFrailtyOptim' implements an optimization procedure and
is used internally in the body of the 'CompetingRiskFrailtySurvfitCreate' function.}
\details{This function is not to be called by the user.}
\keyword{internal}
|
0d1b1bdef1cb828d4b8a839f8db74e4cc64448f4
|
2bf87b711bf3ba9b34057a95035134b800586d38
|
/R/rid.R
|
a028ec5025f0efbafd1f37f47c5b1fd1de66f708
|
[] |
no_license
|
kobeliu85/rSVD
|
90f4ab7568e02582fe536c6f6d3bfe13984d72e2
|
735bc5a1e94503371a6c5323bde04f317f404dc2
|
refs/heads/master
| 2021-06-26T10:26:59.921133
| 2017-09-12T20:04:59
| 2017-09-12T20:04:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,715
|
r
|
rid.R
|
#' @title Randomized interpolative decomposition (ID).
#
#' @description Randomized interpolative matrix decomposition.
#
#' @details
#' Algorithm for computing the ID of a rectangular \eqn{(m, n)} matrix \eqn{A}, with target rank
#' \eqn{k << min(m,n)}. The input matrix is factored as \eqn{A = C * Z},
#' using the column pivoted QR decomposition. The factor matrix \eqn{C} is formed as a subset of
#' columns of \eqn{A}, also called the partial column skeleton.
#' If \code{mode='row'}, then the input matrix is factored as \eqn{A = Z * R}, using the
#' row pivoted QR decomposition. The factor matrix \eqn{R} is now formed as
#' a subset of rows of \eqn{A}, also called the partial row skeleton.
#' The factor matrix \eqn{Z} contains a \eqn{(k, k)} identity matrix as a submatrix,
#' and is well-conditioned.
#'
#' If \eqn{rand='TRUE'} a probabilistic strategy is used to compute the decomposition, otherwise a
#' deterministic algorithm is used.
#'
#'
#' @param A Array_like. \cr
#' A numeric input matrix (or data frame), with dimensions \eqn{(m, n)}. \cr
#' If the data contain \eqn{NA}s na.omit is applied.
#'
#' @param k Int, optional. \cr
#' Determines the number of rows/columns to be selected.
#' It is required that \eqn{k} is smaller or equal to \eqn{min(m,n)}.
#'
#' @param mode String c('column', 'row'). \cr
#' Determines whether a subset of columns or rows is selected.
#'
#' @param p Int, optional. \cr
#' Oversampling parameter (default \eqn{p=10}).
#'
#' @param q Int, optional. \cr
#' Number of power iterations (default \eqn{q=0}).
#'
#' @param idx_only Bool (\eqn{TRUE}, \eqn{FALSE}), optional. \cr
#' If (\eqn{TRUE}), the index set \code{idx} is returned, but not the matrix \code{C} or \code{R}.
#' This is more memory efficient, when dealing with large-scale data.
#'
#' @param rand Bool (\eqn{TRUE}, \eqn{FALSE}). \cr
#' If (\eqn{TRUE}), a probabilistic strategy is used, otherwise a deterministic algorithm is used.
#'
#' @param ................. .
#'
#' @return \code{id} returns a list with class \eqn{id} containing the following components:
#' \item{C}{ Array_like. \cr
#' Column subset \eqn{C = A[,idx]}, if \code{mode='column'}; array with dimensions \eqn{(m, k)}.
#' }
#'
#' \item{R}{ Array_like. \cr
#' Row subset \eqn{R = A[idx, ]}, if \code{mode='row'}; array with dimensions \eqn{(k, n)}.
#' }
#'
#' \item{Z}{ Array_like \cr
#' Well-conditioned matrix; depending on the selected mode, this is an
#' array with dimensions \eqn{(k,n)} or \eqn{(m,k)}.
#' }
#'
#' \item{idx}{ Array_like \cr
#' The index set of the \eqn{k} selected columns or rows used to form \eqn{C} or \eqn{R}.
#' }
#'
#' \item{pivot}{ Array_like \cr
#' Information on the pivoting strategy used during the decomposition.
#' }
#'
#' \item{scores}{ Array_like .\cr
#' The scores (importance values) of the columns or rows of the input matrix \eqn{A}.
#' }
#'
#' \item{scores.idx}{ Array_like .\cr
#' The scores (importance values) of the \eqn{k} selected columns or rows in \eqn{C} or \eqn{R}.
#' }
#' \item{.................}{.}
#'
#'
#'
#' @author N. Benjamin Erichson, \email{erichson@uw.edu}
#'
#' @seealso \code{\link{rcur}},
#'
#'
#' @export
rid <- function(A, k = NULL, mode = 'column', p = 10, q = 0, idx_only = FALSE, rand = 'TRUE') UseMethod("rid")
#' @export
rid.default <- function(A, k = NULL, mode = 'column', p = 10, q = 0, idx_only = FALSE, rand = 'TRUE') {
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Checks
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (any(is.na(A))) {
warning("Missing values are omitted: na.omit(A).")
A <- stats::na.omit(A)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Init id object
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
idObj = list( C = NULL,
R = NULL,
Z = NULL,
idx = NULL,
pivot = NULL,
scores = NULL,
scores.idx = NULL,
mode = mode,
rand = rand)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Transpose input matrix if mode == 'row'
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(mode == 'row') A = H(A)
m <- nrow(A)
n <- ncol(A)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set target rank
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(is.null(k)) k <- n
if(k > min(m,n)) k <- min(m,n)
if(k < 1) stop("Target rank is not valid!")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute interpolative decompositon
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pivoted QR decomposition
if(rand == 'TRUE') {
out_rqb <- rqb(A, k = k, p = p, q = q)
out <- qr(out_rqb$B, LAPACK=TRUE)
} else {
out <- qr(A, LAPACK=TRUE)
}
# Get index set
idObj$idx <- out$pivot[1:k] # Get row set
idObj$pivot <- out$pivot # Get row set
ordered.pivtos <- order(idObj$pivot)
# Get R
R <- qr.R( out )
# Compute scores
idObj$scores <- abs(diag(R))
idObj$scores.idx <- idObj$scores[1:k]
idObj$scores <- idObj$scores[ordered.pivtos]
# Compute Z
if(k == n) {
V = pinv(R[1:k, 1:k])
}else{
V = pinv(R[1:k, 1:k]) %*% R[1:k, (k+1):n]
}
idObj$Z = cbind(diag(k), V)
idObj$Z = matrix(idObj$Z[, ordered.pivtos], nrow = k, ncol = n)
if(mode == 'row') {
idObj$Z = H(idObj$Z)
}
# Create column / row subset
if(idx_only == FALSE) {
if(mode == 'column') {
idObj$C = matrix(A[, idObj$idx], nrow = m, ncol = k)
colnames(idObj$C) <- colnames(A)[idObj$idx]
rownames(idObj$C) <- rownames(A)
}
if(mode == 'row') {
idObj$R = H(matrix(A[, idObj$idx], nrow = m, ncol = k))
rownames(idObj$R) <- colnames(A)[idObj$idx]
colnames(idObj$R) <- rownames(A)
}
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Return
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class(idObj) <- "rid"
return( idObj )
}
|
3755c3d718c539d646d168d75c3bc056d623239e
|
b7f9874173b3b1f78eceb0fcac263a8dc8ea4c90
|
/MarthaGizawRFinal.R
|
ac806f2f0896d6ee59e41937c471e774b5ef859f
|
[] |
no_license
|
mtgizaw/Advanced_R_Programming
|
59d9f9198ce4c1937ea074fe384f7bca46fd22ef
|
e8157897f239ca5b1487fd1dfe236fce9ee87e2f
|
refs/heads/master
| 2022-12-19T06:49:12.609390
| 2020-09-28T06:51:24
| 2020-09-28T06:51:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,379
|
r
|
MarthaGizawRFinal.R
|
# Martha Gizaw
# Advanced R Final Exam
# March 4-5, 2020
# CREATING AN EXTENSION OF ARITHMETIC FUNCTIONS
# OBJECTIVE: To create a new function that extends the functionality of the existing arithmetic
# functions in R, such as exp(), log(), sqrt(), and sin().
# The arithmetic functions I will use in this exam are as follows:
# exp(), log(), sqrt(), abs(), ceiling(), floor(), trunc(), round(), signif(), cos(), sin(), tan()
#===============================================================================================#
# (1) Create a function named "plot_math" that takes two inputs: a function, and a vector x.
# I used FUN as an argument to input any math function to determine the result of the vector x.
# Apply a vectorized mathematical function to a numeric vector.
#
# Args:
#   FUN: a vectorized function such as exp, log, sqrt, or sin.
#   x: a numeric vector.
# Returns: FUN evaluated elementwise on x.
plot_math <- function(FUN, x) {
  FUN(x)
}
#===============================================================================================#
# (2) Your function should perform the chosen function on each value in x.
# To check the results of x, I executed plot_math for each math function. Let x contain three
# vector values.
# Demo calls below exercise plot_math() with each arithmetic function listed in
# the exam header; results are printed to the console.
# Solve with Euler's number e and its exponent x.
plot_math(exp, c(1, 2.71, log(10))) # log() is the same as ln, so e^ln(x) = x.
# Calculate the natural log of x.
plot_math(log, c(1, 10, 54.59999))
# Calculate the square root of x.
plot_math(sqrt, c(1, 10^2, 960+2))
# Calculate the absolute value of x.
plot_math(abs, c(5, -5, -(-3/-5)))
# Round to the nearest integer greater than x using ceiling.
# Round to the nearest integer less than x using floor.
# Round x to the nearest integer in the direction of 0 using trunc.
plot_math(ceiling, c(1, 1.11, -1.75))
plot_math(floor, c(1, 1.11, -1.75))
plot_math(trunc, c(1, 1.11, -1.75))
# Round x to the nearest whole number.
plot_math(round, c(5.66666667, -3.39, 5/6))
# Round x to a number of significant figures.
plot_math(signif, c(0.707, 1, 5.33333/6.667))
# Calculate the cosine, sine, and tangent of x.
plot_math(cos, c(0, 0.556, 3.14))
plot_math(sin, c(0, 0.556, 3.14))
plot_math(tan, c(0, 0.556, 3.14))
#===============================================================================================#
# (3) It should then plot the results in a scatterplot (use the plot() function), with the values
# from x in the x-axis and the output from the plot_math() function in the y-axis.
# I have updated the function with a statement for plotting the output.
# Plot FUN(x) against x as a scatterplot titled "plot_math() output".
# Called for its graphics side effect on the active device.
plot_math <- function(FUN, x) {
  y <- FUN(x)
  plot(x, y, main = "plot_math() output")
}
# I have varied x to accurately plot the graphs of each math function.
# Each call below draws one scatterplot on the active graphics device.
plot_math(exp, c(50:100))
plot_math(abs, c(-500:500))
plot_math(log, c(0:500))
plot_math(sqrt, c(0:500))
# The graphs for ceiling, floor, trunc, round, and signif are all linear.
plot_math(ceiling, c(0:500))
plot_math(floor, c(0:500))
plot_math(trunc, c(0:500))
plot_math(round, c(0:500))
plot_math(signif, c(0:500))
# Variables that store generated sequences are vectors, too! Now we can clearly see the trig
# graphs.
x <- seq(0,10,0.1)
is.vector(x)
plot_math(cos, x)
plot_math(sin, x)
plot_math(tan, x)
#===============================================================================================#
# (4) Now, include an option to return the output from (2) as a vector in addition to plotting
# it. This will require an additional argument for the function as well as an if-clause in the
# function.
# In plot_math, plot a function that matches with the variable y. Suppose y calls for the
# square root of x. If the user specifies FUN = sqrt, plot_math will return y as a vector
# while plotting it. If the user specifies a function other than sqrt (e.g. FUN = log),
# plot_math will output FALSE because y does not match with FUN, and the scatterplot will
# not appear.
# Variant of plot_math() that also takes a candidate output vector y.
# If y equals FUN(x), the pair is plotted and y is returned as a vector;
# otherwise FALSE is returned and no plot is produced.
# NOTE(review): ifelse() is vectorized and evaluates its branch expressions
# eagerly, so the embedded return() calls exit the function as soon as a
# branch is evaluated -- behavior for a y that only partially matches FUN(x)
# should be confirmed before reusing this pattern.
plot_math <- function(FUN, x, y){
  ifelse(y == FUN(x), plot(x, y, main="plot_math() output") & return(as.vector(y)), return(FALSE))
}
# If y can equate to FUN, then we can return the output as a vector while producing the graph.
# The second call deliberately passes a mismatched y (sqrt(x) against FUN = log)
# to demonstrate the FALSE branch.
x <- c(0:500)
plot_math(sqrt, x, sqrt(x))
plot_math(log, x, sqrt(x))
plot_math(log, x, log(x))
#===============================================================================================#
# (5) Last, include a check that will stop your function from running if the user passes negative
# x values while specifying the sqrt() or log() functions (you cannot take the square root or log
# of a negative number).
# I have updated plot_math to include a simple test in an inelse clause that checks if x is 0
# or greater, the vector output y does not contain any values of NaN, and the user specifies
# either FUN = sqrt or FUN = log for y. While y can match with any math function, if all of
# the above conditions are true, then plot_math will properly plot the sqrt and log outputs.
# Final plot_math(): plots and returns y only when x is non-negative, y has
# no NaN values, and y matches sqrt(x) or log(x); otherwise returns FALSE.
# NOTE(review): the while condition is evaluated once in practice -- both
# ifelse() branches end in return(), so the loop body always exits the
# function on its first pass.
plot_math <- function(FUN, x, y){
  while(any(y == FUN(x))){
    ifelse(x >= 0 & any(is.nan(y)) == FALSE & (y == sqrt(x) | y == log(x)),
    plot(x, y, main="plot_math() output") & return(as.vector(y)), return(FALSE))
  }
}
# First, let's try a vector with all positive values. The tryCatch calls check for
# any errors or warnings with sqrt or log and escalate warnings to errors.
# Fix: the original handlers called print(w) AFTER stop(); stop() unwinds the
# handler immediately, so those print() calls were unreachable dead code and
# have been removed. Observable behavior is unchanged.
x <- c(0:500)
plot_math(abs, x, abs(x))
tryCatch(
  expr = {
    plot_math(sqrt, x, sqrt(x))
  },
  warning = function(w){
    # Negative inputs make sqrt() warn (NaNs produced); convert to an error.
    stop("You cannot take the square root of a negative number!")
  }
)
tryCatch(
  expr = {
    plot_math(log, x, log(x))
  },
  warning = function(w){
    stop("You cannot take the log of a negative number!")
  }
)
# Now let's try a vector with half of the values being negative integers.
x <- c(-500:500)
plot_math(abs, x, abs(x))
tryCatch(
  expr = {
    plot_math(sqrt, x, sqrt(x))
  },
  warning = function(w){
    stop("You cannot take the square root of a negative number!")
  }
)
tryCatch(
  expr = {
    plot_math(log, x, log(x))
  },
  warning = function(w){
    stop("You cannot take the log of a negative number!")
  }
)
#===============================================================================================#
"THE END!"
# Close all open graphics devices.
graphics.off()
# "Clear console" helper: prints 50 blank lines.
clc <- function() cat(rep("\n", 50))
clc()
# NOTE(review): rm(list = ls()) wipes the entire global environment. Acceptable
# at the very end of a standalone exam script, but avoid this pattern in
# reusable code.
rm(list = ls(all.names = TRUE))
# Martha Gizaw
# Advanced R Final Exam
# March 4-5, 2020
|
109509333659d7bebcc8146f1e4192e39bd2bf04
|
90bc0268ab54edfeb1eb2231e3d40c074b1fc784
|
/man/add_time_to_date.Rd
|
a1413995f9010f902323c42ced28f30d6247fd33
|
[] |
no_license
|
jackwasey/jwutil
|
e920952f8f42ef609c6019f7107c4256836fb4a9
|
d149051dc750a56412c8c7d7d07c1d3619d4f4b2
|
refs/heads/master
| 2021-01-17T09:26:51.710521
| 2020-01-18T19:58:17
| 2020-01-18T19:58:17
| 24,302,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 716
|
rd
|
add_time_to_date.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.r
\name{add_time_to_date}
\alias{add_time_to_date}
\title{convert separate lists of dates and times to POSIXlt objects}
\usage{
add_time_to_date(tms, dts, verbose = FALSE)
}
\arguments{
\item{tms}{vector of times, i.e. number in range 0 to 2400, as string or
integer, with or without trailing zeros}
\item{dts}{vector of dates, in string format \%Y-\%m-\%d or simple R Date
objects}
\item{verbose}{single logical value, if \code{TRUE} then produce verbose
messages}
}
\value{
vector of POSIXlt date-times
}
\description{
Some datetime data is presented as separate dates and times.
This function restores the full date-time.
}
|
fa09e69f6902b86ab2aa2a7d387b1e6ef8a72c8a
|
9f3e9e2e5eabc0352134c3235980b3c92289cc1e
|
/vignettes/PaleoFidelity.R
|
10b16ed2da78452c681265c522021a5ae24a0f0b
|
[] |
no_license
|
MJKowalewski/PaleoFidelity
|
761cbf386c322c1fae0416e152a7665eb594a59e
|
8375c559873908e101f0e6f0dadfc77bd05685d3
|
refs/heads/master
| 2023-04-14T22:59:28.880163
| 2023-04-09T01:03:14
| 2023-04-09T01:03:14
| 144,731,251
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,503
|
r
|
PaleoFidelity.R
|
# Code purled from the PaleoFidelity vignette: loads the package, then runs
# fidelity summaries, live-dead comparisons, fidelity estimates, and
# alpha-diversity analyses on the bundled FidData example data set.
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ----installing PaleoFidelity package, eval = FALSE---------------------------
#  install.packages('devtools')
#  library(devtools)
#  devtools::install_github('mjkowalewski/PaleoFidelity', build_vignettes = TRUE)
#  library(PaleoFidelity)
## ----data example-------------------------------------------------------------
library(PaleoFidelity)
str(FidData) # check the structure of the dataset
## ----fidelity summary function------------------------------------------------
FidelitySummary(live = FidData$live, dead = FidData$dead, gp = FidData$habitat, report = TRUE)
## ----fidelity summary function part 2-----------------------------------------
FidelitySummary(live = FidData$live, dead = FidData$dead, gp = FidData$habitat,
                report = TRUE, n.filters = 30)
## ----fidelity summary function part 3-----------------------------------------
FidelitySummary(live = FidData$live, dead = FidData$dead, gp = FidData$habitat, report = TRUE, n.filters = 100)
## ----live-dead plot, fig.width=7, fig.height=6--------------------------------
par(mar=c(3, 7, 0.5, 7))
rep1 <- LDPlot(live = colSums(FidData$live),
               dead = colSums(FidData$dead),
               tax.names = colnames(FidData$live), toplimit = 20,
               cor.measure = 'spearman', report = TRUE, iter = 1000)
## ----LD comparison------------------------------------------------------------
rep1[1:5]
## ----live-dead model, fig.width=7, fig.height=3.5-----------------------------
par(mar=c(4, 4, 0.5, 0.5))
hist(rep1$randomized.r[,2], breaks=seq(-1,1,0.05), main='',
     las=1, xlab=bquote('Spearman' ~ italic(rho)))
arrows(rep1$cor.coeff[2], 100, rep1$cor.coeff[2], 10,
       length=0.1, lwd=2)
## ----fidelity estimates-------------------------------------------------------
out1 <- FidelityEst(live = FidData$live, dead = FidData$dead,
                    gp = FidData$habitat,
                    n.filters = 30, iter = 499)
str(out1)
## ----fidelity estimates outputs-----------------------------------------------
out1$xc # adjusted correlation measure summary
out1$yc # adjusted similarity measure summary
## ----fidelity estimates outputs: sample-standardized--------------------------
out1$xs # sample-standardized correlation measure summary
out1$ys # sample-standardized similarity measure summary
## ----classic fidelity plot, fig.width=7, fig.height=4-------------------------
par(mar = c(4, 4, 0.5, 0.5))
SJPlot(out1, gpcol = c('aquamarine3', 'coral3'), cex.legend = 0.8)
## ----classic fidelity plot 2, fig.width=7.5, fig.height=4---------------------
par(mar = c(4, 4, 0.5, 0.5))
SJPlot(out1, gpcol = c('aquamarine3', 'coral3'), bubble = F, unadj = T, adjF = F, cex.legend = 0.8)
## ----alpha diversity----------------------------------------------------------
out3 <- FidelityDiv(FidData$live, FidData$dead, iter=1000)
out3$x
out3$y
## ----alpha diversity 2--------------------------------------------------------
out4 <- FidelityDiv(FidData$live, FidData$dead, FidData$habitat, iter=1000)
out4$xmean
out4$ymean
out4$xgp
out4$ygp
out4$p.values
out4$p.gps
## ----plot alpha 2, fig.width=7, fig.height=4----------------------------------
# NOTE(review): out3 is overwritten here with a grouped fit before plotting;
# presumably intentional in the vignette, but verify against the source Rmd.
out3 <- FidelityDiv(FidData$live, FidData$dead, FidData$habitat, CI = 0.95, iter = 1000)
par(mar = c(4, 4.5, 0.5, 0.5))
AlphaPlot(out3, col.gp = c('aquamarine3', 'coral3'), bgpt = 'beige', pch = 22, legend.cex = 0.8)
|
f2da067e8dee83135cf65bcb6b89537c1386f6a2
|
bed59aeb82e8b75f9a7bec0a8a4657d84ad2f3f6
|
/2c_synth_figures.R
|
9301f7e29c0d57e7fa01bff5aef8a7a3c4d542f6
|
[] |
no_license
|
maxkasy/Marienthal
|
b21b0e1c717380e5a9c58c52e585f55800148180
|
4f9f17f358d75b05666a585bd89dd6bba89415b7
|
refs/heads/master
| 2023-01-05T23:39:46.397059
| 2020-11-03T13:08:19
| 2020-11-03T13:08:19
| 294,738,046
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,344
|
r
|
2c_synth_figures.R
|
# Tables and plots for evaluating the synthetic control
library(kableExtra) # for table export
# Human-readable labels for the matching covariates. The order must mirror
# c(matching_variables[-1], matching_variables_2020_names), which the tibble
# check below makes easy to eyeball.
variable_names = c(
  "Working age pop",
  "Long term unemp/pop",
  "Inactive/pop",
  "Mean age",
  "Share small firms",
  "Share mid firms",
  "Share low edu",
  "Share mid edu",
  "Share men",
  "Share migrant",
  "Share care resp",
  "Mean wage",
  "Mean age unemp",
  "Low edu/unemp",
  "Mid edu/unemp",
  "Poor German/unemp",
  "Men/unemp",
  "Migrant/unemp",
  "Health cond/unemp",
  "Communal tax/pop",
  # additional 2020 variables
  "Lt ue/pop 2020",
  "Inactive/pop",
  "Mean wage",
  "Mean age ue",
  "Low edu/ue",
  "Mid edu/ue",
  "Poor German/ue",
  "Health cond/ue"
)
#check the names align with the variables
tmp = tibble(a = c(matching_variables[-1], matching_variables_2020_names), b = variable_names)
# Checking solution of synth
# Gap between the synthetic control outcome and the treated unit's outcome:
# (Z0 weighted by the fitted weights) minus Z1.
#
# Args:
#   ms: list with `municipalities_synth_prepared` (holding outcome matrices
#       Z0, Z1) and `municipalities_synth_out` (holding weights `solution.w`).
# Returns: a matrix of per-period gaps (synthetic minus actual).
synth_outcome_gap = function(ms) {
  prepared <- ms$municipalities_synth_prepared
  synthetic_outcome <- prepared$Z0 %*% ms$municipalities_synth_out$solution.w
  synthetic_outcome - prepared$Z1
}
# Diagnostic report for a fitted synthetic control object.
#
# Args:
#   ms: list with `municipalities_synth_prepared` (dataprep-style output with
#       covariate matrices X0/X1 and outcome matrices Z0/Z1) and
#       `municipalities_synth_out` (synth-style output with `solution.w`).
# Side effects: prints covariate ratios and outcome gaps to the console and
# writes two LaTeX tables, "Data/synthetic_control_weights.tex" and
# "Data/synthetic_control_variables.tex". Called for these side effects.
check_synth = function(ms) {
  # Ratio of synthetic-control covariates to the treated unit's covariates;
  # values near 1 indicate a good covariate match.
  ratio_covariates =
    ms$municipalities_synth_prepared$X0 %*%
    ms$municipalities_synth_out$solution.w /
    ms$municipalities_synth_prepared$X1
  gaps_outcomes = synth_outcome_gap(ms)
  print(ratio_covariates)
  print(gaps_outcomes)
  municipalities_synth_tables =
    synth.tab(
      dataprep.res = ms$municipalities_synth_prepared,
      synth.res = ms$municipalities_synth_out
    )
  # Select municipalities with non-negligible weight in the synthetic control,
  # sort them by weight. 0.005 is the "negligible" cutoff.
  non_zero_weights = sum(ms$municipalities_synth_out$solution.w > .005)
  non_zero_municipalities = municipalities_synth_tables$tab.w %>%
    arrange(desc(w.weights)) %>%
    head(n = non_zero_weights)
  # Print a table of municipalities used in the synthetic control,
  # sorted by weight.
  non_zero_municipalities %>%
    kable(
      col.names = c("Weight", "Municipality", "Identifier"),
      row.names = F,
      digits = 3,
      format = "latex",
      booktabs = TRUE,
      escape = F,
      linesep = ""
    ) %>%
    write("Data/synthetic_control_weights.tex")
  # Build and print table of variables for control municipalities
  # First row: Gramatneusied
  Gramatneusiedl_variables = t(ms$municipalities_synth_prepared$X1) %>%
    as_tibble() %>%
    mutate(GEMEINDE = "Gramatneusiedl")
  # Second row: Synthetic control
  synthetic_variables = t(ms$municipalities_synth_prepared$X0 %*%
                            ms$municipalities_synth_out$solution.w) %>%
    as_tibble() %>%
    mutate(GEMEINDE = "Synthetic control")
  # Remaining rows: Municipalities with non-negligible weights
  control_variables = t(ms$municipalities_synth_prepared$X0) %>%
    as_tibble(rownames = "unit.numbers") %>%
    mutate(unit.numbers = as.integer(unit.numbers)) %>%
    right_join(non_zero_municipalities, by = "unit.numbers") %>%
    select(-c("w.weights", "unit.numbers")) %>%
    rename(GEMEINDE = unit.names)
  # Combine the rows into one table
  table_variables = bind_rows(Gramatneusiedl_variables,
                              synthetic_variables,
                              control_variables) %>%
    mutate(POP_workingage = as.integer(POP_workingage),
           mean_wage = as.integer(mean_wage),
           special.mean_wage.2020 = as.integer(special.mean_wage.2020)) %>%
    select(c("GEMEINDE", matching_variables[-1], matching_variables_2020_names))
  # Rename columns for printing
  names(table_variables) = c("Municipality", variable_names)
  # Split into sub-tables by columns for page width reasons
  sub_tables = list(1:8, c(1, 9:15), c(1, 16:22), c(1, 23:29)) %>%
    map(~ table_variables[, .x])
  sub_kables = map(
    1:4,
    ~ sub_tables[[.x]] %>%
      kable(
        row.names = F,
        digits = 3,
        format = "latex",
        booktabs = TRUE,
        escape = F,
        linesep = c("", "\\addlinespace", "", "", "", "", "", "", "")
      ) %>%
      str_replace_all("_", " ")
  )
  # Stitch the four sub-tables into one LaTeX block, inserting a "2020"
  # section header before the last group.
  # NOTE(review): the magic offsets (drop last 13 chars, start at char 28)
  # appear to trim kable's table begin/end markup -- verify if the kable
  # output format ever changes.
  paste(
    substring(
      sub_kables[[1]],
      first = 1,
      last = nchar(sub_kables[[1]]) - 13
    ),
    substring(
      sub_kables[[2]],
      first = 28,
      last = nchar(sub_kables[[2]]) - 13
    ),
    substring(
      sub_kables[[3]],
      first = 28,
      last = nchar(sub_kables[[3]]) - 13
    ),
    "\\addlinespace",
    "\\multicolumn{8}{c}{\\textbf{2020}}\\\\",
    "\\addlinespace",
    substring(sub_kables[[4]], first = 28),
    sep = ""
  ) %>%
    write("Data/synthetic_control_variables.tex")
}
# Print tables for Gramatneusiedl and it's controls
check_synth(municipalities_synth_Gramatneusiedl)
# Function to plot the gap between actual and counterfactual unemployment over time.
#
# Args:
#   ms: fitted synthetic control list (see check_synth for structure).
# Returns: a ggplot object with one line for actual (Z1) and one for
#   synthetic (Z0 %*% solution.w) unemployment.
plot_gap = function(ms) {
  # Plot years are stored as bare year numbers; ymd(truncated = 2) parses
  # them as Jan 1 dates.
  year = ms$municipalities_synth_prepared$tag$time.plot %>%
    lubridate::ymd(truncated = 2L)
  tibble(
    Year = year,
    Col = "Actual",
    Y = ms$municipalities_synth_prepared$Z1
  ) %>%
    bind_rows(
      tibble(
        Year = year,
        Col = "Synthetic",
        Y = ms$municipalities_synth_prepared$Z0 %*%
          ms$municipalities_synth_out$solution.w
      )
    ) %>%
    ggplot(aes(x = Year, y = Y, color = Col)) +
    geom_path() +
    scale_color_manual(values = c("firebrick", "gray70")) +
    scale_x_date(
      date_breaks = "1 year",
      date_labels = "%Y",
      expand = expansion(mult = 0)
    ) +
    expand_limits(y = 0) +
    scale_y_continuous(expand = expansion(mult = 0)) +
    theme_minimal() +
    theme(legend.position = "top",
          plot.margin = unit(c(1, 1, 1, 1), "cm")) +
    labs(color = "",
         y = "Unemployment",
         title = "Actual unemployment and synthetic control unemployment")
}
# Plot the predictive gap for Gramatneusiedl
ggsave(
  "Data/Synthetic_control_gap.png",
  plot_gap(municipalities_synth_Gramatneusiedl),
  width = 7,
  height = 4
)
# Visual permutation inference, comparing the trajectory of predictive gaps
# between Gramatneusiedl and the 25 potential control municipalities.
#
# Args:
#   GKZ: municipality identifier of the treated unit (highlighted in red).
# Returns: a ggplot object; all other municipalities are drawn in faded gray.
# NOTE(review): relies on the globals municipalities_synth_all and
# all_identifiers defined elsewhere in this project.
plot_permutation_trajectories = function(GKZ) {
  # One gap trajectory (2011-2020) per candidate municipality.
  all_gaps =
    map(
      1:length(all_identifiers),
      ~ municipalities_synth_all[[.x]] %>%
        synth_outcome_gap %>%
        as_tibble %>%
        mutate(
          Year = lubridate::ymd(2011:2020, truncated = 2L),
          GKZ = all_identifiers[.x]
        )
    ) %>%
    bind_rows() %>%
    rename(Gap = w.weight)
  all_gaps %>%
    ggplot(aes(
      x = Year,
      y = Gap,
      color = (GKZ == Gramatneusiedl),
      alpha = (GKZ == Gramatneusiedl),
      group = GKZ
    )) +
    geom_hline(yintercept = 0, alpha = .7) +
    geom_line() +
    scale_color_manual(
      values = c("gray70", "firebrick"),
      labels = c("Other", "Gramatneusiedl")
    ) +
    scale_x_date(
      date_breaks = "1 year",
      date_labels = "%Y",
      expand = expansion(mult = 0)
    ) +
    scale_y_continuous(expand = expansion(mult = 0)) +
    scale_alpha_manual(values = c(.33, 1), guide = 'none') +
    theme_minimal() +
    theme(legend.position = "top",
          plot.margin = unit(c(1, 1, 1, 1), "cm")) +
    labs(color = "Municipality",
         title = "Gap in actual unemployment versus synthetic control unemployment")
}
ggsave(
  "Data/Synthetic_permutation_inference.png",
  plot_permutation_trajectories(Gramatneusiedl),
  width = 7,
  height = 4
)
|
7affca78e0441926ec11ed9d301e67adcb629251
|
c6ab22e84df7b7dcf7a1ab462a99484237694fa7
|
/man/importexport.Rd
|
fd0f61fbdb809c9e69af0ac91bb13e5575323ada
|
[] |
no_license
|
Nisus-Liu/bbturns
|
d2f55950450851946987ed816b02f557bee5c471
|
84ea91b86568c060609c644bf2c6a4f022970740
|
refs/heads/master
| 2021-01-11T17:37:58.308645
| 2017-02-06T14:54:19
| 2017-02-06T14:54:19
| 79,809,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 490
|
rd
|
importexport.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importexport.R
\docType{data}
\name{importexport}
\alias{importexport}
\title{Monthly year on year growth of imports and exports data of China.}
\format{A 3 x 254 data.frame}
\usage{
importexport
}
\description{
This data set is a data.frame that contains the monthly year-on-year growth of imports-exports,
imports, and exports from 1995.8 to 2016.9; there are altogether 254 observations.
}
\keyword{datasets}
|
ff72ac3313e057a21d460e34e376c2fdb021f540
|
79675c0d22fa75e8b2199e3bc366779481be43fa
|
/man/make_filename.Rd
|
015abd1463cf061a8eb2d561d19787aeae70b398
|
[] |
no_license
|
shaojl7/fars
|
5ce841376459de41ea56a71bf0543388404784c2
|
9222b5cdf39756515ea635e6f55e5617de418d9b
|
refs/heads/master
| 2021-01-20T13:22:28.245582
| 2017-05-07T06:32:02
| 2017-05-07T06:32:02
| 90,480,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 548
|
rd
|
make_filename.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{make_filename}
\alias{make_filename}
\title{Make File Name}
\usage{
make_filename(year)
}
\arguments{
\item{year}{\code{numerical} value that is converted to the year, used to specify the filename}
}
\value{
\code{make_file} returns \code{string} of characters that contain
csv filename based on the year specified
}
\description{
\code{make_file} creates a standardized filename based on the year specified
}
\examples{
\dontrun{make_filename(2017)}
}
|
c2d81ab9ca2e26c53b2fa5dd738fdb7074d1b9ed
|
e0a7899f2f9063f0eec3fba369c8f878afe050fd
|
/docs/slides/Section05.1.R
|
0c3081faa2af6ddc03c906ce22a9cd771a85f2b2
|
[] |
no_license
|
jaredsmurray/sta371g_s19
|
e084c7211d03c24e42f34eb9ed5a9cbf38afa25e
|
e029d0b36640792bc1d583eea00b1e75dbac8abe
|
refs/heads/master
| 2020-04-18T01:16:05.133255
| 2019-05-08T04:40:05
| 2019-05-08T04:40:05
| 167,112,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,013
|
r
|
Section05.1.R
|
# Teaching script: regression and time-series forecasting examples (beer,
# airline passengers, Auto MPG) with model selection via step() and CV().
library(readr)
path = "https://jaredsmurray.github.io/sta371g_s19/data/"
beer = read_csv(paste0(path, 'beer.csv'))
beerfit_hw = lm(nbeer~weight+height, data=beer)
summary(beerfit_hw)
beerfit_w = lm(nbeer~weight, data=beer)
summary(beerfit_w)
## Out of sample forecasts for the airline data
library(forecast)
airline = read_csv(paste0(path, 'airline.csv'))
# Hold out the final observation for the first forecasting exercise.
passengers = ts(airline$Passengers[-nrow(airline)], start=c(1949, 1), frequency=12)
fit1 = tslm(log(passengers)~trend)
fit2 = tslm(log(passengers)~trend+season)
X = cbind(trend=1:length(passengers), # This is the trend
          season=seasonaldummy(passengers) # These are the seasonal (month) dummies
)
# We can use Arima to generate forecasts with lagged variables here too, by
# specifying the other variables (trend + seasonal dummies) using the xreg
# argument below
fit3 = Arima(log(passengers), order=c(1,0,0), xreg = X)
h=1
f1 = forecast(fit1, h=h)
f2 = forecast(fit2, h=h)
# First we need to build the new covariates:
new_X = cbind(trend = seq(length(passengers)+1, length(passengers)+h),
              season = seasonaldummy(passengers, h=h))
f3 = forecast(fit3, h=h, xreg=new_X)
full_series = ts(airline$Passengers, start=c(1949, 1), frequency=12)
plot(log(passengers), xlim=c(1959, 1962), ylim=c(5.75, 6.5))
points(1961 - 1/12, log(full_series[length(full_series)]), pch=20)
points(1961 - 1/12, f1$mean, pch=4, col='red')
points(1961 - 1/12, f2$mean, pch=4, col='blue')
points(1961 - 1/12, f3$mean, pch=4, col='darkorange')
## Out of sample forecasts for the airline data, this time with a whole year
library(forecast)
# NOTE(review): passengers is redefined here (holding out the last 12 months)
# but fit1/fit2/fit3 are NOT refit, so the forecasts below come from models
# trained on the longer series -- confirm this is intended for the lecture.
passengers = ts(airline$Passengers[-((nrow(airline)-11):nrow(airline))], start=c(1949, 1), frequency=12)
h=12
f1 = forecast(fit1, h=h)
f2 = forecast(fit2, h=h)
# First we need to build the new covariates:
new_X = cbind(trend = seq(length(passengers)+1, length(passengers)+h),
              season = seasonaldummy(passengers, h=h))
f3 = forecast(fit3, h=h, xreg=new_X)
plot(log(passengers), xlim=c(1959, 1962), ylim=c(5.75, 6.5))
points(1960 + (1:12)/12, log(full_series[((nrow(airline)-11):nrow(airline))]), pch=20)
points(1960 + (1:12)/12, f1$mean, pch=4, col='red')
points(1960 + (1:12)/12, f2$mean, pch=4, col='blue')
points(1960 + (1:12)/12, f3$mean, pch=4, col='darkorange')
# Computing model selection criteria for the two beer models:
CV(beerfit_hw)
CV(beerfit_w)
Auto = read_csv(paste0(path, 'auto.csv'))
base_model = lm(mpg~weight+horsepower+displacement+acceleration+
                  cylinders+year+I(year^2)+factor(origin), data=Auto)
step_model = step(base_model)
# How do our model diagnostics look?
plot(resid(step_model)~fitted(step_model))
# We should have diagnosed the base model first!
plot(resid(base_model)~fitted(base_model))
logged_base_model = lm(log(mpg)~weight+horsepower+displacement+acceleration+
                         cylinders+year+I(year^2)+factor(origin), data=Auto)
# These look better
plot(resid(logged_base_model)~fitted(base_model))
plot(resid(logged_base_model)~weight, data=Auto)
plot(resid(logged_base_model)~horsepower, data=Auto)
plot(resid(logged_base_model)~displacement, data=Auto)
# Log-log models make some sense here -- for ex: a 1% increase in HP gives us
# a beta2% decrease in efficiency, on average
logged_base_model2 = lm(log(mpg)~log(weight)+log(horsepower)+log(displacement)+
                          acceleration+cylinders+year+I(year^2)+factor(origin), data=Auto)
plot(resid(logged_base_model2)~fitted(base_model))
plot(resid(logged_base_model2)~weight, data=Auto)
plot(resid(logged_base_model2)~horsepower, data=Auto)
plot(resid(logged_base_model2)~displacement, data=Auto)
step_model_2 = step(logged_base_model2)
# What if we start with a small model and try making it bigger?
step(lm(log(mpg)~1, data=Auto), scope = log(mpg)~log(weight)+log(horsepower)+log(displacement)+
       acceleration+cylinders+year+I(year^2)+factor(origin))
# We get the same answer! This won't always be the case...
|
94780e2cc44c654c258affafa5fe3351a0330039
|
81f68c4397dec52c5129cbff33988669d7431f36
|
/man/transform_by.Rd
|
1ddaa7b902f404df92dc3d2f1ec2e92820fe18f9
|
[
"MIT"
] |
permissive
|
coolbutuseless/threed
|
b9bb73da1bbbc4bd6f0fb8d3b4f310d013326dfd
|
35d10852ef08f327011c82a586e85b1c39e64382
|
refs/heads/master
| 2020-04-05T07:47:24.436328
| 2018-12-02T07:41:17
| 2018-12-02T07:41:17
| 156,688,668
| 42
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 988
|
rd
|
transform_by.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transform-by.R
\name{transform_by}
\alias{transform_by}
\alias{transform_by.default}
\alias{transform_by.matrix}
\alias{transform_by.mesh3d}
\alias{transform_by.numeric}
\alias{transform_by.mesh3dlist}
\alias{transform_by_inverse}
\title{Transform the vertex coordinates by the transformation matrix}
\usage{
transform_by(x, transform_matrix)
\method{transform_by}{default}(x, transform_matrix)
\method{transform_by}{matrix}(x, transform_matrix)
\method{transform_by}{mesh3d}(x, transform_matrix)
\method{transform_by}{numeric}(x, transform_matrix)
\method{transform_by}{mesh3dlist}(x, transform_matrix)
transform_by_inverse(x, transform_matrix)
}
\arguments{
\item{x}{matrix, mesh3d object, mesh3dlist object, or numeric vector of length 3 or 4 (homogeneous coordinates)}
\item{transform_matrix}{4x4 transformation matrix}
}
\description{
Transform the vertex coordinates by the transformation matrix
}
|
32426486824f84068ad08bc4a142c1af26084756
|
ded480449adbc5c9607da6d843cfe5aff7a7f4f8
|
/Connected_Cluster.R
|
6d26d24248e98062acae4e51c9989d25e016db69
|
[
"MIT"
] |
permissive
|
yosiknorman/MEWS_2020_BOT
|
25c5c38c3d14b5829e0696e763aabd1400fa57d8
|
4ef8b53159ef258c30fb0c1945a8586c758d61fd
|
refs/heads/master
| 2022-04-15T20:49:11.978228
| 2020-04-15T08:06:36
| 2020-04-15T08:06:36
| 255,846,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,772
|
r
|
Connected_Cluster.R
|
#!/usr/bin/Rscript
# Load the two most recent cluster GeoJSON snapshots and prepare them for the
# spatial-intersection matching below.
library(rgdal)
library(rgeos)
library(doParallel)
# NOTE(review): rm(list = ls()) wipes the global environment at script start;
# avoid this pattern outside standalone batch scripts.
rm(list = ls())
lsc = list.files("data/Data_Cluster/", pattern = ".geojson")
# DN = current snapshot, D0 = the snapshot immediately before it.
# ("GANTI" appears to mean "change this" -- presumably a manual override point.)
DN = lsc[length(lsc)-1] # GANTI
D0 = lsc[which(lsc == DN)-1]
Connected_Cluster = readOGR(paste0("data/Data_Cluster/",DN))
Last_Cluster = readOGR(paste0("data/Data_Cluster/",D0))
# plot(Last_Cluster[1:10,])
# plot(Connected_Cluster[1:10,], border = "blue", add = T)
# PARA = 1
# For the PARA-th cluster polygon in the current snapshot (Connected_Cluster),
# find every cluster in the previous snapshot (Last_Cluster) it intersects.
#
# Args:
#   PARA: integer index of a polygon in the global Connected_Cluster layer.
# Returns: a data.frame with one row per intersecting previous cluster;
#   the first column (named after D0) holds the previous-cluster index and
#   the second (named after DN) holds PARA. Non-intersecting rows are dropped.
# NOTE(review): depends on the globals Last_Cluster, Connected_Cluster, D0, DN
# created earlier in this script.
# Fixes: preallocate the result list instead of growing it, and use seq_len()
# (safe when Last_Cluster is empty) instead of 1:length().
CEK_CONNECTED_CLUSTER = function(PARA){
  n_last <- length(Last_Cluster)
  Con_CL <- vector("list", n_last)
  for (i in seq_len(n_last)) {
    INI <- rgeos::gIntersection(Last_Cluster[i, ], Connected_Cluster[PARA, ])
    if (is.null(INI)) {
      Con_CL[[i]] <- c(0, PARA)  # 0 marks "no intersection"
    } else {
      Con_CL[[i]] <- c(i, PARA)
    }
  }
  Con_CL <- do.call("rbind", Con_CL)
  Con_CL <- data.frame(Con_CL, stringsAsFactors = FALSE)
  names(Con_CL) <- c(D0, DN)
  # Keep only rows that actually intersected.
  Con_CL[Con_CL[, 1] != 0, ]
}
# rm(PARA)
# CONNECTED_CLUSTER = list()
# strt<-Sys.time()
# for(i in 1:length(Connected_Cluster)){
#   CONNECTED_CLUSTER [[i]]= CEK_CONNECTED_CLUSTER(PARA = i)
# }
# print(Sys.time()-strt)
# Run the intersection check for every current-snapshot cluster in parallel
# across 4 workers, timing the run.
cl <- makeCluster(4) # how many worker ?
registerDoParallel(cl)
strt<-Sys.time()
CONNECTED_CLUSTER <- foreach(i=1:length(Connected_Cluster)) %dopar%
  CEK_CONNECTED_CLUSTER(PARA = i)
print(Sys.time()-strt)
stopCluster(cl)
# Stack the per-cluster data.frames into one link table.
CONNECTED_CLUSTER = do.call("rbind", CONNECTED_CLUSTER)
# Collapse the pairwise links: for each current-snapshot cluster (UCC), join
# every previous-snapshot cluster it touches into one "&"-separated string.
# Fix: vapply() replaces the original grow-with-c()-in-a-loop pattern -- it is
# preallocated, type-stable, and safe when UCC is empty.
UCC <- unique(CONNECTED_CLUSTER[, 2])
A10MINBEFORE <- vapply(
  UCC,
  function(ucc) {
    paste(CONNECTED_CLUSTER[, 1][which(CONNECTED_CLUSTER[, 2] == ucc)],
          collapse = "&")
  },
  character(1)
)
CON <- data.frame(A10MINBEFORE, UCC, stringsAsFactors = FALSE)
names(CON) <- c(D0, DN)
# system("mkdir data/ConnectedCluster")
# Timestamp suffix extracted from the current snapshot's file name.
SFX <- substr(DN, 14, nchar(DN) - 8)
save(CON, file = paste0("data/ConnectedCluster/ConnectedCluster_", SFX, ".bin"))
|
d52914a6f3aaf75a8d4d14ea5a4e9875cd35c1d5
|
faca9fb310e0f5d25206dd7fbd8bd059e6facefb
|
/R/imbs-package.R
|
98b3298503d2a3daef42cde6c7319fd5d57b2e6e
|
[] |
no_license
|
imbs-hl/imbs
|
505f534fb68cd2d8fc6a3847f36784245cab3111
|
2d3ec95b81ea84623f007c5364ab19789a85715c
|
refs/heads/master
| 2023-08-11T08:33:42.695944
| 2019-09-05T20:01:22
| 2019-09-05T20:01:22
| 66,840,758
| 1
| 1
| null | 2018-01-29T15:02:18
| 2016-08-29T12:13:16
|
R
|
UTF-8
|
R
| false
| false
| 51
|
r
|
imbs-package.R
|
#' imbs.
#'
#' @name imbs
#' @docType package
NULL
|
b3457adef31a0045821985fd64ca8ebcc478345a
|
67222f69dd1a5b5ced1d28df833a303924dbde35
|
/2. Algorithms on Datasets/Hypothesis Testing/Fantaloons_HypothesisTesting/Fantaloons_HypothesisTesting.R
|
069193ef2d9a951125fcd58f66ab82617a740664
|
[] |
no_license
|
mandarmakhi/DataScience-R-code
|
4f75906507e303fb9b438b99a5eab0a74bcc77f6
|
8c1728b306e53668b1814283da9936503e0554b9
|
refs/heads/master
| 2023-01-19T04:55:11.171455
| 2020-11-28T07:59:55
| 2020-11-28T07:59:55
| 263,417,867
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 903
|
r
|
Fantaloons_HypothesisTesting.R
|
#Hypothesis Testing
#Fantaloons Sales managers commented that % of males versus females walking in to the store differ based on day of the week. Analyze the data and determine whether there is evidence at 5 % significance level to support this hypothesis.
#h0: = (Proportion of male and female is same)
#ha: != (Proportion of male and female is not same)
fantaloons <- read.csv("C:/Users/Mandar/Desktop/data/assignments/Hypothesis Testing/Faltoons.csv")
View(fantaloons)
summary(fantaloons)
fanta <- as.data.frame(as.factor(fantaloons$Weekdays), as.factor(fantaloons$Weekend))
fanta1 <- fantaloons
fanta1$Weekdays <- as.factor(fanta1$Weekdays)
fanta1$Weekend<- as.factor(fanta1$Weekend)
summary(fanta1)
fanta2 <- data.frame("Weekdays"=c(287,113), "Weekend" = c(233,167))
row.names(fanta2) <- c("Female","Male")
#now lets use the chisq.test() to test the hypothesis on fanta2
chisq.test(fanta2)
|
48cc3c9923166522b4441ff6d61bbacf5fa325f8
|
c40122631106d1813fa9ab4580c05fdd9524c131
|
/matching_test_2015.R
|
cb7a0702ae1bf79a692f73966b7a5dfc8bd4dff3
|
[] |
no_license
|
carpio-ucv/address_matching
|
4e1c7f7d0d1079ab3cf052f7101f7787557f9cb8
|
9b8eb1e7ca9b8158ba822696fbc865a812c78a49
|
refs/heads/master
| 2020-04-05T12:08:54.653914
| 2018-01-19T11:32:25
| 2018-01-19T11:32:25
| 95,222,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,880
|
r
|
matching_test_2015.R
|
### Relevant libraries
# Relevant Libraries
library("data.table")
library("dplyr")
library("reshape2")
library("ggplot2")
library("stringdist")
library("stringr")
library("tm")
library("fuzzyjoin")
# Setting up data
# NOTE(review): setwd() with a hard-coded network path makes this script
# non-portable; prefer project-relative paths or file.path() arguments.
setwd("//dscs/cscom/Policy/Corp Information/Olypmic Hosts NKM (secure)/Community Mapping 2017/Juans_tests/2015")
gp_raw<-fread("GP_Register_2015.csv", na.strings=c(""))
gazet_raw<-fread("UNI2LIVEGAZVIEW.csv", na.strings=c(""))
blpu_raw<-fread("BLPU.csv", na.strings=c(""))
#elect_raw<-fread("ElectoralRegister2017.csv", na.strings=c(""))
# Creating blpu clean data set. # 78,595
# Keep only live records (LOGICAL_ST == 1) and the columns needed downstream.
blpu_tidy<- blpu_raw %>% filter(LOGICAL_ST==1) %>%
  # mutate(filt=grepl("PP",BLPU_CLASS)) %>% # Filter Residential UPRN
  # filter(filt=="FALSE") %>%
  select(UPRN, BLPU_CLASS,BLPU_CLA_1, MAP_X, MAP_Y, WARD_CODE) #%>%
  #filter(grepl("RH01|RH01|RH02|RH03|RI01|RI02|RI03",BLPU_CLASS))#NEW CODE
  #filter(grepl("PP",BLPU_CLASS))#NEW CODE
nrow(blpu_tidy)
# Creating Gazeteer clean data set. # 78,841
# Build an address-matching key (ID): house/premises number + any PAO/SAO
# suffix + postcode, with spaces and dashes stripped.
gazet_tidy<- gazet_raw %>% filter(LOGICAL_ST==1) %>%
  select(UPRN, ADDRESS, PAO_START_, PAO_START1,SAO_START1, POSTCODE,
         MAP_EAST, MAP_NORTH) %>%
  filter(POSTCODE!="") %>% # Removing gazeteer data with no post codes
  mutate(adr.NO=sub("\\Essex.*","",ADDRESS)) %>% #eliminate anything from "Essex"
  mutate(No=gsub("[^0-9]","",adr.NO)) %>% # Extract the house number (and premises)
  mutate(sfx1= ifelse(is.na(PAO_START1),"",PAO_START1)) %>% # Extract any suffix from PA
  mutate(sfx2= ifelse(is.na(SAO_START1),"",SAO_START1)) %>% # Extract any suffix from PA
  mutate(ID= paste0(No,sfx1,sfx2,POSTCODE)) %>% # create a unique identifier based on house number and postcode
  mutate(ID=gsub(" ","",ID))%>% # remove empty spaces
  mutate(ID=gsub("-","",ID))# remove dashes
nrow(gazet_tidy)
# Creating Final Gazeeter dataset # 74,852 (uno menos) - 74,553 Unique
# Restrict the gazetteer to UPRNs present in the cleaned BLPU data.
gazet_df <- semi_join(gazet_tidy,blpu_tidy, by="UPRN") #%>%
  #distinct(ID)
nrow(gazet_df)
# Creating GPs clean data set. #207,755 (uniques ID=67056) # new unique
# Same ID construction for the GP register: number from PREMISES plus a
# leading number from STREET, then postcode, de-duplicated on ID.
gp_unique<- gp_raw %>% select(UPRN_match,FORENAME, SURNAME, PREMISES, STREET, POSTCODE) %>%
  mutate(PREMISES=replace(PREMISES,which(is.na(PREMISES)),""))%>%
  mutate(premises.no=gsub("[^0-9]","",PREMISES)) %>%
  mutate(str.no=gsub(" .*$","",STREET)) %>%
  mutate(str.no=ifelse(grepl("[0-99]",str.no)==TRUE,str.no,"")) %>%
  mutate(No= paste0(premises.no, str.no)) %>%
  mutate(ID= paste0(No, POSTCODE)) %>%
  mutate(ID=gsub(" ","",ID)) %>% # remove empty spaces
  mutate(ID=gsub("-","",ID)) %>% # remove dashes
  distinct(ID)
nrow(gp_unique)
gp_tidy<- gp_raw %>% select(UPRN_match,FORENAME, SURNAME, PREMISES, STREET, POSTCODE) %>%
mutate(PREMISES=replace(PREMISES,which(is.na(PREMISES)),""))%>%
mutate(premises.no=gsub("[^0-9]","",PREMISES)) %>%
mutate(str.no=gsub(" .*$","",STREET)) %>%
mutate(str.no=ifelse(grepl("[0-99]",str.no)==TRUE,str.no,"")) %>%
mutate(No= paste0(premises.no, str.no)) %>%
mutate(ID= paste0(No, POSTCODE)) %>%
mutate(ID=gsub(" ","",ID)) %>%
mutate(ID=gsub("-","",ID)) # remove dashes
nrow(gp_tidy)
##############################
## MERGING GPs and GAZETEER ##
##############################
# Merging datasets of UNIQUE ID's
#################################
df<-semi_join(gp_unique,gazet_df, by="ID")
nrow(df) # 64,195 new= 64,283
no.match<-anti_join(gp_unique, gazet_df, by="ID")
nrow(no.match)# 2,818 new=2,730
# Accuracy
paste0(round(100-nrow(no.match)/nrow(df)*100,3),"%")# 96.61% (new=95.753%)
# Merging back datasets
#################################
df2<-inner_join(df, gazet_df, by="ID")
nrow(df2) # 64,464 new=64,562
df_final<-inner_join(df2,gp_tidy, by="ID")
nrow(df_final) # 203,710 new=204,145
# Accuracy UPRN
#################################
### Does not include Phil's non-matches
audit<-df_final %>% filter(!is.na(UPRN_match)) %>%
filter(UPRN_match!="?") %>%
mutate(check= ifelse(UPRN==UPRN_match,1,0))
perc<-audit %>% group_by(check) %>% count()
paste0(round(perc[2,2]/(perc[2,2]+perc[1,2])*100,3),"%") #98.374% new="98.324%"
wrongly.match<- audit %>% filter(check==0)
# Accuracy TOTAL RECORS FROM GP'S
#################################
paste0(round(nrow(df_final) / nrow(gp_tidy),5)*100,"%") #98.053% new= 98.262%%
# Visual inspection of no matches
exp<-audit %>% filter(check==0) %>% select(PREMISES, STREET, ADDRESS)
head(exp,200)
exp2<-audit %>% filter(check==0) %>% select(UPRN, UPRN_match)
head(exp2,200)
# NO MATCHES
names(gp_tidy)[1]<-"UPRN"
gp.fil <- gp_tidy %>% filter(!is.na(UPRN)) %>% # filtering the ones no UPRN matched by Phil
filter(UPRN!="?")
no<-anti_join(gp.fil, df_final, by="UPRN")
nrow(no)# 4,224 new=3,942
write.csv(no,"no_match.csv")
write.csv(wrongly.match,"wrong_match.csv")
# E.G. 110 CHELMER CRESCENT matched with parent shel by Phil (tHERE IS 110A, 110B)...
# 36 ESSEX ROAD "" "" (IT HAS GROUND FLOOR AND FIRST FLOOR)
# 177 Howard Road
# 59 KING EDWARDS ROAD IG11 7TS (GAZEETER POST CODE FINISHES IN B).
# 16 CROSSNESS ROAD- PEOPLE USED PREMISE NUMBER(16) BUT NOT PROPERTY NUMBER (22)
## CODES
## grepl("^[0-9].*$","hjg")
gazet_df %>% filter(grepl("100057382",UPRN))#NEW CODE
names(df_final)[14]<-"UPRN"
filter(grepl("Faircross Care|Strathfield Gardens|Abbey Care|Westerley Lodge|
Chestnut Court Care|Keith Lauder House|Lisnaveane Lodge|Woodlands Rainham|
Turning Point House| Parkview Nursing Home|Outlook Care|Chaseview Care Centre|
Sahara Parkside Apartments|Mayesbrook Home For The Aged|Cloud House|Outlook
Care|Lisaveane House|Gascogine Road Care Home|Bennetts Castle Care Home|
Kallar Lodge|Cherry Orchard|Richard Ryan Place|Hanbury Court",ADDRESS)) #NEW CODE
|
d8815e0904b1ccd7a2fba6658566d63a27f85968
|
0d2190a6efddb7167dee3569820724bfeed0e89c
|
/R3.0.2 Package Creation/PBTools/man/getDefaultGenesContrast.Rd
|
d3e8636abb5134f422cff85c8e6dc121b9d228fc
|
[] |
no_license
|
djnpisano/RScriptLibrary
|
6e186f33458396aba9f4151bfee0a4517d233ae6
|
09ae2ac1824dfeeca8cdea62130f3c6d30cb492a
|
refs/heads/master
| 2020-12-27T10:02:05.719000
| 2015-05-19T08:34:19
| 2015-05-19T08:34:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
rd
|
getDefaultGenesContrast.Rd
|
\name{getDefaultGenesContrast}
\alias{getDefaultGenesContrast}
\title{getDefaultGenesContrast}
\description{getDefaultGenesContrast}
\usage{
getDefaultGenesContrast(geneNumber = 2)
}
\arguments{
\item{geneNumber}{integer; the number of genes for which the default contrast is generated. Defaults to 2.}
}
\author{
Mao Qin
}
\keyword{misc}
|
aa64447ea611c85880214353be6a371ddd609e44
|
54ff210f8313184137e68f4a337c1b0bfd74af56
|
/man/db_get_info.Rd
|
33ca23974ba22d9c6104066434fcf16f2f912d2d
|
[
"MIT"
] |
permissive
|
petermeissner/db6
|
fdcf3fac8ffe29d36bf983ba14b67f406a86e352
|
fbff81c8c7a319efc8d84127c18d8952d4aa39ac
|
refs/heads/master
| 2020-03-27T01:54:39.952645
| 2018-10-31T15:34:32
| 2018-10-31T15:34:32
| 145,753,015
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 495
|
rd
|
db_get_info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db_get_info.R
\name{db_get_info}
\alias{db_get_info}
\alias{db_get_info_SQLiteConnection}
\alias{db_get_info_AnsiConnection}
\alias{db_get_info_PostgreSQLConnection}
\title{db_get_info}
\usage{
db_get_info(con)
db_get_info_SQLiteConnection(con)
db_get_info_AnsiConnection(con)
db_get_info_PostgreSQLConnection(con)
}
\arguments{
\item{con}{a connection object}
}
\description{
db_get_info
}
|
6a236ffd079fdf7d9840527cbb4caea6159978fd
|
35de14603463a45028bd2aca76fa336c41186577
|
/man/get_os.Rd
|
856557f42edb85570676638cd15ec21185b1abef
|
[
"MIT"
] |
permissive
|
UKDRI/echolocatoR
|
e3cf1d65cc7113d02b2403960d6793b9249892de
|
0ccf40d2f126f755074e731f82386e4e01d6f6bb
|
refs/heads/master
| 2023-07-14T21:55:27.825635
| 2021-08-28T17:02:33
| 2021-08-28T17:02:33
| 416,442,683
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 561
|
rd
|
get_os.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_os.R
\name{get_os}
\alias{get_os}
\title{Identify current operating system (OS).}
\usage{
get_os()
}
\description{
Identify current operating system (OS).
}
\examples{
get_os()
}
\seealso{
Other general:
\code{\link{createDT_html}()},
\code{\link{createDT}()},
\code{\link{dt.replace}()},
\code{\link{example_fullSS}()},
\code{\link{fillNA_CS_PP}()},
\code{\link{get_sample_size}()},
\code{\link{startup_image}()},
\code{\link{tryFunc}()}
}
\concept{general}
\keyword{internal}
|
86b09fe1335d7b9d946892cd727f885ab32e4792
|
0e30ad16e659bb06953ba1256341e228f7cbebdc
|
/man/BNG.Rd
|
6fc98357cc72ac3020303a55f7c0ceb7cabb1917
|
[] |
no_license
|
davesteps/randomFuns
|
15ab512645235818427c88f1cab78a35d65c6abd
|
4b70216b9cb084cac1a061d3ce607eb6be506b01
|
refs/heads/master
| 2021-01-22T03:39:39.356916
| 2016-03-11T22:41:20
| 2016-03-11T22:41:20
| 27,593,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 186
|
rd
|
BNG.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial.R
\name{BNG}
\alias{BNG}
\title{BNG}
\usage{
BNG(x)
}
\arguments{
\item{x}{Input object (undocumented in source; presumably coordinates to be expressed in the British National Grid — confirm against \code{R/spatial.R}).}
}
\description{
BNG
}
|
458a645d2cb92a608b721c1a0637dfeacfb1d9c2
|
b4dd3dd232c8cc7692f44f58af1d73ca0d5405f0
|
/plot3.R
|
aabb4e92b19ff5294f75614f317a02b1f3e4cca1
|
[] |
no_license
|
kriseuna/ExData_Plotting1
|
7bb29076604e4256ca071ef8947aaf0c7525dae1
|
c309e82b14843141ebf53e0798453f0e670e6e3d
|
refs/heads/master
| 2021-07-12T13:28:05.506019
| 2017-10-11T16:45:56
| 2017-10-11T16:45:56
| 106,581,883
| 0
| 0
| null | 2017-10-11T16:44:24
| 2017-10-11T16:44:23
| null |
UTF-8
|
R
| false
| false
| 749
|
r
|
plot3.R
|
## Plot 3: energy sub-metering for the two-day window 1-2 Feb 2007.
## Reads the raw household power-consumption file, keeps the two target
## dates, and writes a three-series line chart to plot3.png.

## get data
hpc <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
hpc <- subset(hpc, Date %in% c("1/2/2007", "2/2/2007"))
hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")

## plot in png file
png(filename = "plot3.png", width = 480, height = 480, units = "px")
with(hpc, {
  plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_2, type = "l", col = "red")
  lines(DateTime, Sub_metering_3, type = "l", col = "blue")
})
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()

## clean data
rm(hpc)
|
c547a0893bf99b1ee076a4e4d8e1e007744ebf76
|
d9804455d392a3bd1faeffbca27b38360d00410b
|
/Data_preprocessing/preprocess_merged.R
|
0abaf2969a067f28888599dbaf1bf5b6b75e73f5
|
[] |
no_license
|
ik-karlsson/EWAScognition
|
8410f3262855f0ba7c76c5bf7ff0889fe93a692c
|
fb5030990202ec9b40aa3e63e139836a0b930ab4
|
refs/heads/main
| 2023-04-02T01:14:01.969630
| 2021-03-27T12:42:39
| 2021-03-27T12:42:39
| 351,799,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,631
|
r
|
preprocess_merged.R
|
## ----------------------------------------------
## Filename: preprocess_merged.R
## Study: EPIC_blood
## Author: Yunzhang Wang
## Date: 2017-09-28
## Updated:
## Purpose: To preprocess merged EPIC and 450k chip data
## Notes: depends on preprocess_QC.R, preprocess_norm.R preprocess_cellbatch.R
## -----------------------------------------------
## Data used:
## * raw RGset
## -----------------------------------------------
## Output:
## * beta-values
## -----------------------------------------------
## OP: R 3.4.1
## -----------------------------------------------*/
########## settings
# Cluster-specific paths; wd.dir receives all outputs of this run.
wd.dir <- "/proj/b2016305/INBOX/PE-1124/PE-1124_170823_ResultReport/output/combine/sample1469_p005_dasen"
if(!dir.exists(wd.dir)) dir.create(wd.dir)
out.dir <- "/proj/b2016305/INBOX/PE-1124/PE-1124_170823_ResultReport/output"
in.dir <- "/proj/b2016305/INBOX/PE-1124/PE-1124_170823_ResultReport/output/combine"
# NOTE(review): require() returns FALSE rather than erroring when a package
# is missing; library() would fail fast here.
require(lumi)
require(minfi)
require(IlluminaHumanMethylationEPICmanifest)
require(IlluminaHumanMethylationEPICanno.ilm10b2.hg19)
# Helper pipelines: QC, normalization, and cell-count/batch adjustment.
source("/home/yunzhang/epic_blood/src/preprocess_QC.R")
source("/home/yunzhang/epic_blood/src/preprocess_norm.R")
source("/home/yunzhang/epic_blood/src/preprocess_cellbatch.R")
# Phenotype sheet; `name` rebuilds the Slide_Position sample identifier.
phe <- load_data(uppmax=T, pheno=T, greedy.cut=F)
phe <- phe$pheno
phe$name <- paste0(phe$SLIDE, "_", phe$POSITION)
########## load data
# Both load() calls bind an object named `RGset`; it is renamed immediately
# after each load so the EPIC and 450k sets can be combined.
load(file.path(out.dir, "RGset_raw.Rdata"))
RGset_epic <- RGset
load(file.path(in.dir, "RGset_raw.Rdata"))
RGset_450 <- RGset
RGset_cb <- combineArrays(RGset_epic, RGset_450)
rm(RGset_epic)
rm(RGset_450)
gc()
########## QC
# Probe/sample filtering; the rmsample list records manually excluded arrays.
qced <- preprocess_QC(
    ifloadrgset = F,
    RGset_raw = RGset_cb,
    dpval = 0.05,
    rmsexProbe=T, # remove probes on sex chromosomes or not
    rmsnpProbe=T, # remove probes with a snp or not
    rmoutliers=T, # remove outliers in PC1
    keepcpg = NULL,
    rmsample = c("201496850061_R06C01", "201496860035_R01C01"), # 201496850093_R07C01 201496860021_R03C01 201503670171_R01C01 low quality
    # 201496850061_R06C01 201496860035_R01C01 bad distribution
    keepsample = as.vector(phe$name),
    # keepsample = colnames(RGset_cb)[6:10],
    saverawbeta= T,
    savedata=T,
    .out.dir=wd.dir)
# Estimated blood cell-type proportions per sample (used for adjustment below).
cellcounts <- estimate_cellcounts(qced, .out.dir=wd.dir)
dim(cellcounts)
save(cellcounts, file=file.path(wd.dir, TimeStampMe("cellcounts.Rdata")))
# Background correction + dasen normalization.
normed <- preprocess_norm(
    RGset_qc=qced,
    bgcorrect=T,
    savedata=F,
    norm.method="dasen",
    .out.dir=wd.dir
    )
# `bt` = slide identifier per sample, used as the batch variable below.
bt <- normed@colData@listData$Slide
betas <- getBeta(normed) # CpG, samples
save(betas, file=file.path(wd.dir, TimeStampMe("betas_norm.Rdata")))
rm(qced)
rm(normed)
gc()
# Sanity check: betas columns and cellcounts rows must be the same samples.
summary(colnames(betas) == rownames(cellcounts))
# Cell-type adjustment (on beta scale), then convert to M-values.
betas_ct <- adjust_cellcounts(.Beta=t(betas), celltypes=cellcounts)
betas_ct <- t(betas_ct)
mval_ct <- beta2m(betas_ct)
save(betas_ct, file=file.path(wd.dir, TimeStampMe("betas_ct.Rdata")))
rm(betas_ct)
gc()
# Batch (slide) adjustment on the cell-type-adjusted M-values.
mval_ct_batch <- adjust_batch(edata=mval_ct, batches=bt, .out.dir=wd.dir, parp=T, name="_ct_batch")
betas_ct_batch <- m2beta(mval_ct_batch)
save(mval_ct_batch, file=file.path(wd.dir, TimeStampMe("mval_ct_batch.Rdata")))
save(betas_ct_batch, file=file.path(wd.dir, TimeStampMe("betas_ct_batch.Rdata")))
rm(mval_ct_batch, betas_ct_batch)
gc()
# Batch adjustment WITHOUT cell-type correction, for comparison.
mval <- beta2m(betas)
mval_batch <- adjust_batch(edata=mval, batches=bt, .out.dir=wd.dir, parp=T, name="_batch")
betas_batch <- m2beta(mval_batch)
save(mval_batch, file=file.path(wd.dir, TimeStampMe("mval_batch.Rdata")))
save(betas_batch, file=file.path(wd.dir, TimeStampMe("betas_batch.Rdata")))
|
d0ddfde453f5cd847108d17f9f0ed9928f1a3f8d
|
69ef4dfc59485960e49709acec093fa52ba476cf
|
/components/tools_choice_comp.R
|
07b0c99a41201338e5f845a88698d06e4caafc3e
|
[] |
no_license
|
EBlonkowski/tshandy
|
9ea2c4b8e31be4c6e06a88f02dc80df638c42986
|
3f703933fd6dd61558ad5cf0f81ef06d86d8a8b3
|
refs/heads/master
| 2021-01-12T09:08:29.503323
| 2016-12-22T01:26:20
| 2016-12-22T01:26:20
| 76,772,463
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 788
|
r
|
tools_choice_comp.R
|
## Renders the visualization-tool dropdown for the comparison view; the
## selected value (input$current_tool) is the id of the active tool module.
output$tools_choice_comp <- renderUI({
  tool_choices <- list(
    "Time-serie"     = "time_serie_comp",
    "Profiles"       = "profile_comp",
    "Versus plot"    = "versus_comp",
    "Categorical"    = "categorical_comp",
    "App state info" = "app_state_comp"
  )
  # Tools not yet wired up (kept from the original for reference):
  #   'Auto-correlation' = 'acf_comp', 'Cross-correlation', 'Versus plot',
  #   'Histogram', 'Power spectrum', '3D plot', 'peak plot'
  div(
    selectInput("current_tool",
                label = "Visualization tools:",
                choices = tool_choices,
                selected = 1)
  )
})
|
f5b457115c9aba1b105b46268b25cc361eda2f99
|
bb0fb51530335b10a2e64557fb6c950be61b7968
|
/Rscripts/10.5.WilcoxinTest_FilteredData.R
|
3e9c10d4fac5c3040333c8e9ac5811c627ff5c42
|
[] |
no_license
|
kahot/HCV_project
|
bd0450d07c84906b13b3cf6b442de68cdc7f3e44
|
0047c945f9f1522ebbda2b1cb3adf7742ce01a9e
|
refs/heads/master
| 2022-03-24T06:01:24.189668
| 2019-12-16T17:13:03
| 2019-12-16T17:13:03
| 187,297,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,846
|
r
|
10.5.WilcoxinTest_FilteredData.R
|
# Wilcoxon tests on filtered mutation-frequency data (transitions first).
library(tidyverse)
library(zoo)
library(purrr)
source("Rscripts/baseRscript.R")
HCVFiles3<-list.files("Output1A/Overview3/",pattern="overview3.csv")
# #### read the filtered MF files from 10.1.2
TsMutFreq <-read.csv("Output1A/MutFreq.filtered/Filtered.Ts.Q35.csv",stringsAsFactors = F,row.names=1)
Tv1.MutFreq<-read.csv("Output1A/MutFreq.filtered/Filtered.Tv1.MutFreq.Q35.csv",stringsAsFactors = F,row.names=1)
Tv2.MutFreq<-read.csv("Output1A/MutFreq.filtered/Filtered.Tv2.MutFreq.Q35.csv",stringsAsFactors = F,row.names=1)
Tvs.MutFreq<-read.csv("Output1A/MutFreq.filtered/Filtered.Tvs.MutFreq.Q35.csv",stringsAsFactors = F,row.names=1)
AllMutFreq <-read.csv("Output1A/MutFreq.filtered/Filtered.AllMutFreq.Q35.csv", stringsAsFactors = F,row.names=1)
# Collect the five frequency tables in a named list for downstream access.
mf.files<-list()
mf.files[[1]]<-TsMutFreq
mf.files[[2]]<-Tv1.MutFreq
mf.files[[3]]<-Tv2.MutFreq
mf.files[[4]]<-Tvs.MutFreq
mf.files[[5]]<-AllMutFreq
names(mf.files)[1]<-"TransMutFreq"
names(mf.files)[2]<-"Tv1.MutFreq"
names(mf.files)[3]<-"Tv2.MutFreq"
names(mf.files)[4]<-"Tvs.MutFreq"
names(mf.files)[5]<-"AllMutFreq"
s<-length(HCVFiles3)
#### 1) Transition Mutations (mean)
# 1.1) Summary
# Restrict to coding region (pos >= 342) and summarize mean frequency by
# mutation type (synonymous / nonsynonymous / nonsense). Inline comments
# record the values observed on the original run.
Ts<-mf.files[[1]]
Ts<-Ts[Ts$pos>=342, ]
mean(Ts$mean[Ts$Type=="syn"]) #0.008093379
mean(Ts$mean[Ts$Type=="nonsyn"]) #0.003348242
mean(Ts$mean[Ts$Type=="stop"]) #0.002022381
table(Ts$Type)
#nonsyn stop syn
# 5193 220 2545
std.error(Ts$mean[Ts$Type=="syn"]) #9.066418e-05
std.error(Ts$mean[Ts$Type=="nonsyn"]) # 2.768315e-05
std.error(Ts$mean[Ts$Type=="stop"]) # 9.204526e-05
# One-sided Mann-Whitney tests: syn > nonsyn, nonsyn > stop.
r1<-wilcox.test(Ts$mean[Ts$Type=="syn"], Ts$mean[Ts$Type=="nonsyn"], alternative = "greater", paired = FALSE)
r2<-wilcox.test(Ts$mean[Ts$Type=="nonsyn"], Ts$mean[Ts$Type=="stop"], alternative = "greater", paired = FALSE)
r1[[3]] #P=0
r2[[3]] #P= 1.933446e-41
#CpG creating vs Non-CpG creating
# Only A/T reference sites can create a CpG by transition; test whether
# non-CpG-creating sites have higher frequency than CpG-creating ones.
T2<-Ts[Ts$ref=="a"|Ts$ref=="t",]
r3<-wilcox.test(T2$mean[T2$Type=="syn"&T2$makesCpG==0], T2$mean[T2$Type=="syn"&T2$makesCpG==1], alternative = "greater", paired = FALSE)
r4<-wilcox.test(T2$mean[T2$Type=="nonsyn"&T2$makesCpG==0], T2$mean[T2$Type=="nonsyn"&T2$makesCpG==1], alternative = "greater", paired = FALSE)
r3[[3]] #P=0.005231179
r4[[3]] #P=3.893716e-12
##################
# 1.2) run Wilcoxin Test on means based on mutation types
# Per-nucleotide summary table and one-sided tests (CpG vs non-CpG for A/T;
# syn vs nonsyn for C/G, which cannot create CpG by transition).
WilcoxTest.results.nt<-data.frame(matrix(ncol=3,nrow=6))
colnames(WilcoxTest.results.nt)<-c("nt","test","P.value")
# 1) transition Mutations, using 'mean'
# NOTE(review): `mf_files` (underscore) is not defined above — the list built
# earlier is `mf.files`. Confirm mf_files comes from baseRscript.R, or this
# line errors with "object 'mf_files' not found".
dat<-mf_files[[1]]
#dat<-TransMutFreq_filtered
dat<-dat[dat$pos>=342, ]
which(is.na(dat$freq.Ts))
# Hard-coded trim to drop trailing rows with NA frequency (row count from
# the original run; re-check if the input changes).
dat<-dat[1:8236, ]#7895
ty<-which(colnames(dat)=="Type");fname="Transition"
# Accumulators: overall, non-CpG-creating and CpG-creating mean/SE per
# (site type, reference nucleotide).
m<-data.frame()
se<-data.frame()
m_CpG<-data.frame()
se_CpG<-data.frame()
m_nonCpG<-data.frame()
se_nonCpG<-data.frame()
table(dat$ref)
#a c g t
#1556 2431 2310 1661
for (typeofsite in c("syn", "nonsyn","stop")){
    for (wtnt in c("a", "t", "c", "g")){
        mutrate<- dat$mean[dat[,ty]==typeofsite & dat$ref==wtnt]
        m[typeofsite,wtnt]<-mean(mutrate[!is.na(mutrate)])
        se[typeofsite,wtnt]<-std.error(mutrate[!is.na(mutrate)])
        m_NonCpG<-dat$mean[dat$Type==typeofsite & dat$ref==wtnt & dat$makesCpG==0]
        m_nonCpG[typeofsite,wtnt]<-mean(m_NonCpG[!is.na(m_NonCpG)])
        se_nonCpG[typeofsite,wtnt]<-std.error(m_NonCpG[!is.na(m_NonCpG)])
        mu_CpG<-dat$mean[dat$Type==typeofsite & dat$ref==wtnt & dat$makesCpG==1]
        m_CpG[typeofsite,wtnt]<-mean(mu_CpG[!is.na(mu_CpG)])
        se_CpG[typeofsite,wtnt]<-std.error(mu_CpG[!is.na(mu_CpG)])
        # Stash the raw vectors as e.g. syn_a / syn_a_noncpg / syn_a_cpg so
        # the Wilcoxon loop below can retrieve them with get().
        vectorname<-paste0(typeofsite,"_",wtnt)
        assign(vectorname, mutrate)
        vname1<<-paste0(typeofsite,"_",wtnt,"_noncpg")
        assign(vname1, m_NonCpG)
        vname2<<-paste0(typeofsite,"_",wtnt,"_cpg")
        assign(vname2, mu_CpG)
    }
}
#rownames(m)<-c("syn","nonsyn","stop")
rownames(se)<-c("syn_se","nonsyn_se","stop_se")
rownames(m_nonCpG)<-c("syn_noncpg","nonsyn_noncpg","stop_noncpg")
rownames(se_nonCpG)<-c("syn_noncpg_se","nonsyn_noncpg_se","stop_noncpg_se")
rownames(m_CpG)<-c("syn_cpg","nonsyn_cpg","stop_cpg")
rownames(se_CpG)<-c("syn_cpg_se","nonsyn_cpg_se","stop_cpg_se")
MFbyType<-rbind(m,se,m_nonCpG,se_nonCpG,m_CpG,se_CpG)
MFbyType2<-t(MFbyType)
MFbyType2<-data.frame(MFbyType2)
write.csv(MFbyType2,"Output1A/SummaryStats/TransitionMF_byNt_byType_mean.csv")
#run Wilcoxin Test
# A/T: test CpG-creating < non-CpG-creating; C/G: test syn > nonsyn.
for (i in c("a","t","c","g")) {
    if (i=="a"|i=="t"){
        syncpg<-get(paste0("syn_",i,"_cpg"))
        synnoncpg<-get(paste0("syn_",i,"_noncpg"))
        nonsyncpg<-get(paste0("nonsyn_",i,"_cpg"))
        nonsynnoncpg<-get(paste0("nonsyn_",i,"_noncpg"))
        if (i=="a"){
            result1<-wilcox.test(syncpg, synnoncpg, alternative = "less", paired = FALSE)
            result2<-wilcox.test(nonsyncpg,nonsynnoncpg,alternative = "less", paired = FALSE)
            for (r in 1:2){
                result<-get(paste0('result',r))
                WilcoxTest.results.nt$nt[r]<-i
                WilcoxTest.results.nt$test[r]<-result[[7]]
                WilcoxTest.results.nt$P.value[r]<-result[[3]]}
        }
        if (i=="t"){
            result3<-wilcox.test(syncpg, synnoncpg, alternative = "less", paired = FALSE)
            result4<-wilcox.test(nonsyncpg,nonsynnoncpg,alternative = "less", paired = FALSE)
            for (r in 3:4){
                result<-get(paste0('result',r))
                WilcoxTest.results.nt$nt[r]<-i
                WilcoxTest.results.nt$test[r]<-result[[7]]
                WilcoxTest.results.nt$P.value[r]<-result[[3]]}
        }
    }
    else { synnoncpg<-get(paste0("syn_",i,"_noncpg"))
        nonsynnoncpg<-get(paste0("nonsyn_",i,"_noncpg"))
        if (i =="c") {
            result5<-wilcox.test(synnoncpg,nonsynnoncpg, alternative = "greater", paired = FALSE)
            WilcoxTest.results.nt$nt[5]<-i
            WilcoxTest.results.nt$test[5]<-result5[[7]]
            WilcoxTest.results.nt$P.value[5]<-result5[[3]] }
        if (i =="g") {
            result6<-wilcox.test(synnoncpg,nonsynnoncpg, alternative = "greater", paired = FALSE)
            WilcoxTest.results.nt$nt[6]<-i
            WilcoxTest.results.nt$test[6]<-result6[[7]]
            WilcoxTest.results.nt$P.value[6]<-result6[[3]] }
    }
}
write.csv(WilcoxTest.results.nt,paste0("Output1A/SummaryStats/WilcoxTestResults_Ts_eachNT_mean.csv"))
# Overall mean frequency across all mutations and across transversions only.
al<-mf.files[[5]]
mean(al$mean) #0.005771959
tvs<-mf.files[[4]]
tvs<-tvs[tvs$pos>=342,]
mean(tvs$mean) #0.0009650088
# 2) Transversion with mean ###
## 2.1) Summary Stats
# Pool the two transversion classes (tv1, tv2), restrict to coding region,
# and summarize/test by mutation type, as done for transitions above.
Tv1<-mf.files[[2]]
Tv1<-Tv1[Tv1$pos>=342, ]
Tv2<-mf.files[[3]]
Tv2<-Tv2[Tv2$pos>=342, ]
All<-c(Tv1$mean,Tv2$mean) #0.0004825044
Syn<-c(Tv1$mean[Tv1$Type.tv1=="syn"],Tv2$mean[Tv2$Type.tv2=="syn"])
Nonsyn <- c(Tv1$mean[Tv1$Type.tv1=="nonsyn"],Tv2$mean[Tv2$Type.tv2=="nonsyn"])
Stop <- c(Tv1$mean[Tv1$Type.tv1=="stop"],Tv2$mean[Tv2$Type.tv2=="stop"])
mean(All) #0.0004825044
mean(Syn) #0.0007535948
mean(Nonsyn) #0.0004099817
mean(Stop) #0.0005612278
c(length(Nonsyn),length(Stop),length(Syn))
#nonsyn stop syn
# 12184 625 3107
std.error(Syn) # 1.508687e-05
std.error(Nonsyn) # 4.921358e-06
std.error(Stop) # 1.833373e-05
# One-sided tests; note nonsyn > stop does NOT hold for transversions
# (P = 1 for "greater"), hence the extra "less" check below.
r1<-wilcox.test(Syn, Nonsyn, alternative = "greater", paired = FALSE)
r2<-wilcox.test(Nonsyn, Stop, alternative = "greater", paired = FALSE)
wilcox.test(Nonsyn, Stop, alternative = "less", paired = FALSE)
r1[[3]] #2.811567e-175
r2[[3]] #1
# CpG-creating vs non-CpG-creating, pooled across tv1/tv2.
Syncpg<-c(Tv1$mean[Tv1$Type.tv1=="syn"&Tv1$makesCpG.tv1==1],Tv2$mean[Tv2$Type.tv2=="syn"&Tv2$makesCpG.tv2==1])
SynNoncpg<-c(Tv1$mean[Tv1$Type.tv1=="syn"&Tv1$makesCpG.tv1==0],Tv2$mean[Tv2$Type.tv2=="syn"&Tv2$makesCpG.tv2==0])
Nonsyncpg <- c(Tv1$mean[Tv1$Type.tv1=="nonsyn"&Tv1$makesCpG.tv1==1],Tv2$mean[Tv2$Type.tv2=="nonsyn"&Tv2$makesCpG.tv2==1])
NNcpg <- c(Tv1$mean[Tv1$Type.tv1=="nonsyn"&Tv1$makesCpG.tv1==0],Tv2$mean[Tv2$Type.tv2=="nonsyn"&Tv2$makesCpG.tv2==0])
r3<-wilcox.test(SynNoncpg,Syncpg, alternative = "greater", paired = FALSE)
r4<-wilcox.test(NNcpg,Nonsyncpg, alternative = "greater", paired = FALSE)
r3[[3]] #[1] 5.038601e-143
r4[[3]] #[1] 1.390009e-237
## 2.2) Run Wilcoxon test on each NT, transversion
# NOTE(review): `mf_filtered` is not defined in this script (the list built
# above is `mf.files`); confirm it is created in baseRscript.R or rename.
dat1<-mf_filtered[[2]]
dat1<-dat1[dat1$pos>=342, ]
dat2<-mf_filtered[[3]]
dat2<-dat2[dat2$pos>=342, ]
# Per-nucleotide, per-site-type summary of transversion mutation frequencies.
# Means/SEs are computed over the pooled tv1 + tv2 values, overall and split
# by whether the mutation creates a CpG site; raw vectors are stashed via
# assign() (e.g. syn_a_cpg) for the Wilcoxon loop that follows.
m<-data.frame()
se<-data.frame()
m_nonCpG<-data.frame()
se_nonCpG<-data.frame()
mr_CpG<-data.frame()
se_CpG<-data.frame()
for (typeofsite in c("syn", "nonsyn","stop")){
    for (wtnt in c("a", "t", "c", "g")){
        mr1<- dat1$mean[dat1$Type.tv1==typeofsite & dat1$ref==wtnt]
        # BUG FIX: the mask for dat2 previously used dat1$ref==wtnt (wrong
        # data frame); dat2 rows must be selected by dat2's own reference
        # base, matching how m_NonCpG2/m_CpG2 are built below.
        mr2<- dat2$mean[dat2$Type.tv2==typeofsite & dat2$ref==wtnt]
        m_NonCpG1<-dat1$mean[dat1$Type.tv1==typeofsite & dat1$ref==wtnt & dat1$makesCpG.tv1==0]
        m_NonCpG2<-dat2$mean[dat2$Type.tv2==typeofsite & dat2$ref==wtnt & dat2$makesCpG.tv2==0]
        m_CpG1<-dat1$mean[dat1$Type.tv1==typeofsite & dat1$ref==wtnt & dat1$makesCpG.tv1==1]
        m_CpG2<-dat2$mean[dat2$Type.tv2==typeofsite & dat2$ref==wtnt & dat2$makesCpG.tv2==1]
        # Pool the two transversion classes before summarizing.
        mr<-c(mr1,mr2)
        m_NonCpG<-c(m_NonCpG1,m_NonCpG2)
        m_CpG<-c(m_CpG1,m_CpG2)
        m[typeofsite,wtnt]<-mean(mr[!is.na(mr)])
        se[typeofsite,wtnt]<-std.error(mr[!is.na(mr)])
        m_nonCpG[typeofsite,wtnt]<-mean(m_NonCpG[!is.na(m_NonCpG)])
        se_nonCpG[typeofsite,wtnt]<-std.error(m_NonCpG[!is.na(m_NonCpG)])
        mr_CpG[typeofsite,wtnt]<-mean(m_CpG[!is.na(m_CpG)])
        se_CpG[typeofsite,wtnt]<-std.error(m_CpG[!is.na(m_CpG)])
        vectorname<-paste0(typeofsite,"_",wtnt)
        assign(vectorname, mr)
        vname1<<-paste0(typeofsite,"_",wtnt,"_noncpg")
        assign(vname1, m_NonCpG)
        vname2<<-paste0(typeofsite,"_",wtnt,"_cpg")
        assign(vname2, m_CpG)
    }
}
# Assemble and export the summary table (rows = nucleotides after transpose).
rownames(se)<-c("syn_se","nonsyn_se","stop_se")
rownames(m_nonCpG)<-c("syn_noncpg","nonsyn_noncpg","stop_noncpg")
rownames(se_nonCpG)<-c("syn_noncpg_se","nonsyn_noncpg_se","stop_noncpg_se")
rownames(mr_CpG)<-c("syn_cpg","nonsyn_cpg","stop_cpg")
rownames(se_CpG)<-c("syn_cpg_se","nonsyn_cpg_se","stop_cpg_se")
MFbyType.tv<-rbind(m,se,mr_CpG,se_CpG,m_nonCpG,se_nonCpG)
MFbyType.tv2<-t(MFbyType.tv)
write.csv(MFbyType.tv2,paste0("Output1A/SummaryStats/Transversion_MF_byNt_byType_mean.csv"))
# CpG vs nonCpG
# One-sided Wilcoxon tests (CpG-creating < non-CpG-creating) per nucleotide,
# for syn and nonsyn sites; skipped when no CpG-creating sites exist for
# that nucleotide (length(syncpg)==0 guard).
WilcoxTest.results.nt.tv<-data.frame(matrix(ncol=3,nrow=8))
colnames(WilcoxTest.results.nt.tv)<-c("nt","test","P.value")
for (i in c("a","t","c","g")) {
    syncpg<-get(paste0("syn_",i,"_cpg"))
    synnoncpg<-get(paste0("syn_",i,"_noncpg"))
    nonsyncpg<-get(paste0("nonsyn_",i,"_cpg"))
    nonsynnoncpg<-get(paste0("nonsyn_",i,"_noncpg"))
    if (i=="a"){
        if (length(syncpg)==0) next
        else{result1<-wilcox.test(syncpg, synnoncpg, alternative = "less", paired = FALSE)
        result2<-wilcox.test(nonsyncpg,nonsynnoncpg,alternative = "less", paired = FALSE)
        for (r in 1:2){
            result<-get(paste0('result',r))
            WilcoxTest.results.nt.tv$nt[r]<-i
            WilcoxTest.results.nt.tv$test[r]<-result[[7]]
            WilcoxTest.results.nt.tv$P.value[r]<-result[[3]]}
        }}
    if (i=="t"){
        if (length(syncpg)==0) next
        else{ result3<-wilcox.test(syncpg, synnoncpg, alternative = "less", paired = FALSE)
        result4<-wilcox.test(nonsyncpg,nonsynnoncpg,alternative = "less", paired = FALSE)
        for (r in 3:4){
            result<-get(paste0('result',r))
            WilcoxTest.results.nt.tv$nt[r]<-i
            WilcoxTest.results.nt.tv$test[r]<-result[[7]]
            WilcoxTest.results.nt.tv$P.value[r]<-result[[3]]}
        }}
    if (i=="c"){
        if (length(syncpg)==0) next
        else{ result5<-wilcox.test(syncpg, synnoncpg, alternative = "less", paired = FALSE)
        result6<-wilcox.test(nonsyncpg,nonsynnoncpg,alternative = "less", paired = FALSE)
        for (r in 5:6){
            result<-get(paste0('result',r))
            WilcoxTest.results.nt.tv$nt[r]<-i
            WilcoxTest.results.nt.tv$test[r]<-result[[7]]
            WilcoxTest.results.nt.tv$P.value[r]<-result[[3]]}
        }}
    if (i=="g"){
        if (length(syncpg)==0) next
        else{ result7<-wilcox.test(syncpg, synnoncpg, alternative = "less", paired = FALSE)
        result8<-wilcox.test(nonsyncpg,nonsynnoncpg,alternative = "less", paired = FALSE)
        for (r in 7:8){
            result<-get(paste0('result',r))
            WilcoxTest.results.nt.tv$nt[r]<-i
            WilcoxTest.results.nt.tv$test[r]<-result[[7]]
            WilcoxTest.results.nt.tv$P.value[r]<-result[[3]]}
        }}
}
write.csv(WilcoxTest.results.nt.tv,paste0("Output1A/SummaryStats/WilcoxTestResults_TV_eachNT_mean.csv"))
#####################
## Not sure if we need to run these:
# 3) use all data to create summary / run Wilcoxon Test (using all values,not just the mean of 195 files) on Transition
# This section repeats the analyses on all per-sample values (vectors such as
# A_syn_cpg are created by the sourced script below), not per-site means.
source("Rscripts/MutationFreqSum.filtered.R")
##3.1 Transition
nuc.mf2<-data.frame("s.cpg"=matrix(nrow=4))
rownames(nuc.mf2)<-c("a","t","c","g")
WilcoxTest.nt.mf2<-data.frame(matrix(ncol=3,nrow=6))
colnames(WilcoxTest.nt.mf2)<-c("nt","test","P.value")
k=1
# A/T can create CpG by transition -> CpG vs non-CpG tests; C/G cannot ->
# syn vs nonsyn tests on non-CpG vectors only.
for (i in c("A","T","C","G")) {
    if (i=="A"|i=="T"){
        syncpg1<-get(paste0(i,"_syn_cpg"))
        synnoncpg1<-get(paste0(i,"_syn_noncpg"))
        nonsyncpg1<-get(paste0(i,"_nonsyn_cpg"))
        nonsynnoncpg1<-get(paste0(i,"_nonsyn_noncpg"))
        nuc.mf2$s.cpg[k]<-mean(syncpg1, na.rm=T)
        nuc.mf2$s.cpg.se[k]<-std.error(syncpg1, na.rm=T)
        nuc.mf2$s.ncpg[k]<-mean(synnoncpg1, na.rm=T)
        nuc.mf2$s.ncpg.se[k]<-std.error(synnoncpg1, na.rm=T)
        nuc.mf2$ns.cpg[k]<-mean(nonsyncpg1, na.rm=T)
        nuc.mf2$ns.cpg.se[k]<-std.error(nonsyncpg1, na.rm=T)
        nuc.mf2$ns.ncpg[k]<-mean(nonsynnoncpg1, na.rm=T)
        nuc.mf2$ns.ncpg.se[k]<-std.error(nonsynnoncpg1, na.rm=T)
        #nuc.mf2$stop.ncpg[k]<-NA
        #nuc.mf2$stop.ncpg.se[k]<-NA
        if (i=="A"){
            result1<-wilcox.test(syncpg1, synnoncpg1, alternative = "less", paired = FALSE)
            result2<-wilcox.test(nonsyncpg1,nonsynnoncpg1,alternative = "less", paired = FALSE)
            for (r in 1:2){
                result<-get(paste0('result',r))
                WilcoxTest.nt.mf2$nt[r]<-i
                WilcoxTest.nt.mf2$test[r]<-result[[7]]
                WilcoxTest.nt.mf2$P.value[r]<-result[[3]]}
        }
        if (i=="T"){
            result3<-wilcox.test(syncpg1, synnoncpg1, alternative = "less", paired = FALSE)
            result4<-wilcox.test(nonsyncpg1,nonsynnoncpg1,alternative = "less", paired = FALSE)
            for (r in 3:4){
                result<-get(paste0('result',r))
                WilcoxTest.nt.mf2$nt[r]<-i
                WilcoxTest.nt.mf2$test[r]<-result[[7]]
                WilcoxTest.nt.mf2$P.value[r]<-result[[3]]}
        }
    }
    else {
        synnoncpg1<-get(paste0(i,"_syn_noncpg"))
        nonsynnoncpg1<-get(paste0(i,"_nonsyn_noncpg"))
        nuc.mf2$s.cpg[k]<-NA
        nuc.mf2$s.cpg.se[k]<-NA
        nuc.mf2$s.ncpg[k]<-mean(synnoncpg1, na.rm=T)
        nuc.mf2$s.ncpg.se[k]<-std.error(synnoncpg1, na.rm=T)
        nuc.mf2$ns.cpg[k]<-NA
        nuc.mf2$ns.cpg.se[k]<-NA
        nuc.mf2$ns.ncpg[k]<-mean(nonsynnoncpg1, na.rm=T)
        nuc.mf2$ns.ncpg.se[k]<-std.error(nonsynnoncpg1, na.rm=T)
        if (i =="C") {
            result5<-wilcox.test(synnoncpg1,nonsynnoncpg1, alternative = "greater", paired = FALSE)
            WilcoxTest.nt.mf2$nt[5]<-i
            WilcoxTest.nt.mf2$test[5]<-result5[[7]]
            WilcoxTest.nt.mf2$P.value[5]<-result5[[3]] }
        if (i =="G") {
            result6<-wilcox.test(synnoncpg1,nonsynnoncpg1, alternative = "greater", paired = FALSE)
            WilcoxTest.nt.mf2$nt[6]<-i
            WilcoxTest.nt.mf2$test[6]<-result6[[7]]
            WilcoxTest.nt.mf2$P.value[6]<-result6[[3]] }
    }
    k=k+1
}
# NOTE(review): these outputs go to "Output/" while the rest of the script
# writes to "Output1A/" — confirm the directory is intentional.
write.csv(WilcoxTest.nt.mf2,paste0("Output/SummaryStats/WilcoxTestResults_Transition_eachNT_all.csv"))
write.csv(nuc.mf2,paste0("Output/SummaryStats/TransitionMF_byNT_byType_all.csv"))
# Pooled-across-nucleotides type comparisons on the full value vectors.
wilcoxtest2<-data.frame("test"=matrix(nrow=4))
Typelist2<-list()
syn_all<-c(A_syn,T_syn,C_syn,G_syn)
Typelist2[[1]]<-syn_all; names(Typelist2)[1]<-"syn_all"
nonsyn_all<-c(A_nonsyn,T_nonsyn,C_nonsyn,G_nonsyn)
Typelist2[[2]]<-nonsyn_all; names(Typelist2)[2]<-"nonsyn_all"
stop_all<-c(A_stop,T_stop,C_stop,G_stop)
Typelist2[[3]]<-stop_all; names(Typelist2)[3]<-"stop_all"
syn_allCpG<-c(A_syn_cpg,T_syn_cpg)
Typelist2[[4]]<-syn_allCpG; names(Typelist2)[4]<-"syn_allCpG"
syn_allnonCpG<-c(A_syn_noncpg,T_syn_noncpg)
Typelist2[[5]]<-syn_allnonCpG; names(Typelist2)[5]<-"syn_allnonCpG"
nonsyn_allCpG<-c(A_nonsyn_cpg,T_nonsyn_cpg)
Typelist2[[6]]<-nonsyn_allCpG; names(Typelist2)[6]<-"nonsyn_allCpG"
nonsyn_allnonCpG<-c(A_nonsyn_noncpg,T_nonsyn_noncpg)
Typelist2[[7]]<-nonsyn_allnonCpG; names(Typelist2)[7]<-"nonsyn_allnonCpG"
re1<-wilcox.test(syn_all,nonsyn_all, alternative = "greater", paired = FALSE)
wilcoxtest2$test[1]<-re1[[7]]
wilcoxtest2$P.value[1]<-re1[[3]]
re2<-wilcox.test(nonsyn_all,stop_all, alternative = "greater", paired = FALSE)
wilcoxtest2$test[2]<-re2[[7]]
wilcoxtest2$P.value[2]<-re2[[3]]
re3<-wilcox.test(syn_allCpG,syn_allnonCpG, alternative = "less", paired = FALSE)
wilcoxtest2$test[3]<-re3[[7]]
wilcoxtest2$P.value[3]<-re3[[3]]
re4<-wilcox.test(nonsyn_allCpG,nonsyn_allnonCpG, alternative = "less", paired = FALSE)
wilcoxtest2$test[4]<-re4[[7]]
wilcoxtest2$P.value[4]<-re4[[3]]
write.csv(wilcoxtest2,"Output/SummaryStats/WilcoxTestResults_by_Type_MF_Ts_all.csv")
# Mean/SE per pooled type vector.
Type.mf1<-data.frame("mean"=matrix(nrow=7))
for (i in 1:7){
    rownames(Type.mf1)[i]<-names(Typelist2)[i]
    Type.mf1$mean[i]<-mean(Typelist2[[i]],na.rm=T)
    Type.mf1$se[i]<-std.error(Typelist2[[i]], na.rm=T)
}
write.csv(Type.mf1,paste0("Output/SummaryStats/MutFreq_byType_Summary_all.csv"))
##################################
##3.2 Transversion (all)
# Same "all values" analysis for transversions; the main loop continues
# beyond this excerpt.
nuc.mft<-data.frame("s.cpg"=matrix(nrow=4))
rownames(nuc.mft)<-c("a","t","c","g")
WilcoxTest.nt.mft<-data.frame(matrix(ncol=3,nrow=8))
colnames(WilcoxTest.nt.mft)<-c("nt","test","P.value")
k=1
for (i in c("A","T","C","G")) {
if (i=="A"|i=="G"){
syncpg1<-get(paste0(i,"_tv1_syn_cpg"))
#syncpg2<-get(paste0(i,"_tv2_syn_cpg"))
synnoncpg1<-get(paste0(i,"_tv1_syn_noncpg"))
synnoncpg2<-get(paste0(i,"_tv2_syn_noncpg"))
nonsyncpg1<-get(paste0(i,"_tv1_nonsyn_cpg"))
#nonsyncpg2<-get(paste0(i,"_tv2_nonsyn_cpg"))
nonsynnoncpg1<-get(paste0(i,"_tv1_nonsyn_noncpg"))
nonsynnoncpg2<-get(paste0(i,"_tv2_nonsyn_noncpg"))
synnoncpg<-c(synnoncpg1,synnoncpg2)
nonsynnoncpg<-c(nonsynnoncpg1,nonsynnoncpg2)
nuc.mft$s.cpg[k]<-mean(syncpg1, na.rm=T)
nuc.mft$s.cpg.se[k]<-std.error(syncpg1, na.rm=T)
nuc.mft$s.ncpg[k]<-mean(synnoncpg, na.rm=T)
nuc.mft$s.ncpg.se[k]<-std.error(synnoncpg, na.rm=T)
nuc.mft$ns.cpg[k]<-mean(nonsyncpg1, na.rm=T)
nuc.mft$ns.cpg.se[k]<-std.error(nonsyncpg1, na.rm=T)
nuc.mft$ns.ncpg[k]<-mean(nonsynnoncpg, na.rm=T)
nuc.mft$ns.ncpg.se[k]<-std.error(nonsynnoncpg, na.rm=T)
#nuc.mft$stop.ncpg[k]<-NA
#nuc.mft$stop.ncpg.se[k]<-NA
if (i=="A"){
result1<-wilcox.test(syncpg1, synnoncpg, alternative = "less", paired = FALSE)
result2<-wilcox.test(nonsyncpg1,nonsynnoncpg,alternative = "less", paired = FALSE)
for (r in 1:2){
result<-get(paste0('result',r))
WilcoxTest.nt.mft$nt[r]<-i
WilcoxTest.nt.mft$test[r]<-result[[7]]
WilcoxTest.nt.mft$P.value[r]<-result[[3]]}
}
if (i=="G"){
result7<-wilcox.test(syncpg1, synnoncpg, alternative = "less", paired = FALSE)
result8<-wilcox.test(nonsyncpg1,nonsynnoncpg,alternative = "less", paired = FALSE)
for (r in 7:8){
result<-get(paste0('result',r))
WilcoxTest.nt.mft$nt[r]<-i
WilcoxTest.nt.mft$test[r]<-result[[7]]
WilcoxTest.nt.mft$P.value[r]<-result[[3]]}
}
}
if (i=="T"|i=="C") {
syncpg2<-get(paste0(i,"_tv2_syn_cpg"))
synnoncpg1<-get(paste0(i,"_tv1_syn_noncpg"))
synnoncpg2<-get(paste0(i,"_tv2_syn_noncpg"))
nonsyncpg2<-get(paste0(i,"_tv2_nonsyn_cpg"))
nonsynnoncpg1<-get(paste0(i,"_tv1_nonsyn_noncpg"))
nonsynnoncpg2<-get(paste0(i,"_tv2_nonsyn_noncpg"))
synnoncpg<-c(synnoncpg1,synnoncpg2)
nonsynnoncpg<-c(nonsynnoncpg1,nonsynnoncpg2)
nuc.mft$s.cpg[k]<-mean(syncpg2, na.rm=T)
nuc.mft$s.cpg.se[k]<-std.error(syncpg2, na.rm=T)
nuc.mft$s.ncpg[k]<-mean(synnoncpg, na.rm=T)
nuc.mft$s.ncpg.se[k]<-std.error(synnoncpg, na.rm=T)
nuc.mft$ns.cpg[k]<-mean(nonsyncpg2, na.rm=T)
nuc.mft$ns.cpg.se[k]<-std.error(nonsyncpg2, na.rm=T)
nuc.mft$ns.ncpg[k]<-mean(nonsynnoncpg, na.rm=T)
nuc.mft$ns.ncpg.se[k]<-std.error(nonsynnoncpg, na.rm=T)
if (i =="T") {
result3<-wilcox.test(syncpg2, synnoncpg, alternative = "less", paired = FALSE)
result4<-wilcox.test(nonsyncpg2,nonsynnoncpg,alternative = "less", paired = FALSE)
for (r in 3:4){
result<-get(paste0('result',r))
WilcoxTest.nt.mft$nt[r]<-i
WilcoxTest.nt.mft$test[r]<-result[[7]]
WilcoxTest.nt.mft$P.value[r]<-result[[3]]}
}
if (i =="C") {
result5<-wilcox.test(syncpg2, synnoncpg, alternative = "less", paired = FALSE)
result6<-wilcox.test(nonsyncpg2,nonsynnoncpg,alternative = "less", paired = FALSE)
for (r in 5:6){
result<-get(paste0('result',r))
WilcoxTest.nt.mft$nt[r]<-i
WilcoxTest.nt.mft$test[r]<-result[[7]]
WilcoxTest.nt.mft$P.value[r]<-result[[3]]}
}
}
k=k+1
}
write.csv(WilcoxTest.nt.mft,paste0("Output/SummaryStats/WilcoxTestResults_TV_eachNT_all.csv"))
write.csv(nuc.mft,paste0("Output/SummaryStats/TransversionMF_byNT_byType_all.csv"))
## Pool per-nucleotide transversion frequencies into mutation classes and
## run one-sided Wilcoxon rank-sum tests between classes.
## NOTE(review): the <NT>_tv1_* / <NT>_tv2_* vectors are created earlier
## in the script, outside this excerpt.
wilcoxtest3 <- data.frame("test" = matrix(nrow = 4))
Typelist3 <- list()
# Synonymous transversions, all four nucleotides, both transversion types.
syn_all <- c(A_tv1_syn, T_tv1_syn, C_tv1_syn, G_tv1_syn, A_tv2_syn, T_tv2_syn, C_tv2_syn, G_tv2_syn)
Typelist3[[1]] <- syn_all; names(Typelist3)[1] <- "syn_all"
nonsyn_all <- c(A_tv1_nonsyn, T_tv1_nonsyn, C_tv1_nonsyn, G_tv1_nonsyn, A_tv2_nonsyn, T_tv2_nonsyn, C_tv2_nonsyn, G_tv2_nonsyn)
Typelist3[[2]] <- nonsyn_all; names(Typelist3)[2] <- "nonsyn_all"
# CpG classes use tv1 for the purines (A, G) and tv2 for the pyrimidines
# (T, C), matching the per-nucleotide loop above.
syn_allCpG <- c(A_tv1_syn_cpg, T_tv2_syn_cpg, G_tv1_syn_cpg, C_tv2_syn_cpg)
Typelist3[[3]] <- syn_allCpG; names(Typelist3)[3] <- "syn_allCpG"
syn_allnonCpG <- c(A_tv1_syn_noncpg, T_tv1_syn_noncpg, G_tv1_syn_noncpg, C_tv1_syn_noncpg, A_tv2_syn_noncpg, T_tv2_syn_noncpg, G_tv2_syn_noncpg, C_tv2_syn_noncpg)
Typelist3[[4]] <- syn_allnonCpG; names(Typelist3)[4] <- "syn_allnonCpG"
nonsyn_allCpG <- c(A_tv1_nonsyn_cpg, T_tv2_nonsyn_cpg, G_tv1_nonsyn_cpg, C_tv2_nonsyn_cpg)
Typelist3[[5]] <- nonsyn_allCpG; names(Typelist3)[5] <- "nonsyn_allCpG"
nonsyn_allnonCpG <- c(A_tv1_nonsyn_noncpg, T_tv1_nonsyn_noncpg, G_tv1_nonsyn_noncpg, C_tv1_nonsyn_noncpg, A_tv2_nonsyn_noncpg, T_tv2_nonsyn_noncpg, G_tv2_nonsyn_noncpg, C_tv2_nonsyn_noncpg)
Typelist3[[6]] <- nonsyn_allnonCpG; names(Typelist3)[6] <- "nonsyn_allnonCpG"
stop_all <- c(A_tv1_stop, T_tv1_stop, C_tv1_stop, G_tv1_stop, A_tv2_stop, T_tv2_stop, C_tv2_stop, G_tv2_stop)
Typelist3[[7]] <- stop_all; names(Typelist3)[7] <- "stop_all"
# Class-level tests; named htest fields ($data.name, $p.value) replace the
# brittle positional [[7]]/[[3]] accesses.
re1 <- wilcox.test(syn_all, nonsyn_all, alternative = "greater", paired = FALSE)
wilcoxtest3$test[1] <- re1$data.name
wilcoxtest3$P.value[1] <- re1$p.value
re2 <- wilcox.test(nonsyn_all, stop_all, alternative = "greater", paired = FALSE)
wilcoxtest3$test[2] <- re2$data.name
wilcoxtest3$P.value[2] <- re2$p.value
re3 <- wilcox.test(syn_allCpG, syn_allnonCpG, alternative = "less", paired = FALSE)
wilcoxtest3$test[3] <- re3$data.name
wilcoxtest3$P.value[3] <- re3$p.value
re4 <- wilcox.test(nonsyn_allCpG, nonsyn_allnonCpG, alternative = "less", paired = FALSE)
wilcoxtest3$test[4] <- re4$data.name
wilcoxtest3$P.value[4] <- re4$p.value
write.csv(wilcoxtest3, "Output/SummaryStats/WilcoxTestResults_by_Type_MF_Transversion_all.csv")
# Mean and standard error of each pooled class.
Type.mf2 <- data.frame("mean" = matrix(nrow = 7))
for (i in seq_len(7)) {
  rownames(Type.mf2)[i] <- names(Typelist3)[i]
  Type.mf2$mean[i] <- mean(Typelist3[[i]], na.rm = TRUE)
  Type.mf2$se[i] <- std.error(Typelist3[[i]], na.rm = TRUE)  # std.error() from plotrix
}
write.csv(Type.mf2, "Output/SummaryStats/MutFreq_byType_Summary_Transversion_all.csv")
####### plot to visually assure ###
# Quick diagnostic dot plot of the class means with +/- 1 SE error bars,
# rotated class labels along the x axis.
Type.mf2$names <- c("Syn","Nonsyn","Syn_CpG", "Syn_nonCpG","Nonsyn_CpG","Nonsyn_nonCpG","Stop")
x <- seq_len(7)  # one x position per mutation class
plot(x, Type.mf2$mean, xaxt="n", main="", xlab ="", ylim=c(0,0.0012), pch=".")
#axis(1, at=1:7, labels=Type.mf2$names, las = 1, cex.axis = 0.8)
# Draw rotated labels just below the axis instead of using axis().
text(cex=1, x=x, y=-0.0001, labels=Type.mf2$names, xpd=TRUE, srt=35, adj= 1)
bar <- 0.02  # half-width of the error-bar caps, in x-axis units
segments(x, (Type.mf2$mean-Type.mf2$se), x, (Type.mf2$mean+Type.mf2$se))        # vertical bars
segments(x-bar, (Type.mf2$mean-Type.mf2$se), x+bar, (Type.mf2$mean-Type.mf2$se)) # lower caps
segments(x-bar, (Type.mf2$mean+Type.mf2$se), x+bar, (Type.mf2$mean+Type.mf2$se)) # upper caps
##################################
|
dbf306e59e4d6acc24845708e9d5bb313b24b411
|
b1fba55d02d3998d29185383019989d93d6c46a2
|
/R/NRRR.plot.RegSurface.r
|
920477365d592a95b74e363266d837ecaf5cf010
|
[
"MIT"
] |
permissive
|
xliu-stat/NRRR
|
ebb0ed1be0cf2304ab851b2aa06de721b765af2f
|
e51d9df7500b287f8c4daa8839f4cc1782525cef
|
refs/heads/master
| 2023-02-11T02:08:58.543146
| 2021-01-07T03:49:27
| 2021-01-07T03:49:27
| 320,651,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,623
|
r
|
NRRR.plot.RegSurface.r
|
#' @title
#' Plot heatmap for the functional regression surface
#'
#' @description
#' This function creates heatmaps for the functional regression surface in a
#' multivariate functional linear regression. Based on the fitting results from the
#' nested reduced-rank regression, different kinds of regression surfaces
#' (at the original scale or the latent scale) can be visualized to give a
#' clear illustration of the functional correlation between the user-specified
#' predictor (or latent predictor) trajectory and response
#' (or latent response) trajectory.
#'
#'
#' @usage
#' NRRR.plot.reg(Ag, Bg, Al, Bl, rx, ry, sseq, phi, tseq, psi,
#' x_ind, y_ind, x_lab = NULL, y_lab = NULL,
#' tseq_index = NULL, sseq_index = NULL,
#' method = c("latent", "x_original",
#' "y_original", "original")[1])
#'
#'
#' @param Ag,Bg,Al,Bl,rx,ry the estimated U, V, A, B, rx and ry from a NRRR fitting.
#' @param sseq the sequence of time points at which the predictor trajectory is observed.
#' @param phi the set of basis functions to expand the predictor trajectory.
#' @param tseq the sequence of time points at which the response trajectory is observed.
#' @param psi the set of basis functions to expand the response trajectory.
#' @param x_ind,y_ind two indices to locate the regression surface for which the heat map is to be drawn.
#' If \code{method = "original"}, then \eqn{0 < x_ind <= p, 0 < y_ind <= d}
#' and the function plots \eqn{C_{x_ind,y_ind}(s,t)} in Eq. (1) of the NRRR paper.
#' If \code{method = "latent"}, then \eqn{0 < x_ind <= rx, 0 < y_ind <= ry}
#' and the function plots \eqn{C^*_{x_ind,y_ind}(s,t)} in Eq. (2) of the NRRR paper.
#' If \code{method = "y_original"}, then \eqn{0 < x_ind <= rx, 0 < y_ind <= d}.
#' If \code{method = "x_original"}, then \eqn{0 < x_ind <= p, 0 < y_ind <= ry}.
#' @param x_lab,y_lab the user-specified x-axis (with x_lab for predictor) and
#' y-axis (with y_lab for response) label,
#' and it should be given as a character string, e.g., x_lab = "Temperature".
#' @param tseq_index,sseq_index the user-specified x-axis (with sseq_index for predictor)
#' and y-axis (with tseq_index for response) tick marks, and it should be
#' given as a vector of character strings of the same length as sseq or tseq, respectively.
#' @param method 'original': the function plots the correlation heatmap between the original
#' functional response \eqn{y_i(t)} and the original functional predictor \eqn{x_j(s)};
#' 'latent': the function plots the correlation heatmap between
#' the latent functional response \eqn{y^*_i(t)} and the latent functional predictor \eqn{x^*_j(s)};
#' 'y_original': the function plots the correlation heatmap between \eqn{y_i(t)} and \eqn{x^*_j(s)};
#' 'x_original': the function plots the correlation heatmap between \eqn{y^*_i(t)} and \eqn{x_j(s)}.
#'
#' @return A ggplot2 object.
#'
#' @details
#' More details and the examples of its usage can be found in the vignette of electricity demand analysis.
#'
#' @references
#' Liu, X., Ma, S., & Chen, K. (2020). Multivariate Functional Regression via Nested Reduced-Rank Regularization.
#' arXiv: Methodology.
#'
#' @import ggplot2
#' @importFrom reshape2 melt
#' @export
NRRR.plot.reg <- function(Ag, Bg, Al, Bl, rx, ry,
                          sseq, phi, tseq, psi,
                          x_ind, y_ind,
                          x_lab = NULL, y_lab = NULL,
                          tseq_index = NULL, sseq_index = NULL,
                          method = c("latent", "x_original", "y_original","original")[1]
){
  # Dimensions implied by the fitted NRRR components and basis matrices.
  p <- dim(Bg)[1]    # number of original functional predictors
  d <- dim(Ag)[1]    # number of original functional responses
  ns <- dim(phi)[1]  # length of the predictor observation grid sseq
  nt <- dim(psi)[1]  # length of the response observation grid tseq
  jx <- dim(phi)[2]  # number of predictor basis functions
  jy <- dim(psi)[2]  # number of response basis functions

  # Validate that (x_ind, y_ind) indexes a legal surface for the chosen
  # scale. Scalar conditions, so && (short-circuit) rather than elementwise &.
  if (method == "original" && any(c(0 > x_ind, x_ind > p, 0 > y_ind, y_ind > d))) stop("when 'original' is selected, 0 < x_ind <= p, 0 < y_ind <= d")
  if (method == "latent" && any(c(0 > x_ind, x_ind > rx, 0 > y_ind, y_ind > ry))) stop("when 'latent' is selected, 0 < x_ind <= rx, 0 < y_ind <= ry")
  if (method == "y_original" && any(c(0 > x_ind, x_ind > rx, 0 > y_ind, y_ind > d))) stop("when 'y_original' is selected, 0 < x_ind <= rx, 0 < y_ind <= d")
  if (method == "x_original" && any(c(0 > x_ind, x_ind > p, 0 > y_ind, y_ind > ry))) stop("when 'x_original' is selected, 0 < x_ind <= p, 0 < y_ind <= ry")

  # Gram matrix of the response basis, J_psi = int psi(t) psi(t)' dt,
  # approximated by a Riemann sum over tseq.
  Jpsi <- matrix(nrow = jy, ncol = jy, 0)
  tdiff <- (tseq - c(0, tseq[-nt]))
  for (t in seq_len(nt)) Jpsi <- Jpsi + psi[t, ] %*% t(psi[t, ]) * tdiff[t]
  # Symmetric square root of J_psi and its inverse via eigendecomposition.
  eJpsi <- eigen(Jpsi)
  Jpsihalf <- eJpsi$vectors %*% diag(sqrt(eJpsi$values)) %*% t(eJpsi$vectors)
  Jpsihalfinv <- eJpsi$vectors %*% diag(1 / sqrt(eJpsi$values)) %*% t(eJpsi$vectors)

  # Regroup the rows of Al/Bl by latent component, then undo the
  # J_psi^{1/2} scaling applied during estimation.
  alindex <- rep(1:ry, jy)
  Alstar <- Al[order(alindex), ]
  blindex <- rep(1:rx, jx)
  Blstar <- Bl[order(blindex), ]
  Alstar <- kronecker(diag(ry), Jpsihalfinv) %*% Alstar
  Core <- Alstar %*% t(Blstar)

  # Latent regression surfaces C*_{ij}(t, s), evaluated on tseq x sseq.
  comp <- array(dim = c(ry, rx, nt, ns), NA)
  for (i in seq_len(ry)) {
    for (j in seq_len(rx)) {
      comp[i, j, , ] <- psi %*% Core[((i - 1) * jy + 1):(i * jy),
                                     ((j - 1) * jx + 1):(j * jx)] %*% t(phi)  # nt x ns
    }
  }

  # Map latent surfaces back to the requested scale(s) with U (Ag) on the
  # response side and/or V (Bg) on the predictor side.
  if (method == "original") {
    comp.ori <- array(dim = c(d, p, nt, ns), NA)
    for (i in seq_len(nt)) {
      for (j in seq_len(ns)) {
        comp.ori[, , i, j] <- Ag %*% comp[, , i, j] %*% t(Bg)
      }
    }
    comp <- comp.ori
  } else if (method == "y_original") {
    comp.ori <- array(dim = c(d, rx, nt, ns), NA)
    for (i in seq_len(nt)) {
      for (j in seq_len(ns)) {
        comp.ori[, , i, j] <- Ag %*% comp[, , i, j]
      }
    }
    comp <- comp.ori
  } else if (method == "x_original") {
    comp.ori <- array(dim = c(ry, p, nt, ns), NA)
    for (i in seq_len(nt)) {
      for (j in seq_len(ns)) {
        comp.ori[, , i, j] <- comp[, , i, j] %*% t(Bg)
      }
    }
    comp <- comp.ori
  }

  # Long-format data for ggplot: Var1 = response time (y axis),
  # Var2 = predictor time (x axis).
  dat <- reshape2::melt(as.matrix(comp[y_ind, x_ind, , ]))
  dat$Var1 <- factor(dat$Var1)
  dat$Var2 <- factor(dat$Var2)
  # Label axes either with the raw time grids or with user-supplied tick
  # strings; show every other tick to avoid crowding.
  if (is.null(tseq_index)) {
    levels(dat$Var1) <- factor(tseq)
    y_breaks <- tseq[seq(1, nt, 2)]
  } else {
    levels(dat$Var1) <- tseq_index
    y_breaks <- tseq_index[seq(1, nt, 2)]
  }
  if (is.null(sseq_index)) {
    levels(dat$Var2) <- factor(sseq)
    x_breaks <- sseq[seq(1, ns, 2)]
  } else {
    levels(dat$Var2) <- sseq_index
    x_breaks <- sseq_index[seq(1, ns, 2)]
  }

  # Diverging blue-white-red heatmap centred at zero. The colour limits span
  # range(comp) over the whole array so heatmaps for different (x_ind, y_ind)
  # pairs are directly comparable.
  ggplot2::ggplot(dat, aes(Var2, Var1, fill = value)) + geom_tile() +
    scale_fill_gradient2(low = "blue", mid = "white",
                         high = "red", midpoint = 0, limits = range(comp),
                         space = "Lab", name = "",
                         na.value = "grey50", guide = "colourbar", aesthetics = "fill") +
    scale_x_discrete(breaks = x_breaks) +
    scale_y_discrete(breaks = y_breaks) +
    ylab(ifelse(is.null(y_lab), "response (t)", y_lab)) +
    xlab(ifelse(is.null(x_lab), "predictor (s)", x_lab)) +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
    theme(axis.text = element_text(size = 10), axis.title = element_text(size = 13),
          plot.title = element_text(hjust = 0.5, size = 15))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.